text stringlengths 4 1.02M | meta dict |
|---|---|
# Legacy Django URLconf (pre-1.8 style): views are given as dotted-path
# strings resolved against the 'eventi.subscriptions.views' prefix.
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 — confirm the project's Django version before upgrading.
from django.conf.urls import patterns, url

urlpatterns = patterns(
    'eventi.subscriptions.views',
    # Subscription form at the app root.
    url(r'^$', 'subscribe', name='subscribe'),
    # Detail page for one subscription, selected by numeric id.
    url(r'^(\d+)/$', 'detail', name='detail'),
)
| {
"content_hash": "3863e8b26e73c32082a3715a9c0332a4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 46,
"avg_line_length": 24.875,
"alnum_prop": 0.628140703517588,
"repo_name": "klebercode/lionsclub",
"id": "365d562125a795d1bd721b231c0a80c723f29e53",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventi/subscriptions/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4361"
},
{
"name": "HTML",
"bytes": "11852"
},
{
"name": "JavaScript",
"bytes": "3450"
},
{
"name": "Python",
"bytes": "64154"
}
],
"symlink_target": ""
} |
"""
systemundertest.py: Contains the SystemUnderTest class.
A SystemUnderTest instance is an abstraction of an actual computing
system. For instance, a system may be the Open Stack compute
service (nova), or it may be the Linux networking stack, or a software
application. A SystemUnderTest instance will be mapped to a single
fault injection module.
"""
from systemcomponent import SystemComponent
from sessionconfig import SessionConfig
class SystemUnderTest(object):
    """Abstraction of an actual computing system targeted by fault injection.

    A SystemUnderTest wraps a session configuration and the active
    components declared in it, and maps to a single fault injection module.
    """

    def __init__(self, session_config_file):
        """Create SystemUnderTest object.

        session_config_file: name of the configuration file; used to
        create the full path name of the file.
        """
        config = SessionConfig(session_config_file)
        self._config_file = config
        self._system_name = config.get_system_name()
        self._fault_module_name = config.get_fault_module_name()
        # Build one SystemComponent per active-component entry; only the
        # first two fields of each entry are used.
        self._components = []
        for entry in config.get_active_components():
            self._components.append(SystemComponent(entry[0], entry[1], config))

    def checkpoint(self):
        """Determine whether any events associated with components need
        to be activated.

        returns: list of Event instances which are active
        """
        active = []
        for component in self._components:
            triggered = component.checkpoint()
            if triggered:
                active.extend(triggered)
        return active

    def get_system_name(self):
        """returns: name of system under test (SUT)"""
        return self._system_name

    def get_fault_module_name(self):
        """returns: name of fault injector module associated with the SUT"""
        return self._fault_module_name
| {
"content_hash": "605af1fc9f6c81e7ccd645143a392790",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 33.745098039215684,
"alnum_prop": 0.6612434631028472,
"repo_name": "bahadley/dtest-controller",
"id": "37680c7b0d4f8b95c16f951d882c45c8facd67ce",
"size": "1721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/systemundertest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53634"
}
],
"symlink_target": ""
} |
"""
This module provides a command line client for the aptdaemon
"""
# Copyright (C) 2008-2009 Sebastian Heinlein <sevel@glatzor.de>
#
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__author__ = "Sebastian Heinlein <devel@glatzor.de>"
__all__ = ("ConsoleClient", "main")
import array
import fcntl
from gettext import gettext as _
from gettext import ngettext
import locale
from optparse import OptionParser
import os
import pty
import re
import termios
import time
import tty
import signal
import sys
from aptsources.sourceslist import SourceEntry
import apt_pkg
from gi.repository import GLib
import dbus.glib
import dbus.mainloop.glib
# Install the GLib main loop as the default D-Bus main loop *before*
# importing the aptdaemon client modules that create D-Bus proxies.
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)

import aptdaemon

from . import client
from . import enums
from . import errors

# ANSI escape sequences used to highlight status text on the terminal.
ANSI_BOLD = chr(27) + "[1m"
ANSI_RESET = chr(27) + "[0m"

# True when running under Python 3 (selects input() vs raw_input()).
PY3K = sys.version_info.major > 2
class ConsoleClient:
"""
Command line interface client to aptdaemon
"""
    def __init__(self, show_terminal=True, allow_unauthenticated=False,
                 details=False):
        """Initialize the client state, pty pair and signal handlers.

        show_terminal: attach the apt terminal to this console
        allow_unauthenticated: allow packages from unauthenticated sources
        details: show package versions in the change summary
        """
        self._client = client.AptClient()
        # Pseudo-terminal pair: aptdaemon writes to the slave while
        # _attach()/_copy_io() relay the master to the real console.
        self.master_fd, self.slave_fd = pty.openpty()
        self._signals = []
        # Ctrl-C / Ctrl-\ cancel the running transaction (or quit).
        signal.signal(signal.SIGINT, self._on_cancel_signal)
        signal.signal(signal.SIGQUIT, self._on_cancel_signal)
        signal.signal(signal.SIGWINCH, self._on_terminal_resize)
        self._terminal_width = self._get_terminal_width()
        self._watchers = []
        self._old_tty_mode = None
        self._show_status = True
        self._status = ""
        self._percent = 0
        self._show_terminal = show_terminal
        self._details = details
        self._allow_unauthenticated = allow_unauthenticated
        self._show_progress = True
        self._status_details = ""
        self._progress_details = ""
        # Used for a spinning line to indicate a still working transaction
        self._spin_elements = "|/-\\"
        self._spin_cur = -1
        self._spin_stamp = time.time()
        self._transaction = None
        self._loop = GLib.MainLoop()
    def add_repository(self, line="", sourcesfile=""):
        """Add repository to the sources list.

        line: deb-line describing the repository
        sourcesfile: alternative sources.list.d file to add it to
        """
        entry = SourceEntry(line)
        self._client.add_repository(entry.type, entry.uri, entry.dist,
                                    entry.comps, entry.comment,
                                    sourcesfile,
                                    reply_handler=self._run_transaction,
                                    error_handler=self._on_exception)
    def add_vendor_key_from_file(self, path):
        """Install repository key file.

        path: path of the key file to import
        """
        self._client.add_vendor_key_from_file(
            path,
            reply_handler=self._run_transaction,
            error_handler=self._on_exception)
    def add_vendor_key_from_keyserver(self, keyid, keyserver):
        """Fetch and install a repository key from a keyserver.

        keyid: id of the key to fetch
        keyserver: keyserver to fetch the key from
        """
        self._client.add_vendor_key_from_keyserver(
            keyid, keyserver,
            reply_handler=self._run_transaction,
            error_handler=self._on_exception)
    def remove_vendor_key(self, fingerprint):
        """Remove repository key.

        fingerprint: fingerprint of the key to remove
        """
        self._client.remove_vendor_key(fingerprint,
                                       reply_handler=self._run_transaction,
                                       error_handler=self._on_exception)
    def install_file(self, path):
        """Install package file.

        path: path of the local .deb file to install
        """
        self._client.install_file(path, reply_handler=self._run_transaction,
                                  error_handler=self._on_exception)
    def list_trusted_vendor_keys(self):
        """List the keys of the trusted vendors."""
        def on_done(keys):
            # Print each key and leave the main loop afterwards.
            for key in keys:
                print(key)
            self._loop.quit()
        self._client.get_trusted_vendor_keys(reply_handler=on_done,
                                             error_handler=self._on_exception)
    def commit_packages(self, install, reinstall, remove, purge, upgrade,
                        downgrade):
        """Commit changes.

        Each argument is a list of package names for the corresponding
        operation.
        """
        self._client.commit_packages(install, reinstall, remove, purge,
                                     upgrade, downgrade,
                                     reply_handler=self._run_transaction,
                                     error_handler=self._on_exception)
    def fix_incomplete_install(self):
        """Fix incomplete installs."""
        self._client.fix_incomplete_install(
            reply_handler=self._run_transaction,
            error_handler=self._on_exception)
    def fix_broken_depends(self):
        """Repair broken dependencies."""
        self._client.fix_broken_depends(reply_handler=self._run_transaction,
                                        error_handler=self._on_exception)
    def update_cache(self):
        """Update the package cache."""
        self._client.update_cache(reply_handler=self._run_transaction,
                                  error_handler=self._on_exception)
    def upgrade_system(self, safe_mode):
        """Upgrade the system.

        safe_mode: when True do not install or remove packages
        """
        self._client.upgrade_system(safe_mode,
                                    reply_handler=self._run_transaction,
                                    error_handler=self._on_exception)
    def reconfigure(self, packages, priority):
        """Reconfigure packages.

        packages: list of package names
        priority: minimum debconf priority of questions to show
        """
        self._client.reconfigure(packages, priority,
                                 reply_handler=self._run_transaction,
                                 error_handler=self._on_exception)
    def clean(self):
        """Clean the archives (remove downloaded package files)."""
        self._client.clean(reply_handler=self._run_transaction,
                           error_handler=self._on_exception)
    def run(self):
        """Start the console client application (blocks in the main loop)."""
        try:
            self._loop.run()
        except KeyboardInterrupt:
            # A final Ctrl-C while the loop winds down: exit quietly.
            pass
    def _set_transaction(self, transaction):
        """Monitor the given transaction.

        Disconnects the handlers of any previously monitored transaction
        before connecting to the new one.
        """
        for handler in self._signals:
            GLib.source_remove(handler)
        self._transaction = transaction
        self._signals = []
        self._signals.append(transaction.connect("terminal-attached-changed",
                                                 self._on_terminal_attached))
        self._signals.append(transaction.connect("status-changed",
                                                 self._on_status))
        self._signals.append(transaction.connect("status-details-changed",
                                                 self._on_status_details))
        self._signals.append(transaction.connect("progress-changed",
                                                 self._on_progress))
        self._signals.append(transaction.connect("progress-details-changed",
                                                 self._on_progress_details))
        self._signals.append(transaction.connect("finished", self._on_exit))
        if self._show_terminal:
            # Hand our pty slave to aptdaemon so apt's output appears here.
            transaction.set_terminal(os.ttyname(self.slave_fd))
        transaction.set_allow_unauthenticated(self._allow_unauthenticated)
    def _on_exit(self, trans, enum):
        """Callback for the exit state of the transaction."""
        # Make sure to detach the terminal
        self._detach()
        if self._show_progress:
            # Final progress line: spinner replaced by '+', fixed at 100%.
            output = "[+] 100%% %s %-*.*s%s\n" % (ANSI_BOLD,
                                                  self._terminal_width - 9,
                                                  self._terminal_width - 9,
                                                  enums.get_exit_string_from_enum(enum),
                                                  ANSI_RESET)
            sys.stderr.write(output)
        if enum == enums.EXIT_FAILED:
            msg = "%s: %s\n%s\n\n%s" % (
                _("ERROR"),
                enums.get_error_string_from_enum(trans.error_code),
                enums.get_error_description_from_enum(trans.error_code),
                trans.error_details)
            print(msg)
        self._loop.quit()
    def _on_terminal_attached(self, transaction, attached):
        """Callback for the terminal-attached-changed signal of the
        transaction.

        While the apt terminal is attached the progress line is hidden.
        """
        if self._show_terminal and attached and not self._watchers:
            self._clear_progress()
            self._show_progress = False
            self._attach()
        elif not attached:
            self._show_progress = True
            self._detach()
    def _on_status(self, transaction, status):
        """Callback for the Status signal of the transaction."""
        self._status = enums.get_status_string_from_enum(status)
        self._update_progress()
    def _on_status_details(self, transaction, text):
        """Callback for the StatusDetails signal of the transaction."""
        self._status_details = text
        self._update_progress()
    def _on_progress_details(self, transaction, items_done, items_total,
                             bytes_done, bytes_total, speed, eta):
        """Callback for the ProgressDetails signal of the transaction.

        Formats the download progress (and speed, when known) for the
        progress line.
        """
        if bytes_total and speed:
            self._progress_details = (_("Downloaded %sB of %sB at %sB/s") %
                                      (apt_pkg.size_to_str(bytes_done),
                                       apt_pkg.size_to_str(bytes_total),
                                       apt_pkg.size_to_str(speed)))
        elif bytes_total:
            self._progress_details = (_("Downloaded %sB of %sB") %
                                      (apt_pkg.size_to_str(bytes_done),
                                       apt_pkg.size_to_str(bytes_total)))
        else:
            # Nothing is being downloaded.
            self._progress_details = ""
        self._update_progress()
    def _on_progress(self, transaction, percent):
        """Callback for the Progress signal of the transaction."""
        self._percent = percent
        self._update_progress()
    def _update_progress(self):
        """Update the progress bar on stderr."""
        if not self._show_progress:
            return
        text = ANSI_BOLD + self._status + ANSI_RESET
        if self._status_details:
            text += " " + self._status_details
        if self._progress_details:
            text += " (%s)" % self._progress_details
        # 9 columns are reserved for the "[x] nnn% " prefix.
        text_width = self._terminal_width - 9
        # Spin the progress line (maximum 5 times a second)
        if self._spin_stamp + 0.2 < time.time():
            self._spin_cur = (self._spin_cur + 1) % len(self._spin_elements)
            self._spin_stamp = time.time()
        spinner = self._spin_elements[self._spin_cur]
        # Show progress information if available
        if self._percent > 100:
            # Values above 100 are treated as "unknown".
            percent = "---"
        else:
            percent = self._percent
        # Trailing \r keeps the cursor on the same line for the next update.
        sys.stderr.write("[%s] " % spinner +
                         "%3.3s%% " % percent +
                         "%-*.*s" % (text_width, text_width, text) + "\r")
    def _update_custom_progress(self, msg, percent=None, spin=True):
        """Update the progress bar with a custom status message.

        msg: message to show instead of the transaction status
        percent: progress percentage, or None for unknown
        spin: advance the spinner; False shows a static '+'
        """
        text = ANSI_BOLD + msg + ANSI_RESET
        text_width = self._terminal_width - 9
        # Spin the progress line (maximum 5 times a second)
        if spin:
            self._spin_cur = (self._spin_cur + 1) % len(self._spin_elements)
            self._spin_stamp = time.time()
            spinner = self._spin_elements[self._spin_cur]
        else:
            spinner = "+"
        # Show progress information if available
        if percent is None:
            percent = "---"
        sys.stderr.write("[%s] " % spinner +
                         "%3.3s%% " % percent +
                         "%-*.*s" % (text_width, text_width, text) + "\r")
        # Returning True keeps the GLib timeout source alive.
        return True
def _stop_custom_progress(self):
"""Stop the spinner which shows non trans status messages."""
if self._progress_id is not None:
GLib.source_remove(self._progress_id)
    def _clear_progress(self):
        """Clear progress information on stderr by overwriting the line."""
        sys.stderr.write("%-*.*s\r" % (self._terminal_width,
                                       self._terminal_width,
                                       " "))
    def _on_cancel_signal(self, signum, frame):
        """Callback for a cancel signal (SIGINT/SIGQUIT).

        Cancels the running transaction if one is past setup, otherwise
        quits the application.
        """
        if (self._transaction and
                self._transaction.status != enums.STATUS_SETTING_UP):
            self._transaction.cancel()
        else:
            self._loop.quit()
    def _on_terminal_resize(self, signum, frame):
        """Callback for a changed terminal size (SIGWINCH)."""
        self._terminal_width = self._get_terminal_width()
        self._update_progress()
def _detach(self):
"""Dettach the controlling terminal to aptdaemon."""
for wid in self._watchers:
GLib.source_remove(wid)
if self._old_tty_mode:
tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH,
self._old_tty_mode)
    def _attach(self):
        """Attach the controlling terminal to aptdaemon.

        Based on pty.spawn()
        """
        try:
            # Put stdin into raw mode so keystrokes reach apt directly;
            # remember the old mode so _detach() can restore it.
            self._old_tty_mode = tty.tcgetattr(pty.STDIN_FILENO)
            tty.setraw(pty.STDIN_FILENO)
        except tty.error:  # This is the same as termios.error
            self._old_tty_mode = None
        flags = GLib.IO_IN | GLib.IO_ERR | GLib.IO_HUP
        # Relay stdin -> pty master and pty master -> stdout.
        self._watchers.append(
            GLib.io_add_watch(pty.STDIN_FILENO,
                              GLib.PRIORITY_HIGH_IDLE, flags,
                              self._copy_io, self.master_fd))
        self._watchers.append(
            GLib.io_add_watch(self.master_fd, GLib.PRIORITY_HIGH_IDLE,
                              flags, self._copy_io, pty.STDOUT_FILENO))
    def _copy_io(self, source, condition, target):
        """Callback to copy data between terminals.

        Returns True to keep the watch alive; on error/hangup the source
        fd is closed and the watch removed by returning False.
        """
        if condition == GLib.IO_IN:
            data = os.read(source, 1024)
            if target:
                os.write(target, data)
            return True
        os.close(source)
        return False
    def _get_terminal_width(self):
        """Return the width in characters of the current terminal."""
        try:
            # TIOCGWINSZ returns (rows, cols, xpixel, ypixel) as shorts;
            # index 1 is the column count.
            return array.array("h", fcntl.ioctl(sys.stderr, termios.TIOCGWINSZ,
                                                "\0" * 8))[1]
        except IOError:
            # Fallback to the "default" size
            return 80
    def _on_exception(self, error):
        """Error callback: print a message and exit the application."""
        self._detach()
        # Re-raise the error to dispatch on its exception type.
        try:
            raise error
        except errors.PolicyKitError:
            msg = "%s %s\n\n%s" % (_("ERROR:"),
                                   _("You are not allowed to perform "
                                     "this action."),
                                   error.get_dbus_message())
        except dbus.DBusException:
            msg = "%s %s - %s" % (_("ERROR:"), error.get_dbus_name(),
                                  error.get_dbus_message())
        except:
            # Deliberate catch-all: any other error still terminates the
            # app, with its string representation as the message.
            msg = str(error)
        self._loop.quit()
        sys.exit(msg)
    def _run_transaction(self, trans):
        """Callback which runs a requested transaction.

        Maintenance roles are queued and run immediately; all other
        roles are simulated first so _show_changes() can ask the user
        for confirmation.
        """
        self._set_transaction(trans)
        self._stop_custom_progress()
        if self._transaction.role in [enums.ROLE_UPDATE_CACHE,
                                      enums.ROLE_ADD_VENDOR_KEY_FILE,
                                      enums.ROLE_ADD_VENDOR_KEY_FROM_KEYSERVER,
                                      enums.ROLE_REMOVE_VENDOR_KEY,
                                      enums.ROLE_FIX_INCOMPLETE_INSTALL]:
            #TRANSLATORS: status message
            self._progress_id = GLib.timeout_add(250,
                                                 self._update_custom_progress,
                                                 _("Queuing"))
            self._transaction.run(
                error_handler=self._on_exception,
                reply_handler=lambda: self._stop_custom_progress())
        else:
            #TRANSLATORS: status message
            self._progress_id = GLib.timeout_add(250,
                                                 self._update_custom_progress,
                                                 _("Resolving dependencies"))
            self._transaction.simulate(reply_handler=self._show_changes,
                                       error_handler=self._on_exception)
    def _show_changes(self):
        """Show the simulated changes, ask for confirmation and run the
        transaction for real if the user agrees.
        """
        def show_packages(pkgs):
            """Format the pkgs in a nice way."""
            line = " "
            pkgs.sort()
            for pkg in pkgs:
                try:
                    # Simulation results may carry the version as "name=version".
                    name, version = pkg.split("=", 1)[0:2]
                except ValueError:
                    name = pkg
                    version = None
                if self._details and version:
                    output = "%s=%s" % (name, version)
                else:
                    output = name
                # Wrap the listing to the terminal width.
                if (len(line) + 1 + len(output) > self._terminal_width and
                        line != " "):
                    print(line)
                    line = " "
                line += " %s" % output
            if line != " ":
                print(line)
        self._stop_custom_progress()
        self._clear_progress()
        (installs, reinstalls, removals, purges, upgrades,
         downgrades) = self._transaction.packages
        (dep_installs, dep_reinstalls, dep_removals, dep_purges, dep_upgrades,
         dep_downgrades, dep_kepts) = self._transaction.dependencies
        # Merge the explicitly requested changes with the ones pulled in
        # as dependencies.
        installs.extend(dep_installs)
        upgrades.extend(dep_upgrades)
        removals.extend(purges)
        removals.extend(dep_removals)
        removals.extend(dep_purges)
        reinstalls.extend(dep_reinstalls)
        downgrades.extend(dep_downgrades)
        kepts = dep_kepts
        if installs:
            #TRANSLATORS: %s is the number of packages
            print((ngettext("The following NEW package will be installed "
                            "(%(count)s):",
                            "The following NEW packages will be installed "
                            "(%(count)s):",
                            len(installs)) % {"count": len(installs)}))
            show_packages(installs)
        if upgrades:
            #TRANSLATORS: %s is the number of packages
            print((ngettext("The following package will be upgraded "
                            "(%(count)s):",
                            "The following packages will be upgraded "
                            "(%(count)s):",
                            len(upgrades)) % {"count": len(upgrades)}))
            show_packages(upgrades)
        if removals:
            #TRANSLATORS: %s is the number of packages
            print((ngettext("The following package will be REMOVED "
                            "(%(count)s):",
                            "The following packages will be REMOVED "
                            "(%(count)s):",
                            len(removals)) % {"count": len(removals)}))
            #FIXME: mark purges
            show_packages(removals)
        if downgrades:
            #TRANSLATORS: %s is the number of packages
            print((ngettext("The following package will be DOWNGRADED "
                            "(%(count)s):",
                            "The following packages will be DOWNGRADED "
                            "(%(count)s):",
                            len(downgrades)) % {"count": len(downgrades)}))
            show_packages(downgrades)
        if reinstalls:
            #TRANSLATORS: %s is the number of packages
            print((ngettext("The following package will be reinstalled "
                            "(%(count)s):",
                            "The following packages will be reinstalled "
                            "(%(count)s):",
                            len(reinstalls)) % {"count": len(reinstalls)}))
            show_packages(reinstalls)
        if kepts:
            print((ngettext("The following package has been kept back "
                            "(%(count)s):",
                            "The following packages have been kept back "
                            "(%(count)s):",
                            len(kepts)) % {"count": len(kepts)}))
            show_packages(kepts)
        if self._transaction.download:
            print(_("Need to get %sB of archives.") %
                  apt_pkg.size_to_str(self._transaction.download))
        if self._transaction.space > 0:
            print(_("After this operation, %sB of additional disk space "
                    "will be used.") %
                  apt_pkg.size_to_str(self._transaction.space))
        elif self._transaction.space < 0:
            print(_("After this operation, %sB of additional disk space "
                    "will be freed.") %
                  apt_pkg.size_to_str(self._transaction.space))
        # Ask for confirmation unless APT is configured to assume yes or
        # there is nothing to confirm.
        if (not apt_pkg.config.find_b("APT::Get::Assume-Yes") and
                (self._transaction.space or self._transaction.download or
                 installs or upgrades or downgrades or removals or kepts or
                 reinstalls)):
            try:
                if PY3K:
                    cont = input(_("Do you want to continue [Y/n]?"))
                else:
                    cont = raw_input(_("Do you want to continue [Y/n]?"))
            except EOFError:
                # Closed stdin counts as "no".
                cont = "n"
            #FIXME: Listen to changed dependencies!
            # Empty answer or a locale "yes" continues; anything else aborts.
            if (not re.match(locale.nl_langinfo(locale.YESEXPR), cont) and
                    cont != ""):
                msg = enums.get_exit_string_from_enum(enums.EXIT_CANCELLED)
                self._update_custom_progress(msg, None, False)
                self._loop.quit()
                sys.exit(1)
        #TRANSLATORS: status message
        self._progress_id = GLib.timeout_add(250,
                                             self._update_custom_progress,
                                             _("Queuing"))
        self._transaction.run(
            error_handler=self._on_exception,
            reply_handler=lambda: self._stop_custom_progress())
def main():
    """Run a command line client for aptdaemon.

    Parses the command line, dispatches exactly one operation to a
    ConsoleClient and blocks in its main loop until it finishes.
    """
    epilog = _("To operate on more than one package put the package "
               "names in quotation marks:\naptdcon --install "
               "\"foo bar\"")
    parser = OptionParser(version=aptdaemon.__version__, epilog=epilog)
    parser.add_option("-c", "--refresh", default="",
                      action="store_true", dest="refresh",
                      help=_("Refresh the cache"))
    parser.add_option("", "--fix-depends", default="",
                      action="store_true", dest="fix_depends",
                      help=_("Try to resolve broken dependencies. "
                             "Potentially dangerous operation since it could "
                             "try to remove many packages."))
    parser.add_option("", "--fix-install", default="",
                      action="store_true", dest="fix_install",
                      help=_("Try to finish a previous incompleted "
                             "installation"))
    parser.add_option("-i", "--install", default="",
                      action="store", type="string", dest="install",
                      help=_("Install the given packages"))
    parser.add_option("", "--reinstall", default="",
                      action="store", type="string", dest="reinstall",
                      help=_("Reinstall the given packages"))
    parser.add_option("-r", "--remove", default="",
                      action="store", type="string", dest="remove",
                      help=_("Remove the given packages"))
    parser.add_option("-p", "--purge", default="",
                      action="store", type="string", dest="purge",
                      help=_("Remove the given packages including "
                             "configuration files"))
    # Fixed copy-paste bug: the help text previously read
    # "Install the given packages".
    parser.add_option("-u", "--upgrade", default="",
                      action="store", type="string", dest="upgrade",
                      help=_("Upgrade the given packages"))
    parser.add_option("", "--downgrade", default="",
                      action="store", type="string", dest="downgrade",
                      help=_("Downgrade the given packages"))
    # Deprecated alias: writes to the same dest as --safe-upgrade.
    parser.add_option("", "--upgrade-system",
                      action="store_true", dest="safe_upgrade",
                      help=_("Deprecated: Please use "
                             "--safe-upgrade"))
    parser.add_option("", "--safe-upgrade",
                      action="store_true", dest="safe_upgrade",
                      help=_("Upgrade the system in a safe way"))
    parser.add_option("", "--full-upgrade",
                      action="store_true", dest="full_upgrade",
                      help=_("Upgrade the system, possibly installing and "
                             "removing packages"))
    parser.add_option("", "--add-vendor-key", default="",
                      action="store", type="string", dest="add_vendor_key",
                      help=_("Add the vendor to the trusted ones"))
    parser.add_option("", "--add-vendor-key-from-keyserver", default="",
                      action="store", type="string",
                      help=_("Add the vendor keyid (also needs "
                             "--keyserver)"))
    parser.add_option("", "--keyserver", default="",
                      action="store", type="string",
                      help=_("Use the given keyserver for looking up "
                             "keys"))
    parser.add_option("", "--add-repository", default="",
                      action="store", type="string", dest="add_repository",
                      help=_("Add new repository from the given "
                             "deb-line"))
    parser.add_option("", "--sources-file", action="store", default="",
                      type="string", dest="sources_file",
                      help=_("Specify an alternative sources.list.d file to "
                             "which repositories should be added."))
    parser.add_option("", "--list-trusted-vendors", default="",
                      action="store_true", dest="list_trusted_vendor_keys",
                      help=_("List trusted vendor keys"))
    parser.add_option("", "--remove-vendor-key", default="",
                      action="store", type="string", dest="remove_vendor_key",
                      help=_("Remove the trusted key of the given "
                             "fingerprint"))
    parser.add_option("", "--clean",
                      action="store_true", dest="clean",
                      help=_("Remove downloaded package files"))
    parser.add_option("", "--reconfigure", default="",
                      action="store", type="string", dest="reconfigure",
                      help=_("Reconfigure installed packages. Optionally the "
                             "minimum priority of questions can be "
                             "specified"))
    parser.add_option("", "--priority", default="default",
                      action="store", type="string", dest="priority",
                      help=_("The minimum debconf priority of question to "
                             "be displayed"))
    parser.add_option("", "--hide-terminal",
                      action="store_true", dest="hide_terminal",
                      help=_("Do not attach to the apt terminal"))
    parser.add_option("", "--allow-unauthenticated",
                      action="store_true", dest="allow_unauthenticated",
                      default=False,
                      help=_("Allow packages from unauthenticated "
                             "sources"))
    parser.add_option("-d", "--show-details",
                      action="store_true", dest="details",
                      help=_("Show additional information about the packages. "
                             "Currently only the version number"))
    (options, args) = parser.parse_args()
    con = ConsoleClient(show_terminal=not options.hide_terminal,
                        allow_unauthenticated=options.allow_unauthenticated,
                        details=options.details)
    #TRANSLATORS: status message
    con._progress_id = GLib.timeout_add(250, con._update_custom_progress,
                                        _("Waiting for authentication"))
    # Dispatch exactly one operation per invocation.
    if options.safe_upgrade:
        con.upgrade_system(True)
    elif options.full_upgrade:
        con.upgrade_system(False)
    elif options.refresh:
        con.update_cache()
    elif options.reconfigure:
        con.reconfigure(options.reconfigure.split(), options.priority)
    elif options.clean:
        con.clean()
    elif options.fix_install:
        con.fix_incomplete_install()
    elif options.fix_depends:
        con.fix_broken_depends()
    elif options.install and options.install.endswith(".deb"):
        # A local .deb path installs the file directly.
        con.install_file(options.install)
    elif (options.install or options.reinstall or options.remove or
          options.purge or options.upgrade or options.downgrade):
        con.commit_packages(options.install.split(),
                            options.reinstall.split(),
                            options.remove.split(),
                            options.purge.split(),
                            options.upgrade.split(),
                            options.downgrade.split())
    elif options.add_repository:
        con.add_repository(options.add_repository, options.sources_file)
    elif options.add_vendor_key:
        #FIXME: Should detect if from stdin or file
        con.add_vendor_key_from_file(options.add_vendor_key)
    elif options.add_vendor_key_from_keyserver and options.keyserver:
        con.add_vendor_key_from_keyserver(
            options.add_vendor_key_from_keyserver,
            options.keyserver)
    elif options.remove_vendor_key:
        con.remove_vendor_key(options.remove_vendor_key)
    elif options.list_trusted_vendor_keys:
        con.list_trusted_vendor_keys()
    else:
        parser.print_help()
        sys.exit(1)
    con.run()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "d1fe4cae248a7f031ceb214bb863c187",
"timestamp": "",
"source": "github",
"line_count": 691,
"max_line_length": 79,
"avg_line_length": 44.38929088277858,
"alnum_prop": 0.5256414436149056,
"repo_name": "yasoob/PythonRSSReader",
"id": "91a25cc77bc30dfaae6eb433db21c73f60d850ad",
"size": "30673",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/dist-packages/aptdaemon/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "58615"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "HTML",
"bytes": "1638"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "22979347"
},
{
"name": "Shell",
"bytes": "5224"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
import os
from sauron.metrics import Metric, MetricException
class DiskMetric(Metric):
    """Metric reporting disk usage statistics for a single filesystem path."""

    def __init__(self, name, path, **kwargs):
        """name: metric name; path: directory/mount point to inspect."""
        Metric.__init__(self, name, **kwargs)
        self.reconfig(name, path, **kwargs)

    def reconfig(self, name, path, **kwargs):
        """Re-apply configuration, updating the monitored path."""
        Metric.reconfig(self, name, **kwargs)
        self.path = path

    def values(self):
        """Return free/total/used space, usage ratio and inode counts.

        Reference:
        http://stackoverflow.com/questions/787776/find-free-disk-space-in-python-on-os-x
        """
        try:
            stats = os.statvfs(self.path)
            gigabyte = 1024.0 ** 3
            frsize = stats.f_frsize
            free_gb = (stats.f_bavail * frsize) / gigabyte
            total_gb = (stats.f_blocks * frsize) / gigabyte
            # "Used" counts root-reserved blocks as used (f_bavail, not f_bfree).
            used_gb = (stats.f_blocks - stats.f_bavail) * frsize / gigabyte
            return {
                'results': {
                    'free': (round(free_gb, 3), 'Gigabytes'),
                    'total': (round(total_gb, 3), 'Gigabytes'),
                    'used': (round(used_gb, 3), 'Gigabytes'),
                    'percent': (round(float(used_gb) / float(total_gb), 3),
                                'Percent'),
                    'inodes': (stats.f_files, 'Count'),
                    'free_inodes': (stats.f_ffree, 'Count'),
                }
            }
        except Exception as e:
            raise MetricException(e)
| {
"content_hash": "80db07df72e4451debcea32d7c16b6a5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 90,
"avg_line_length": 39.24242424242424,
"alnum_prop": 0.505019305019305,
"repo_name": "wutali/sauron",
"id": "e9aeed212a1abd7d85d3a70b7fa06e7ec537f455",
"size": "1295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sauron/metrics/DiskMetric.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45906"
},
{
"name": "Shell",
"bytes": "1450"
}
],
"symlink_target": ""
} |
from scipy.cluster import hierarchy
from scipy.spatial import distance
import logging
from .all_genes import all_genes
from . import cdr3s_human
logger = logging.getLogger('util.py')
def get_rep(gene, organism):
    """Return the representative sequence name for a TR V or J gene."""
    assert gene.startswith('TR')
    # The fourth character of the gene name distinguishes V from J.
    if gene[3] == 'V':
        table = cdr3s_human.all_loopseq_representative[organism]
    else:
        table = cdr3s_human.all_jseq_representative[organism]
    return table[gene]
def get_mm1_rep(gene, organism):
    """Return the single-mismatch representative name for a TR V or J gene."""
    assert gene.startswith('TR')
    # V genes use the single-mismatch loop-sequence table; J genes have none.
    if gene[3] == 'V':
        table = cdr3s_human.all_loopseq_representative_mm1[organism]
    else:
        table = cdr3s_human.all_jseq_representative[organism]
    return table[gene]
def get_rep_ignoring_allele(gene, organism):
    """Representative name with the trailing '*allele' suffix removed."""
    full_rep = get_rep(gene, organism)
    return full_rep[:full_rep.index('*')]
def tree_sort( old_l, distances, return_leaves=True ): ## average linkage
assert len(distances) == len(old_l)
if len(old_l)==1:
leaves = [0]
else:
y = distance.squareform( distances, checks=True )
Z = hierarchy.average( y )
#c,coph_dists = hierarchy.cophenet(Z,y)
leaves = hierarchy.leaves_list( Z )
new_l = [ old_l[x] for x in leaves ]
if not return_leaves:
return new_l
else:
return new_l, leaves
def get_top_genes(blast_hits_string):
    """Return the set of gene names tied for the best blast score.

    blast_hits_string: semicolon-separated "gene:score" entries,
    e.g. "TRAV1*01:120;TRAV2*01:118".
    """
    hits = dict((hit.split(':')[0], int(hit.split(':')[1]))
                for hit in blast_hits_string.split(';'))
    top_score = max(hits.values())
    # dict.items() replaces the Python-2-only iteritems(), which raises
    # AttributeError under Python 3; behavior is identical on Python 2.
    return set(gene for gene, score in hits.items() if score >= top_score)
def get_top_reps(blast_hits_string, organism):
    """Return the representative names of the genes tied for the top blast score.

    blast_hits_string: semicolon-separated "gene:score" entries
    organism: key into the all_genes table
    """
    hits = dict((hit.split(':')[0], int(hit.split(':')[1]))
                for hit in blast_hits_string.split(';'))
    top_score = max(hits.values())
    # dict.items() replaces the Python-2-only iteritems() so this also
    # runs under Python 3; behavior is identical on Python 2.
    return set(all_genes[organism][gene].rep
               for gene, score in hits.items() if score >= top_score)
def reps_from_genes(genes, organism, mm1=False, trim_allele=False):
    """Map a collection of gene/allele names to their representative names.

    genes: iterable (may be a set) of gene names
    organism: key into the all_genes table
    mm1: use the single-mismatch representatives
    trim_allele: drop the trailing '*allele' suffix from each rep
    returns: set of representative names

    (Removed a stale commented-out implementation that indexed the old
    cdr3s_human tables.)
    """
    if mm1:
        reps = set(all_genes[organism][g].mm1_rep for g in genes)
    else:
        reps = set(all_genes[organism][g].rep for g in genes)
    if trim_allele:
        reps = set(r[:r.index('*')] for r in reps)
    return reps
def get_mm1_rep_ignoring_allele(gene, organism):  # helper fxn
    """Single-mismatch representative with the '*allele' suffix removed."""
    full_rep = get_mm1_rep(gene, organism)
    return full_rep[:full_rep.index('*')]
def get_allele2mm1_rep_gene_for_counting(all_genes):
    """Build a per-organism map from allele name to the gene used for counting.

    V genes whose alleles map to more than one single-mismatch
    representative are merged onto the representative backed by the most
    alleles.

    all_genes: {organism: {allele_id: gene_info}} table
    returns: {organism: {allele: counting_rep_gene}}

    Fixed for Python 3: dict.iteritems() (Python-2-only) replaced by
    dict.items(); behavior is identical on Python 2.
    """
    allele2mm1_rep_gene_for_counting = {}
    for organism in ['human', 'mouse']:
        allele2mm1_rep_gene_for_counting[organism] = {}
        for chain in 'AB':
            ## look at gene/allele maps
            vj_alleles = {'V': [allele_id for (allele_id, g)
                                in all_genes[organism].items()
                                if g.chain == chain and g.region == 'V'],
                          'J': [allele_id for (allele_id, g)
                                in all_genes[organism].items()
                                if g.chain == chain and g.region == 'J']}
            for vj, alleles in vj_alleles.items():
                gene2rep = {}
                gene2alleles = {}
                rep_gene2alleles = {}
                for allele in alleles:
                    gene = allele[:allele.index('*')]
                    rep_gene = get_mm1_rep_ignoring_allele(allele, organism)
                    if rep_gene not in rep_gene2alleles:
                        rep_gene2alleles[rep_gene] = []
                    rep_gene2alleles[rep_gene].append(allele)
                    if gene not in gene2rep:
                        gene2rep[gene] = set()
                        gene2alleles[gene] = []
                    gene2rep[gene].add(rep_gene)
                    gene2alleles[gene].append(allele)
                merge_rep_genes = {}
                for gene, reps in gene2rep.items():
                    if len(reps) > 1:
                        # Only V genes are expected to have split reps.
                        assert vj == 'V'
                        logger.debug('multireps: %s, %s, %s',
                                     organism, gene, reps)
                        ## we are going to merge these reps
                        ## which one should we choose?
                        # Pick the rep backed by the most alleles; the
                        # winner must be strictly ahead of the runner-up.
                        l = [(len(rep_gene2alleles[rep]), rep) for rep in reps]
                        l.sort()
                        l = l[::-1]
                        assert l[0][0] > l[1][0]
                        toprep = l[0][1]
                        for (count, rep) in l:
                            if rep in merge_rep_genes:
                                assert rep == toprep and \
                                    merge_rep_genes[rep] == rep
                            merge_rep_genes[rep] = toprep
                for allele in alleles:
                    count_rep = get_mm1_rep_ignoring_allele(allele, organism)
                    if count_rep in merge_rep_genes:
                        count_rep = merge_rep_genes[count_rep]
                    allele2mm1_rep_gene_for_counting[organism][allele] = \
                        count_rep
                    logger.debug('allele2mm1_rep_gene_for_counting: '
                                 '%s, %s, %s', organism, allele, count_rep)
    return allele2mm1_rep_gene_for_counting
allele2mm1_rep_gene_for_counting = get_allele2mm1_rep_gene_for_counting(all_genes)
def get_mm1_rep_gene_for_counting( allele, organism ):
    """Look up the precomputed counting gene for a single allele."""
    return allele2mm1_rep_gene_for_counting[ organism ][ allele ]
def countreps_from_genes( genes, organism ):
    """Map a collection of alleles to the set of their counting representatives."""
    mapping = allele2mm1_rep_gene_for_counting[organism]
    return {mapping[g] for g in genes}
def assign_label_reps_and_colors_based_on_most_common_genes_in_repertoire( tcr_infos, organism ):
    """Fill in the *_label_rep and *_label_rep_color fields of each tcr_info.

    Each element of tcr_infos must be a dict with the *_countreps fields
    (';'-separated candidate-rep lists, as filled in by read_pair_seqs.py).
    For every segment type, each tcr's most frequent candidate rep becomes
    its label rep, and label reps are colored by repertoire-wide frequency
    rank.  Existing _label_rep* fields are overwritten; tcr_infos is
    modified in place.  (The organism argument is not used in this body.)
    """
    for segtype in segtypes_lowercase:
        countreps_tag = segtype + '_countreps'
        rep_tag = segtype + '_label_rep'
        color_tag = segtype + '_label_rep_color'  ## where we will store the rep info

        # tally how often each candidate rep occurs across the repertoire
        counts = {}
        for info in tcr_infos:
            for rep in info[countreps_tag].split(';'):
                counts[rep] = counts.get(rep, 0) + 1

        # pick each tcr's most frequent candidate (ties broken by rep name,
        # via (count, rep) tuple comparison) and tally the chosen labels
        label_counts = {}
        for info in tcr_infos:
            candidates = info[countreps_tag].split(';')
            toprep = max((counts[c], c) for c in candidates)[1]
            info[rep_tag] = toprep  ## doesnt have allele info anymore
            label_counts[toprep] = label_counts.get(toprep, 0) + 1

        # rank chosen label reps by count (descending) and assign rank colors
        ranked = sorted(((cnt, rep) for rep, cnt in label_counts.iteritems()), reverse=True)
        ordered_reps = [rep for cnt, rep in ranked]
        rep_colors = dict(zip(ordered_reps, html_colors.get_rank_colors_no_lights(len(ranked))))

        for info in tcr_infos:
            info[color_tag] = rep_colors[info[rep_tag]]
    return  ## we modified the elements of the tcr_infos list in place
| {
"content_hash": "442dec2c0ba5e5a56cdda8c64b0af78e",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 166,
"avg_line_length": 39.748717948717946,
"alnum_prop": 0.5471552057798994,
"repo_name": "phbradley/tcr-dist",
"id": "27bd85d225415a9008e7388d4d054a17c820c261",
"size": "7751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcrdist/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1003952"
}
],
"symlink_target": ""
} |
"""Unit tests for book models/database."""
import pytest
from store.book.models import Book
@pytest.mark.usefixtures('db')
class TestBook:
    """Tests for the Book model (uses the 'db' fixture for a clean database)."""

    def test_get_by_isbn13(self):
        """Test can retrieve by isbn."""
        book = Book("book0", "Lalala", "Alvin Tan", "Penguin Books",
                    2016, 50, 25, "hardcover", "la", "Fiction")
        book.save()
        # get_by_id is inherited from a class in database.py
        retrieved = Book.get_by_id(book.isbn13)
        assert retrieved == book

    def test_retrieve_list_of_books(self):
        """Test able to retrieve a list of books."""
        # List literal instead of repeated construction + append calls.
        book_list = [
            Book("book1", "Lalala", "Alvin Tan", "Penguin Books",
                 2016, 50, 25, "Text-only", "la", "Fiction"),
            Book("book2", "Lololo", "Kelvin Tan", "Aweseom Books",
                 2009, 500, 50, "paperback", "lo", "Non-Fiction"),
        ]
        for book in book_list:
            book.save()
        i = Book.query.all()
        assert len(i) == 2

    def test_filter_books_by_subject(self):
        """Test filtering books by subject."""
        # Seven fantasy titles and three romance titles; only the romance
        # ones should come back from the subject filter.
        sample_list = [
            Book('9780439708180', "Harry Potter and the Philosopher's Stone",
                 "J. K. Rowling", "Scholastic", 1999, 10, 6.79, "paperback", "fantasy", "fantasy"),
            Book('9780439064873', "Harry Potter And The Chamber Of Secrets",
                 "J. K. Rowling", "Scholastic", 2000, 8, 6.70, "paperback", "fantasy", "fantasy"),
            Book('9780439136358', "Harry Potter And The Prisoner Of Azkaban",
                 "J. K. Rowling", "Scholastic", 1999, 11, 15.24, "hardcover", "fantasy", "fantasy"),
            Book('9780439139595', "Harry Potter And The Goblet Of Fire",
                 "J. K. Rowling", "Scholastic", 2000, 9, 18.28, "hardcover", "fantasy", "fantasy"),
            Book('9780439358071', "Harry Potter And The Order Of The Phoenix",
                 "J. K. Rowling", "Scholastic", 2004, 10, 7.86, "paperback", "fantasy", "fantasy"),
            Book('9780439784542', "Harry Potter and the Half-Blood Prince",
                 "J. K. Rowling", "Scholastic", 2005, 5, 16.94, "hardcover", "fantasy", "fantasy"),
            Book('9780545139700', "Harry Potter and the Deathly Hallows",
                 "J. K. Rowling", "Scholastic", 2007, 4, 9.14, "paperback", "fantasy", "fantasy"),
            Book('9780345803481',
                 "Fifty Shades of Grey: Book One of the Fifty Shades Trilogy (Fifty Shades of Grey Series)",
                 "E L James", "Vintage Books", 2012, 7, 9.99, "paperback", "romance", "romance"),
            Book('9780345803498', "Fifty Shades Darker",
                 "E L James", "Vintage Books", 2012, 7, 10.99, "paperback", "romance", "romance"),
            Book('9780345803504',
                 "Fifty Shades Freed: Book Three of the Fifty Shades Trilogy (Fifty Shades of Grey Series)",
                 "E L James", "Vintage Books", 2012, 7, 9.59, "paperback", "romance", "romance"),
        ]
        for b in sample_list:
            b.save()
        all_books = Book.query.all()
        assert len(all_books) > 0
        result = Book.query.filter_by(subject="romance").all()
        assert len(result) == 3
| {
"content_hash": "603895956c90ef5157ed4ddd186fb8cb",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 110,
"avg_line_length": 40.49438202247191,
"alnum_prop": 0.5640954495005549,
"repo_name": "boomcan90/store",
"id": "21fa92c10829ccf02bd174bce4ddefc2ca358561",
"size": "3604",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_book_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "76350"
},
{
"name": "HTML",
"bytes": "99300"
},
{
"name": "JavaScript",
"bytes": "204112"
},
{
"name": "Python",
"bytes": "112302"
}
],
"symlink_target": ""
} |
from openpathsampling.high_level.network import FixedLengthTPSNetwork
from openpathsampling.high_level.transition import FixedLengthTPSTransition
import openpathsampling as paths
class PartInBFixedLengthTPSTransition(FixedLengthTPSTransition):
    """Fixed length TPS transition accepting any frame in the final state.

    Builds the ensemble used to facilitate the rate calculation in
    fixed-length TPS. [1]_  Details in
    :class:`.PartInBFixedLengthTPSNetwork`.

    See also
    --------
    PartInBFixedLengthTPSNetwork

    References
    ----------
    .. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
           6617 (1999). http://dx.doi.org/10.1063/1.478569
    """
    def _tps_ensemble(self, stateA, stateB):
        # First frame pinned inside A; the remaining (length - 1) frames
        # need only visit B at least once.
        initial_frame = paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA)
        remainder = (paths.LengthEnsemble(self.length - 1)
                     & paths.PartInXEnsemble(stateB))
        return paths.SequentialEnsemble([initial_frame, remainder])
class PartInBFixedLengthTPSNetwork(FixedLengthTPSNetwork):
    """Network for fixed-length TPS accepting any frame in the final state

    This network samples a single path ensemble where the paths must begin
    in an initial state, run for a fixed total number of frames, and must
    have at least one frame in a final state. This was used to assist in
    the flux part of the TPS rate calculation. [1]_ This version is
    generalized to multiple states.

    Parameters
    ----------
    initial_states : (list of) :class:`.Volume`
        acceptable initial states
    final_states : (list of) :class:`.Volume`
        acceptable final states
    length : int
        length of paths in the path ensemble, in frames
    allow_self_transitions : bool
        whether self-transitions (A->A) are allowed; default is False. For
        this network, A->B->A transitions are *always* allowed.

    References
    ----------
    .. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
           6617 (1999). http://dx.doi.org/10.1063/1.478569
    """
    # Transitions built by this network use the part-in-B ensemble defined
    # by PartInBFixedLengthTPSTransition above.
    TransitionType = PartInBFixedLengthTPSTransition
| {
"content_hash": "ca2a8cf0007127ada85867093cab2072",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 37.660714285714285,
"alnum_prop": 0.6804172593646278,
"repo_name": "dwhswenson/openpathsampling",
"id": "01eaa863b016d99fb7436867fb08fed76b5f191d",
"size": "2109",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openpathsampling/high_level/part_in_b_tps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1167"
},
{
"name": "CSS",
"bytes": "2687"
},
{
"name": "HTML",
"bytes": "81"
},
{
"name": "Makefile",
"bytes": "64"
},
{
"name": "Python",
"bytes": "2693398"
},
{
"name": "Shell",
"bytes": "8655"
}
],
"symlink_target": ""
} |
from Firefly import logging
from Firefly.helpers.conditions import Conditions
from Firefly.helpers.events import Command
from Firefly import scheduler
class Action(object):
    """A command to send to a Firefly device, optionally delayed and/or
    gated by Conditions.

    The conditions argument may be a Conditions instance or a dict of
    Conditions kwargs; any other type leaves conditions unset (None).
    Optional 'delay_s' / 'delay_m' kwargs delay execution by seconds or
    minutes via the scheduler.
    """

    def __init__(self, ff_id, command, source, conditions=None, force=False, **kwargs):
        self._ff_id = ff_id
        self._command = command
        self._source = source
        self._kwargs = kwargs
        self._force = force
        # Optional delays before executing (seconds takes precedence).
        self._delay_s = kwargs.get('delay_s')
        self._delay_m = kwargs.get('delay_m')
        self._conditions = None
        if type(conditions) is Conditions:
            self._conditions = conditions
        if type(conditions) is dict:
            self._conditions = Conditions(**conditions)

    def execute_action(self, firefly):
        """Execute now, or schedule execution if a delay was requested."""
        if self._delay_s:
            scheduler.runInS(self._delay_s, self.execute, firefly=firefly)
        elif self._delay_m:
            scheduler.runInM(self._delay_m, self.execute, firefly=firefly)
        else:
            self.execute(firefly)

    def execute(self, firefly):
        """Send the command if forced, unconditioned, or conditions pass.

        Returns True if the command was sent, False otherwise.
        """
        # Previously the Command construction + send was duplicated in two
        # branches; collapse into a single guarded path.  Short-circuiting
        # preserves the old behavior: forced / unconditioned actions never
        # evaluate the conditions.
        if not (self._force or self.conditions is None
                or self.conditions.check_conditions(firefly)):
            return False
        command = Command(self.id, self.source, self.command,
                          force=self._force, **self._kwargs)
        firefly.send_command(command)
        return True

    def export(self) -> dict:
        """Serializable dict representation (inverse of __init__ kwargs)."""
        export_data = {
            'ff_id': self.id,
            'command': self.command,
            'force': self._force,
            'source': self.source
        }
        if self.conditions:
            export_data['conditions'] = self.conditions.export()
        export_data.update(**self._kwargs)
        return export_data

    @property
    def id(self):
        return self._ff_id

    @property
    def command(self):
        return self._command

    @property
    def source(self):
        return self._source

    @property
    def conditions(self):
        return self._conditions
"content_hash": "9c7be5b57157f30fecf2a6bafc70db96",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 94,
"avg_line_length": 26.830985915492956,
"alnum_prop": 0.6671916010498687,
"repo_name": "Firefly-Automation/Firefly",
"id": "0a8d833d6f7d79ac52fb19d558454fbf72bb2984",
"size": "1905",
"binary": false,
"copies": "1",
"ref": "refs/heads/firefly3",
"path": "Firefly/helpers/action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "639413"
},
{
"name": "Shell",
"bytes": "10447"
}
],
"symlink_target": ""
} |
import math
import multiprocessing
import numpy
import os
import torch
from pathlib import Path
from onnx import numpy_helper, TensorProto
from gpt2_helper import Gpt2Helper
from benchmark_helper import create_onnxruntime_session
# String flag values understood by onnxruntime's node-I/O debug env vars.
NON_ZERO_VALUE = str(1)
ZERO_VALUE = str(0)
def environ_setting_nodes(node_name_filter=None, node_type_filter=None):
    """Enable ORT node I/O dumping (values, not just shapes) via env vars.

    At most one of node_name_filter / node_type_filter is honored (name
    wins); with neither, dumping is allowed for all nodes.
    """
    # Set I/O data as default
    os.environ.update({
        "ORT_DEBUG_NODE_IO_DUMP_SHAPE_DATA": ZERO_VALUE,
        "ORT_DEBUG_NODE_IO_DUMP_INPUT_DATA": NON_ZERO_VALUE,
        "ORT_DEBUG_NODE_IO_DUMP_OUTPUT_DATA": NON_ZERO_VALUE,
    })
    if node_name_filter is not None:
        os.environ["ORT_DEBUG_NODE_IO_NAME_FILTER"] = node_name_filter
    elif node_type_filter is not None:
        os.environ["ORT_DEBUG_NODE_IO_OP_TYPE_FILTER"] = node_type_filter
    else:
        os.environ["ORT_DEBUG_NODE_IO_DUMPING_DATA_TO_FILES_FOR_ALL_NODES_IS_OK"] = NON_ZERO_VALUE
def environ_setting_paths(output_path):
    """Route ORT node I/O dumps into files under *output_path*."""
    # Set dumping values to files as default
    os.environ.update({
        "ORT_DEBUG_NODE_IO_DUMP_DATA_TO_FILES": NON_ZERO_VALUE,
        "ORT_DEBUG_NODE_IO_OUTPUT_DIR": output_path,
    })
def environ_reset():
    """Remove every ORT node-I/O debug flag from the environment.

    Safe to call whether or not any of the flags are currently set.
    """
    for flag in (
            "ORT_DEBUG_NODE_IO_DUMP_SHAPE_DATA",
            "ORT_DEBUG_NODE_IO_DUMP_INPUT_DATA",
            "ORT_DEBUG_NODE_IO_DUMP_OUTPUT_DATA",
            "ORT_DEBUG_NODE_IO_NAME_FILTER",
            "ORT_DEBUG_NODE_IO_OP_TYPE_FILTER",
            "ORT_DEBUG_NODE_IO_DUMP_DATA_TO_FILES",
            "ORT_DEBUG_NODE_IO_OUTPUT_DIR",
            "ORT_DEBUG_NODE_IO_DUMPING_DATA_TO_FILES_FOR_ALL_NODES_IS_OK",
    ):
        # pop(..., None) replaces the membership-test + del pair.
        os.environ.pop(flag, None)
def inference(model_path, dummy_inputs, outputs_path, use_gpu):
    """Run one inference pass with node-I/O dumping routed to *outputs_path*."""
    # Order matters: clear stale flags before enabling dumping and paths.
    environ_reset()
    environ_setting_nodes()
    environ_setting_paths(outputs_path)
    ort_session = create_onnxruntime_session(model_path, use_gpu,
                                             enable_all_optimization=False)
    Gpt2Helper.onnxruntime_inference(ort_session, dummy_inputs)
def generate_outputs_files(model_path, dummy_inputs, outputs_path, use_gpu):
    """Recreate *outputs_path* and dump per-node I/O in a child process."""
    import shutil
    target_dir = Path(outputs_path)
    # start from an empty directory
    if target_dir.exists() and target_dir.is_dir():
        shutil.rmtree(outputs_path)
    target_dir.mkdir(parents=True, exist_ok=True)
    # a separate process so the ORT debug env vars take effect at import
    worker = multiprocessing.Process(
        target=inference,
        args=(model_path, dummy_inputs, outputs_path, use_gpu))
    worker.start()
    worker.join()
def post_processing(outputs_path, outputs_path_other):
    """Compare dumped tensors between two runs (e.g. fp16 vs fp32).

    Prints one line per node, sorted by decreasing average relative
    difference, with a numpy.allclose verdict.
    """
    import glob

    def read_array(path):
        # Parse a serialized TensorProto file into a numpy array.
        with open(path, 'rb') as f:
            proto = TensorProto()
            proto.ParseFromString(f.read())
            return numpy_helper.to_array(proto)

    record = {}
    if_close = {}
    for filename in glob.glob(os.path.join(outputs_path, '*.tensorproto')):
        filename_other = os.path.join(outputs_path_other, Path(filename).name)
        if not os.path.exists(filename_other):
            continue
        array = read_array(filename)
        array_other = read_array(filename_other)
        if array_other.size == 0:
            continue
        # average elementwise relative difference (1e-6 guards /0)
        diff = numpy.average(numpy.abs(array_other - array) / (numpy.abs(array_other) + 1e-6))
        if math.isnan(diff):
            continue
        node = Path(filename).name.split(".")[0]
        record[node] = diff
        if_close[node] = numpy.allclose(array, array_other, rtol=1e-04, atol=1e-04)

    results = [f"Node\tDiff\tClose"]
    for node, diff in sorted(record.items(), key=lambda kv: kv[1], reverse=True):
        results.append(f"{node}\t{diff}\t{if_close[node]}")
    for line in results:
        print(line)
if __name__ == '__main__':
    # Below example shows how to use this helper to investigate parity issue of gpt-2 fp32 and fp16 onnx model
    # Please build ORT with --cmake_extra_defines onnxruntime_DEBUG_NODE_INPUTS_OUTPUTS=ON !!
    # 'spawn' gives each child a fresh interpreter so the ORT debug env vars
    # are in place when onnxruntime is imported in the worker.
    multiprocessing.set_start_method('spawn')
    # Generate Inputs
    sequence_length = 8
    past_sequence_length = 8
    batch_size = 5
    # 12/768/12/50257 look like GPT-2 "small" hyperparameters (layers,
    # hidden size, heads, vocab) — confirm against Gpt2Helper.get_dummy_inputs.
    dummy_inputs_fp16 = Gpt2Helper.get_dummy_inputs(batch_size,
                                                    past_sequence_length,
                                                    sequence_length,
                                                    12,
                                                    768,
                                                    12,
                                                    50257,
                                                    device=torch.device("cpu"),
                                                    float16=True)
    dummy_inputs_fp32 = dummy_inputs_fp16.to_fp32()
    # Get GPT-2 model from huggingface using convert_to_onnx.py
    os.system('python convert_to_onnx.py -m gpt2 --output gpt2_fp32.onnx -o -p fp32 --use_gpu')
    os.system('python convert_to_onnx.py -m gpt2 --output gpt2_fp16.onnx -o -p fp16 --use_gpu')
    # Specify the directory to dump the node's I/O
    outputs_path_fp32_gpu = "./fp32_gpu"
    outputs_path_fp16_gpu = "./fp16_gpu"
    generate_outputs_files("./gpt2_fp32.onnx", dummy_inputs_fp32, outputs_path_fp32_gpu, use_gpu=True)
    generate_outputs_files("./gpt2_fp16.onnx", dummy_inputs_fp16, outputs_path_fp16_gpu, use_gpu=True)
    # Compare each node's I/O value and sort based on average rtol
    post_processing(outputs_path_fp16_gpu, outputs_path_fp32_gpu)
| {
"content_hash": "262dcb8dc504d3aef07fc7086fc99f98",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 120,
"avg_line_length": 42.41085271317829,
"alnum_prop": 0.6104916834216779,
"repo_name": "ryfeus/lambda-packs",
"id": "c83d947138067b03e06f9286e1cf651b48e576fa",
"size": "5965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ONNX-ARM/lambda-onnx-arm-3.8/onnxruntime/transformers/parity_check_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a required Concept FK to Vertice
    # (default=1 to backfill existing rows) and rewrites the one-off datetime
    # default on Attempt.date that makemigrations captured at generation time.

    dependencies = [
        ('home', '0014_auto_20161026_0217'),
    ]

    operations = [
        migrations.AddField(
            model_name='vertice',
            name='concept',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='home.Concept'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='attempt',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2016, 10, 26, 2, 46, 57, 201033, tzinfo=utc)),
        ),
    ]
| {
"content_hash": "d1a00ffd32eb66d8f1e306f1c6497ed9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 111,
"avg_line_length": 28.59259259259259,
"alnum_prop": 0.6230569948186528,
"repo_name": "maxwallasaurus/arboretum",
"id": "7ec6c613e60a63c560f0af1b26d6a4891abdcce2",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/migrations/0015_auto_20161026_0248.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "198584"
},
{
"name": "HTML",
"bytes": "64847"
},
{
"name": "JavaScript",
"bytes": "5178638"
},
{
"name": "Python",
"bytes": "149963"
}
],
"symlink_target": ""
} |
import logging
import time
import re
import json
import inspect
import hashlib
from abc import ABCMeta, abstractmethod
import six
import __main__ as main
from .assignment import Assignment
from .interpreter import Interpreter
# decorator for methods that assume assignments have been made
def requires_assignment(f):
    """Decorator: lazily run self._assign() before calling *f*.

    Ensures parameter assignment (and its logging setup) only happens the
    first time a parameter is actually requested.
    """
    from functools import wraps

    @wraps(f)  # preserve the wrapped method's name/docstring
    def wrapped_f(self, *args, **kwargs):
        if not self._assigned:
            self._assign()
        return f(self, *args, **kwargs)
    return wrapped_f
# decorator for methods that should be exposure logged
def requires_exposure_logging(f):
    """Decorator: auto-log exposure (at most once) before calling *f*."""
    from functools import wraps

    @wraps(f)  # preserve the wrapped method's name/docstring
    def wrapped_f(self, *args, **kwargs):
        # only log when auto-logging is enabled and not yet logged
        if self._auto_exposure_log and not self._exposure_logged:
            self.log_exposure()
        return f(self, *args, **kwargs)
    return wrapped_f
class Experiment(object):
    """Abstract base class for PlanOut experiments.

    Subclasses implement assign(), configure_logger(), log() and
    previously_logged().  Assignment is lazy: it happens the first time a
    parameter is requested, and exposure is auto-logged on first parameter
    access unless disabled via set_auto_exposure_logging(False).
    """
    __metaclass__ = ABCMeta

    logger_configured = False

    def __init__(self, **inputs):
        """Store experiment inputs; actual assignment is deferred to _assign()."""
        self.inputs = inputs  # input data
        # True when assignments have been exposure logged
        self._exposure_logged = False
        self._salt = None  # Experiment-level salt
        # Determines whether or not exposure should be logged
        self._in_experiment = True
        # use the name of the class as the default name
        self._name = self.__class__.__name__
        # auto-exposure logging is enabled by default
        self._auto_exposure_log = True
        self.setup()  # sets name, salt, etc.
        self._assignment = Assignment(self.salt)
        self._assigned = False

    def _assign(self):
        """Assignment and setup that only happens when we need to log data"""
        self.configure_logger()  # sets up loggers
        # consumers can optionally return False from assign() if they don't
        # want exposure to be logged
        assign_val = self.assign(self._assignment, **self.inputs)
        self._in_experiment = True if assign_val or assign_val is None else False
        self._checksum = self.checksum()
        self._assigned = True

    def setup(self):
        """Set experiment attributes, e.g., experiment name and salt."""
        # If the experiment name is not specified, just use the class name
        pass

    def set_overrides(self, value):
        """Sets variables that are to remain fixed during execution."""
        # note that setting this will overwrite inputs to the experiment
        self._assignment.set_overrides(value)
        o = self._assignment.get_overrides()
        for var in o:
            if var in self.inputs:
                self.inputs[var] = o[var]

    @property
    def in_experiment(self):
        # set by _assign(); False when assign() explicitly returned falsy
        return self._in_experiment

    @property
    def salt(self):
        # use the experiment name as the salt if the salt is not set
        return self._salt if self._salt else self.name

    @salt.setter
    def salt(self, value):
        self._salt = value
        if hasattr(self, '_assignment'):
            self._assignment.experiment_salt = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        # collapse whitespace to dashes so names are log- and file-friendly
        self._name = re.sub(r'\s+', '-', value)
        if hasattr(self, '_assignment'):
            self._assignment.experiment_salt = self.salt

    @abstractmethod
    def assign(self, params, **kwargs):
        """Returns evaluated PlanOut mapper with experiment assignment"""
        # FIX: 'self' was missing from this abstract signature; concrete
        # subclasses already define assign(self, params, **kwargs).
        pass

    @requires_assignment
    def __asBlob(self, extras=None):
        """Dictionary representation of experiment data"""
        # FIX: 'extras={}' was a mutable default argument; use a None sentinel.
        extras = extras if extras is not None else {}
        d = {
            'name': self.name,
            'time': int(time.time()),
            'salt': self.salt,
            'inputs': self.inputs,
            'params': dict(self._assignment),
        }
        for k in extras:
            d[k] = extras[k]
        if self._checksum:
            d['checksum'] = self._checksum
        return d

    def checksum(self):
        """Short hash of assign()'s source, used to detect experiment edits."""
        # if we're running from a file and want to detect if the experiment
        # file has changed
        if hasattr(main, '__file__'):
            # src doesn't count first line of code, which includes function
            # name
            src = ''.join(inspect.getsourcelines(self.assign)[0][1:])
            if not isinstance(src, six.binary_type):
                src = src.encode("ascii")
            return hashlib.sha1(src).hexdigest()[:8]
        # if we're running in an interpreter, don't worry about it
        else:
            return None

    # we should probably get rid of this public interface
    @property
    def exposure_logged(self):
        return self._exposure_logged

    def set_auto_exposure_logging(self, value):
        """
        Disables / enables auto exposure logging (enabled by default).
        """
        self._auto_exposure_log = value

    @requires_assignment
    @requires_exposure_logging
    def get_params(self):
        """
        Get all PlanOut parameters. Triggers exposure log.
        """
        # In general, this should only be used by custom loggers.
        return dict(self._assignment)

    @requires_assignment
    @requires_exposure_logging
    def get(self, name, default=None):
        """
        Get PlanOut parameter (returns default if undefined). Triggers exposure log.
        """
        return self._assignment.get(name, default)

    @requires_assignment
    @requires_exposure_logging
    def __str__(self):
        """
        String representation of exposure log data. Triggers exposure log.
        """
        return str(self.__asBlob())

    def log_exposure(self, extras=None):
        """Logs exposure to treatment"""
        if not self._in_experiment:
            return
        self._exposure_logged = True
        self.log_event('exposure', extras)

    def log_event(self, event_type, extras=None):
        """Log an arbitrary event"""
        if not self._in_experiment:
            return
        if extras:
            extra_payload = {'event': event_type, 'extra_data': extras.copy()}
        else:
            extra_payload = {'event': event_type}
        self.log(self.__asBlob(extra_payload))

    @abstractmethod
    def configure_logger(self):
        """Set up files, database connections, sockets, etc for logging."""
        pass

    @abstractmethod
    def log(self, data):
        """Log experimental data"""
        pass

    @abstractmethod
    def previously_logged(self):
        """Check if the input has already been logged.
        Gets called once during in the constructor."""
        # For high-use applications, one might have this method to check if
        # there is a memcache key associated with the checksum of the
        # inputs+params
        pass
class DefaultExperiment(Experiment):
    """Dummy experiment which performs no logging.

    Default experiments used by namespaces should inherit from this class.
    """

    def assign(self, params, **kwargs):
        # more complex default experiments can override this method
        params.update(self.get_default_params())

    def get_default_params(self):
        """Override in key-value-store-style default experiments."""
        return {}

    def configure_logger(self):
        # nothing to configure: no logging when there is no experiment
        pass

    def log(self, data):
        pass

    def previously_logged(self):
        return True
class SimpleExperiment(Experiment):
    """Simple experiment base class which exposure-logs to a file."""
    __metaclass__ = ABCMeta

    # Class-level registries, shared across instances: one logger and one
    # log-file path per experiment name, so handlers are installed only once.
    logger = {}
    log_file = {}

    def configure_logger(self):
        """Install a file handler for this experiment name (first call only)."""
        loggers = self.__class__.logger
        paths = self.__class__.log_file
        if self.name in loggers:
            return  # already configured for this experiment name
        if self.name not in paths:
            paths[self.name] = '%s.log' % self.name
        named_logger = logging.getLogger(self.name)
        loggers[self.name] = named_logger
        named_logger.setLevel(logging.INFO)
        named_logger.addHandler(logging.FileHandler(paths[self.name]))
        named_logger.propagate = False

    def log(self, data):
        """Append one JSON-encoded record to the experiment's log file."""
        self.__class__.logger[self.name].info(json.dumps(data))

    def set_log_file(self, path):
        """Override the log-file path used for this experiment name."""
        self.__class__.log_file[self.name] = path

    def previously_logged(self):
        """Check if the input has already been logged.
        Gets called once during in the constructor."""
        # SimpleExperiment doesn't connect with any services, so we just
        # assume a fresh instance means we are seeing these inputs for the
        # first time.
        return False
class SimpleInterpretedExperiment(SimpleExperiment):
    """Variant of SimpleExperiment that executes a serialized PlanOut script."""
    __metaclass__ = ABCMeta

    def loadScript(self):
        """Populate self.script with a dict-deserialized PlanOut script.

        Typically this fetches a JSON-encoded string from a database or
        file, e.g.::

            self.script = json.loads(open("myfile").read())

        When constructing experiments on the fly, self.script may instead
        be set directly on the instance.
        """
        pass

    def assign(self, params, **kwargs):
        self.loadScript()  # lazily load script
        # self.script must be a dictionary
        assert hasattr(self, 'script') and type(self.script) == dict
        interp = Interpreter(
            self.script,
            self.salt,
            kwargs,
            params
        )
        # run the script, then fold its parameters into the Assignment
        results = interp.get_params()
        params.update(results)
        return interp.in_experiment

    def checksum(self):
        """Short hash of the serialized script, used for change detection."""
        # self.script must be a dictionary
        assert hasattr(self, 'script') and type(self.script) == dict
        serialized = json.dumps(self.script)
        if not isinstance(serialized, six.binary_type):
            serialized = serialized.encode("ascii")
        return hashlib.sha1(serialized).hexdigest()[:8]
| {
"content_hash": "01e6c92dc0fb2366ef0f1e3f14a4b2b1",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 99,
"avg_line_length": 32.433846153846154,
"alnum_prop": 0.6119912721753155,
"repo_name": "cudbg/planout",
"id": "48da357f50f40808a546cf1af480f1558bfd796e",
"size": "10829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/planout/experiment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7813"
},
{
"name": "HTML",
"bytes": "16211"
},
{
"name": "JavaScript",
"bytes": "3658377"
},
{
"name": "Python",
"bytes": "81603"
},
{
"name": "Ruby",
"bytes": "11630"
}
],
"symlink_target": ""
} |
import logging
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class Services(object):
    """
    High-level base class for Benchmark(), MemoryMonitoring() and Analytics() classes.
    """
    def __init__(self, *args):
        """
        Init function.

        Accepts (and currently ignores) any positional arguments so that
        subclasses can call through with their own signatures.

        :param args: unused positional arguments.
        """
        pass
| {
"content_hash": "97997565d8fdd58448803fbed23d745a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 86,
"avg_line_length": 17.41176470588235,
"alnum_prop": 0.5506756756756757,
"repo_name": "PalNilsson/pilot2",
"id": "dd1c09f8fbb9975eedceedc7e5e0881b0d97fe71",
"size": "641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pilot/api/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1098187"
},
{
"name": "Shell",
"bytes": "624"
}
],
"symlink_target": ""
} |
"""\
Code for reading/writing Matlab file formats
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
from NumWrap import zeros
def mtx2file(a, filename='bs.dat'):
    """Write matrix *a* to *filename*.

    General format for matrices: an 'n m' header line, then one
    space-delimited row per line (each value formatted with '%f').
    """
    n, m = a.shape
    # 'with' guarantees the handle is closed even on error; the old code
    # also shadowed the builtin 'file' and used py2-only xrange.
    with open(filename, 'w') as f:
        f.write("%d %d\n" % (n, m))
        for i in range(n):
            for j in range(m):
                f.write("%f " % a[i, j])
            f.write("\n")
    return
def rdmtx(filename):
    """Read a matrix written by mtx2file; returns an (n, m) array of doubles."""
    with open(filename) as f:
        n, m = map(int, f.readline().split())
        A = zeros((n, m), 'd')
        for i in range(n):
            # list of floats: indexable on Python 3 too, where map() is lazy
            vals = [float(v) for v in f.readline().split()]
            for j in range(m):
                A[i, j] = vals[j]
    return A
def print_halfmat(A,name=None):
    "Print the lower half of a square matrix"
    # NOTE: Python 2 print statements (this module is py2-only here).
    # Only rows 0..A.shape[0]-1 and columns 0..i (inclusive) are printed,
    # so A is assumed square; the trailing comma keeps values on one line.
    if name: print "%s Matrix" % name
    for i in xrange(A.shape[0]):
        for j in xrange (i+1):
            print '%10.4f ' % A[i,j],
        print ''
    return
| {
"content_hash": "7a87f1b230d06a8afbd3dd3455261c0b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 25.32,
"alnum_prop": 0.5884676145339652,
"repo_name": "berquist/PyQuante",
"id": "26b86096275180c1c59cd252b40e0c66bca23236",
"size": "1266",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PyQuante/IO/Matlab.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "870"
},
{
"name": "C",
"bytes": "330729"
},
{
"name": "C++",
"bytes": "29583"
},
{
"name": "CSS",
"bytes": "16520"
},
{
"name": "FORTRAN",
"bytes": "19655"
},
{
"name": "HTML",
"bytes": "558944"
},
{
"name": "JavaScript",
"bytes": "21380"
},
{
"name": "Makefile",
"bytes": "5803"
},
{
"name": "Python",
"bytes": "2989178"
},
{
"name": "Shell",
"bytes": "106"
},
{
"name": "TeX",
"bytes": "60093"
}
],
"symlink_target": ""
} |
"""
Cloud-provider: base-classes
Copyright (c) 2010-2012 Mika Eloranta
See LICENSE for details.
"""
from . import errors
class NoProviderMethod(NotImplementedError):
    """Raised when a Provider (sub)class lacks a required operation."""
    def __init__(self, obj, func):
        # accept either a class or an instance and report the class name
        if isinstance(obj, type):
            name = obj.__name__
        else:
            name = obj.__class__.__name__
        NotImplementedError.__init__(self, "{0} does not implement {1}".format(name, func))
class Provider(object):
    """Abstract base-class for cloud-specific cloud provider logic.

    Equality and hashing are defined over the provider key computed by
    get_provider_key(), so two Provider objects describing the same
    underlying cloud endpoint compare equal.  Subclasses must implement
    get_provider_key() (it is called from __init__) plus whichever
    operations below they support; unimplemented ones raise
    NoProviderMethod.
    """
    def __init__(self, provider_id, cloud_prop):
        # provider_id: identifier string used in error messages
        # cloud_prop: dict of 'cloud.*' properties for this provider
        self.provider_id = provider_id
        # NB: calls the subclass's get_provider_key(); the base-class
        # version raises NoProviderMethod.
        self._provider_key = self.get_provider_key(cloud_prop)
    def __eq__(self, other):
        if not other or not isinstance(other, Provider):
            return False
        return self._provider_key == other._provider_key
    def __ne__(self, other):
        # explicit __ne__ kept for Python 2 compatibility
        if not other or not isinstance(other, Provider):
            return True
        return self._provider_key != other._provider_key
    def __hash__(self):
        # shuffle the hash a bit to create unique hashes for Provider objects
        # and their provider_keys
        return hash(("cloudbase.Provider", self._provider_key))
    def required_prop(self, cloud_prop, prop_name):
        # Fetch a mandatory cloud property; raises CloudError when missing.
        value = cloud_prop.get(prop_name)
        if value is None:
            raise errors.CloudError("'cloud.{0}' property required by {1} not defined".format(
                prop_name, self.provider_id))
        return value
    @classmethod
    def get_provider_key(cls, cloud_prop):
        """
        Return the cloud provider key for the given cloud properties.

        A unique provider key can be returned based on, for example, the
        region of the specified data-center.

        Returns a minimum unique key value needed to uniquely describe the
        cloud Provider. Can be e.g. (provider_type, data_center_id), like
        with AWS-EC2. The return value also needs to be hash()able.
        """
        raise NoProviderMethod(cls, "get_provider_key")
    def init_instance(self, cloud_prop):
        """
        Create a new instance with the given properties.

        Returns node properties that are changed.
        """
        raise NoProviderMethod(self, "init_instance")
    def assign_ip(self, props):
        """
        Assign the ip's to the instances based on the given properties.
        """
        raise NoProviderMethod(self, "assign_ip")
    def get_instance_status(self, prop):
        """
        Return instance status string for the instance specified in the given
        cloud properties dict.
        """
        raise NoProviderMethod(self, "get_instance_status")
    def terminate_instances(self, props):
        """
        Terminate instances specified in the given sequence of cloud
        properties dicts.
        """
        raise NoProviderMethod(self, "terminate_instances")
    def wait_instances(self, props, wait_state="running"):
        """
        Wait for all the given instances to reach status specified by
        the 'wait_state' argument.

        Returns a dict {instance_id: dict(<updated properties>)}
        """
        raise NoProviderMethod(self, "wait_instances")
    def create_snapshot(self, props, name=None, description=None, memory=False):
        """
        Create a new snapshot for the given instances with the specified props.

        Returns a dict {instance_id: dict(<updated properties>)}
        """
        raise NoProviderMethod(self, "create_snapshot")
    def revert_to_snapshot(self, props, name=None):
        """
        Revert the given instances to the specified snapshot.

        Returns a dict {instance_id: dict(<updated properties>)}
        """
        raise NoProviderMethod(self, "revert_to_snapshot")
    def remove_snapshot(self, props, name):
        """
        Remove the specified snapshot on the given instances.

        Returns a dict {instance_id: dict(<updated properties>)}
        """
        raise NoProviderMethod(self, "remove_snapshot")
    def power_off_instances(self, props):
        """
        Power off the given instances.

        Returns a dict {instance_id: dict(<updated properties>)}
        """
        raise NoProviderMethod(self, "power_off_instances")
    def power_on_instances(self, props):
        """
        Power on the given instances.

        Returns a dict {instance_id: dict(<updated properties>)}
        """
        raise NoProviderMethod(self, "power_on_instances")
    def find_instances(self, match_function):
        """
        Look up instances which have a name matching match_function.

        Returns a list [{vm_name: "vm_name", ...}, ...]
        """
        raise NoProviderMethod(self, "find_instances")
| {
"content_hash": "beb4396dacdaa78f066e2a4db9a1100e",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 94,
"avg_line_length": 33.11971830985915,
"alnum_prop": 0.6249202636614927,
"repo_name": "ohmu/poni",
"id": "0a67bd907f14776aa679802c0f0f3424dc1e8bd5",
"size": "4703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poni/cloudbase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1979"
},
{
"name": "Puppet",
"bytes": "363"
},
{
"name": "Python",
"bytes": "356805"
},
{
"name": "Shell",
"bytes": "4337"
}
],
"symlink_target": ""
} |
import json
import responses
import logging
from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.db.models.signals import post_save
from django.core.exceptions import ValidationError
from django.core.exceptions import MultipleObjectsReturned
from rest_framework.test import APIClient
from rest_framework.authtoken.models import Token
from rest_framework import status
from requests.adapters import HTTPAdapter
from requests_testadapter import TestSession, Resp
from requests.exceptions import HTTPError
from go_http.contacts import ContactsApiClient
from go_http.send import LoggingSender
from fake_go_contacts import Request, FakeContactsApi
from .models import NurseReg, NurseSource, nursereg_postsave
from subscription.models import Subscription
from nursereg import tasks
def override_get_today():
    """Pinned 'now' (2013-08-19 14:48:11) so task output is deterministic."""
    fixed_now = "20130819144811"
    return datetime.strptime(fixed_now, "%Y%m%d%H%M%S")
def override_get_tomorrow():
    """Pinned 'tomorrow' date string, one day after override_get_today."""
    return "2013-08-20"
def override_get_sender():
    """Message sender that records to the 'go_http.test' logger instead
    of talking to Vumi."""
    return LoggingSender('go_http.test')
# Canned POST payloads for /api/v2/nurseregs/, keyed by scenario name.
TEST_REG_DATA = {
    # SA ID self registration
    "sa_id": {
        "cmsisdn": "+27821234444",
        "dmsisdn": "+27821234444",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "5101025009086",
        "dob": "1951-01-02",
        "sanc_reg_no": None,
        "persal_no": None
    },
    # Passport other registration
    "passport": {
        "cmsisdn": "+27821235555",
        "dmsisdn": "+27821234444",
        "faccode": "123456",
        "id_type": "passport",
        "id_no": "Cub1234",
        "dob": "1976-03-07",
        "passport_origin": "cu",
    },
    # No ID self registration
    "no_id": {
        "cmsisdn": "+27821234444",
        "dmsisdn": "+27821234444",
        "faccode": "123456",
        "sanc_reg_no": None,
        "persal_no": None
    },
    # Re-registration updating the facility code
    "change_faccode": {
        "cmsisdn": "+27821237777",
        "dmsisdn": "+27821237777",
        "faccode": "234567",
        "id_type": "sa_id",
        "id_no": "5101025009086",
        "dob": "1951-01-02",
        "sanc_reg_no": None,
        "persal_no": None
    },
    # Re-registration adding a SANC registration number
    "change_sanc": {
        "cmsisdn": "+27821237777",
        "dmsisdn": "+27821237777",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "5101025009086",
        "dob": "1951-01-02",
        "sanc_reg_no": "34567890",
        "persal_no": None
    },
    # Re-registration adding a persal number
    "change_persal": {
        "cmsisdn": "+27821237777",
        "dmsisdn": "+27821237777",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "5101025009086",
        "dob": "1951-01-02",
        "sanc_reg_no": None,
        "persal_no": "11114444"
    },
    # Re-registration with a different SA ID number
    "change_id": {
        "cmsisdn": "+27821237777",
        "dmsisdn": "+27821237777",
        "faccode": "123456",
        "id_type": "sa_id",
        "dob": "1990-01-01",
        "sanc_reg_no": None,
        "persal_no": None,
        "id_no": "9001016265166"
    },
    # Re-registration switching identification to a passport
    "change_passport": {
        "cmsisdn": "+27821237777",
        "dmsisdn": "+27821237777",
        "faccode": "123456",
        "id_type": "passport",
        "dob": "1976-03-07",
        "sanc_reg_no": None,
        "persal_no": None,
        "id_no": "Nam1234",
        "passport_origin": "na"
    },
    # Number change: rmsisdn is the old number being replaced
    "change_old_nr": {
        "cmsisdn": "+27821234444",
        "dmsisdn": "+27821234444",
        "rmsisdn": "+27821237777",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "5101025009086",
        "dob": "1951-01-02",
        "sanc_reg_no": None,
        "persal_no": None
    },
    # Number change where the old number has multiple active subscriptions
    "change_old_nr_multiple_active_subs": {
        "cmsisdn": "+27821234444",
        "dmsisdn": "+27821234444",
        "rmsisdn": "+27821232222",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "5101025009086",
        "dob": "1951-01-02",
        "sanc_reg_no": None,
        "persal_no": None
    },
    # Device user registering a brand-new number for themselves
    "switch_to_new_nr": {
        "cmsisdn": "+27821238888",
        "dmsisdn": "+27821237777",
        "rmsisdn": "+27821237777",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "5101025009086",
        "dob": "1951-01-02",
        "sanc_reg_no": None,
        "persal_no": None
    }
}
# Minimal data for creating the NurseSource used by most tests.
TEST_NURSE_SOURCE_DATA = {
    "name": "Test Nurse Source"
}
# Invalid payloads: each entry should be rejected by serializer/model
# validation and never produce a NurseReg row.
TEST_REG_DATA_BROKEN = {
    # single field null-violation test
    "no_msisdn": {
        "cmsisdn": None,
        "dmsisdn": None,
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "8009151234001",
        "dob": "1980-09-15",
    },
    # data below is for combination validation testing
    "sa_id_no_id_no": {
        "cmsisdn": "+27001",
        "dmsisdn": "+27001",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": None,
        "dob": "1980-09-15"
    },
    "passport_no_id_no": {
        "cmsisdn": "+27001",
        "dmsisdn": "+27001",
        "faccode": "123456",
        "id_type": "passport",
        "id_no": None,
        "dob": "1980-09-15"
    },
    "no_passport_origin": {
        "cmsisdn": "+27001",
        "dmsisdn": "+27001",
        "faccode": "123456",
        "id_type": "passport",
        "id_no": "SA12345",
        "dob": "1980-09-15"
    },
    # opted_out=True requires an optout_reason
    "no_optout_reason": {
        "cmsisdn": "+27001",
        "dmsisdn": "+27001",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "8009151234001",
        "dob": "1980-09-15",
        "opted_out": True,
        "optout_reason": None,
        "optout_count": 1,
    },
    # opted_out=True requires a non-zero optout_count
    "zero_optout_count": {
        "cmsisdn": "+27001",
        "dmsisdn": "+27001",
        "faccode": "123456",
        "id_type": "sa_id",
        "id_no": "8009151234001",
        "dob": "1980-09-15",
        "opted_out": True,
        "optout_reason": "job_change",
        "optout_count": 0,
    },
}
# Baseline contact stored in the fake Vumi contacts API.
TEST_CONTACT_DATA = {
    u"key": u"knownuuid",
    u"msisdn": u"+155564",
    u"user_account": u"knownaccount",
    u"extra": {
        u"an_extra": u"1"
    }
}
# Fake contacts-API endpoint and credentials used by make_client().
API_URL = "http://example.com/go"
AUTH_TOKEN = "auth_token"
MAX_CONTACTS_PER_PAGE = 10
class RecordingHandler(logging.Handler):
    """Logging handler that keeps every emitted record on self.logs."""
    # Stays None until the first record arrives, so tests can detect
    # "nothing logged yet".
    logs = None
    def emit(self, record):
        """Store the record, creating the list on first use."""
        if self.logs is None:
            self.logs = [record]
        else:
            self.logs.append(record)
class APITestCase(TestCase):
    """Base test case: loads fixtures, builds API clients and pins the
    task module's time/sender helpers to deterministic overrides."""
    fixtures = ["test_initialdata.json", "nursereg_test.json"]
    def setUp(self):
        self.adminclient = APIClient()
        self.normalclient = APIClient()
        self.sender = LoggingSender('go_http.test')
        # Capture everything logged to 'go_http.test' for assertions.
        self.handler = RecordingHandler()
        test_logger = logging.getLogger('go_http.test')
        test_logger.setLevel(logging.INFO)
        test_logger.addHandler(self.handler)
        # Freeze "today"/"tomorrow" and route sends through the logger.
        tasks.get_today = override_get_today
        tasks.get_tomorrow = override_get_tomorrow
        tasks.get_sender = override_get_sender
class FakeContactsApiAdapter(HTTPAdapter):
    """
    Transport adapter that routes a requests session into FakeContactsApi.
    This inherits directly from HTTPAdapter instead of using TestAdapter
    because it overrides everything TestAdaptor does.
    """
    def __init__(self, contacts_api):
        super(FakeContactsApiAdapter, self).__init__()
        self.contacts_api = contacts_api
    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        # Translate the outgoing requests.PreparedRequest into the fake
        # API's Request type, dispatch it, and wrap the reply back up.
        fake_request = Request(
            request.method, request.path_url, request.body, request.headers)
        fake_reply = self.contacts_api.handle_request(fake_request)
        raw = Resp(fake_reply.body, fake_reply.code, fake_reply.headers)
        result = self.build_response(request, raw)
        if not stream:
            # force prefetching content unless streaming in use
            result.content
        return result
# Convenience alias: build contact dicts exactly as the fake API does.
make_contact_dict = FakeContactsApi.make_contact_dict
class AuthenticatedAPITestCase(APITestCase):
    """Test case with authenticated admin/normal API clients, a seeded
    NurseSource and an in-memory fake Vumi contacts API.

    Disconnects the NurseReg post_save hook for the duration of each test
    so tasks can be fired explicitly where needed.
    """

    def _replace_post_save_hooks(self):
        """Disconnect nursereg_postsave so tests control task firing."""
        def has_listeners():
            return post_save.has_listeners(NurseReg)
        assert has_listeners(), (
            "NurseReg model has no post_save listeners. Make sure"
            " helpers cleaned up properly in earlier tests.")
        post_save.disconnect(nursereg_postsave, sender=NurseReg)
        assert not has_listeners(), (
            "NurseReg model still has post_save listeners. Make sure"
            " helpers cleaned up properly in earlier tests.")
    def _restore_post_save_hooks(self):
        """Re-attach nursereg_postsave after the test has run."""
        def has_listeners():
            return post_save.has_listeners(NurseReg)
        assert not has_listeners(), (
            "NurseReg model still has post_save listeners. Make sure"
            " helpers removed them properly in earlier tests.")
        post_save.connect(nursereg_postsave, sender=NurseReg)
    def make_nursesource(self, post_data=TEST_NURSE_SOURCE_DATA):
        """Create a NurseSource owned by the normal test user.

        The input dict is copied before the 'user' key is injected so the
        shared module-level TEST_NURSE_SOURCE_DATA default is never
        mutated across tests (the original mutated its default argument).
        """
        user = User.objects.get(username='testnormaluser')
        source_data = dict(post_data, user=user)
        nurse_source = NurseSource.objects.create(**source_data)
        return nurse_source
    def make_nursereg(self, post_data):
        """POST a nurse registration as the normal user; return response."""
        response = self.normalclient.post('/api/v2/nurseregs/',
                                          json.dumps(post_data),
                                          content_type='application/json')
        return response
    def make_client(self):
        """Return a ContactsApiClient wired to the fake contacts API."""
        return ContactsApiClient(auth_token=AUTH_TOKEN, api_url=API_URL,
                                 session=self.session)
    def override_get_client(self):
        """Replacement for tasks.get_client during tests."""
        return self.make_client()
    def make_existing_contact(self, contact_data=TEST_CONTACT_DATA):
        """Seed the fake contacts API with a contact; return its dict."""
        # TODO CHANGE EXTRAS
        existing_contact = make_contact_dict(contact_data)
        self.contacts_data[existing_contact[u"key"]] = existing_contact
        return existing_contact
    def setUp(self):
        super(AuthenticatedAPITestCase, self).setUp()
        self._replace_post_save_hooks()
        # adminclient setup
        self.adminusername = 'testadminuser'
        self.adminpassword = 'testadminpass'
        self.adminuser = User.objects.create_superuser(
            self.adminusername,
            'testadminuser@example.com',
            self.adminpassword)
        admintoken = Token.objects.create(user=self.adminuser)
        self.admintoken = admintoken.key
        self.adminclient.credentials(
            HTTP_AUTHORIZATION='Token ' + self.admintoken)
        # normalclient setup
        self.normalusername = 'testnormaluser'
        self.normalpassword = 'testnormalpass'
        self.normaluser = User.objects.create_user(
            self.normalusername,
            'testnormaluser@example.com',
            self.normalpassword)
        normaltoken = Token.objects.create(user=self.normaluser)
        self.normaltoken = normaltoken.key
        self.normalclient.credentials(
            HTTP_AUTHORIZATION='Token ' + self.normaltoken)
        self.make_nursesource()
        # contacts client setup
        self.contacts_data = {}
        self.groups_data = {}
        self.contacts_backend = FakeContactsApi(
            "go/", AUTH_TOKEN, self.contacts_data, self.groups_data,
            contacts_limit=MAX_CONTACTS_PER_PAGE)
        self.session = TestSession()
        adapter = FakeContactsApiAdapter(self.contacts_backend)
        self.session.mount(API_URL, adapter)
    def tearDown(self):
        self._restore_post_save_hooks()
    def _log_entries(self):
        """Return recorded log entries, normalized to a list.

        The handler holds None before anything is logged; the historical
        behavior of wrapping a non-list value in a one-element list is
        preserved.
        """
        if isinstance(self.handler.logs, list):
            return self.handler.logs
        return [self.handler.logs]
    def check_logs(self, msg):
        """Return True if any recorded log entry's message equals msg."""
        return any(log.msg == msg for log in self._log_entries())
    def check_logs_number_of_entries(self):
        """Return the number of recorded log entries."""
        return len(self._log_entries())
class TestContactsAPI(AuthenticatedAPITestCase):
    """Sanity checks for the fake Vumi contacts API used by later tests."""
    def test_get_contact_by_key(self):
        """A seeded contact is retrievable by its unique key."""
        client = self.make_client()
        existing_contact = self.make_existing_contact()
        contact = client.get_contact(u"knownuuid")
        self.assertEqual(contact, existing_contact)
    def test_get_contact_by_msisdn(self):
        """A seeded contact is retrievable by msisdn lookup."""
        client = self.make_client()
        existing_contact = self.make_existing_contact()
        contact = client.get_contact(msisdn="+155564")
        self.assertEqual(contact, existing_contact)
    def test_update_contact(self):
        """Top-level contact fields can be updated by key."""
        client = self.make_client()
        existing_contact = self.make_existing_contact()
        expected_contact = existing_contact.copy()
        expected_contact[u"name"] = u"Bob"
        updated_contact = client.update_contact(
            u"knownuuid", {u"name": u"Bob"})
        self.assertEqual(updated_contact, expected_contact)
    def test_update_contact_extras(self):
        """Updating 'extra' replaces the whole extras dict at once."""
        client = self.make_client()
        existing_contact = self.make_existing_contact()
        expected_contact = existing_contact.copy()
        expected_contact[u"extra"][u"an_extra"] = u"2"
        updated_contact = client.update_contact(
            u"knownuuid", {
                # Note the whole extra dict needs passing in
                u"extra": {
                    u"an_extra": u"2"
                }
            }
        )
        self.assertEqual(updated_contact, expected_contact)
    def test_create_contact(self):
        """A created contact keeps its msisdn and gains a generated key."""
        client = self.make_client()
        created_contact = client.create_contact({
            u"msisdn": "+111",
            u"groups": ['en'],
            u"extra": {
                u'clinic_code': u'12345',
                u'dob': '1980-09-15',
                u'due_date_day': '01',
                u'due_date_month': '08',
                u'due_date_year': '2015',
                u'edd': '2015-08-01',
                u'is_registered': 'true',
                u'is_registered_by': u'clinic',
                u'language_choice': u'en',
                u'last_service_rating': 'never',
                u'sa_id': u'8009151234001',
                u'service_rating_reminder': "2013-08-20",
                u'service_rating_reminders': '0',
                u'source_name': u'Test Source'
            }
        })
        self.assertEqual(created_contact["msisdn"], "+111")
        self.assertIsNotNone(created_contact["key"])
    def test_get_group_by_key(self):
        """A created group is retrievable by the key returned at creation."""
        client = self.make_client()
        existing_group = client.create_group({
            "name": 'groupname'
        })
        group = client.get_group(existing_group[u'key'])
        self.assertEqual(group, existing_group)
class TestNurseRegAPI(AuthenticatedAPITestCase):
    """Creation endpoint tests: permissions, validation and, with the
    post_save hook temporarily re-enabled, the downstream task chain
    (Jembi POST, subscription creation, metric fires)."""
    def test_create_nursesource_deny_normaluser(self):
        """Non-admin users may not create NurseSource records."""
        # Setup
        user = User.objects.get(username='testnormaluser')
        post_data = TEST_NURSE_SOURCE_DATA
        post_data["user"] = "/api/v2/users/%s/" % user.id
        # Execute
        response = self.normalclient.post('/api/v2/nursesources/',
                                          json.dumps(post_data),
                                          content_type='application/json')
        # Check
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_create_nursereg_sa_id(self):
        """SA-ID registration stores all fields and links the source."""
        # Setup
        last_nurse_source = NurseSource.objects.last()
        # Execute
        reg_response = self.make_nursereg(
            post_data=TEST_REG_DATA["sa_id"])
        # Check
        self.assertEqual(reg_response.status_code, status.HTTP_201_CREATED)
        d = NurseReg.objects.last()
        self.assertEqual(d.cmsisdn, '+27821234444')
        self.assertEqual(d.dmsisdn, '+27821234444')
        self.assertEqual(d.faccode, '123456')
        self.assertEqual(d.id_type, 'sa_id')
        self.assertEqual(d.id_no, '5101025009086')
        self.assertEqual(d.dob.strftime("%Y-%m-%d"), "1951-01-02")
        self.assertEqual(d.nurse_source, last_nurse_source)
        self.assertEqual(d.passport_origin, None)
        self.assertEqual(d.persal_no, None)
        self.assertEqual(d.opted_out, False)
        self.assertEqual(d.optout_reason, None)
        self.assertEqual(d.optout_count, 0)
        self.assertEqual(d.sanc_reg_no, None)
    def test_create_nursereg_no_id(self):
        """Registration without identification leaves id fields null."""
        # Setup
        last_nurse_source = NurseSource.objects.last()
        # Execute
        reg_response = self.make_nursereg(
            post_data=TEST_REG_DATA["no_id"])
        # Check
        self.assertEqual(reg_response.status_code, status.HTTP_201_CREATED)
        d = NurseReg.objects.last()
        self.assertEqual(d.cmsisdn, '+27821234444')
        self.assertEqual(d.dmsisdn, '+27821234444')
        self.assertEqual(d.faccode, '123456')
        self.assertEqual(d.id_type, None)
        self.assertEqual(d.id_no, None)
        self.assertEqual(d.dob, None)
        self.assertEqual(d.nurse_source, last_nurse_source)
        self.assertEqual(d.passport_origin, None)
        self.assertEqual(d.persal_no, None)
        self.assertEqual(d.opted_out, False)
        self.assertEqual(d.optout_reason, None)
        self.assertEqual(d.optout_count, 0)
        self.assertEqual(d.sanc_reg_no, None)
    def test_create_broken_nursereg_no_msisdn(self):
        """Missing msisdn is rejected with 400 and nothing is stored."""
        # Setup
        # Execute
        reg_response = self.make_nursereg(
            post_data=TEST_REG_DATA_BROKEN["no_msisdn"])
        # Check
        self.assertEqual(reg_response.status_code, status.HTTP_400_BAD_REQUEST)
        d = NurseReg.objects.last()
        self.assertEqual(d, None)
    def test_create_broken_registration_sa_id_no_id_no(self):
        """id_type=sa_id without an id_no raises ValidationError."""
        # Setup
        # Execute
        # Check
        self.assertRaises(ValidationError, lambda: self.make_nursereg(
            post_data=TEST_REG_DATA_BROKEN["sa_id_no_id_no"]))
        d = NurseReg.objects.last()
        self.assertEqual(d, None)
    def test_create_broken_registration_no_passport_no(self):
        """id_type=passport without an id_no raises ValidationError."""
        # Setup
        # Execute
        # Check
        self.assertRaises(ValidationError, lambda: self.make_nursereg(
            post_data=TEST_REG_DATA_BROKEN["passport_no_id_no"]))
        d = NurseReg.objects.last()
        self.assertEqual(d, None)
    def test_create_broken_registration_no_passport_origin(self):
        """Passport registration without origin raises ValidationError."""
        # Setup
        # Execute
        # Check
        self.assertRaises(ValidationError, lambda: self.make_nursereg(
            post_data=TEST_REG_DATA_BROKEN["no_passport_origin"]))
        d = NurseReg.objects.last()
        self.assertEqual(d, None)
    def test_create_broken_no_optout_reason(self):
        """opted_out=True without a reason raises ValidationError."""
        # Setup
        # Execute
        # Check
        self.assertRaises(ValidationError, lambda: self.make_nursereg(
            post_data=TEST_REG_DATA_BROKEN["no_optout_reason"]))
        d = NurseReg.objects.last()
        self.assertEqual(d, None)
    def test_create_broken_zero_optout_count(self):
        """opted_out=True with optout_count=0 raises ValidationError."""
        # Setup
        # Execute
        # Check
        self.assertRaises(ValidationError, lambda: self.make_nursereg(
            post_data=TEST_REG_DATA_BROKEN["zero_optout_count"]))
        d = NurseReg.objects.last()
        self.assertEqual(d, None)
    def test_fire_metric(self):
        """vumi_fire_metric logs exactly one metric entry."""
        # Setup
        # Execute
        tasks.vumi_fire_metric.apply_async(
            kwargs={
                "metric": "test.metric",
                "value": 1,
                "agg": "sum",
                "sender": self.sender})
        # Check
        self.assertEqual(True,
                         self.check_logs("Metric: 'test.metric' [sum] -> 1"))
        self.assertEqual(1, self.check_logs_number_of_entries())
    @responses.activate
    def test_create_registration_fires_tasks_sa_id(self):
        """With the hook live, an SA-ID registration posts to Jembi,
        creates a subscription and fires all four metrics (including the
        unique-clinic metric, since this clinic is new)."""
        # restore the post_save hooks just for this test
        post_save.connect(nursereg_postsave, sender=NurseReg)
        # Check number of subscriptions before task fire
        self.assertEqual(Subscription.objects.all().count(), 6)
        # Check there are no pre-existing registration objects
        self.assertEqual(NurseReg.objects.all().count(), 0)
        responses.add(responses.POST,
                      "http://test/v2/nc/subscription",
                      body='jembi_post_json task', status=201,
                      content_type='application/json')
        # Set up the client
        tasks.get_client = self.override_get_client
        # Make a new registration
        reg_response = self.make_nursereg(
            post_data=TEST_REG_DATA["sa_id"])
        # Test registration object has been created successfully
        self.assertEqual(reg_response.status_code, status.HTTP_201_CREATED)
        # Test there is now a registration object in the database
        d = NurseReg.objects.all()
        self.assertEqual(NurseReg.objects.all().count(), 1)
        # Test the registration object is the one you added
        d = NurseReg.objects.last()
        self.assertEqual(d.id_type, 'sa_id')
        # Test post requests have been made to Jembi
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(
            responses.calls[0].request.url,
            "http://test/v2/nc/subscription")
        # Test number of subscriptions after task fire
        self.assertEqual(Subscription.objects.all().count(), 7)
        # Test subscription object is the one you added
        d = Subscription.objects.last()
        self.assertEqual(d.to_addr, "+27821234444")
        # Test metrics have fired
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nursereg.sum.json_to_jembi_success' [sum] -> 1"))
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.sum.nc_subscriptions' [sum] -> 1"))
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nurseconnect.sum.nc_subscription_to_protocol_" +
            "success' [sum] -> 1"))
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nurseconnect.unique.clinics' [sum] -> 1"))
        self.assertEqual(4, self.check_logs_number_of_entries())
        # remove post_save hooks to prevent teardown errors
        post_save.disconnect(nursereg_postsave, sender=NurseReg)
    @responses.activate
    def test_create_registration_fires_tasks_no_id(self):
        """Second registration for a known clinic: same task chain but
        only three metrics — the unique-clinic metric must not re-fire."""
        # Make a nursereg before restoring the post_save hooks so we can
        # test that the fire_new_clinic_metric task is not executed if the
        # clinic already has a registration
        self.make_nursereg(
            post_data=TEST_REG_DATA["no_id"])
        # restore the post_save hooks just for this test
        post_save.connect(nursereg_postsave, sender=NurseReg)
        # Check number of subscriptions before task fire
        self.assertEqual(Subscription.objects.all().count(), 6)
        # Check there is a registration object in the db
        self.assertEqual(NurseReg.objects.all().count(), 1)
        responses.add(responses.POST,
                      "http://test/v2/nc/subscription",
                      body='jembi_post_json task', status=201,
                      content_type='application/json')
        # Set up the client
        tasks.get_client = self.override_get_client
        # Make a new registration
        reg_response = self.make_nursereg(
            post_data=TEST_REG_DATA["no_id"])
        # Test registration object has been created successfully
        self.assertEqual(reg_response.status_code, status.HTTP_201_CREATED)
        # Test there are now two registration objects in the database
        d = NurseReg.objects.all()
        self.assertEqual(NurseReg.objects.all().count(), 2)
        # Test the registration object is the one you added
        d = NurseReg.objects.last()
        self.assertEqual(d.id_type, None)
        # Test post requests have been made to Jembi
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(
            responses.calls[0].request.url,
            "http://test/v2/nc/subscription")
        # Test number of subscriptions after task fire
        self.assertEqual(Subscription.objects.all().count(), 7)
        # Test subscription object is the one you added
        d = Subscription.objects.last()
        self.assertEqual(d.to_addr, "+27821234444")
        # Test metrics have fired
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nursereg.sum.json_to_jembi_success' [sum] -> 1"))
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.sum.nc_subscriptions' [sum] -> 1"))
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nurseconnect.sum.nc_subscription_to_protocol_" +
            "success' [sum] -> 1"))
        self.assertEqual(3, self.check_logs_number_of_entries())
        # remove post_save hooks to prevent teardown errors
        post_save.disconnect(nursereg_postsave, sender=NurseReg)
class TestNurseRegistrationAPI(AuthenticatedAPITestCase):
    """Read and partial-update endpoints for stored registrations."""
    def test_get_nurseregistration(self):
        """An admin can fetch an existing registration by id."""
        # Setup: create a registration to fetch.
        self.make_nursereg(post_data=TEST_REG_DATA["sa_id"])
        created = NurseReg.objects.last()
        # Execute
        response = self.adminclient.get(
            '/api/v2/nurseregistrations/%s/' % created.id,
            content_type='application/json')
        # Check
        self.assertEqual(response.status_code, 200)
    def test_patch_nurseregistration(self):
        """An admin PATCH updates a single field on a registration."""
        # Setup: create a registration to patch.
        self.make_nursereg(post_data=TEST_REG_DATA["sa_id"])
        created = NurseReg.objects.last()
        # Execute
        response = self.adminclient.patch(
            '/api/v2/nurseregistrations/%s/' % created.id,
            json.dumps({"persal_no": 88888888}),
            content_type='application/json')
        # Check: the change is persisted.
        self.assertEqual(response.status_code, 200)
        patched = NurseReg.objects.get(id=created.id)
        self.assertEqual(patched.persal_no, 88888888)
class TestJembiPostJsonTask(AuthenticatedAPITestCase):
    """build_jembi_json payload construction for every scenario, plus
    jembi_post_json delivery, retry (5xx) and fail-fast (4xx) paths."""
    def test_build_jembi_json_sa_id(self):
        """SA-ID registrations encode the id as <id_no>^^^ZAF^NI."""
        # Setup
        nursereg_sa_id = self.make_nursereg(
            post_data=TEST_REG_DATA["sa_id"])
        reg = NurseReg.objects.get(pk=nursereg_sa_id.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821234444",
            "cmsisdn": "+27821234444",
            "rmsisdn": None,
            "faccode": "123456",
            "id": "5101025009086^^^ZAF^NI",
            "dob": "19510102",
            "persal": None,
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    def test_build_jembi_json_passport(self):
        """Passport registrations encode <id_no>^^^<origin>^PPN."""
        # Setup
        nursereg_passport = self.make_nursereg(
            post_data=TEST_REG_DATA["passport"])
        reg = NurseReg.objects.get(pk=nursereg_passport.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821234444",
            "cmsisdn": "+27821235555",
            "rmsisdn": None,
            "faccode": "123456",
            "id": "Cub1234^^^CU^PPN",
            "dob": "19760307",
            "persal": None,
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    def test_build_jembi_json_no_id(self):
        """Without identification the msisdn is used: <msisdn>^^^ZAF^TEL."""
        # Setup
        nursereg_sa_id = self.make_nursereg(
            post_data=TEST_REG_DATA["no_id"])
        reg = NurseReg.objects.get(pk=nursereg_sa_id.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821234444",
            "cmsisdn": "+27821234444",
            "rmsisdn": None,
            "faccode": "123456",
            "id": "27821234444^^^ZAF^TEL",
            "dob": None,
            "persal": None,
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    def test_build_jembi_json_change_old_nr(self):
        """Number changes carry the replaced number in rmsisdn."""
        # Setup
        nursereg_change_old_nr = self.make_nursereg(
            post_data=TEST_REG_DATA["change_old_nr"])
        reg = NurseReg.objects.get(pk=nursereg_change_old_nr.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821234444",
            "cmsisdn": "+27821234444",
            "rmsisdn": "+27821237777",
            "faccode": "123456",
            "id": "5101025009086^^^ZAF^NI",
            "dob": "19510102",
            "persal": None,
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    def test_build_jembi_json_change_faccode(self):
        """Facility-code changes are reflected in the payload."""
        # Setup
        nursereg_change_faccode = self.make_nursereg(
            post_data=TEST_REG_DATA["change_faccode"])
        reg = NurseReg.objects.get(pk=nursereg_change_faccode.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821237777",
            "cmsisdn": "+27821237777",
            "rmsisdn": None,
            "faccode": "234567",
            "id": "5101025009086^^^ZAF^NI",
            "dob": "19510102",
            "persal": None,
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    def test_build_jembi_json_change_persal(self):
        """A supplied persal number appears in the payload."""
        # Setup
        nursereg_change_persal = self.make_nursereg(
            post_data=TEST_REG_DATA["change_persal"])
        reg = NurseReg.objects.get(pk=nursereg_change_persal.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821237777",
            "cmsisdn": "+27821237777",
            "rmsisdn": None,
            "faccode": "123456",
            "id": "5101025009086^^^ZAF^NI",
            "dob": "19510102",
            "persal": "11114444",
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    def test_build_jembi_json_change_id(self):
        """An updated SA ID number replaces the old one in the payload."""
        # Setup
        nursereg_change_id = self.make_nursereg(
            post_data=TEST_REG_DATA["change_id"])
        reg = NurseReg.objects.get(pk=nursereg_change_id.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821237777",
            "cmsisdn": "+27821237777",
            "rmsisdn": None,
            "faccode": "123456",
            "id": "9001016265166^^^ZAF^NI",
            "dob": "19900101",
            "persal": None,
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    def test_build_jembi_json_change_passport(self):
        """Switching identification to a passport updates the id field."""
        # Setup
        nursereg_change_passport = self.make_nursereg(
            post_data=TEST_REG_DATA["change_passport"])
        reg = NurseReg.objects.get(pk=nursereg_change_passport.data["id"])
        expected_json_clinic_self = {
            "mha": 1,
            "swt": 3,
            "type": 7,
            "dmsisdn": "+27821237777",
            "cmsisdn": "+27821237777",
            "rmsisdn": None,
            "faccode": "123456",
            "id": "Nam1234^^^NA^PPN",
            "dob": "19760307",
            "persal": None,
            "sanc": None,
            "encdate": "20130819144811"
        }
        # Execute
        payload = tasks.build_jembi_json(reg)
        # Check
        self.assertEqual(expected_json_clinic_self, payload)
    @responses.activate
    def test_jembi_post_json(self):
        """A 201 from Jembi fires the success metric exactly once."""
        # Setup
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["sa_id"])
        responses.add(responses.POST,
                      "http://test/v2/nc/subscription",
                      body='jembi_post_json task', status=201,
                      content_type='application/json')
        task_response = tasks.jembi_post_json.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "sender": self.sender})
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(task_response.get(), 'jembi_post_json task')
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nursereg.sum.json_to_jembi_success' [sum] -> 1"))
        self.assertEqual(1, self.check_logs_number_of_entries())
    @responses.activate
    def test_jembi_post_json_retries(self):
        """A 531 response is retried (4 calls total) before failing."""
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["sa_id"])
        responses.add(responses.POST,
                      "http://test/v2/nc/subscription",
                      body='{"error": "jembi json problems"}', status=531,
                      content_type='application/json')
        task_response = tasks.jembi_post_json.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"]})
        self.assertEqual(len(responses.calls), 4)
        with self.assertRaises(HTTPError) as cm:
            task_response.get()
        self.assertEqual(cm.exception.response.status_code, 531)
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nursereg.sum.json_to_jembi_fail' [sum] -> 1"))
        self.assertEqual(1, self.check_logs_number_of_entries())
    @responses.activate
    def test_jembi_post_json_other_httperror(self):
        """A 404 response fails immediately without retrying."""
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["sa_id"])
        responses.add(responses.POST,
                      "http://test/v2/nc/subscription",
                      body='{"error": "jembi json problems"}', status=404,
                      content_type='application/json')
        task_response = tasks.jembi_post_json.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"]})
        self.assertEqual(len(responses.calls), 1)
        with self.assertRaises(HTTPError) as cm:
            task_response.get()
        self.assertEqual(cm.exception.response.status_code, 404)
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nursereg.sum.json_to_jembi_fail' [sum] -> 1"))
        self.assertEqual(1, self.check_logs_number_of_entries())
class TestUpdateCreateVumiContactTask(AuthenticatedAPITestCase):
def test_sub_details(self):
# Setup
# Execute
sub_details = tasks.get_subscription_details()
# Check
self.assertEqual(sub_details, ("nurseconnect", "three_per_week", 1))
    def test_update_vumi_contact_sa_id(self):
        """Registering an SA-ID nurse whose msisdn already has a Vumi
        contact updates that contact's extras in place (same key/account/
        groups) and adds one subscription."""
        # Test mocks a JS nurse registration - existing Vumi contact
        # Setup
        # make existing contact with msisdn 27821234444
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821234444",
            u"groups": [u"672442947cdf4a2aae0f96ccb688df05"],
            u"user_account": u"knownaccount",
            u"extra": {}
        })
        # nurse registration for contact 27001
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["sa_id"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check
        self.assertEqual(result["msisdn"], "+27821234444")
        self.assertEqual(result["groups"],
                         [u"672442947cdf4a2aae0f96ccb688df05"])
        self.assertEqual(result["key"], "knownuuid")
        self.assertEqual(result["user_account"], "knownaccount")
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1951-01-02",
            "nc_sa_id_no": "5101025009086",
            "nc_is_registered": "true",
            "nc_id_type": "sa_id",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1",
        })
        postsubs = Subscription.objects.all().count()
        self.assertEqual(postsubs, presubs + 1)
    def test_create_vumi_contact_passport(self):
        """Registering a passport nurse with no matching Vumi contact
        creates a fresh contact (empty groups) carrying passport extras
        and the registering device's msisdn, plus one subscription."""
        # Test mocks an external registration - no existing Vumi contact
        # Setup
        # make existing contact with msisdn 27001
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27001",
            u"user_account": u"knownaccount",
            u"extra": {}
        })
        # nurse registration for contact 27821235555
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["passport"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check
        self.assertEqual(result["msisdn"], "+27821235555")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1976-03-07",
            "nc_passport_num": "Cub1234",
            "nc_passport_country": "cu",
            "nc_is_registered": "true",
            "nc_id_type": "passport",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_registered_by": "+27821234444",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1"
        })
        postsubs = Subscription.objects.all().count()
        self.assertEqual(postsubs, presubs + 1)
    def test_update_vumi_contact_no_id(self):
        """A 'none' id-type registration for an already-known MSISDN must
        update the existing contact in place (same key/account/groups) and
        add one subscription; no date-of-birth or id extras are written."""
        # Test mocks a JS nurse registration - existing Vumi contact
        # Setup
        # make existing contact with msisdn 27821234444
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821234444",
            u"groups": [u"672442947cdf4a2aae0f96ccb688df05"],
            u"user_account": u"knownaccount",
            u"extra": {}
        })
        # nurse registration for contact 27001
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["no_id"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: the existing contact record is reused, not replaced.
        self.assertEqual(result["msisdn"], "+27821234444")
        self.assertEqual(result["groups"],
                         [u"672442947cdf4a2aae0f96ccb688df05"])
        self.assertEqual(result["key"], "knownuuid")
        self.assertEqual(result["user_account"], "knownaccount")
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_is_registered": "true",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1",
        })
        postsubs = Subscription.objects.all().count()
        self.assertEqual(postsubs, presubs + 1)
    def test_create_vumi_contact_change_faccode(self):
        """A faccode-change registration for an existing contact must update
        only nc_faccode (123456 -> 234567) and create NO new subscription."""
        # Test mocks an external registration - no existing Vumi contact
        # Setup
        # make existing contact with msisdn 27821237777
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821237777",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_dob": "1951-01-02",
                "nc_sa_id_no": "5101025009086",
                "nc_is_registered": "true",
                "nc_id_type": "sa_id",
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "1",
            }
        })
        # nurse registration for contact 27821237777 -change faccode
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["change_faccode"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: only the facility code and last_reg_id change.
        self.assertEqual(result["msisdn"], "+27821237777")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1951-01-02",
            "nc_sa_id_no": "5101025009086",
            "nc_is_registered": "true",
            "nc_id_type": "sa_id",
            "nc_faccode": "234567",
            "nc_source_name": "Test Nurse Source",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1",
        })
        postsubs = Subscription.objects.all().count()
        # check no additional subscription created
        self.assertEqual(postsubs, presubs)
    def test_create_vumi_contact_change_sanc(self):
        """A SANC-number-change registration must add nc_sanc to the extras,
        leave every other field intact, and create NO new subscription."""
        # Test mocks an external registration - no existing Vumi contact
        # Setup
        # make existing contact with msisdn 27821237777
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821237777",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_dob": "1951-01-02",
                "nc_sa_id_no": "5101025009086",
                "nc_is_registered": "true",
                "nc_id_type": "sa_id",
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "1",
            }
        })
        # nurse registration for contact 27821237777 -change sanc nr
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["change_sanc"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: nc_sanc is the only new extra.
        self.assertEqual(result["msisdn"], "+27821237777")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1951-01-02",
            "nc_sa_id_no": "5101025009086",
            "nc_is_registered": "true",
            "nc_id_type": "sa_id",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_sanc": "34567890",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1",
        })
        postsubs = Subscription.objects.all().count()
        # check no additional subscription created
        self.assertEqual(postsubs, presubs)
    def test_create_vumi_contact_change_persal(self):
        """A persal-number-change registration must add nc_persal to the
        extras, leave all other fields intact, and create NO new
        subscription."""
        # Test mocks an external registration - no existing Vumi contact
        # Setup
        # make existing contact with msisdn 27821237777
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821237777",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_dob": "1951-01-02",
                "nc_sa_id_no": "5101025009086",
                "nc_is_registered": "true",
                "nc_id_type": "sa_id",
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "1",
            }
        })
        # nurse registration for contact 27821237777 -change persal
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["change_persal"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: nc_persal is the only new extra.
        self.assertEqual(result["msisdn"], "+27821237777")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1951-01-02",
            "nc_sa_id_no": "5101025009086",
            "nc_is_registered": "true",
            "nc_id_type": "sa_id",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_persal": "11114444",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1",
        })
        postsubs = Subscription.objects.all().count()
        # check no additional subscription created
        self.assertEqual(postsubs, presubs)
    def test_create_vumi_contact_change_id(self):
        """Supplying an SA id for a contact that had no id type must set
        nc_id_type/nc_sa_id_no/nc_dob and create NO new subscription."""
        # Test mocks an external registration - no existing Vumi contact
        # Setup
        # make existing contact with msisdn 27821237777
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821237777",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_is_registered": "true",
                "nc_id_type": None,
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "1",
            }
        })
        # nurse registration for contact 27821237777 -change persal
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["change_id"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: id fields are populated from the registration data.
        self.assertEqual(result["msisdn"], "+27821237777")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1990-01-01",
            "nc_sa_id_no": "9001016265166",
            "nc_is_registered": "true",
            "nc_id_type": "sa_id",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1",
        })
        postsubs = Subscription.objects.all().count()
        # check no additional subscription created
        self.assertEqual(postsubs, presubs)
    def test_create_vumi_contact_change_passport(self):
        """Supplying a passport for a contact that had no id type must set
        the passport extras and create NO new subscription."""
        # Test mocks an external registration - no existing Vumi contact
        # Setup
        # make existing contact with msisdn 27821237777
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821237777",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_is_registered": "true",
                "nc_id_type": None,
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "1",
            }
        })
        # nurse registration for contact 27821237777 -change persal
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["change_passport"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: passport fields are populated from the registration data.
        self.assertEqual(result["msisdn"], "+27821237777")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1976-03-07",
            "nc_passport_num": "Nam1234",
            "nc_passport_country": "na",
            "nc_is_registered": "true",
            "nc_id_type": "passport",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "1",
        })
        postsubs = Subscription.objects.all().count()
        # check no additional subscription created
        self.assertEqual(postsubs, presubs)
    def test_create_vumi_contact_change_old_nr(self):
        """Changing an old number must move the registration to the new
        msisdn (+27821234444), continue the message sequence from where the
        old one left off (seq_start 11 -> 13), and add ONE subscription."""
        # Setup
        # make existing contact with msisdn 27821237777
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821237777",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_dob": "1951-01-02",
                "nc_sa_id_no": "5101025009086",
                "nc_is_registered": "true",
                "nc_id_type": "sa_id",
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "11",
            }
        })
        # nurse registration - change old nr
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["change_old_nr"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: the resulting contact carries the NEW msisdn.
        self.assertEqual(result["msisdn"], "+27821234444")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1951-01-02",
            "nc_sa_id_no": "5101025009086",
            "nc_is_registered": "true",
            "nc_id_type": "sa_id",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "13",
        })
        postsubs = Subscription.objects.all().count()
        # check one additional subscriptions created
        self.assertEqual(postsubs, presubs + 1)
    def test_create_vumi_contact_multiple_active_subscriptions(self):
        """When the old number has multiple active subscriptions the task
        must raise MultipleObjectsReturned and create NO new subscription."""
        # Setup
        # make existing contact with msisdn 27821232222
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821232222",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_dob": "1951-01-02",
                "nc_sa_id_no": "5101025009086",
                "nc_is_registered": "true",
                "nc_id_type": "sa_id",
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "11",
            }
        })
        # nurse registration - change old nr
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["change_old_nr_multiple_active_subs"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        # Execute
        # Check: the exception surfaces when the task result is fetched.
        with self.assertRaises(MultipleObjectsReturned):
            contact = tasks.update_create_vumi_contact.apply_async(
                kwargs={"nursereg_id": nursereg.data["id"],
                        "client": client})
            contact.get()
        postsubs = Subscription.objects.all().count()
        # check no additional subscriptions created
        self.assertEqual(postsubs, presubs)
    def test_create_vumi_contact_switch_to_new_nr(self):
        """Switching to a new number must create a contact for the new
        msisdn, record who registered it (nc_registered_by), continue the
        sequence (seq_start 13), and add ONE subscription."""
        # Setup
        # make existing contact with msisdn 27821237777
        self.make_existing_contact({
            u"key": u"knownuuid",
            u"msisdn": u"+27821237777",
            u"user_account": u"knownaccount",
            u"extra": {
                "nc_last_reg_id": "last nursereg id",
                "nc_dob": "1951-01-02",
                "nc_sa_id_no": "5101025009086",
                "nc_is_registered": "true",
                "nc_id_type": "sa_id",
                "nc_faccode": "123456",
                "nc_source_name": "Test Nurse Source",
                "nc_subscription_type": "11",
                "nc_subscription_rate": "4",
                "nc_subscription_seq_start": "1",
            }
        })
        # nurse registration - switch to new nr
        nursereg = self.make_nursereg(
            post_data=TEST_REG_DATA["switch_to_new_nr"])
        client = self.make_client()
        presubs = Subscription.objects.all().count()
        last_nursereg = NurseReg.objects.last()
        # Execute
        contact = tasks.update_create_vumi_contact.apply_async(
            kwargs={"nursereg_id": nursereg.data["id"],
                    "client": client})
        result = contact.get()
        # Check: new contact on the new msisdn, registered by the old one.
        self.assertEqual(result["msisdn"], "+27821238888")
        self.assertEqual(result["groups"], [])
        self.assertEqual(result["extra"], {
            "nc_last_reg_id": str(last_nursereg.id),
            "nc_dob": "1951-01-02",
            "nc_sa_id_no": "5101025009086",
            "nc_is_registered": "true",
            "nc_id_type": "sa_id",
            "nc_faccode": "123456",
            "nc_source_name": "Test Nurse Source",
            "nc_subscription_type": "11",
            "nc_subscription_rate": "4",
            "nc_subscription_seq_start": "13",
            "nc_registered_by": "+27821237777"
        })
        postsubs = Subscription.objects.all().count()
        # check one additional subscriptions created
        self.assertEqual(postsubs, presubs + 1)
    def test_create_subscription(self):
        """create_subscription must address the new subscription to the
        contact's msisdn."""
        # Minimal contact payload: only the fields the task reads.
        contact = {
            "key": "knownkey",
            "msisdn": "knownaddr",
            "user_account": "knownaccount",
            "extra": {}
        }
        subscription = tasks.create_subscription(contact)
        self.assertEqual(subscription.to_addr, "knownaddr")
    def test_create_subscription_fail_fires_metric(self):
        """A contact payload missing required fields must not crash the
        task, but must fire exactly one failure metric."""
        # Deliberately incomplete payload to trigger the failure path.
        broken_contact = {
            "key": "wherestherestoftheinfo"
        }
        tasks.create_subscription(broken_contact)
        self.assertEqual(True, self.check_logs(
            "Metric: u'test.nurseconnect.sum.nc_subscription_to_protocol_" +
            "fail' [sum] -> 1"))
        self.assertEqual(1, self.check_logs_number_of_entries())
| {
"content_hash": "a873671131845cf435c526a6523e7ce2",
"timestamp": "",
"source": "github",
"line_count": 1542,
"max_line_length": 79,
"avg_line_length": 37.02918287937743,
"alnum_prop": 0.5564720923308639,
"repo_name": "praekelt/ndoh-control",
"id": "2033c1db38822b4f07776752aca6acbaee97b01d",
"size": "57099",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nursereg/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19562"
},
{
"name": "HTML",
"bytes": "32320"
},
{
"name": "JavaScript",
"bytes": "65518"
},
{
"name": "Nginx",
"bytes": "777"
},
{
"name": "Python",
"bytes": "553807"
},
{
"name": "Shell",
"bytes": "465"
}
],
"symlink_target": ""
} |
from modules.scrapers.overgg import Overgg
from modules.scrapers.owl import OWL
class Scraper:
    """Routes scraping requests to the backend named by ``scrape_method``.

    Each method returns ``None`` when the entity has no ``scrape_method``
    key, names the not-yet-implemented ``'owl'`` backend, or names an
    unknown backend.  Only ``'overgg'`` is currently dispatched.
    """

    @classmethod
    def scrape_event_start_time(cls, event):
        """Return the scraped start time of *event*'s first match, or None."""
        if event.get('scrape_method') == 'overgg':
            return Overgg.scrape_match_time(event['start_match_url'])
        # 'owl' and unknown backends: nothing to scrape yet.
        return None

    @classmethod
    def scrape_match(cls, match):
        """Return the scraped details of *match*, or None."""
        if match.get('scrape_method') == 'overgg':
            return Overgg.scrape_match(match)
        return None

    @classmethod
    def scrape_event(cls, event):
        """Return the scraped details of *event*, or None."""
        if event.get('scrape_method') == 'overgg':
            return Overgg.scrape_event(event)
        return None
"content_hash": "373a488d4f5a8e196c38694b4e64e9cb",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 31.727272727272727,
"alnum_prop": 0.545367717287488,
"repo_name": "Jawoll/automatchthreads",
"id": "47a07c0532864640f5c8ebccaa7d31f95916342f",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57358"
}
],
"symlink_target": ""
} |
__author__ = 'mike'
import sys
class Cell:
    """A single maze-grid cell."""

    # Class-level defaults, kept for backward compatibility.
    type = 0
    isborder = False

    def __init__(self, type=0, border=False):
        # Bug fix: the original __init__ was missing ``self``, so the first
        # argument swallowed the instance and the assignments only created
        # locals -- instances never received their own attributes.
        self.type = type
        self.isborder = border
def print(grid):
    """Render *grid* to stdout: 'X' for border cells, ' ' otherwise.

    Each level is emitted on its own line followed by a blank separator
    line.  Bug fix: the original body called ``print`` from inside this
    module-level function of the same name, so it recursed into itself
    instead of reaching the builtin; writing through ``sys.stdout``
    sidesteps the shadowing.
    """
    for level in grid:
        row = "".join("X" if cell.isborder else " " for cell in level)
        sys.stdout.write(row + "\n")
        sys.stdout.write("\n")
def makelevel(width=None, height=None):
    """Build the level grid and return it.

    Bug fixes: the original ``def`` line was missing its colon (a syntax
    error), misspelled ``height`` as ``heigth``, and never returned the
    grid it built.  The dimensions are now optional parameters (backward
    compatible): when omitted they are read from ``sys.argv`` as before,
    scaled to cell coordinates (+2 for the surrounding border walls).
    """
    if width is None or height is None:
        args = sys.argv
        width = int(args[1]) * 2 + 2
        # NOTE(review): height is computed but not yet used below -- the
        # original file appears unfinished past the top wall; kept for
        # the remaining rows to be generated.
        height = int(args[2]) * 2 + 2
    grid = []
    level = []  # Make top border wall
    for _ in range(width):
        level.append(Cell(border=True))
    grid.append(level)
    return grid
| {
"content_hash": "f44fe7c3dbe552905968184311f683eb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 41,
"avg_line_length": 15.1875,
"alnum_prop": 0.6152263374485597,
"repo_name": "mersinvald/SUAI-1441-Labs",
"id": "cc38930ff83a9da15677712f4618a88d79708871",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SUAI/C1/LabOP4/sources/generator/generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "81945"
},
{
"name": "C++",
"bytes": "75042"
},
{
"name": "Pascal",
"bytes": "2103"
},
{
"name": "Prolog",
"bytes": "666"
},
{
"name": "Python",
"bytes": "986"
},
{
"name": "QMake",
"bytes": "4960"
},
{
"name": "Rust",
"bytes": "48713"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
# ``get_profile_model`` may return a falsy value when the project defines
# no profile model; in that case no handler is registered at import time.
Profile = get_profile_model()
User = get_user_model()
if Profile:
    # Name of the field on the profile model that references the user.
    user_field = get_profile_user_fieldname()

    @receiver(post_save, sender=User)
    def user_saved(sender=None, instance=None, **kwargs):
        # Create the profile on first save; later saves are no-ops because
        # get_or_create only inserts when the row is missing.
        Profile.objects.get_or_create(**{str(user_field): instance})
| {
"content_hash": "f3226d2bcf1fbb98cff9576ce010e047",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 32.11764705882353,
"alnum_prop": 0.7509157509157509,
"repo_name": "eRestin/Mezz",
"id": "2798e30f9df125a3eb875e9f2633fa2bf7cf80ae",
"size": "546",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mezzanine/accounts/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "136717"
},
{
"name": "JavaScript",
"bytes": "287173"
},
{
"name": "Python",
"bytes": "1015813"
}
],
"symlink_target": ""
} |
'''
simpleoauth.utils
-----------------
SimpleOAuth utilities.
'''
from simpleoauth.compat import is_basestring, quote_plus
# Standard MIME type for HTML-form POST bodies.
FORM_URLENCODED = 'application/x-www-form-urlencoded'
# OAuth 1.0 protocol parameters that may legitimately be absent.
OPTIONAL_OAUTH_PARAMS = ('oauth_callback', 'oauth_verifier', 'oauth_version')
def ensure_encoding(s):
    '''
    Return *s* encoded as UTF-8 bytes when it is a text string.

    Values that are not strings, or are already ``bytes``, pass through
    unchanged.

    :param s: A string to check.
    :type s: str or unicode
    '''
    needs_encoding = is_basestring(s) and not isinstance(s, bytes)
    return s.encode('utf-8') if needs_encoding else s
def sorted_urlencode_utf8(params):
    '''
    Return *params* as a ``k=v&k=v`` string, sorted by key.

    Each key and value is passed through :func:`ensure_encoding` and, when
    still a string type, percent-encoded with ``quote_plus``.
    '''
    def encode_pair(key, value):
        key = ensure_encoding(key)
        value = ensure_encoding(value)
        if is_basestring(key):
            key = quote_plus(key)
        if is_basestring(value):
            value = quote_plus(value)
        return '{k}={v}'.format(k=key, v=value)

    if hasattr(params, 'items'):
        params = params.items()
    return '&'.join(encode_pair(k, v) for k, v in sorted(params))
| {
"content_hash": "5028cda9ebce6c6af11f06600e20acdd",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 23.558139534883722,
"alnum_prop": 0.5873642645607108,
"repo_name": "maxcountryman/simpleoauth",
"id": "ab226687f202cb4e151d4af3503d2494d3b02c2b",
"size": "1037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simpleoauth/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26279"
},
{
"name": "Shell",
"bytes": "1198"
}
],
"symlink_target": ""
} |
import datetime
from multiprocessing import Manager
from pale import Endpoint, PatchEndpoint, PutResourceEndpoint
from pale.arguments import BooleanArgument, IntegerArgument, StringArgument
from pale.resource import DebugResource, NoContentResource
from pale.errors.api_error import APIError
from tests.example_app.models import DateTimeModel, DateTimeRangeModel
from tests.example_app.api.resources import (DateTimeResource,
DateTimeRangeResource)
def add_after_response_test(context, response):
    """After-response hook used by the tests: tag the response headers."""
    headers = response.headers
    headers['After-Response'] = "OK"
class CurrentTimeEndpoint(Endpoint):
    """An API endpoint to get the current time."""
    _http_method = "GET"
    _uri = "/time/current"
    _route_name = "current_time"
    _allow_cors = True  # this endpoint may be called cross-origin
    _returns = DateTimeResource(
        "The DateTimeResource representation of the current time on the "
        "server.",
        fields=DateTimeResource._all_fields())
    # Exercises the after-response hook machinery in tests.
    _after_response_handlers = (add_after_response_test, )

    def _handle(self, context):
        """Return the server's current UTC time wrapped in a DateTimeModel."""
        now = DateTimeModel(datetime.datetime.utcnow())
        return {'time': now}
class ParseTimeEndpoint(Endpoint):
    """Parses some passed in parameters to generate a corresponding
    DateTimeResource.
    """
    # mock the permissions:
    # @requires_permission("licensing")
    _http_method = "POST"
    _uri = "/time/parse"
    _route_name = "parse_time"
    _default_cache = 'max-age=3'  # responses are cacheable for 3 seconds
    _returns = DateTimeResource(
        "The DateTimeResource corresponding to the timing "
        "information sent in by the requester.")
    # Declarative argument specs; parsed values arrive in context.args.
    year = IntegerArgument("Set the year of the returned datetime",
            default=2015)
    month = IntegerArgument("Set the month of the returned datetime",
            required=True,
            min_value=1,
            max_value=12)
    day = IntegerArgument("Set the day of the returned datetime")
    name = StringArgument("The name for your datetime",
            details="You can give your time a name, which will be "
            "returned back to you in the response, as the field `name`. "
            "If you omit this input parameter, your response won't "
            "include a `name`.",
            min_length=3,
            max_length=20)
    include_time = BooleanArgument("Include the time in the output?",
            details="If present, the response will include JSON fields "
            "for the current time, including `hours`, `minutes`, and "
            "`seconds`.",
            default=False)

    def _handle(self, context):
        """Build a DateTimeModel from the parsed request arguments."""
        now = DateTimeModel(datetime.datetime.utcnow())
        now.update_date(
            # year has a default, so it will always be present
            context.args['year'],
            # month is required, so it will always be present
            context.args['month'],
            context.args.get('day', None))
        now.set_include_time(context.args['include_time'])
        now.name = context.args.get('name', None)
        return {'time': now}
class TimeRangeEndpoint(Endpoint):
    """Returns start and end times based on the passed in duration.

    The start time is implied to be "now", and the end time is calculated
    by adding the duration to that start time.

    This is obviously fairly contrived, but this endpoint is here to
    illustrate and test nested resources.
    """
    _http_method = "GET"
    _uri = "/time/range"
    _route_name = "time_range_now_plus_duration"
    _returns = DateTimeRangeResource(
        "Information about the range specified, as well as the "
        "range's start and end datetimes.")
    duration = IntegerArgument(
        "The duration in milliseconds to be used.",
        required=True)

    def _handle(self, context):
        """Return a range starting now and spanning the given duration."""
        millis = context.args['duration']
        time_range = DateTimeRangeModel(millis*1000)  # microseconds
        return {'range': time_range}
"""
Resource endpoints.
We create a multiprocessing memory manager and shared
dict to enable multithreaded support.
This 'resource' data is used to test patching.
"""
BASE_RESOURCE = {
'key': 'value'
}
MANAGER = Manager()
RESOURCE = MANAGER.dict(BASE_RESOURCE)
class GetResourceEndpoint(Endpoint):
    """Returns the 'resource' as it exists in memory.
    """
    _uri = "/resource"
    _http_method = 'GET'
    _route_name = "resource_get"
    _returns = DebugResource("app resource.")

    def _handle(self, context):
        # Copy out of the manager proxy so the response is a plain dict.
        return dict(RESOURCE)
class RouteArgEndpoint(Endpoint):
    """Returns the arguments as provided from URI.
    """
    _uri = "/arg_test/<arg_a>/<arg_b>"
    _http_method = 'GET'
    _route_name = "arg_test"
    _returns = DebugResource("app resource.")

    def _handle(self, context):
        # Missing route segments fall back to the sentinel string 'no'.
        arg_a = context.route_kwargs.get('arg_a', 'no')
        arg_b = context.route_kwargs.get('arg_b', 'no')
        return {"arg_a": arg_a, "arg_b": arg_b}
class ResetResourceEndpoint(Endpoint):
    """Resets the shared 'resource' back to its base contents.
    """
    _uri = "/resource/reset"
    _http_method = 'POST'
    _route_name = "resource_reset"
    _returns = DebugResource("app resource.")

    def _handle(self, context):
        # Clear-then-update mutates the shared proxy dict in place.
        RESOURCE.clear()
        RESOURCE.update(BASE_RESOURCE)
        return dict(RESOURCE)
class ResourcePatchEndpoint(PatchEndpoint):
    """Patches a resource which is local to each instance of the app.
    """
    _uri = "/resource"
    _route_name = "resource_patch"
    _resource = DebugResource("resource patch.")
    _returns = DebugResource("app resource.")

    def _handle_patch(self, context, patch):
        # Apply the patch to a plain-dict copy, then push the result back
        # into the shared manager proxy (proxies can't be patched directly).
        data = dict(RESOURCE)
        patch.apply_to_dict(data)
        RESOURCE.update(data)
        return dict(RESOURCE)
class ResourceCreateEndpoint(PutResourceEndpoint):
    """Replaces the shared resource wholesale with the PUT payload.
    """
    _uri = "/resource"
    _route_name = "resource_put"
    _resource = DebugResource("resource patch.")
    _returns = DebugResource("app resource.")

    def _handle_put(self, context, patch):
        # Build the replacement from an empty dict (PUT semantics), then
        # swap it into the shared proxy.
        data = {}
        patch.apply_to_dict(data)
        RESOURCE.clear()
        RESOURCE.update(data)
        return dict(RESOURCE)
class BlankEndpoint(Endpoint):
    """ This carries out some action, then returns nothing on success.
    """
    _http_method = "POST"
    _uri = "/blank"
    _route_name = "resource_blank"
    _allow_cors = True
    _returns = NoContentResource()

    def _handle(self, context):
        # No body: exercises the no-content response path.
        return None
| {
"content_hash": "ac700ba6ec555bdd5e1c3f9c3cba1488",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 75,
"avg_line_length": 28.262931034482758,
"alnum_prop": 0.6397742870215037,
"repo_name": "Loudr/pale",
"id": "e83269eac3db51affaf47af8c7751e4cac19e7a3",
"size": "6557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/example_app/api/endpoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175823"
},
{
"name": "Vim script",
"bytes": "49"
}
],
"symlink_target": ""
} |
import mechanize
import time
from hq_settings import init_browser, User, HQTransaction
class Transaction(HQTransaction):
    """Load-test transaction (Python 2): log in and load the report list.

    Records the login latency in ``custom_timers`` and asserts the
    reports page renders.
    """
    def run(self):
        br = init_browser()
        # Time only the login round-trip.
        start_timer = time.time()
        user = User(self.username, self.password, br)
        user.ensure_logged_in()
        latency = time.time() - start_timer
        self.custom_timers['Login'] = latency
        resp = br.open('%s/a/%s/reports/' % (self.base_url, self.domain))
        body = resp.read()
        assert resp.code == 200, 'Bad HTTP Response'
        assert "Case Activity" in body, "Couldn't find report list"
# Manual entry point: run one transaction and dump its timers.
if __name__ == '__main__':
    trans = Transaction()
    trans.run()
    print trans.custom_timers
| {
"content_hash": "f61f7786daefe1ef381e6711c380a6c3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.6164772727272727,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "69fabbcab4b0f5469e74c4de52d617b50f4b64c2",
"size": "704",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "loadtest/test_scripts/login.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from collections import deque
import numpy as np
import numpy.testing as npt
from gym import spaces
from reinforceflow.envs import ObservationStackWrap
from reinforceflow.envs import Vectorize
def _compare_recursively(sample1, sample2):
for elem1, elem2 in zip(sample1, sample2):
if isinstance(elem1, (list, tuple)):
_compare_recursively(elem1, elem2)
else:
npt.assert_equal(elem1, elem2)
class TestConverters(unittest.TestCase):
    """Round-trip tests for the gym<->vector space converters.

    Fixtures pair a gym-native sample (``gym_out_*``) with its expected
    vectorized form (``rf_out_*``): Discrete becomes a one-hot list,
    Box passes through unchanged, and each MultiBinary digit becomes a
    2-way one-hot pair.
    """
    def __init__(self, *args, **kwargs):
        super(TestConverters, self).__init__(*args, **kwargs)
        # Discrete(4): index 2 -> one-hot [0, 0, 1, 0].
        self.space_d = spaces.Discrete(4)
        self.gym_out_d = 2
        self.rf_out_d = [0, 0, 1, 0]
        # Box: continuous samples are passed through unchanged.
        self.space_c = spaces.Box(-1, 1, [2, 4])
        self.gym_out_c = np.random.uniform(low=-1, high=1, size=(2, 4))
        self.rf_out_c = self.gym_out_c
        # MultiBinary(4): each bit becomes a [off, on] one-hot pair.
        self.space_b = spaces.MultiBinary(4)
        self.gym_out_b = [0, 1, 0, 1]
        self.rf_out_b = [[1, 0], [0, 1], [1, 0], [0, 1]]
        # Tuple space nesting all of the above, including a nested Tuple.
        self.space_t = spaces.Tuple((self.space_d,
                                     self.space_c,
                                     self.space_b,
                                     spaces.Tuple((self.space_d, self.space_c))
                                     ))
        self.gym_out_t = tuple([self.gym_out_d, self.gym_out_c, self.gym_out_b,
                                tuple([self.gym_out_d, self.gym_out_c])])
        self.rf_out_t = tuple([self.rf_out_d, self.rf_out_c, self.rf_out_b,
                               tuple([self.rf_out_d, self.rf_out_c])])

    def test_gym2vec_converter_discrete(self):
        converter = Vectorize.make_gym2vec_converter(self.space_d)
        npt.assert_equal(converter(self.gym_out_d), self.rf_out_d)

    def test_gym2vec_converter_box(self):
        converter = Vectorize.make_gym2vec_converter(self.space_c)
        npt.assert_equal(converter(self.gym_out_c), self.rf_out_c)

    def test_gym2vec_converter_binary(self):
        converter = Vectorize.make_gym2vec_converter(self.space_b)
        npt.assert_equal(converter(self.gym_out_b), self.rf_out_b)

    def test_gym2vec_converter_tuple(self):
        converter = Vectorize.make_gym2vec_converter(self.space_t)
        _compare_recursively(converter(self.gym_out_t), self.rf_out_t)

    def test_vec2gym_converter_discrete(self):
        converter = Vectorize.make_vec2gym_converter(self.space_d)
        assert converter(self.rf_out_d) == self.gym_out_d

    def test_vec2gym_converter_box(self):
        converter = Vectorize.make_vec2gym_converter(self.space_c)
        npt.assert_equal(converter(self.rf_out_c), self.gym_out_c)

    def test_vec2gym_converter_binary(self):
        converter = Vectorize.make_vec2gym_converter(self.space_b)
        npt.assert_equal(converter(self.rf_out_b), self.gym_out_b)

    def test_vec2gym_converter_tuple(self):
        converter = Vectorize.make_vec2gym_converter(self.space_t)
        _compare_recursively(converter(self.rf_out_t), self.gym_out_t)
def test_stack_initial_observation_image_gray():
    """Stacking a first 84x84x1 frame onto an empty stack (None) should
    tile it across the stack axis."""
    first_frame = np.ones((84, 84, 1))
    depth = 4
    expected = np.ones((84, 84, depth))
    stacked = ObservationStackWrap.stack_observations(first_frame, depth, None)
    npt.assert_equal(stacked, expected)
def test_stack_observation_image_gray():
    # Grayscale frames: stack depth 5, 10 pushes.
    stack_obs_test(shape=(50, 30, 1), stack_len=5, num_stacks=10)
def test_stack_observation_with_len_equals_1():
    # Degenerate stack of depth 1: each push replaces the whole stack.
    stack_obs_test(shape=(30, 30, 1), stack_len=1, num_stacks=8)
def test_stack_observation_image_rgb():
    # RGB frames: channels concatenate along the last axis.
    stack_obs_test(shape=(84, 84, 3), stack_len=4, num_stacks=12)
def test_stack_observation_exotic_shape():
    # Higher-rank observations: stacking still targets the last axis.
    stack_obs_test(shape=(4, 4, 4, 2), stack_len=5, num_stacks=22)
def stack_obs_test(shape, stack_len, num_stacks):
    """Push ``num_stacks`` frames through stack_observations and check the
    rolling stack matches a deque-based reference after the last push.

    Fix: removed the original's no-op self-assignment
    (``stack_len = stack_len``); behavior is otherwise unchanged.
    """
    stack_axis = len(shape)-1
    # Reference implementation: a bounded deque of the most recent frames.
    desired = deque(maxlen=stack_len)
    for _ in range(stack_len):
        desired.append(np.ones(shape))
    current_stack = np.concatenate(desired, stack_axis)
    for i in range(num_stacks):
        new_obs = np.ones(shape) * i
        desired.append(new_obs)
        current_stack = ObservationStackWrap.stack_observations(new_obs, stack_len, current_stack)
    npt.assert_equal(current_stack, np.concatenate(desired, stack_axis))
| {
"content_hash": "4ac6f6ec9c875a89ffc4638233bbbd15",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 98,
"avg_line_length": 37.110169491525426,
"alnum_prop": 0.6318794245261475,
"repo_name": "dbobrenko/reinforceflow",
"id": "cee6517e57de93c83386b13267e9caec1b32016f",
"size": "4379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/envs/test_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133520"
}
],
"symlink_target": ""
} |
import csv
def loadTourneyData():
    """Load NCAA tournament results grouped by season.

    Reads ``data/TourneyCompactResults.csv`` (fields: Season, Daynum,
    Wteam, Wscore, Lteam, Lscore, Wloc, Numot) and returns a dict mapping
    each season (int) to a list of per-game dicts keyed by the header
    fields, with all values kept as strings.

    Fixes/idiom: replaces the Python-2-only ``reader.next()`` plus manual
    field zipping with ``csv.DictReader`` (which consumes the header row
    itself), and uses ``setdefault`` for grouping.
    """
    games = {}
    with open("data/TourneyCompactResults.csv") as t:
        for game in csv.DictReader(t, dialect='excel'):
            games.setdefault(int(game["Season"]), []).append(game)
    return games
| {
"content_hash": "787d6f85ed10bbf89162f64e2ecfe568",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 34.294117647058826,
"alnum_prop": 0.5557461406518011,
"repo_name": "mpsonic/MarchMadnessRandomForest",
"id": "abd4a66ddad6b6db752f38a0b953342ab04f3951",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loadTourneyData.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21870"
}
],
"symlink_target": ""
} |
"""Support for Wink fans."""
import logging
from homeassistant.components.fan import (
SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SUPPORT_DIRECTION, SUPPORT_SET_SPEED,
FanEntity)
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)

# Extra Wink speed values beyond Home Assistant's stock low/medium/high.
SPEED_AUTO = 'auto'
SPEED_LOWEST = 'lowest'

# Feature bitmask advertised by every WinkFanDevice (see supported_features).
SUPPORTED_FEATURES = SUPPORT_DIRECTION + SUPPORT_SET_SPEED
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Wink platform."""
    import pywink

    already_added = hass.data[DOMAIN]['unique_ids']
    for fan in pywink.get_fans():
        # Skip fans that were registered during a previous discovery pass.
        if fan.object_id() + fan.name() in already_added:
            continue
        add_entities([WinkFanDevice(fan, hass)])
class WinkFanDevice(WinkDevice, FanEntity):
    """A ceiling/standing fan paired through the Wink hub."""

    async def async_added_to_hass(self):
        """Register this entity once it has been added to hass."""
        self.hass.data[DOMAIN]['entities']['fan'].append(self)

    def set_direction(self, direction: str) -> None:
        """Spin the fan in the given direction."""
        self.wink.set_fan_direction(direction)

    def set_speed(self, speed: str) -> None:
        """Run the fan at the requested speed."""
        self.wink.set_state(True, speed)

    def turn_on(self, speed: str = None, **kwargs) -> None:
        """Switch the fan on, optionally at a specific speed."""
        self.wink.set_state(True, speed)

    def turn_off(self, **kwargs) -> None:
        """Switch the fan off."""
        self.wink.set_state(False)

    @property
    def is_on(self):
        """Return true if the entity is on."""
        return self.wink.state()

    @property
    def speed(self) -> str:
        """Return the current speed, or None for an unrecognized value."""
        reported = self.wink.current_fan_speed()
        for known_speed in (SPEED_AUTO, SPEED_LOWEST, SPEED_LOW,
                            SPEED_MEDIUM, SPEED_HIGH):
            if reported == known_speed:
                return known_speed
        return None

    @property
    def current_direction(self):
        """Return direction of the fan [forward, reverse]."""
        return self.wink.current_fan_direction()

    @property
    def speed_list(self) -> list:
        """Return the speeds this fan supports, slowest first."""
        available = self.wink.fan_speeds()
        return [candidate
                for candidate in (SPEED_AUTO, SPEED_LOWEST, SPEED_LOW,
                                  SPEED_MEDIUM, SPEED_HIGH)
                if candidate in available]

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORTED_FEATURES
| {
"content_hash": "b92ecc73db0ec5235286594d01d8bd59",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 32.14736842105263,
"alnum_prop": 0.6195153896529142,
"repo_name": "MartinHjelmare/home-assistant",
"id": "3fb06abc1457267b26f5e8ef739f732f3011e464",
"size": "3054",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/wink/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15222591"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
"""Test KNX events."""
from homeassistant.components.knx import CONF_KNX_EVENT_FILTER
from homeassistant.core import HomeAssistant
from .conftest import KNXTestKit
from tests.common import async_capture_events
async def test_knx_event(hass: HomeAssistant, knx: KNXTestKit):
    """Test `knx_event` event.

    Exercises every event-filter style — wildcard ('0/4/*'), range
    ('1/3-6/*'), comma list ('2/6/4,5') and a single address — against
    write, response and read telegrams, and finally verifies that
    telegrams to non-matching addresses fire no event.
    """
    # addresses chosen to match (or deliberately miss) the filters below
    test_group_a = "0/4/*"
    test_address_a_1 = "0/4/0"
    test_address_a_2 = "0/4/100"
    test_group_b = "1/3-6/*"
    test_address_b_1 = "1/3/0"
    test_address_b_2 = "1/6/200"
    test_group_c = "2/6/4,5"
    test_address_c_1 = "2/6/4"
    test_address_c_2 = "2/6/5"
    test_address_d = "5/4/3"
    events = async_capture_events(hass, "knx_event")

    async def test_event_data(address, payload):
        # Exactly one event per received telegram; pop it so the next
        # assertion starts from an empty capture list.
        await hass.async_block_till_done()
        assert len(events) == 1
        event = events.pop()
        assert event.data["data"] == payload
        assert event.data["direction"] == "Incoming"
        assert event.data["destination"] == address
        if payload is None:
            # GroupValueRead telegrams carry no payload
            assert event.data["telegramtype"] == "GroupValueRead"
        else:
            assert event.data["telegramtype"] in (
                "GroupValueWrite",
                "GroupValueResponse",
            )
        assert event.data["source"] == KNXTestKit.INDIVIDUAL_ADDRESS

    await knx.setup_integration(
        {
            CONF_KNX_EVENT_FILTER: [
                test_group_a,
                test_group_b,
                test_group_c,
                test_address_d,
            ]
        }
    )
    # no event received
    await hass.async_block_till_done()
    assert len(events) == 0

    # receive telegrams for group addresses matching the filter
    await knx.receive_write(test_address_a_1, True)
    await test_event_data(test_address_a_1, True)
    await knx.receive_response(test_address_a_2, False)
    await test_event_data(test_address_a_2, False)
    await knx.receive_write(test_address_b_1, (1,))
    await test_event_data(test_address_b_1, (1,))
    await knx.receive_response(test_address_b_2, (255,))
    await test_event_data(test_address_b_2, (255,))
    await knx.receive_write(test_address_c_1, (89, 43, 34, 11))
    await test_event_data(test_address_c_1, (89, 43, 34, 11))
    await knx.receive_response(test_address_c_2, (255, 255, 255, 255))
    await test_event_data(test_address_c_2, (255, 255, 255, 255))
    await knx.receive_read(test_address_d)
    await test_event_data(test_address_d, None)

    # receive telegrams for group addresses not matching the filter
    await knx.receive_write("0/5/0", True)
    await knx.receive_write("1/7/0", True)
    await knx.receive_write("2/6/6", True)
    await hass.async_block_till_done()
    assert len(events) == 0
| {
"content_hash": "95a31ec8290539346c179ad804c260ad",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 70,
"avg_line_length": 32.81927710843374,
"alnum_prop": 0.6156387665198237,
"repo_name": "FreekingDean/home-assistant",
"id": "6a9e021ff53af6620d48a99d20babe50b6c4f1fc",
"size": "2724",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/knx/test_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
__author__ = 'Tomasz J. Kotarba <tomasz@kotarba.net>'
__copyright__ = 'Copyright (c) 2014, Tomasz J. Kotarba. All rights reserved.'
from django.test import TestCase
from fm.forms import AreaForm, FacilityForm, ContactForm, RoleForm
from fm.models import Area, AREA_TYPES
from fm.models import Facility
from fm.models import Contact
from fm.models import Role
class FMFormTest(TestCase):
    """Shared helpers for the facility-management (fm) form test cases.

    Each create_save_and_return_* helper submits a bound form, asserts the
    matching model's object count before and after the save, re-loads the
    saved instance from the database, asserts its attributes, and returns it.
    """

    def reject_invalid_json(self, form_class, data, number_of_objects_before):
        """Assert that saving `form_class` bound to `data` raises ValueError
        and creates no new objects (used for payloads with broken JSON)."""
        model_class = form_class._meta.model
        # fill in the form and save it and see if the number of objects is
        # the same before and after (assumes json data invalid)
        self.assertEqual(model_class.objects.count(), number_of_objects_before)
        form = form_class(data)
        with self.assertRaisesMessage(
            ValueError, "The %s could not be created because the data"
                        " didn't validate." % model_class.__name__
        ):
            form.save()

    def create_save_and_return_area(self, area_name, area_type, area_parent,
                                    number_of_areas_before):
        """Create an Area through AreaForm, verify counts and saved
        attributes, and return the freshly re-loaded Area."""
        # fill in the form and save it and see if the number of areas is correct
        # both before and after
        if area_parent is None:
            parent_id = None
        else:
            parent_id = area_parent.id
        self.assertEqual(Area.objects.count(), number_of_areas_before)
        form = AreaForm(data={'area_name': area_name,
                              'area_type': area_type,
                              'area_parent': parent_id})
        if area_parent is not None:
            area_parent.save()
        area = form.save()
        self.assertEqual(Area.objects.count(), number_of_areas_before + 1)
        # check if the area object in the database at the current index is the
        # same as the one just created
        self.assertEqual(area, Area.objects.all()[number_of_areas_before])
        aid = area.id
        area = Area.objects.get(id=aid)
        # check attributes of the saved area object
        self.assertEqual(area.area_name, area_name)
        self.assertEqual(area.area_type, area_type)
        self.assertEqual(area.area_parent, area_parent)
        return area

    def create_save_and_return_facility(self, name, facility_type, status, area,
                                        number_of_facilities_before,
                                        json='', json_dict=None):
        """Create a Facility through FacilityForm and return it re-loaded.

        `json` is the raw string posted to the form; `json_dict` is the
        parsed value expected on the saved object. Both must be set to
        corresponding values together, or both left at their defaults.
        """
        if (json == '' and json_dict is not None) or \
                (json != '' and json_dict is None):
            self.fail('Arguments json and json_dict both have to be either'
                      ' empty or set to valid and corresponding values.')
        # fill in the form and save it and see if the number of facilities is
        # correct both before and after
        if area is None:
            area_id = None
        else:
            area_id = area.id
        self.assertEqual(Facility.objects.count(), number_of_facilities_before)
        form = FacilityForm(data={'facility_name': name,
                                  'facility_type': facility_type,
                                  'facility_status': status,
                                  'facility_area': area_id,
                                  'json': json, })
        if area is not None:
            area.save()
        facility = form.save()
        self.assertEqual(Facility.objects.count(),
                         number_of_facilities_before + 1)
        # check if the facility object in the database at the current index is
        # the same as the one just created
        self.assertEqual(facility,
                         Facility.objects.all()[number_of_facilities_before])
        fid = facility.id
        facility = Facility.objects.get(id=fid)
        # check attributes of the saved facility object
        self.assertEqual(facility.facility_name, name)
        self.assertEqual(facility.facility_type, facility_type)
        self.assertEqual(facility.facility_status, status)
        self.assertEqual(facility.facility_area, area)
        self.assertEqual(facility.json, json_dict)
        return facility

    def create_save_and_return_contact(self, name, phone, email,
                                       number_of_contacts_before,
                                       json='', json_dict=None):
        """Create a Contact through ContactForm and return it re-loaded.

        `json`/`json_dict` follow the same pairing rule as in
        create_save_and_return_facility.
        """
        if (json == '' and json_dict is not None) or \
                (json != '' and json_dict is None):
            self.fail('Arguments json and json_dict both have to be either'
                      ' empty or set to valid and corresponding values.')
        # fill in the form and save it and see if the number of contacts is
        # correct both before and after
        self.assertEqual(Contact.objects.count(), number_of_contacts_before)
        form = ContactForm(data={'contact_name': name,
                                 'contact_phone': phone,
                                 'contact_email': email,
                                 'json': json,
                                 })
        contact = form.save()
        self.assertEqual(Contact.objects.count(),
                         number_of_contacts_before + 1)
        # check if the contact object in the database at the current index is
        # the same as the one just created
        self.assertEqual(contact,
                         Contact.objects.all()[number_of_contacts_before])
        cid = contact.id
        contact = Contact.objects.get(id=cid)
        # check attributes of the saved contact object
        self.assertEqual(contact.contact_name, name)
        self.assertEqual(contact.contact_phone, phone)
        self.assertEqual(contact.contact_email, email)
        self.assertEqual(contact.json, json_dict)
        return contact

    def create_save_and_return_role(self, name, contact, facility,
                                    number_of_roles_before):
        """Create a Role through RoleForm, verify counts and saved
        attributes, and return the freshly re-loaded Role."""
        # fill in the form and save it and see if the number of roles is
        # correct both before and after
        if contact is None:
            contact_id = None
        else:
            contact_id = contact.id
        if facility is None:
            facility_id = None
        else:
            facility_id = facility.id
        self.assertEqual(Role.objects.count(), number_of_roles_before)
        form = RoleForm(data={'role_name': name,
                              'role_contact': contact_id,
                              'role_facility': facility_id, })
        if contact is not None:
            contact.save()
        if facility is not None:
            facility.save()
        role = form.save()
        self.assertEqual(Role.objects.count(), number_of_roles_before + 1)
        # check if the role object in the database at the current index is
        # the same as the one just created
        self.assertEqual(role, Role.objects.all()[number_of_roles_before])
        rid = role.id
        role = Role.objects.get(id=rid)
        # check attributes of the saved role object
        self.assertEqual(role.role_name, name)
        self.assertEqual(role.role_contact, contact)
        self.assertEqual(role.role_facility, facility)
        return role
class AreaFormTest(FMFormTest):
    """Tests for AreaForm rendering, validation and saving."""

    def test_form_has_all_required_inputs_and_text(self):
        """The rendered form exposes every area field and its widget text."""
        blank_form = AreaForm()
        rendered = blank_form.as_p()
        meta_fields = blank_form.Meta.fields
        meta_widgets = blank_form.Meta.widgets
        self.assertIn('area_name', meta_fields)
        self.assertEqual(meta_widgets['area_name'].attrs['placeholder'],
                         'Enter a name')
        self.assertIn('placeholder="Enter a name"', rendered)
        self.assertIn('id="id_area_name"', rendered)
        self.assertIn('area_type', meta_fields)
        self.assertIn('id="id_area_type"', rendered)
        for type_code, type_label in AREA_TYPES:
            self.assertIn(type_code, rendered)
            self.assertIn(type_label, rendered)
        self.assertIn('id="id_area_parent"', rendered)

    def test_form_validation_for_blank_items(self):
        """A form bound to empty values does not validate."""
        blank_form = AreaForm(data={'area_name': '', 'area_type': ''})
        self.assertFalse(blank_form.is_valid())
        # todo: only test those which should not accept blanks

    def test_form_saves_area_objects_correctly(self):
        """One area of each type can be created through the form."""
        area_specs = (('state area1', 'State'),
                      ('state zone area2', 'State Zone'),
                      ('lga area3', 'LGA'),
                      ('ward area4', 'Ward'))
        for count_before, (area_name, area_type) in enumerate(area_specs):
            self.create_save_and_return_area(area_name, area_type, None,
                                             count_before)

    def test_child_added_correctly_on_setting_a_parent(self):
        """Saving an area with a parent links it into the parent's children."""
        parent = self.create_save_and_return_area('area1', 'State', None, 0)
        child = self.create_save_and_return_area('area2', 'State', parent, 1)
        self.assertIn(child, parent.area_children.all())
class FacilityFormTest(FMFormTest):
    """Tests for FacilityForm rendering, validation, saving and its JSON
    payload handling."""

    def test_form_has_all_required_inputs_and_text(self):
        """The rendered form exposes every facility field and widget text."""
        form = FacilityForm()
        form_html = form.as_p()
        fields = form.Meta.fields
        widgets = form.Meta.widgets
        self.assertIn('facility_name', fields)
        self.assertEqual(widgets['facility_name'].attrs['placeholder'],
                         'Enter a name')
        self.assertIn('placeholder="Enter a name"', form_html)
        self.assertIn('id="id_facility_name"', form_html)
        self.assertIn('facility_type', fields)
        self.assertIn('id="id_facility_type"', form_html)
        for facility_type in Facility.FACILITY_TYPES:
            self.assertIn(facility_type[0], form_html)
            self.assertIn(facility_type[1], form_html)
        self.assertIn('facility_status', fields)
        self.assertEqual(widgets['facility_status'].attrs['placeholder'],
                         'Enter a status')
        self.assertIn('placeholder="Enter a status"', form_html)
        self.assertIn('id="id_facility_status"', form_html)
        self.assertIn('facility_area', fields)
        self.assertIn('id="id_facility_area"', form_html)
        self.assertIn('json', fields)
        self.assertEqual(widgets['json'].attrs['placeholder'],
                         'Enter valid JSON or leave blank')
        self.assertIn('placeholder="Enter valid JSON or leave blank"',
                      form_html)
        self.assertIn('id="id_json"', form_html)

    def test_form_validation_for_blank_items(self):
        """A form bound to empty values does not validate."""
        form = FacilityForm(
            data={
                'facility_name': '',
                'facility_type': '',
                'facility_status': '',
            }
        )
        self.assertFalse(form.is_valid())

    def test_form_saves_facilities_correctly(self):
        """One facility of each store type can be created through the form."""
        self.create_save_and_return_facility(
            'facility 1', 'State Store', 'status 1', None, 0)
        self.create_save_and_return_facility(
            'facility 2', 'Zonal Store', 'status 2', None, 1)
        self.create_save_and_return_facility(
            'facility 3', 'LGA Store', 'status 3', None, 2)
        self.create_save_and_return_facility(
            'facility 4', 'Health Facility', 'status 4', None, 3)

    def test_on_setting_an_area_the_facility_is_added_to_area_facilities(self):
        """Saving with an area links the facility into area_facilities."""
        area = self.create_save_and_return_area('area1', 'State', None, 0)
        facility = self.create_save_and_return_facility(
            'facility 1', 'State Store', 'status 1', area, 0)
        self.assertIn(facility, area.area_facilities.all())

    def test_valid_json_is_accepted(self):
        """Valid JSON payloads round-trip into the facility's json field;
        '' maps to None, and '{}'/'[1]' are stored as parsed values."""
        json = """
        {
            "firstName": "John",
            "lastName": "Smith",
            "isAlive": true,
            "age": 25,
            "height_cm": 167.64,
            "address": {
                "streetAddress": "21 2nd Street",
                "city": "New York",
                "state": "NY",
                "postalCode": "10021-3100"
            },
            "phoneNumbers": [
                { "type": "home", "number": "212 555-1234" },
                { "type": "office", "number": "646 555-4567" }
            ]
        }
        """
        json_dict = {
            u'phoneNumbers': [
                {
                    u'type': u'home',
                    u'number': u'212 555-1234'
                },
                {
                    u'type': u'office',
                    u'number': u'646 555-4567'
                }
            ],
            u'isAlive': True,
            u'firstName': u'John',
            u'lastName': u'Smith',
            u'age': 25,
            u'address': {
                u'postalCode': u'10021-3100',
                u'city': u'New York',
                u'streetAddress': u'21 2nd Street',
                u'state': u'NY'
            },
            u'height_cm': 167.64
        }
        self.create_save_and_return_facility('facility 1', 'State Store',
                                             'status1', None, 0,
                                             json=json, json_dict=json_dict)
        self.create_save_and_return_facility('facility 2', 'Zonal Store',
                                             'status2', None, 1,
                                             json='', json_dict=None)
        self.create_save_and_return_facility('facility 3', 'Health Facility',
                                             'status3', None, 2,
                                             json='{}', json_dict={})
        self.create_save_and_return_facility('facility 4', 'LGA Store',
                                             'status4', None, 3,
                                             json='[1]', json_dict=[1])

    def test_invalid_json_is_not_accepted(self):
        """Broken JSON (unterminated string in phoneNumbers) is rejected."""
        invalid_json = """
        {
            "firstName": "John",
            "lastName": "Smith",
            "isAlive": true,
            "age": 25,
            "height_cm": 167.64,
            "address": {
                "streetAddress": "21 2nd Street",
                "city": "New York",
                "state": "NY",
                "postalCode": "10021-3100"
            },
            "phoneNumbers": [
                { "type": "home, "number": "212 555-1234" },
                { "type": "office", "number": "646 555-4567" }
            ]
        }
        """
        name = 'facility 1'
        facility_type = 'LGA Store'
        status = 'status 1'
        data = {
            'facility_name': name,
            'facility_type': facility_type,
            'facility_status': status,
            'json': invalid_json,
        }
        self.reject_invalid_json(FacilityForm, data, 0)
class ContactFormTest(FMFormTest):
    """Tests for ContactForm rendering, validation, saving and its JSON
    payload handling."""

    def test_form_has_all_required_inputs_and_text(self):
        """The rendered form exposes every contact field and widget text."""
        form = ContactForm()
        form_html = form.as_p()
        fields = form.Meta.fields
        widgets = form.Meta.widgets
        self.assertIn('contact_name', fields)
        self.assertEqual(widgets['contact_name'].attrs['placeholder'],
                         'Enter a name')
        self.assertIn('placeholder="Enter a name"', form_html)
        self.assertIn('id="id_contact_name"', form_html)
        self.assertIn('contact_phone', fields)
        self.assertEqual(widgets['contact_phone'].attrs['placeholder'],
                         'Enter a phone number')
        self.assertIn('placeholder="Enter a phone number"', form_html)
        self.assertIn('id="id_contact_phone"', form_html)
        self.assertIn('contact_email', fields)
        self.assertEqual(widgets['contact_email'].attrs['placeholder'],
                         'Enter an e-mail')
        self.assertIn('placeholder="Enter an e-mail"', form_html)
        self.assertIn('id="id_contact_email"', form_html)
        self.assertIn('json', fields)
        self.assertEqual(widgets['json'].attrs['placeholder'],
                         'Enter valid JSON or leave blank')
        self.assertIn('placeholder="Enter valid JSON or leave blank"',
                      form_html)
        self.assertIn('id="id_json"', form_html)

    def test_form_validation_for_blank_items(self):
        """A form bound to empty values does not validate."""
        form = ContactForm(
            data={
                'contact_name': '',
                'contact_phone': '',
                'contact_email': '',
            }
        )
        self.assertFalse(form.is_valid())

    def test_form_saves_contacts_correctly(self):
        """Several contacts can be created through the form in sequence."""
        self.create_save_and_return_contact('contact 1', '+1231', 'a1@b.cc', 0)
        self.create_save_and_return_contact('contact 2', '+1232', 'a2@b.cc', 1)
        self.create_save_and_return_contact('contact 3', '+1233', 'a3@b.cc', 2)
        self.create_save_and_return_contact('contact 4', '+1234', 'a4@b.cc', 3)

    def test_valid_json_is_accepted(self):
        """Valid JSON payloads round-trip into the contact's json field;
        '' maps to None, and '{}'/'[1]' are stored as parsed values."""
        json = """
        {
            "firstName": "John",
            "lastName": "Smith",
            "isAlive": true,
            "age": 25,
            "height_cm": 167.64,
            "address": {
                "streetAddress": "21 2nd Street",
                "city": "New York",
                "state": "NY",
                "postalCode": "10021-3100"
            },
            "phoneNumbers": [
                { "type": "home", "number": "212 555-1234" },
                { "type": "office", "number": "646 555-4567" }
            ]
        }
        """
        json_dict = {
            u'phoneNumbers': [
                {
                    u'type': u'home',
                    u'number': u'212 555-1234'
                },
                {
                    u'type': u'office',
                    u'number': u'646 555-4567'
                }
            ],
            u'isAlive': True,
            u'firstName': u'John',
            u'lastName': u'Smith',
            u'age': 25,
            u'address': {
                u'postalCode': u'10021-3100',
                u'city': u'New York',
                u'streetAddress': u'21 2nd Street',
                u'state': u'NY'
            },
            u'height_cm': 167.64
        }
        self.create_save_and_return_contact('contact 1', '+1231', 'a1@b.cc', 0,
                                            json=json, json_dict=json_dict)
        self.create_save_and_return_contact('contact 2', '+1232', 'a2@b.cc', 1,
                                            json='', json_dict=None)
        self.create_save_and_return_contact('contact 3', '+1233', 'a3@b.cc', 2,
                                            json='{}', json_dict={})
        self.create_save_and_return_contact('contact 4', '+1234', 'a4@b.cc', 3,
                                            json='[1]', json_dict=[1])

    def test_invalid_json_is_not_accepted(self):
        """Broken JSON (unterminated string in phoneNumbers) is rejected."""
        invalid_json = """
        {
            "firstName": "John",
            "lastName": "Smith",
            "isAlive": true,
            "age": 25,
            "height_cm": 167.64,
            "address": {
                "streetAddress": "21 2nd Street",
                "city": "New York",
                "state": "NY",
                "postalCode": "10021-3100"
            },
            "phoneNumbers": [
                { "type": "home, "number": "212 555-1234" },
                { "type": "office", "number": "646 555-4567" }
            ]
        }
        """
        name = 'contact 1'
        phone = '+1231'
        email = 'a1@b.cc'
        data = {
            'contact_name': name,
            'contact_phone': phone,
            'contact_email': email,
            'json': invalid_json,
        }
        self.reject_invalid_json(ContactForm, data, 0)
class RoleFormTest(FMFormTest):
    """Tests for RoleForm rendering, validation and saving."""

    def test_form_has_all_required_inputs_and_text(self):
        """The rendered form exposes name, contact and facility inputs."""
        blank_form = RoleForm()
        rendered = blank_form.as_p()
        meta_fields = blank_form.Meta.fields
        self.assertIn('role_name', meta_fields)
        self.assertIn('id="id_role_name"', rendered)
        for name_code, name_label in Role.ROLE_NAMES:
            self.assertIn(name_code, rendered)
            self.assertIn(name_label, rendered)
        self.assertIn('role_contact', meta_fields)
        self.assertIn('id="id_role_contact"', rendered)
        self.assertIn('role_facility', meta_fields)
        self.assertIn('id="id_role_facility"', rendered)

    def test_form_validation_for_blank_items(self):
        """A form bound to empty values does not validate."""
        blank_form = RoleForm(
            data={
                'role_name': '',
                'role_contact': None,
                'role_facility': None,
            }
        )
        self.assertFalse(blank_form.is_valid())

    def test_form_saves_roles_correctly(self):
        """One role of each name can be created for the same contact/facility."""
        contact1 = self.create_save_and_return_contact(
            'contact1', '0333', 'a@b.cc', 0)
        facility1 = Facility.objects.create(facility_name='facility1')
        for count_before, role_name in enumerate(
                ('SCCO', 'ZCCO', 'LGA CCO', 'LIO')):
            self.create_save_and_return_role(
                role_name, contact1, facility1, count_before)

    def test_on_setting_a_contact_the_role_is_added_to_contact_roles(self):
        """Saving with a contact links the role into contact_roles."""
        contact = Contact.objects.create()
        facility = Facility.objects.create()
        role = self.create_save_and_return_role('WTO', contact, facility, 0)
        self.assertIn(role, contact.contact_roles.all())

    def test_on_setting_a_facility_the_role_is_added_to_facility_roles(self):
        """Saving with a facility links the role into facility_roles."""
        contact = Contact.objects.create()
        facility = Facility.objects.create()
        role = self.create_save_and_return_role('HFIC', contact, facility, 0)
        self.assertIn(role, facility.facility_roles.all())
| {
"content_hash": "4bdf879f219818cdddb37065cd634f31",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 80,
"avg_line_length": 40.64905660377359,
"alnum_prop": 0.525807649461567,
"repo_name": "thanasio/connect",
"id": "cb616ccc67f808a8c04e9caf4408142a12fe54aa",
"size": "21544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kano_konnect/fm/tests/test_forms.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Test suite for repoze.who-x509
"""
from dateutil.relativedelta import relativedelta
from dateutil.tz import tzutc
from datetime import datetime
import unittest
import locale
class TestX509Base(unittest.TestCase):
    """Base class for testing X509 predicates"""
    # NOTE(review): Python 2 only — uses dict.iteritems() and basestring.

    def generate_dn(self, **kwargs):
        """Build an OpenSSL-style DN string ('/KEY=value/...') from kwargs."""
        return ''.join(['/' + t + '=' + v for t, v in kwargs.iteritems()])

    def make_environ(self, issuer, subject, start=None, end=None,
                     verified=True,
                     prefix=None,
                     verify_key='SSL_CLIENT_VERIFY',
                     validity_start_key='SSL_CLIENT_V_START',
                     validity_end_key='SSL_CLIENT_V_END',
                     issuer_key='SSL_CLIENT_I_DN',
                     subject_key='SSL_CLIENT_S_DN'):
        """Build a WSGI-style environ dict mimicking mod_ssl client-cert vars.

        `issuer` and `subject` may be given as ready DN strings or as dicts,
        which are converted with generate_dn. `prefix` is prepended to all
        keys except `verify_key`. Validity timestamps are rendered with the
        mod_ssl date format ('%b %d %H:%M:%S %Y %Z') in UTC.
        """
        # By default consider that our certificate was signed a month ago for
        # the common validity of one year.
        prefix = prefix or ''
        if start is None:
            start = datetime.utcnow() + relativedelta(months=-1)
        start = start.replace(tzinfo=tzutc())
        if end is None:
            end = datetime.utcnow() + relativedelta(months=11)
        end = end.replace(tzinfo=tzutc())
        # NOTE(review): requires the en_US.utf8 locale on the test host so
        # strftime emits English month abbreviations; changes process-wide
        # locale state as a side effect.
        locale.setlocale(locale.LC_ALL, 'en_US.utf8')
        datefmt = '%b %d %H:%M:%S %Y %Z'
        start, end = start.strftime(datefmt), end.strftime(datefmt)
        environ = {}
        environ[verify_key] = 'SUCCESS' if verified else 'FAILED'
        environ[prefix + validity_start_key] = start
        environ[prefix + validity_end_key] = end
        environ[prefix + issuer_key] = issuer if isinstance(issuer, basestring)\
            else self.generate_dn(**issuer)
        environ[prefix + subject_key] = subject if isinstance(
            subject,
            basestring) else self.generate_dn(**subject)
        return environ
| {
"content_hash": "f9862c3e93c9ac36224a7f4a05882eaf",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 37.11764705882353,
"alnum_prop": 0.5789751716851559,
"repo_name": "arturosevilla/repoze.who-x509",
"id": "aa5a1e4ed3dae17f5f023bd9d80b14f5753394d6",
"size": "2868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "49393"
},
{
"name": "Shell",
"bytes": "5112"
}
],
"symlink_target": ""
} |
"""Permutation importance for estimators"""
import numpy as np
from joblib import Parallel
from joblib import delayed
from ..metrics import check_scoring
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import check_array
from ..utils.validation import _deprecate_positional_args
def _calculate_permutation_scores(estimator, X, y, col_idx, random_state,
                                  n_repeats, scorer):
    """Score `estimator` with column `col_idx` of `X` shuffled, `n_repeats`
    times, returning the array of scores."""
    rng = check_random_state(random_state)

    # Shuffle a private copy so that thread-based parallelism never mutates
    # the caller's data, and so a read-only memmap (as produced by the 'loky'
    # or 'multiprocessing' joblib backends for large X) is never written to:
    # X.copy() always yields a writable structure.
    X_permuted = X.copy()
    row_order = np.arange(X.shape[0])
    scores = np.zeros(n_repeats)
    for repeat_idx in range(n_repeats):
        rng.shuffle(row_order)
        if hasattr(X_permuted, "iloc"):
            # pandas DataFrame: realign the shuffled column on the original
            # index before assigning, otherwise label alignment would undo
            # the permutation.
            shuffled_col = X_permuted.iloc[row_order, col_idx]
            shuffled_col.index = X_permuted.index
            X_permuted.iloc[:, col_idx] = shuffled_col
        else:
            X_permuted[:, col_idx] = X_permuted[row_order, col_idx]
        scores[repeat_idx] = scorer(estimator, X_permuted, y)
    return scores
@_deprecate_positional_args
def permutation_importance(estimator, X, y, *, scoring=None, n_repeats=5,
                           n_jobs=None, random_state=None):
    """Permutation importance for feature evaluation [BRE]_.

    The :term:`estimator` is required to be a fitted estimator. `X` can be the
    data set used to train the estimator or a hold-out set. The permutation
    importance of a feature is calculated as follows. First, a baseline metric,
    defined by :term:`scoring`, is evaluated on a (potentially different)
    dataset defined by the `X`. Next, a feature column from the validation set
    is permuted and the metric is evaluated again. The permutation importance
    is defined to be the difference between the baseline metric and metric from
    permuting the feature column.

    Read more in the :ref:`User Guide <permutation_importance>`.

    Parameters
    ----------
    estimator : object
        An estimator that has already been :term:`fitted` and is compatible
        with :term:`scorer`.

    X : ndarray or DataFrame, shape (n_samples, n_features)
        Data on which permutation importance will be computed.

    y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
        Targets for supervised or `None` for unsupervised.

    scoring : string, callable or None, default=None
        Scorer to use. It can be a single
        string (see :ref:`scoring_parameter`) or a callable (see
        :ref:`scoring`). If None, the estimator's default scorer is used.

    n_repeats : int, default=5
        Number of times to permute a feature.

    n_jobs : int or None, default=None
        Number of jobs to run in parallel. The computation is done by computing
        permutation score for each columns and parallelized over the columns.
        `None` means 1 unless in a :obj:`joblib.parallel_backend` context.
        `-1` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance, default=None
        Pseudo-random number generator to control the permutations of each
        feature.
        Pass an int to get reproducible results across function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    result : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        importances_mean : ndarray, shape (n_features, )
            Mean of feature importance over `n_repeats`.
        importances_std : ndarray, shape (n_features, )
            Standard deviation over `n_repeats`.
        importances : ndarray, shape (n_features, n_repeats)
            Raw permutation importance scores.

    References
    ----------
    .. [BRE] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
             2001. https://doi.org/10.1023/A:1010933404324

    Examples
    --------
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.inspection import permutation_importance
    >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
    ...      [0, 9, 9],[0, 9, 9],[0, 9, 9]]
    >>> y = [1, 1, 1, 0, 0, 0]
    >>> clf = LogisticRegression().fit(X, y)
    >>> result = permutation_importance(clf, X, y, n_repeats=10,
    ...                                 random_state=0)
    >>> result.importances_mean
    array([0.4666..., 0.       , 0.       ])
    >>> result.importances_std
    array([0.2211..., 0.       , 0.       ])
    """
    if not hasattr(X, "iloc"):
        X = check_array(X, force_all_finite='allow-nan', dtype=None)

    # Precompute random seed from the random state to be used
    # to get a fresh independent RandomState instance for each
    # parallel call to _calculate_permutation_scores, irrespective of
    # the fact that variables are shared or not depending on the active
    # joblib backend (sequential, thread-based or process-based).
    random_state = check_random_state(random_state)
    random_seed = random_state.randint(np.iinfo(np.int32).max + 1)

    scorer = check_scoring(estimator, scoring=scoring)
    baseline_score = scorer(estimator, X, y)

    # One parallel task per feature column; each returns `n_repeats` scores.
    scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)(
        estimator, X, y, col_idx, random_seed, n_repeats, scorer
    ) for col_idx in range(X.shape[1]))

    # Importance is the drop from the baseline score when a column is shuffled.
    importances = baseline_score - np.array(scores)
    return Bunch(importances_mean=np.mean(importances, axis=1),
                 importances_std=np.std(importances, axis=1),
                 importances=importances)
| {
"content_hash": "a570b52ad78e9446bb024751ad763283",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 42.63636363636363,
"alnum_prop": 0.651795965228801,
"repo_name": "bnaul/scikit-learn",
"id": "8aa8766b727aae5a059e1900fb9bb2afe577af16",
"size": "6097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/inspection/_permutation_importance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
} |
from neutron_lib import constants as const
from neutron_lib.db import api as db_api
from neutron_lib.objects import common_types
from neutron_lib.objects import utils as obj_utils
from oslo_utils import versionutils
from oslo_versionedobjects import fields as obj_fields
from sqlalchemy import func
from neutron.agent.common import utils
from neutron.db.models import agent as agent_model
from neutron.db.models import l3agent as rb_model
from neutron.db.models import l3ha as l3ha_model
from neutron.db import models_v2
from neutron.objects import base
@base.NeutronObjectRegistry.register
class Agent(base.NeutronDbObject):
    # Version 1.0: Initial version
    # Version 1.1: Added resources_synced
    VERSION = '1.1'

    db_model = agent_model.Agent

    # NOTE: 'configurations' and 'resource_versions' are stored as JSON
    # strings in the DB but exposed as dicts on the object; see
    # modify_fields_to_db / modify_fields_from_db for the conversion.
    fields = {
        'id': common_types.UUIDField(),
        'agent_type': obj_fields.StringField(),
        'binary': obj_fields.StringField(),
        'topic': obj_fields.StringField(),
        'host': obj_fields.StringField(),
        'availability_zone': obj_fields.StringField(nullable=True),
        'admin_state_up': obj_fields.BooleanField(default=True),
        'started_at': obj_fields.DateTimeField(tzinfo_aware=False),
        'created_at': obj_fields.DateTimeField(tzinfo_aware=False),
        'heartbeat_timestamp': obj_fields.DateTimeField(tzinfo_aware=False),
        'description': obj_fields.StringField(nullable=True),
        'configurations': common_types.DictOfMiscValuesField(),
        'resource_versions': common_types.DictOfMiscValuesField(nullable=True),
        'load': obj_fields.IntegerField(default=0),
        'resources_synced': obj_fields.BooleanField(nullable=True),
    }
@classmethod
def modify_fields_to_db(cls, fields):
result = super(Agent, cls).modify_fields_to_db(fields)
if ('configurations' in result and
not isinstance(result['configurations'],
obj_utils.StringMatchingFilterObj)):
# dump configuration into string, set '' if empty '{}'
result['configurations'] = (
cls.filter_to_json_str(result['configurations'], default=''))
if ('resource_versions' in result and
not isinstance(result['resource_versions'],
obj_utils.StringMatchingFilterObj)):
# dump resource version into string, set None if empty '{}' or None
result['resource_versions'] = (
cls.filter_to_json_str(result['resource_versions']))
return result
@classmethod
def modify_fields_from_db(cls, db_obj):
fields = super(Agent, cls).modify_fields_from_db(db_obj)
if 'configurations' in fields:
# load string from DB, set {} if configuration is ''
fields['configurations'] = (
cls.load_json_from_str(fields['configurations'], default={}))
if 'resource_versions' in fields:
# load string from DB, set None if resource_version is None or ''
fields['resource_versions'] = (
cls.load_json_from_str(fields['resource_versions']))
return fields
def obj_make_compatible(self, primitive, target_version):
super(Agent, self).obj_make_compatible(primitive, target_version)
_target_version = versionutils.convert_version_to_tuple(target_version)
if _target_version < (1, 1):
primitive.pop('resources_synced', None)
@property
def is_active(self):
return not utils.is_agent_down(self.heartbeat_timestamp)
# TODO(ihrachys) reuse query builder from
# get_l3_agents_ordered_by_num_routers
@classmethod
def get_l3_agent_with_min_routers(cls, context, agent_ids):
"""Return l3 agent with the least number of routers."""
with cls.db_context_reader(context):
query = context.session.query(
agent_model.Agent,
func.count(
rb_model.RouterL3AgentBinding.router_id
).label('count')).outerjoin(
rb_model.RouterL3AgentBinding).group_by(
agent_model.Agent,
rb_model.RouterL3AgentBinding
.l3_agent_id).order_by('count')
res = query.filter(agent_model.Agent.id.in_(agent_ids)).first()
agent_obj = cls._load_object(context, res[0])
return agent_obj
@classmethod
def get_l3_agents_ordered_by_num_routers(cls, context, agent_ids):
with cls.db_context_reader(context):
query = (context.session.query(agent_model.Agent, func.count(
rb_model.RouterL3AgentBinding.router_id).label('count')).
outerjoin(rb_model.RouterL3AgentBinding).
group_by(agent_model.Agent).
filter(agent_model.Agent.id.in_(agent_ids)).
order_by('count'))
return [cls._load_object(context, record[0]) for record in query]
@classmethod
@db_api.CONTEXT_READER
def get_ha_agents(cls, context, network_id=None, router_id=None):
if not (network_id or router_id):
return []
query = context.session.query(agent_model.Agent.host)
query = query.join(l3ha_model.L3HARouterAgentPortBinding,
l3ha_model.L3HARouterAgentPortBinding.l3_agent_id ==
agent_model.Agent.id)
if router_id:
query = query.filter(
l3ha_model.L3HARouterAgentPortBinding.router_id ==
router_id).all()
elif network_id:
query = query.join(models_v2.Port, models_v2.Port.device_id ==
l3ha_model.L3HARouterAgentPortBinding.router_id)
query = query.filter(models_v2.Port.network_id == network_id,
models_v2.Port.status ==
const.PORT_STATUS_ACTIVE,
models_v2.Port.device_owner.in_(
(const.DEVICE_OWNER_HA_REPLICATED_INT,
const.DEVICE_OWNER_ROUTER_SNAT))).all()
# L3HARouterAgentPortBinding will have l3 agent ids of hosting agents.
# But we need l2 agent(for tunneling ip) while creating FDB entries.
hosts = [host[0] for host in query]
agents = cls.get_objects(context, host=hosts)
return agents
@classmethod
@db_api.CONTEXT_READER
def get_agents_by_availability_zones_and_agent_type(
cls, context, agent_type, availability_zones):
query = context.session.query(agent_model.Agent).filter_by(
agent_type=agent_type).group_by(
agent_model.Agent.availability_zone)
query = query.filter(
agent_model.Agent.availability_zone.in_(availability_zones)).all()
agents = [cls._load_object(context, record) for record in query]
return agents
@classmethod
def get_objects_by_agent_mode(cls, context, agent_mode=None, **kwargs):
mode_filter = obj_utils.StringContains(agent_mode)
return cls.get_objects(context, configurations=mode_filter, **kwargs)
| {
"content_hash": "341c749f72090bf27b4a93d426d20329",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 46.23717948717949,
"alnum_prop": 0.6161098017468459,
"repo_name": "openstack/neutron",
"id": "62b7daea14b9100b668880d91b884a923037d6b1",
"size": "7828",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/objects/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import traceback
import re
import numpy as np
import pandas.lib as lib
import pandas.core.common as com
from pandas.compat import (lzip, map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.common import isnull
from pandas.core.base import PandasObject
from pandas.types.api import DatetimeTZDtype
from pandas.tseries.tools import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
    """Raised when a feature needs SQLAlchemy but it is not installed."""
class DatabaseError(IOError):
    """Raised for errors propagated from the underlying database driver."""
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif com.is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=True)
def _parse_date_columns(data_frame, parse_dates):
    """Coerce the columns named in *parse_dates* to datetime dtype.

    *parse_dates* may be a single column name, a list of names, or a dict
    mapping names to a format understood by ``_handle_date_column``.
    Tz-aware columns are additionally normalised to UTC (GH11216).
    """
    # normalise scalar / flag-style arguments into a list of names
    if parse_dates is True or parse_dates is None or parse_dates is False:
        parse_dates = []
    if not hasattr(parse_dates, '__iter__'):
        parse_dates = [parse_dates]
    for name in parse_dates:
        target = data_frame[name]
        try:
            # a dict maps column name -> format; indexing a list with a
            # string raises TypeError, meaning "no explicit format"
            fmt = parse_dates[name]
        except TypeError:
            fmt = None
        data_frame[name] = _handle_date_column(target, format=fmt)
    # we want to coerce datetime64_tz dtypes for now; we could in theory
    # do a 'nice' conversion from a FixedOffset tz (GH11216)
    for name, col in data_frame.iteritems():
        if com.is_datetime64tz_dtype(col):
            data_frame[name] = _handle_date_column(col)
    return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
                 parse_dates=None):
    """Build a DataFrame from raw DBAPI rows and post-process dates/index."""
    result = DataFrame.from_records(data, columns=columns,
                                    coerce_float=coerce_float)
    _parse_date_columns(result, parse_dates)
    if index_col is not None:
        # promote the requested column(s) to the (Multi)Index
        result.set_index(index_col, inplace=True)
    return result
def execute(sql, con, cur=None, params=None):
    """
    Execute the given SQL query using the provided connection object.

    Parameters
    ----------
    sql : string
        Query to be executed
    con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.  If a DBAPI2 object, only sqlite3 is supported.
    cur : deprecated, cursor is obtained from connection, default: None
    params : list or tuple, optional, default: None
        List of parameters to pass to execute method.

    Returns
    -------
    Results Iterable
    """
    pandas_sql = (pandasSQL_builder(con) if cur is None
                  else pandasSQL_builder(cur, is_cursor=True))
    return pandas_sql.execute(*_convert_params(sql, params))
# -----------------------------------------------------------------------------
# -- Deprecated tquery and uquery
def _safe_fetch(cur):
try:
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
except Exception as e: # pragma: no cover
excName = e.__class__.__name__
if excName == 'OperationalError':
return []
def tquery(sql, con=None, cur=None, retry=True):
    """
    DEPRECATED. Return a list of tuples, one per result row of *sql*;
    single-column results are flattened to a plain list.

    Use ``execute(sql, con, params).fetchall()`` instead.

    Parameters
    ----------
    sql: string
        SQL query to be executed
    con: DBAPI2 connection, default: None
    cur: deprecated, cursor is obtained from connection, default: None
    retry: boolean value to specify whether to retry after failure
        default: True

    Returns
    -------
    Results Iterable
    """
    warnings.warn(
        "tquery is deprecated, and will be removed in future versions. "
        "You can use ``execute(...).fetchall()`` instead.",
        FutureWarning, stacklevel=2)
    cur = execute(sql, con, cur=cur)
    result = _safe_fetch(cur)
    if con is not None:
        try:
            cur.close()
            con.commit()
        except Exception as e:
            # only OperationalError is treated as a recoverable hiccup
            if e.__class__.__name__ != 'OperationalError':
                raise
            print('Failed to commit, may need to restart interpreter')
            traceback.print_exc()
            if retry:
                return tquery(sql, con=con, retry=False)
    if result and len(result[0]) == 1:
        # flatten single-column results (python 3 compatible)
        result = list(lzip(*result)[0])
    elif result is None:  # pragma: no cover
        result = []
    return result
def uquery(sql, con=None, cur=None, retry=True, params=None):
    """
    DEPRECATED. Like tquery, but returns the number of affected rows
    instead of the result set.  Good for update queries.

    Use ``execute(sql, con).rowcount`` instead.

    Parameters
    ----------
    sql: string
        SQL query to be executed
    con: DBAPI2 connection, default: None
    cur: deprecated, cursor is obtained from connection, default: None
    retry: boolean value to specify whether to retry after failure
        default: True
    params: list or tuple, optional, default: None
        List of parameters to pass to execute method.

    Returns
    -------
    Number of affected rows
    """
    warnings.warn(
        "uquery is deprecated, and will be removed in future versions. "
        "You can use ``execute(...).rowcount`` instead.",
        FutureWarning, stacklevel=2)
    cur = execute(sql, con, cur=cur, params=params)
    affected = cur.rowcount
    try:
        con.commit()
    except Exception as e:
        # only OperationalError is treated as a recoverable hiccup
        if e.__class__.__name__ != 'OperationalError':
            raise
        traceback.print_exc()
        if retry:
            print('Looks like your connection failed, reconnecting...')
            return uquery(sql, con, retry=False)
    return affected
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
                   coerce_float=True, parse_dates=None, columns=None,
                   chunksize=None):
    """Read SQL database table into a DataFrame.

    Requires an SQLAlchemy connectable (or database URI string); DBAPI
    connections are not supported.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database
    con : SQLAlchemy connectable (or database string URI)
        Sqlite DBAPI connection mode not supported
    schema : string, default None
        Name of SQL schema to query; None means the default schema.
    index_col : string or list of strings, optional, default: None
        Column(s) to set as index(MultiIndex)
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    parse_dates : list or dict, default: None
        Column names to parse as dates; a dict may map a column name to a
        format string (strftime or one of D, s, ns, ms, us for integer
        timestamps) or to a dict of ``pandas.to_datetime`` keyword args.
        Especially useful with databases without native Datetime support,
        such as SQLite.
    columns : list, default: None
        List of column names to select from sql table
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the number of
        rows to include in each chunk.

    Returns
    -------
    DataFrame

    Notes
    -----
    Any datetime values with time zone information will be converted to UTC

    See also
    --------
    read_sql_query : Read SQL query into a DataFrame.
    read_sql
    """
    con = _engine_builder(con)
    if not _is_sqlalchemy_connectable(con):
        raise NotImplementedError("read_sql_table only supported for "
                                  "SQLAlchemy connectable.")
    import sqlalchemy
    from sqlalchemy.schema import MetaData
    meta = MetaData(con, schema=schema)
    try:
        # reflect only the requested table (views included)
        meta.reflect(only=[table_name], views=True)
    except sqlalchemy.exc.InvalidRequestError:
        raise ValueError("Table %s not found" % table_name)
    table = SQLDatabase(con, meta=meta).read_table(
        table_name, index_col=index_col, coerce_float=coerce_float,
        parse_dates=parse_dates, columns=columns, chunksize=chunksize)
    if table is None:
        raise ValueError("Table %s not found" % table_name, con)
    return table
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
                   parse_dates=None, chunksize=None):
    """Read SQL query into a DataFrame.

    Returns a DataFrame corresponding to the result set of the query
    string, optionally using `index_col` as the (Multi)Index.

    Parameters
    ----------
    sql : string SQL query or SQLAlchemy Selectable (select or text object)
        to be executed.
    con : SQLAlchemy connectable(engine/connection) or database string URI
        or sqlite3 DBAPI2 connection
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string or list of strings, optional, default: None
        Column(s) to set as index(MultiIndex)
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point.
    params : list, tuple or dict, optional, default: None
        Parameters to pass to the execute method.  The syntax is database
        driver dependent (see PEP 249 paramstyle); e.g. psycopg2 uses
        %(name)s so use params={'name' : 'value'}.
    parse_dates : list or dict, default: None
        Column names to parse as dates; a dict may map a column name to a
        format string (strftime or one of D, s, ns, ms, us for integer
        timestamps) or to a dict of ``pandas.to_datetime`` keyword args.
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the number of
        rows to include in each chunk.

    Returns
    -------
    DataFrame

    Notes
    -----
    Any datetime values with time zone information parsed via the
    `parse_dates` parameter will be converted to UTC.

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql
    """
    return pandasSQL_builder(con).read_query(
        sql, index_col=index_col, params=params, coerce_float=coerce_float,
        parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
             parse_dates=None, columns=None, chunksize=None):
    """
    Read SQL query or database table into a DataFrame.

    Convenience wrapper around ``read_sql_table`` and ``read_sql_query``
    (and for backward compatibility): if *sql* names an existing table it
    is read as a table, otherwise it is executed as a query.  The delegated
    function might have more specific notes about their functionality.

    Parameters
    ----------
    sql : string SQL query or SQLAlchemy Selectable (select or text object)
        to be executed, or database table name.
    con : SQLAlchemy connectable(engine/connection) or database string URI
        or DBAPI2 connection (fallback mode)
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string or list of strings, optional, default: None
        Column(s) to set as index(MultiIndex)
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point.
    params : list, tuple or dict, optional, default: None
        Parameters to pass to the execute method.  The syntax is database
        driver dependent (see PEP 249 paramstyle); e.g. psycopg2 uses
        %(name)s so use params={'name' : 'value'}.
    parse_dates : list or dict, default: None
        Column names to parse as dates; a dict may map a column name to a
        format string (strftime or one of D, s, ns, ms, us for integer
        timestamps) or to a dict of ``pandas.to_datetime`` keyword args.
    columns : list, default: None
        List of column names to select from sql table (only used when
        reading a table).
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the
        number of rows to include in each chunk.

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql_query : Read SQL query into a DataFrame
    """
    pandas_sql = pandasSQL_builder(con)
    if isinstance(pandas_sql, SQLiteDatabase):
        # DBAPI fallback has no table reflection; always treat as a query
        return pandas_sql.read_query(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates,
            chunksize=chunksize)
    try:
        _is_table_name = pandas_sql.has_table(sql)
    except Exception:
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt; only genuine errors mean "not a table".
        _is_table_name = False
    if _is_table_name:
        pandas_sql.meta.reflect(only=[sql])
        return pandas_sql.read_table(
            sql, index_col=index_col, coerce_float=coerce_float,
            parse_dates=parse_dates, columns=columns, chunksize=chunksize)
    else:
        return pandas_sql.read_query(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates,
            chunksize=chunksize)
def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
           index=True, index_label=None, chunksize=None, dtype=None):
    """
    Write records stored in a DataFrame to a SQL database.

    Parameters
    ----------
    frame : DataFrame or Series (a Series is converted to a one-column frame)
    name : string
        Name of SQL table
    con : SQLAlchemy connectable(engine/connection) or database string URI
        or sqlite3 DBAPI2 connection
        If a DBAPI2 object, only sqlite3 is supported.
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use. Ignored when using SQLAlchemy connectable.
        'mysql' is deprecated and will be removed in future versions, but it
        will be further supported through SQLAlchemy connectables.
    schema : string, default None
        Name of SQL schema to write to; None means the default schema.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, do nothing.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
    index : boolean, default True
        Write DataFrame index as a column
    index_label : string or sequence, default None
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.  A sequence should
        be given if the DataFrame uses MultiIndex.
    chunksize : int, default None
        If not None, rows are written in batches of this size; otherwise
        all rows are written at once.
    dtype : dict of column name to SQL type, default None
        Optional per-column SQL types: SQLAlchemy types, or strings for the
        sqlite3 fallback connection.
    """
    if if_exists not in ('fail', 'replace', 'append'):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
    runner = pandasSQL_builder(con, schema=schema, flavor=flavor)
    if isinstance(frame, Series):
        # normalise a Series to a one-column DataFrame
        frame = frame.to_frame()
    elif not isinstance(frame, DataFrame):
        raise NotImplementedError("'frame' argument should be either a "
                                  "Series or a DataFrame")
    runner.to_sql(frame, name, if_exists=if_exists, index=index,
                  index_label=index_label, schema=schema,
                  chunksize=chunksize, dtype=dtype)
def has_table(table_name, con, flavor='sqlite', schema=None):
    """
    Check if DataBase has named table.

    Parameters
    ----------
    table_name: string
        Name of SQL table
    con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
        If a DBAPI2 object, only sqlite3 is supported.
    flavor: {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use. Ignored when using SQLAlchemy connectable.
        'mysql' is deprecated and will be removed in future versions, but it
        will be further supported through SQLAlchemy connectables.
    schema : string, default None
        Name of SQL schema to look in; None means the default schema.

    Returns
    -------
    boolean
    """
    return pandasSQL_builder(con, flavor=flavor,
                             schema=schema).has_table(table_name)


# backwards-compatible alias
table_exists = has_table
# Shared FutureWarning text emitted when the deprecated 'mysql' flavor is
# used with a raw DBAPI connection (see pandasSQL_builder / has_table).
_MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated "
                  "and will be removed in future versions. "
                  "MySQL will be further supported with SQLAlchemy "
                  "connectables.")
def _engine_builder(con):
    """
    Return a SQLAlchemy engine built from *con* when it is a URI string;
    any non-string input is returned unchanged.  If SQLAlchemy cannot be
    imported the module-wide availability flag is flipped off and the
    string is returned as-is.
    """
    global _SQLALCHEMY_INSTALLED
    if not isinstance(con, string_types):
        return con
    try:
        import sqlalchemy
    except ImportError:
        _SQLALCHEMY_INSTALLED = False
        return con
    return sqlalchemy.create_engine(con)
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
                      is_cursor=False):
    """
    Return the PandasSQL subclass appropriate for *con*: SQLDatabase for
    SQLAlchemy connectables (or URI strings), SQLiteDatabase otherwise.
    """
    # NOTE: is_cursor only exists for the deprecated DBAPI-cursor path and
    # should go away when DBAPI connection support is removed.
    con = _engine_builder(con)
    if _is_sqlalchemy_connectable(con):
        return SQLDatabase(con, schema=schema, meta=meta)
    if flavor == 'mysql':
        warnings.warn(_MYSQL_WARNING, FutureWarning, stacklevel=3)
    return SQLiteDatabase(con, flavor, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type convertions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
    def insert_data(self):
        """Split ``self.frame`` into column names and per-column value arrays.

        Returns ``(column_names, data_list)`` where ``data_list[i]`` is an
        object ndarray for column ``i`` with NaN/NaT replaced by None so
        the DBAPI driver receives NULLs.  Index levels (if written) are
        reset into regular columns first.
        """
        if self.index is not None:
            temp = self.frame.copy()
            temp.index.names = self.index
            try:
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(
                    "duplicate name in index/columns: {0}".format(err))
        else:
            temp = self.frame
        column_names = list(map(text_type, temp.columns))
        ncols = len(column_names)
        data_list = [None] * ncols
        # iterate the internal block manager so each dtype block is
        # converted to object dtype at most once
        blocks = temp._data.blocks
        for i in range(len(blocks)):
            b = blocks[i]
            if b.is_datetime:
                # convert to microsecond resolution so this yields
                # datetime.datetime
                d = b.values.astype('M8[us]').astype(object)
            else:
                d = np.array(b.get_values(), dtype=object)
            # replace NaN with None
            if b._can_hold_na:
                mask = isnull(d)
                d[mask] = None
            # blocks are not in frame-column order; mgr_locs maps each block
            # row back to its position in the frame
            for col_loc, col in zip(b.mgr_locs, d):
                data_list[col_loc] = col
        return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index.get_level_values(i))
column_names_and_types.append((idx_label, idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not com.is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
    def _harmonize_columns(self, parse_dates=None):
        """
        Make the DataFrame's column types align with the SQL table
        column types.
        Need to work around limited NA value support. Floats are always
        fine, ints must always be floats if there are Null values.
        Booleans are hard because converting bool column with None replaces
        all Nones with false. Therefore only convert bool if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required
        """
        # NOTE: mutates self.frame in place; read()/_query_iterator assign
        # self.frame immediately before calling this.
        # handle non-list entries for parse_dates gracefully
        if parse_dates is True or parse_dates is None or parse_dates is False:
            parse_dates = []
        if not hasattr(parse_dates, '__iter__'):
            parse_dates = [parse_dates]
        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]
                # the type the dataframe column should have
                col_type = self._get_dtype(sql_col.type)
                if (col_type is datetime or col_type is date or
                        col_type is DatetimeTZDtype):
                    self.frame[col_name] = _handle_date_column(df_col)
                elif col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name] = df_col.astype(col_type, copy=False)
                elif len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is np.dtype('int64') or col_type is bool:
                        self.frame[col_name] = df_col.astype(
                            col_type, copy=False)
                # Handle date parsing
                if col_name in parse_dates:
                    try:
                        # dict maps name -> format; list gives TypeError
                        fmt = parse_dates[col_name]
                    except TypeError:
                        fmt = None
                    self.frame[col_name] = _handle_date_column(
                        df_col, format=fmt)
            except KeyError:
                pass  # this column not in results
def _get_notnull_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnulldata = col[~isnull(col)]
if len(notnulldata):
col_for_inference = notnulldata
return lib.infer_dtype(col_for_inference)
    def _sqlalchemy_type(self, col):
        """
        Map the column ``col`` to an SQLAlchemy column type.

        A user-supplied entry in ``self.dtype`` takes precedence; otherwise
        the type is chosen from the dtype inferred on non-NA values.
        :param col: the column (Series-like) to map
        :return: an SQLAlchemy type class or instance
        :raise ValueError: for complex dtypes, which have no SQL equivalent
        """
        dtype = self.dtype or {}
        if col.name in dtype:
            # explicit user override
            return self.dtype[col.name]
        col_type = self._get_notnull_col_dtype(col)
        from sqlalchemy.types import (BigInteger, Integer, Float,
                                      Text, Boolean,
                                      DateTime, Date, Time)
        if col_type == 'datetime64' or col_type == 'datetime':
            try:
                tz = col.tzinfo # noqa
                return DateTime(timezone=True)
            # NOTE(review): bare except - any failure to read tzinfo (not
            # only AttributeError) falls back to a naive DateTime
            except:
                return DateTime
        if col_type == 'timedelta64':
            warnings.warn("the 'timedelta' type is not supported, and will be "
                          "written as integer values (ns frequency) to the "
                          "database.", UserWarning, stacklevel=8)
            return BigInteger
        elif col_type == 'floating':
            if col.dtype == 'float32':
                # lower precision for 32-bit floats
                return Float(precision=23)
            else:
                return Float(precision=53)
        elif col_type == 'integer':
            if col.dtype == 'int32':
                return Integer
            else:
                return BigInteger
        elif col_type == 'boolean':
            return Boolean
        elif col_type == 'date':
            return Date
        elif col_type == 'time':
            return Time
        elif col_type == 'complex':
            raise ValueError('Complex datatypes not supported')
        # anything else (strings, objects, ...) is stored as text
        return Text
    def _get_dtype(self, sqltype):
        """
        Map an SQLAlchemy column type to the Python/numpy type the
        DataFrame column should use.

        :param sqltype: an SQLAlchemy type instance
        :return: a Python type, numpy dtype or pandas extension dtype;
            ``object`` when no better mapping is known
        """
        from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
                                      Date, TIMESTAMP)
        if isinstance(sqltype, Float):
            return float
        elif isinstance(sqltype, Integer):
            # TODO: Refine integer size.
            return np.dtype('int64')
        elif isinstance(sqltype, TIMESTAMP):
            # we have a timezone capable type
            # (checked before DateTime: TIMESTAMP is its subclass)
            if not sqltype.timezone:
                return datetime
            return DatetimeTZDtype
        elif isinstance(sqltype, DateTime):
            # Caution: np.datetime64 is also a subclass of np.number.
            return datetime
        elif isinstance(sqltype, Date):
            return date
        elif isinstance(sqltype, Boolean):
            return bool
        # fallback: keep values as Python objects
        return object
class PandasSQL(PandasObject):
    """
    Abstract base class for the SQL backends.
    Subclasses should define read_sql and to_sql.
    """
    def read_sql(self, *args, **kwargs):
        # Overridden by concrete backends; reaching this means the object
        # was not created through a supported constructor path
        raise ValueError("PandasSQL must be created with an SQLAlchemy "
                         "connectable or connection+sql flavor")
    def to_sql(self, *args, **kwargs):
        # Same contract as read_sql above
        raise ValueError("PandasSQL must be created with an SQLAlchemy "
                         "connectable or connection+sql flavor")
class SQLDatabase(PandasSQL):
    """
    This class enables convertion between DataFrame and SQL databases
    using SQLAlchemy to handle DataBase abstraction
    Parameters
    ----------
    engine : SQLAlchemy connectable
        Connectable to connect with the database. Using SQLAlchemy makes it
        possible to use any DB supported by that library.
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If None, use default schema (default).
    meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created. This allows to specify database flavor specific
        arguments in the MetaData object.
    """
    def __init__(self, engine, schema=None, meta=None):
        """Store the connectable and prepare the (possibly shared) metadata"""
        self.connectable = engine
        if not meta:
            from sqlalchemy.schema import MetaData
            meta = MetaData(self.connectable, schema=schema)
        self.meta = meta
    @contextmanager
    def run_transaction(self):
        """Context manager yielding an object statements can be executed on"""
        with self.connectable.begin() as tx:
            # some transaction objects cannot execute statements themselves;
            # fall back to the connectable in that case
            if hasattr(tx, 'execute'):
                yield tx
            else:
                yield self.connectable
    def execute(self, *args, **kwargs):
        """Simple passthrough to SQLAlchemy connectable"""
        return self.connectable.execute(*args, **kwargs)
    def read_table(self, table_name, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None, schema=None,
                   chunksize=None):
        """Read SQL database table into a DataFrame.
        Parameters
        ----------
        table_name : string
            Name of SQL table in database
        index_col : string, optional, default: None
            Column to set as index
        coerce_float : boolean, default True
            Attempt to convert values to non-string, non-numeric objects
            (like decimal.Decimal) to floating point. This can result in
            loss of precision.
        parse_dates : list or dict, default: None
            - List of column names to parse as dates
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps
            - Dict of ``{column_name: arg}``, where the arg corresponds
              to the keyword arguments of :func:`pandas.to_datetime`.
              Especially useful with databases without native Datetime support,
              such as SQLite
        columns : list, default: None
            List of column names to select from sql table
        schema : string, default None
            Name of SQL schema in database to query (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.
        Returns
        -------
        DataFrame
        See also
        --------
        pandas.read_sql_table
        SQLDatabase.read_query
        """
        table = SQLTable(table_name, self, index=index_col, schema=schema)
        return table.read(coerce_float=coerce_float,
                          parse_dates=parse_dates, columns=columns,
                          chunksize=chunksize)
    @staticmethod
    def _query_iterator(result, chunksize, columns, index_col=None,
                        coerce_float=True, parse_dates=None):
        """Return generator through chunked result set"""
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                # result set exhausted
                break
            else:
                yield _wrap_result(data, columns, index_col=index_col,
                                   coerce_float=coerce_float,
                                   parse_dates=parse_dates)
    def read_query(self, sql, index_col=None, coerce_float=True,
                   parse_dates=None, params=None, chunksize=None):
        """Read SQL query into a DataFrame.
        Parameters
        ----------
        sql : string
            SQL query to be executed
        index_col : string, optional, default: None
            Column name to use as index for the returned DataFrame object.
        coerce_float : boolean, default True
            Attempt to convert values to non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets
        params : list, tuple or dict, optional, default: None
            List of parameters to pass to execute method.  The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
            Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
        parse_dates : list or dict, default: None
            - List of column names to parse as dates
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps
            - Dict of ``{column_name: arg dict}``, where the arg dict
              corresponds to the keyword arguments of
              :func:`pandas.to_datetime` Especially useful with databases
              without native Datetime support, such as SQLite
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.
        Returns
        -------
        DataFrame
        See also
        --------
        read_sql_table : Read SQL database table into a DataFrame
        read_sql
        """
        args = _convert_params(sql, params)
        result = self.execute(*args)
        columns = result.keys()
        if chunksize is not None:
            # lazy, chunked reading
            return self._query_iterator(result, chunksize, columns,
                                        index_col=index_col,
                                        coerce_float=coerce_float,
                                        parse_dates=parse_dates)
        else:
            # eager reading of the whole result set
            data = result.fetchall()
            frame = _wrap_result(data, columns, index_col=index_col,
                                 coerce_float=coerce_float,
                                 parse_dates=parse_dates)
            return frame
    # alias kept so both spellings of the API work
    read_sql = read_query
    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None, schema=None, chunksize=None, dtype=None):
        """
        Write records stored in a DataFrame to a SQL database.
        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this size at a
            time.  If None, all rows will be written at once.
        dtype : dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a SQLAlchemy type.
        """
        if dtype is not None:
            # validate user-specified dtypes before touching the database
            from sqlalchemy.types import to_instance, TypeEngine
            for col, my_type in dtype.items():
                if not isinstance(to_instance(my_type), TypeEngine):
                    raise ValueError('The type of %s is not a SQLAlchemy '
                                     'type ' % col)
        table = SQLTable(name, self, frame=frame, index=index,
                         if_exists=if_exists, index_label=index_label,
                         schema=schema, dtype=dtype)
        table.create()
        table.insert(chunksize)
        if (not name.isdigit() and not name.islower()):
            # check for potentially case sensitivity issues (GH7815)
            # Only check when name is not a number and name is not lower case
            engine = self.connectable.engine
            with self.connectable.connect() as conn:
                table_names = engine.table_names(
                    schema=schema or self.meta.schema,
                    connection=conn,
                )
            if name not in table_names:
                msg = (
                    "The provided table name '{0}' is not found exactly as "
                    "such in the database after writing the table, possibly "
                    "due to case sensitivity issues. Consider using lower "
                    "case table names."
                ).format(name)
                warnings.warn(msg, UserWarning)
    @property
    def tables(self):
        """All tables known to the underlying MetaData object"""
        return self.meta.tables
    def has_table(self, name, schema=None):
        """Return True if the table exists in the given (or default) schema"""
        return self.connectable.run_callable(
            self.connectable.dialect.has_table,
            name,
            schema or self.meta.schema,
        )
    def get_table(self, table_name, schema=None):
        """Return the SQLAlchemy Table object for ``table_name``"""
        schema = schema or self.meta.schema
        if schema:
            tbl = self.meta.tables.get('.'.join([schema, table_name]))
        else:
            tbl = self.meta.tables.get(table_name)
        # Avoid casting double-precision floats into decimals
        from sqlalchemy import Numeric
        for column in tbl.columns:
            if isinstance(column.type, Numeric):
                column.type.asdecimal = False
        return tbl
    def drop_table(self, table_name, schema=None):
        """Drop ``table_name`` if it exists, and reset the cached metadata"""
        schema = schema or self.meta.schema
        if self.has_table(table_name, schema):
            # reflect first so the Table object exists before dropping it
            self.meta.reflect(only=[table_name], schema=schema)
            self.get_table(table_name, schema).drop()
            self.meta.clear()
    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
        """Return the CREATE TABLE statement for ``frame`` as a string"""
        table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
                         dtype=dtype)
        return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# Flavour specific sql strings and handler class for access to DBs without
# SQLAlchemy installed
# SQL type convertions for each DB
# Maps an inferred dtype name to the column type declaration used by each
# supported fallback flavor
_SQL_TYPES = {
    'string': {
        'mysql': 'VARCHAR (63)',
        'sqlite': 'TEXT',
    },
    'floating': {
        'mysql': 'DOUBLE',
        'sqlite': 'REAL',
    },
    'integer': {
        'mysql': 'BIGINT',
        'sqlite': 'INTEGER',
    },
    'datetime': {
        'mysql': 'DATETIME',
        'sqlite': 'TIMESTAMP',
    },
    'date': {
        'mysql': 'DATE',
        'sqlite': 'DATE',
    },
    'time': {
        'mysql': 'TIME',
        'sqlite': 'TIME',
    },
    'boolean': {
        'mysql': 'BOOLEAN',
        'sqlite': 'INTEGER',
    }
}
def _get_unicode_name(name):
try:
uname = name.encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_mysql_name(name):
    """
    Validate ``name`` as an (originally unquoted) MySQL identifier and
    return it wrapped in backticks.
    See http://dev.mysql.com/doc/refman/5.0/en/identifiers.html
    """
    uname = _get_unicode_name(name)
    if not len(uname):
        raise ValueError("Empty table or column name specified")
    allowed = re.compile(r'[0-9,a-z,A-Z$_]')
    for ch in uname:
        if allowed.match(ch):
            # plain ASCII identifier character
            continue
        if not (0x80 < ord(ch) < 0xFFFF):
            # outside the extended identifier character range
            raise ValueError("Invalid MySQL identifier '%s'" % uname)
    return '`%s`' % uname
def _get_valid_sqlite_name(name):
    """
    Validate ``name`` as an SQLite identifier and return it double-quoted,
    with embedded double quotes doubled.
    See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
    # -for-sqlite-table-column-names-in-python
    Steps: ensure the string is valid UTF-8, refuse NUL characters, double
    every '"' and wrap the whole identifier in double quotes.
    """
    uname = _get_unicode_name(name)
    if not len(uname):
        raise ValueError("Empty table or column name specified")
    if "\x00" in uname:
        raise ValueError('SQLite identifier cannot contain NULs')
    escaped = uname.replace('"', '""')
    return '"%s"' % escaped
# SQL enquote and wildcard symbols
# Parameter placeholder used in statements for each fallback flavor
_SQL_WILDCARD = {
    'mysql': '%s',
    'sqlite': '?'
}
# Validate and return escaped identifier
_SQL_GET_IDENTIFIER = {
    'mysql': _get_valid_mysql_name,
    'sqlite': _get_valid_sqlite_name,
}
# Warning emitted when column names contain whitespace
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
                       "In pandas versions < 0.14, spaces were converted to "
                       "underscores.")
class SQLiteTable(SQLTable):
    """
    Patch the SQLTable for fallback support (DBAPI connection, no
    SQLAlchemy). Instead of a table variable, ``self.table`` holds the
    raw CREATE statements produced by ``_create_table_setup``.
    """

    def __init__(self, *args, **kwargs):
        # GH 8341
        # register an adapter callable for datetime.time object
        import sqlite3
        # this will transform time(12,34,56,789) into '12:34:56.000789'
        # (this is what sqlalchemy does)
        sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
        super(SQLiteTable, self).__init__(*args, **kwargs)

    def sql_schema(self):
        """Return the schema as one string of ';'-separated statements."""
        return str(";\n".join(self.table))

    def _execute_create(self):
        # Run every CREATE statement inside a single transaction
        with self.pd_sql.run_transaction() as conn:
            for stmt in self.table:
                conn.execute(stmt)

    def insert_statement(self):
        """Build the parameterized INSERT statement for this table."""
        names = list(map(text_type, self.frame.columns))
        flv = self.pd_sql.flavor
        wld = _SQL_WILDCARD[flv]  # wildcard char
        escape = _SQL_GET_IDENTIFIER[flv]
        if self.index is not None:
            # index levels come first, preserving their order
            # (plain loop instead of a side-effect list comprehension)
            for idx in self.index[::-1]:
                names.insert(0, idx)
        bracketed_names = [escape(column) for column in names]
        col_names = ','.join(bracketed_names)
        wildcards = ','.join([wld] * len(names))
        insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
            escape(self.name), col_names, wildcards)
        return insert_statement

    def _execute_insert(self, conn, keys, data_iter):
        # materialize the iterator: executemany needs a sequence
        data_list = list(data_iter)
        conn.executemany(self.insert_statement(), data_list)

    def _create_table_setup(self):
        """
        Return a list of SQL statement that create a table reflecting the
        structure of a DataFrame.  The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements
        """
        column_names_and_types = \
            self._get_column_names_and_types(self._sql_type_name)
        # raw string avoids the deprecated '\s' escape in a plain literal
        pat = re.compile(r'\s+')
        column_names = [col_name for col_name, _, _ in column_names_and_types]
        if any(map(pat.search, column_names)):
            warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
        flv = self.pd_sql.flavor
        escape = _SQL_GET_IDENTIFIER[flv]
        create_tbl_stmts = [escape(cname) + ' ' + ctype
                            for cname, ctype, _ in column_names_and_types]
        if self.keys is not None and len(self.keys):
            if not com.is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            cnames_br = ", ".join([escape(c) for c in keys])
            create_tbl_stmts.append(
                "CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
                    tbl=self.name, cnames_br=cnames_br))
        create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
                        ',\n '.join(create_tbl_stmts) + "\n)"]
        ix_cols = [cname for cname, _, is_index in column_names_and_types
                   if is_index]
        if len(ix_cols):
            cnames = "_".join(ix_cols)
            cnames_br = ",".join([escape(c) for c in ix_cols])
            # FIX: a space is required before the ON keyword; previously the
            # generated SQL read '..."ix_name"ON "table"...'
            create_stmts.append(
                "CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
                " ON " + escape(self.name) + " (" + cnames_br + ")")
        return create_stmts

    def _sql_type_name(self, col):
        """Map a column's inferred dtype to the flavor's SQL type name."""
        dtype = self.dtype or {}
        if col.name in dtype:
            # user-specified type overrides inference
            return dtype[col.name]
        col_type = self._get_notnull_col_dtype(col)
        if col_type == 'timedelta64':
            warnings.warn("the 'timedelta' type is not supported, and will be "
                          "written as integer values (ns frequency) to the "
                          "database.", UserWarning, stacklevel=8)
            col_type = "integer"
        elif col_type == "datetime64":
            col_type = "datetime"
        elif col_type == "empty":
            col_type = "string"
        elif col_type == "complex":
            raise ValueError('Complex datatypes not supported')
        if col_type not in _SQL_TYPES:
            # anything unrecognized is stored as text
            col_type = "string"
        return _SQL_TYPES[col_type][self.pd_sql.flavor]
class SQLiteDatabase(PandasSQL):
    """
    Version of SQLDatabase to support sqlite connections (fallback without
    sqlalchemy). This should only be used internally.
    For now still supports `flavor` argument to deal with 'mysql' database
    for backwards compatibility, but this will be removed in future versions.
    Parameters
    ----------
    con : sqlite connection object
    """
    def __init__(self, con, flavor, is_cursor=False):
        """Store the connection/cursor and validate the requested flavor"""
        self.is_cursor = is_cursor
        self.con = con
        if flavor is None:
            flavor = 'sqlite'
        if flavor not in ['sqlite', 'mysql']:
            raise NotImplementedError("flavors other than SQLite and MySQL "
                                      "are not supported")
        else:
            self.flavor = flavor
    @contextmanager
    def run_transaction(self):
        """Context manager yielding a cursor; commits on success"""
        cur = self.con.cursor()
        try:
            yield cur
            self.con.commit()
        # NOTE(review): bare except is deliberate - roll back on *any*
        # exception, then re-raise it unchanged
        except:
            self.con.rollback()
            raise
        finally:
            cur.close()
    def execute(self, *args, **kwargs):
        """Execute a statement, wrapping driver errors in DatabaseError"""
        if self.is_cursor:
            cur = self.con
        else:
            cur = self.con.cursor()
        try:
            if kwargs:
                cur.execute(*args, **kwargs)
            else:
                cur.execute(*args)
            return cur
        except Exception as exc:
            # attempt a rollback before reporting the failure
            try:
                self.con.rollback()
            except Exception: # pragma: no cover
                ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
                                   " to rollback" % (args[0], exc))
                raise_with_traceback(ex)
            ex = DatabaseError(
                "Execution failed on sql '%s': %s" % (args[0], exc))
            raise_with_traceback(ex)
    @staticmethod
    def _query_iterator(cursor, chunksize, columns, index_col=None,
                        coerce_float=True, parse_dates=None):
        """Return generator through chunked result set"""
        while True:
            data = cursor.fetchmany(chunksize)
            if type(data) == tuple:
                # some drivers return a tuple; normalize to a list
                data = list(data)
            if not data:
                cursor.close()
                break
            else:
                yield _wrap_result(data, columns, index_col=index_col,
                                   coerce_float=coerce_float,
                                   parse_dates=parse_dates)
    def read_query(self, sql, index_col=None, coerce_float=True, params=None,
                   parse_dates=None, chunksize=None):
        """Execute ``sql`` and return the result as a DataFrame (or a
        chunked iterator of DataFrames when ``chunksize`` is given)"""
        args = _convert_params(sql, params)
        cursor = self.execute(*args)
        # column names come from the DBAPI cursor description
        columns = [col_desc[0] for col_desc in cursor.description]
        if chunksize is not None:
            return self._query_iterator(cursor, chunksize, columns,
                                        index_col=index_col,
                                        coerce_float=coerce_float,
                                        parse_dates=parse_dates)
        else:
            data = self._fetchall_as_list(cursor)
            cursor.close()
            frame = _wrap_result(data, columns, index_col=index_col,
                                 coerce_float=coerce_float,
                                 parse_dates=parse_dates)
            return frame
    def _fetchall_as_list(self, cur):
        """fetchall(), normalized to a plain list"""
        result = cur.fetchall()
        if not isinstance(result, list):
            result = list(result)
        return result
    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None, schema=None, chunksize=None, dtype=None):
        """
        Write records stored in a DataFrame to a SQL database.
        Parameters
        ----------
        frame: DataFrame
        name: name of SQL table
        if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, do nothing.
            replace: If table exists, drop it, recreate it, and insert data.
            append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Ignored parameter included for compatability with SQLAlchemy
            version of ``to_sql``.
        chunksize : int, default None
            If not None, then rows will be written in batches of this
            size at a time. If None, all rows will be written at once.
        dtype : dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a string.
        """
        if dtype is not None:
            # fallback mode requires SQL types given as plain strings
            for col, my_type in dtype.items():
                if not isinstance(my_type, str):
                    raise ValueError('%s (%s) not a string' % (
                        col, str(my_type)))
        table = SQLiteTable(name, self, frame=frame, index=index,
                            if_exists=if_exists, index_label=index_label,
                            dtype=dtype)
        table.create()
        table.insert(chunksize)
    def has_table(self, name, schema=None):
        """Return True if a table called ``name`` exists"""
        # TODO(wesm): unused?
        # escape = _SQL_GET_IDENTIFIER[self.flavor]
        # esc_name = escape(name)
        wld = _SQL_WILDCARD[self.flavor]
        flavor_map = {
            'sqlite': ("SELECT name FROM sqlite_master "
                       "WHERE type='table' AND name=%s;") % wld,
            'mysql': "SHOW TABLES LIKE %s" % wld}
        query = flavor_map.get(self.flavor)
        return len(self.execute(query, [name, ]).fetchall()) > 0
    def get_table(self, table_name, schema=None):
        return None # not supported in fallback mode
    def drop_table(self, name, schema=None):
        """Drop the table ``name`` (no existence check)"""
        escape = _SQL_GET_IDENTIFIER[self.flavor]
        drop_sql = "DROP TABLE %s" % escape(name)
        self.execute(drop_sql)
    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
        """Return the CREATE TABLE statement for ``frame`` as a string"""
        table = SQLiteTable(table_name, self, frame=frame, index=False,
                            keys=keys, dtype=dtype)
        return str(table.sql_schema())
def get_schema(frame, name, flavor='sqlite', keys=None, con=None, dtype=None):
    """
    Build and return the SQL table schema (CREATE TABLE statement) that
    would be used to store the given frame.

    Parameters
    ----------
    frame : DataFrame
    name : string
        name of SQL table
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use. Ignored when using SQLAlchemy connectable.
        'mysql' is deprecated and will be removed in future versions, but it
        will be further supported through SQLAlchemy engines.
    keys : string or sequence, default: None
        columns to use a primary key
    con: an open SQL database connection object or a SQLAlchemy connectable
        Using SQLAlchemy makes it possible to use any DB supported by that
        library, default: None
        If a DBAPI2 object, only sqlite3 is supported.
    dtype : dict of column name to SQL type, default None
        Optional specifying the datatype for columns. The SQL type should
        be a SQLAlchemy type, or a string for sqlite3 fallback connection.
    """
    # Delegate to the backend chosen by pandasSQL_builder
    pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
    return pandas_sql._create_sql_schema(
        frame, name, keys=keys, dtype=dtype)
| {
"content_hash": "cb196f7d1e4c7cda5bad30ec98c8c458",
"timestamp": "",
"source": "github",
"line_count": 1717,
"max_line_length": 79,
"avg_line_length": 36.63191613278975,
"alnum_prop": 0.587484299728127,
"repo_name": "BigDataforYou/movie_recommendation_workshop_1",
"id": "324988360c9fe8f79ceef4996dce2ef74a07cdd4",
"size": "62921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/sql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "738713"
},
{
"name": "C++",
"bytes": "169366"
},
{
"name": "CSS",
"bytes": "14786"
},
{
"name": "Fortran",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "1408733"
},
{
"name": "JavaScript",
"bytes": "13700"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "19755294"
},
{
"name": "Shell",
"bytes": "3276"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from setuptools import setup
import os
import re
# Extract the package version from pycalq/__init__.py so it is defined in a
# single place. Using a context manager guarantees the file handle is closed
# even if the pattern lookup fails (previously the handle leaked and a
# missing __version__ raised an opaque AttributeError).
base_path = os.path.dirname(__file__)
_init_path = os.path.join(base_path, 'pycalq', '__init__.py')
with open(_init_path) as fp:
    _match = re.compile(r".*__version__ = '(.*?)'", re.S).match(fp.read())
if _match is None:
    raise RuntimeError("Unable to find __version__ in %s" % _init_path)
VERSION = _match.group(1)
version = VERSION
def read(fname):
    """Return the contents of *fname* (relative to this file's directory).

    Returns an empty string when the file cannot be opened or read, so a
    missing DESCRIPTION.rst does not break the build.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    try:
        # close the handle deterministically (previously it leaked)
        with open(path) as handle:
            return handle.read()
    except IOError:
        return ""
# Runtime dependencies (pinned as released)
requirements = ['six==1.7.3', 'urllib3>=1.2,<2.0']
# Dependencies needed only to run the test suite
test_requirements = ['pytest',
                     'pytest-cov']
setup(
    name='pyCalq',
    version=version,
    license='MIT',
    description="Unofficial Calq client library.",
    long_description=read('DESCRIPTION.rst'),
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries',
    ],
    keywords='',
    author='Friedrich Kauder',
    author_email='fkauder@gmail.com',
    url='https://github.com/FriedrichK/pyCalq.git',
    packages=['pycalq'],
    test_suite='pycalq.tests',
    install_requires=requirements,
    tests_require=test_requirements,
)
| {
"content_hash": "94fd68c466c1708df3eaf89bb89a37de",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 61,
"avg_line_length": 27.5,
"alnum_prop": 0.5965034965034965,
"repo_name": "FriedrichK/pyCalq",
"id": "d0e89dd9b2f54f7dc631dee0c98511d440b040cf",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17342"
},
{
"name": "Shell",
"bytes": "521"
}
],
"symlink_target": ""
} |
"""
Dependency-less LDAP filter parser for Python
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pelix.utilities import is_string
import inspect
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
ESCAPE_CHARACTER = '\\'
""" The LDAP escape character: \\"""
# Do not insert the ESCAPE CHARACTER in this list
ESCAPED_CHARACTERS = "()&|=<>~*+#,;'\""
"""
The characters to escape in an LDAP string.
See http://www.ldapexplorer.com/en/manual/109010000-ldap-filter-syntax.htm
"""
# ------------------------------------------------------------------------------
# Operator constants used by LDAPFilter
AND = 0
""" 'And' LDAP operation """
OR = 1
""" 'Or' LDAP operation """
NOT = 2
""" 'Not' LDAP operation """
# ------------------------------------------------------------------------------
class LDAPFilter(object):
    """
    Represents an LDAP filter: a boolean operator (AND, OR or NOT) applied
    to a list of sub-filters and criteria
    """
    def __init__(self, operator):
        """
        Sets up the filter
        :param operator: One of AND, OR or NOT
        :raise ValueError: If the operator is unknown
        """
        if operator not in (AND, OR, NOT):
            raise ValueError("Invalid operator: {0}".format(operator))
        self.operator = operator
        self.subfilters = []
    def __eq__(self, other):
        """
        Equality testing: same operator and the same children
        (order-insensitive)
        """
        if type(other) is not LDAPFilter:
            # Bad type
            return False
        if self.operator != other.operator:
            # Different operators
            return False
        if len(self.subfilters) != len(other.subfilters):
            # Not the same size...
            return False
        # Every child must exist on the other side
        return all(child in other.subfilters for child in self.subfilters)
    def __ne__(self, other):
        """
        Inequality testing
        """
        return not (self == other)
    def __repr__(self):
        """
        String description that can rebuild the filter
        """
        return "{0}.get_ldap_filter({1!r})".format(__name__, str(self))
    def __str__(self):
        """
        String representation: the operator followed by all children
        """
        children = "".join(str(child) for child in self.subfilters)
        return "({0}{1})".format(operator2str(self.operator), children)
    def append(self, ldap_filter):
        """
        Appends a filter or a criterion to this filter
        :param ldap_filter: An LDAP filter or criterion
        :raise TypeError: If the parameter is not of a known type
        :raise ValueError: If more than one child is given to a NOT filter
        """
        if not isinstance(ldap_filter, (LDAPFilter, LDAPCriteria)):
            raise TypeError("Invalid filter type: {0}"
                            .format(type(ldap_filter).__name__))
        if self.operator == NOT and len(self.subfilters) >= 1:
            # NOT accepts a single operand only
            raise ValueError("Not operator only handles one child")
        self.subfilters.append(ldap_filter)
    def matches(self, properties):
        """
        Tests if the given properties match this LDAP filter and its children
        :param properties: A dictionary of properties
        :return: True if the properties match this filter, else False
        """
        # Lazy generator: any()/all() short-circuit over the children
        checks = (child.matches(properties) for child in self.subfilters)
        result = any(checks) if self.operator == OR else all(checks)
        # NOT inverts the result of its single child
        return not result if self.operator == NOT else result
    def normalize(self):
        """
        Returns the first meaningful object in this filter: drops empty and
        duplicate children, and collapses single-child AND/OR filters
        """
        if not self.subfilters:
            # No sub-filters at all
            return None
        # Normalize and deduplicate the children
        kept = []
        for child in self.subfilters:
            normalized = child.normalize()
            if normalized is not None and normalized not in kept:
                kept.append(normalized)
        self.subfilters = kept
        if len(self.subfilters) > 1:
            # Still a real combination
            return self
        if self.operator == NOT:
            # NOT is the only operator to accept exactly one child
            return self
        # Single child and not a NOT filter: the wrapper is transparent
        return self.subfilters[0].normalize()
class LDAPCriteria(object):
    """
    Represents an LDAP criterion: a (name, value, comparator) triple that
    tests a single property
    """
    def __init__(self, name, value, comparator):
        """
        Sets up the criterion

        :param name: Name of the tested property
        :param value: Filter value compared against the property
        :param comparator: Function/method called as
                           ``comparator(filter_value, tested_value)``
        :raise ValueError: If one of the parameters is empty, or if the
                           comparator is not a function or a method
        """
        if not name or not value or not comparator:
            # Refuse empty values
            raise ValueError("Invalid criterion parameter ({0}, {1}, {2})"
                             .format(name, value, comparator))
        if not (inspect.ismethod(comparator) or
                inspect.isfunction(comparator)):
            # Ensure we have a valid comparator.
            # FIX: the message was previously passed as a second positional
            # argument to ValueError and never formatted
            raise ValueError(
                "Comparator must be a method: {0}".format(comparator))
        self.name = str(name)
        self.value = value
        self.comparator = comparator
    def __eq__(self, other):
        """
        Equality testing: same name and comparator, and equal values
        """
        if type(other) is not LDAPCriteria:
            # Bad type
            return False
        for member in ('name', 'comparator'):
            if getattr(self, member) != getattr(other, member):
                # Difference found
                return False
        if isinstance(other.value, type(self.value)):
            # Same type: direct comparison
            return self.value == other.value
        else:
            # Convert to strings for comparison
            return str(self.value) == str(other.value)
    def __ne__(self, other):
        """
        Inequality testing
        """
        return not self.__eq__(other)
    def __repr__(self):
        """
        String representation that can rebuild the criterion
        """
        return '{0}.get_ldap_filter({1!r})'.format(__name__, self.__str__())
    def __str__(self):
        """
        String description in LDAP filter syntax
        """
        return "({0}{1}{2})".format(escape_LDAP(self.name),
                                    comparator2str(self.comparator),
                                    escape_LDAP(str(self.value)))
    def matches(self, properties):
        """
        Tests if the given properties match this LDAP criterion
        :param properties: A dictionary of properties
        :return: True if the properties match this criterion, else False
        """
        try:
            # Use the comparator
            return self.comparator(self.value, properties[self.name])
        except KeyError:
            # Criterion key is not in the properties
            return False
    def normalize(self):
        """
        Returns this criterion (criteria are already normalized)
        """
        return self
# ------------------------------------------------------------------------------
def escape_LDAP(ldap_string):
    """
    Escapes a string so it can be used inside an LDAP filter
    :param ldap_string: The string to escape
    :return: The protected string (None if the input was None)
    """
    if ldap_string is None:
        return None
    assert is_string(ldap_string)
    if not ldap_string:
        # Nothing to escape in an empty string
        return ldap_string
    # Double any escape character already present
    ldap_string = ldap_string.replace(ESCAPE_CHARACTER,
                                      ESCAPE_CHARACTER + ESCAPE_CHARACTER)
    # Protect a leading space
    if ldap_string.startswith(" "):
        ldap_string = "\\ " + ldap_string[1:]
    # Protect a trailing space
    if ldap_string.endswith(" "):
        ldap_string = ldap_string[:-1] + "\\ "
    # Escape the remaining special characters
    for special in ESCAPED_CHARACTERS:
        ldap_string = ldap_string.replace(special, ESCAPE_CHARACTER + special)
    return ldap_string
def unescape_LDAP(ldap_string):
    """
    Unescapes an LDAP string: drops escape characters, keeping the
    characters they protected.

    :param ldap_string: The string to unescape (may be None)
    :return: The unprotected string, or None if the input was None
    """
    if ldap_string is None:
        return None

    if ESCAPE_CHARACTER not in ldap_string:
        # Fast path: nothing to unescape
        return ldap_string

    output_chars = []
    pending_escape = False
    for character in ldap_string:
        if not pending_escape and character == ESCAPE_CHARACTER:
            # Escape character found: skip it, keep the next one
            pending_escape = True
        else:
            pending_escape = False
            output_chars.append(character)
    return "".join(output_chars)
# ------------------------------------------------------------------------------
# Container types that the comparators treat as multi-valued properties
ITERABLES = (list, tuple, set)
""" The types that are considered iterable in comparators """
def _comparator_presence(_, tested_value):
"""
Tests a filter which simply a joker, i.e. a value presence test
"""
# The filter value is a joker : simple presence test
if tested_value is None:
return False
elif hasattr(tested_value, "__len__"):
# Refuse empty values
return len(tested_value) != 0
# Presence validated
return True
def _comparator_star(filter_value, tested_value):
    """
    Tests a filter value containing a joker ('*') against the tested value.

    :param filter_value: The filter value (contains at least one '*')
    :param tested_value: The value, or iterable of values, to test
    :return: True if at least one value matches the filter
    """
    if isinstance(tested_value, ITERABLES):
        # Multi-valued property: at least one entry must match.
        # BUG FIX: the original fell off the end of its loop and returned
        # None (not False) when no entry matched or the iterable was empty;
        # any() always returns a proper boolean.
        return any(_star_comparison(filter_value, value)
                   for value in tested_value)
    return _star_comparison(filter_value, tested_value)
def _star_comparison(filter_value, tested_value):
    """
    Tests a filter containing a joker ('*') against a single string value.

    The filter is split on '*'; each literal fragment must appear in the
    tested string, in order.  A non-empty first fragment is anchored at the
    start of the string and a non-empty last fragment at its end.

    :param filter_value: The filter value (contains at least one '*')
    :param tested_value: The value to test (must be a string)
    :return: True if the value matches the filter
    """
    if not is_string(tested_value):
        # Unhandled value type...
        return False

    parts = filter_value.split('*')
    i = 0
    last_part = len(parts) - 1
    # idx is both the search start position and the running match cursor
    idx = 0
    for part in parts:
        # Find the part in the tested value, after the previous fragment
        idx = tested_value.find(part, idx)
        if idx == -1:
            # Part not found
            return False

        if i == 0 and len(part) != 0 and idx != 0:
            # First part is not a star, but the tested value is not at
            # position 0 => Doesn't match
            return False

        if i == last_part and len(part) != 0 \
                and idx != len(tested_value) - len(part):
            # Last tested part is not at the end of the sequence
            return False

        # Be sure to test the next part
        idx += len(part)
        i += 1

    # Whole test passed
    return True
def _comparator_eq(filter_value, tested_value):
    """
    Tests if the filter value equals the tested value.

    Multi-valued properties match if any entry matches; non-string values
    are compared through their repr() form.
    """
    if isinstance(tested_value, ITERABLES):
        # Compare against each entry, converting non-strings to their
        # string representation first
        return any(
            filter_value == (value if is_string(value) else repr(value))
            for value in tested_value)

    if is_string(tested_value):
        # Direct string-to-string comparison
        return filter_value == tested_value

    # String vs the string representation of the value
    return filter_value == repr(tested_value)
def _comparator_approximate(filter_value, tested_value):
    """
    Approximate equality: like _comparator_eq, but case-insensitive for
    strings (and for the string entries of iterable values).
    """
    folded_filter = filter_value.lower()
    if is_string(tested_value):
        # Lower-case comparison for plain strings
        return _comparator_eq(folded_filter, tested_value.lower())

    if hasattr(tested_value, '__iter__'):
        # Try the lower-cased string entries first
        folded_values = [value.lower() for value in tested_value
                         if is_string(value)]
        if _comparator_eq(folded_filter, folded_values):
            return True

    # Fall back to raw comparisons
    return (_comparator_eq(filter_value, tested_value)
            or _comparator_eq(folded_filter, tested_value))


def _comparator_approximate_star(filter_value, tested_value):
    """
    Approximate equality for filter values containing a joker ('*'):
    the case-insensitive variant of _comparator_star.
    """
    folded_filter = filter_value.lower()
    if is_string(tested_value):
        # Lower-case comparison for plain strings
        return _comparator_star(folded_filter, tested_value.lower())

    if hasattr(tested_value, '__iter__'):
        # Try the lower-cased string entries first
        folded_values = [value.lower() for value in tested_value
                         if is_string(value)]
        if _comparator_star(folded_filter, folded_values):
            return True

    # Fall back to raw comparisons
    return (_comparator_star(filter_value, tested_value)
            or _comparator_star(folded_filter, tested_value))
def _comparator_le(filter_value, tested_value):
    """
    Tests tested_value <= filter_value
    """
    return (_comparator_lt(filter_value, tested_value)
            or _comparator_eq(filter_value, tested_value))


def _comparator_lt(filter_value, tested_value):
    """
    Tests tested_value < filter_value (strict comparison).

    A string filter value is first converted to the type of the tested
    value, with an int-to-float fallback for mixed numeric comparisons.
    """
    if is_string(filter_value):
        tested_type = type(tested_value)
        try:
            # Try a direct conversion to the tested value's type
            filter_value = tested_type(filter_value)
        except (TypeError, ValueError):
            if tested_type is not int:
                # Incompatible type
                return False
            try:
                # Integer/float comparison trick
                filter_value = float(filter_value)
            except (TypeError, ValueError):
                # Not a float either
                return False

    try:
        return tested_value < filter_value
    except TypeError:
        # Values cannot be ordered together
        return False
def _comparator_ge(filter_value, tested_value):
    """
    Tests tested_value >= filter_value
    """
    return (_comparator_gt(filter_value, tested_value)
            or _comparator_eq(filter_value, tested_value))


def _comparator_gt(filter_value, tested_value):
    """
    Tests tested_value > filter_value (strict comparison).

    A string filter value is first converted to the type of the tested
    value, with an int-to-float fallback for mixed numeric comparisons.
    """
    if is_string(filter_value):
        tested_type = type(tested_value)
        try:
            # Try a direct conversion to the tested value's type
            filter_value = tested_type(filter_value)
        except (TypeError, ValueError):
            if tested_type is not int:
                # Incompatible type
                return False
            try:
                # Integer/float comparison trick
                filter_value = float(filter_value)
            except (TypeError, ValueError):
                # Not a float either
                return False

    try:
        return tested_value > filter_value
    except TypeError:
        # Values cannot be ordered together
        return False
# Maps each comparator method to its LDAP symbol (joker variants share
# the symbol of their plain counterpart)
_COMPARATOR_SYMBOL = {
    _comparator_approximate: "~=",
    _comparator_approximate_star: "~=",
    _comparator_eq: "=",
    _comparator_star: "=",
    _comparator_le: "<=",
    _comparator_lt: "<",
    _comparator_ge: ">=",
    _comparator_gt: ">",
}


def comparator2str(comparator):
    """
    Converts a comparator method to its LDAP filter symbol.

    :param comparator: A comparator method
    :return: The LDAP comparator string, "??" if unknown
    """
    try:
        return _COMPARATOR_SYMBOL[comparator]
    except KeyError:
        return "??"


def operator2str(operator):
    """
    Converts an internal operator value to its LDAP symbol.

    :param operator: An LDAP filter operator internal value
    :return: The LDAP operator string, '<unknown>' if unknown
    """
    if operator == AND:
        return '&'
    if operator == OR:
        return '|'
    if operator == NOT:
        return '!'
    return '<unknown>'
# ------------------------------------------------------------------------------
def _compute_comparator(string, idx):
    """
    Tries to compute the LDAP comparator at the given index.

    Valid operators are '=' (equality), '<' / '<=' (less than),
    '>' / '>=' (greater than) and '~=' (approximate).

    :param string: An LDAP filter string
    :param idx: An index in the given string
    :return: The corresponding comparator method, None if unknown
    """
    if idx + 1 >= len(string):
        # String is too short (no comparison possible)
        return None

    first = string[idx]
    if first == '=':
        # Equality
        return _comparator_eq

    if string[idx + 1] == '=':
        # Two-character operators: '<=', '>=', '~='
        return {'<': _comparator_le,
                '>': _comparator_ge,
                '~': _comparator_approximate}.get(first)

    # Single-character ("strict") operators: '<', '>'
    return {'<': _comparator_lt,
            '>': _comparator_gt}.get(first)


def _compute_operation(string, idx):
    """
    Tries to compute the LDAP operation at the given index.

    Valid operations are '&' (AND), '|' (OR) and '!' (NOT).

    :param string: An LDAP filter string
    :param idx: An index in the given string
    :return: The corresponding operator constant, None if unknown
    """
    return {'&': AND, '|': OR, '!': NOT}.get(string[idx])
def _skip_spaces(string, idx):
"""
Retrieves the next non-space character after idx index in the given string
:param string: The string to look into
:param idx: The base search index
:return: The next non-space character index, -1 if not found
"""
i = idx
for char in string[idx:]:
if not char.isspace():
return i
i += 1
return -1
def _parse_ldap_criteria(ldap_filter, startidx=0, endidx=-1):
    """
    Parses an LDAP sub filter (a single criterion such as ``name=value``).

    :param ldap_filter: An LDAP filter string
    :param startidx: Sub-filter start index
    :param endidx: Sub-filter end index
    :return: The LDAP sub-filter (an LDAPCriteria)
    :raise ValueError: Invalid sub-filter
    """
    comparators = "=<>~"
    if startidx < 0:
        raise ValueError("Invalid string range start={0}, end={1}"
                         .format(startidx, endidx))

    # Scan for the first unescaped comparator character
    escaped = False
    idx = startidx
    for char in ldap_filter[startidx:endidx]:
        if not escaped:
            if char == ESCAPE_CHARACTER:
                # Next character escaped
                escaped = True
            elif char in comparators:
                # Comparator found
                break
        else:
            # Escaped character ignored
            escaped = False
        idx += 1
    else:
        # for/else: the loop ran to completion without a break,
        # i.e. the comparator was never found
        raise ValueError("Comparator not found in '{0}'"
                         .format(ldap_filter[startidx:endidx]))

    # The attribute name is everything before the comparator
    attribute_name = ldap_filter[startidx:idx].strip()
    if not attribute_name:
        # Attribute name is missing
        raise ValueError("Attribute name is missing in '{0}'"
                         .format(ldap_filter[startidx:endidx]))

    comparator = _compute_comparator(ldap_filter, idx)
    if comparator is None:
        # Unknown comparator
        raise ValueError("Unknown comparator in '{0}' - {1}\nFilter : {2}"
                         .format(ldap_filter[startidx:endidx],
                                 ldap_filter[idx], ldap_filter))

    # Find the end of the comparator (it may be 1 or 2 characters long)
    while ldap_filter[idx] in comparators:
        idx += 1

    # Skip spaces
    idx = _skip_spaces(ldap_filter, idx)

    # Extract the value
    value = ldap_filter[idx:endidx].strip()

    # Use the appropriate comparator if a joker is found in the filter value
    if value == '*':
        # Presence comparator
        comparator = _comparator_presence
    elif '*' in value:
        # Joker: switch to the star-aware variant of the comparator
        if comparator == _comparator_eq:
            comparator = _comparator_star
        elif comparator == _comparator_approximate:
            comparator = _comparator_approximate_star

    return LDAPCriteria(unescape_LDAP(attribute_name), unescape_LDAP(value),
                        comparator)
def _parse_ldap(ldap_filter):
    """
    Parses the given LDAP filter string into a filter tree.

    :param ldap_filter: An LDAP filter string
    :return: An LDAPFilter object, None if the filter was empty
    :raise ValueError: The LDAP filter string is invalid
    """
    if ldap_filter is None:
        # Nothing to do
        return None

    assert is_string(ldap_filter)

    # Remove surrounding spaces
    ldap_filter = ldap_filter.strip()
    if not ldap_filter:
        # Empty string
        return None

    escaped = False
    filter_len = len(ldap_filter)
    root = None
    # Stack of currently-open LDAPFilter (operator) nodes
    stack = []
    # Stack of start indices of criteria currently being read
    subfilter_stack = []

    idx = 0
    while idx < filter_len:
        if not escaped:
            if ldap_filter[idx] == '(':
                # Opening filter : get the operator
                idx = _skip_spaces(ldap_filter, idx + 1)
                if idx == -1:
                    raise ValueError("Missing filter operator: {0}"
                                     .format(ldap_filter))

                operator = _compute_operation(ldap_filter, idx)
                if operator is not None:
                    # New sub-filter (an operator node such as "(&" )
                    stack.append(LDAPFilter(operator))
                else:
                    # Sub-filter content: remember where the criterion starts
                    subfilter_stack.append(idx)
            elif ldap_filter[idx] == ')':
                # Ending filter : store it in its parent
                if len(subfilter_stack) != 0:
                    # criterion finished
                    startidx = subfilter_stack.pop()
                    criterion = _parse_ldap_criteria(ldap_filter, startidx,
                                                     idx)
                    if len(stack) != 0:
                        # Append the criterion to the innermost open filter
                        top = stack.pop()
                        top.append(criterion)
                        stack.append(top)
                    else:
                        # No parent : filter contains only one criterion
                        # Make a parent to stay homogeneous
                        root = LDAPFilter(AND)
                        root.append(criterion)
                elif len(stack) != 0:
                    # Sub filter finished
                    ended_filter = stack.pop()
                    if len(stack) != 0:
                        # Attach it to its enclosing filter
                        top = stack.pop()
                        top.append(ended_filter)
                        stack.append(top)
                    else:
                        # End of the parse
                        root = ended_filter
                else:
                    raise ValueError("Too many end of parenthesis:{0}: {1}"
                                     .format(idx, ldap_filter[idx:]))
            elif ldap_filter[idx] == '\\':
                # Next character must be ignored
                escaped = True
        else:
            # Escaped character ignored
            escaped = False

        # Don't forget to increment...
        idx += 1

    # No root : invalid content
    if root is None:
        raise ValueError("Invalid filter string: {0}".format(ldap_filter))

    # Return the root of the filter, reduced to its normal form
    return root.normalize()
def get_ldap_filter(ldap_filter):
    """
    Retrieves the LDAP filter object corresponding to the given filter:
    parses strings, returns LDAPFilter/LDAPCriteria objects unchanged.

    :param ldap_filter: An LDAP filter (object or string)
    :return: The corresponding filter, can be None
    :raise ValueError: Invalid filter string found
    :raise TypeError: Unknown filter type
    """
    if ldap_filter is None:
        return None

    if isinstance(ldap_filter, (LDAPFilter, LDAPCriteria)):
        # Already a filter object: no conversion needed
        return ldap_filter

    if is_string(ldap_filter):
        # Parse the string form
        return _parse_ldap(ldap_filter)

    # Unknown type
    raise TypeError("Unhandled filter type {0}"
                    .format(type(ldap_filter).__name__))


def combine_filters(filters, operator=AND):
    """
    Combines LDAP filters, which can be strings or LDAPFilter objects.

    :param filters: An iterable of filters to combine
    :param operator: The operator for combination (AND by default)
    :return: The combined filter, None if all filters are None
    :raise ValueError: Invalid filter string found
    :raise TypeError: Unknown filter type, or non-iterable argument
    """
    if not filters:
        return None

    if is_string(filters) or not hasattr(filters, '__iter__'):
        raise TypeError("Filters argument must be iterable")

    # Convert every filter, dropping None entries
    ldap_filters = [converted
                    for converted in (get_ldap_filter(sub_filter)
                                      for sub_filter in filters)
                    if converted is not None]

    if not ldap_filters:
        # Nothing left to combine
        return None

    if len(ldap_filters) == 1:
        # A single filter needs no combination
        return ldap_filters[0]

    combined = LDAPFilter(operator)
    for ldap_filter in ldap_filters:
        # Direct combination
        combined.append(ldap_filter)
    return combined.normalize()
| {
"content_hash": "e7d7107337c6052db06f9bf861247ae1",
"timestamp": "",
"source": "github",
"line_count": 950,
"max_line_length": 80,
"avg_line_length": 28.521052631578947,
"alnum_prop": 0.5562280863627975,
"repo_name": "isandlaTech/cohorte-devtools",
"id": "b1bb71680351a27b46ae156b1c82f63be1a80232",
"size": "27149",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "org.cohorte.eclipse.runner.basic/files/test/pelix/ldapfilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151318"
},
{
"name": "HTML",
"bytes": "113064"
},
{
"name": "Java",
"bytes": "172793"
},
{
"name": "JavaScript",
"bytes": "2165497"
},
{
"name": "Python",
"bytes": "13926564"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
} |
from plenum.common.exceptions import NotConnectedToAny
from indy_common.identity import Identity
class Caching:
    """
    Mixin providing caching helpers for agents.

    Expects the host class to expose ``client`` and ``wallet`` attributes.
    """

    def getClient(self):
        """Returns the agent's client, raising if none is connected."""
        if not self.client:
            raise NotConnectedToAny
        return self.client

    def getIdentity(self, identifier):
        """
        Builds and submits an identity request for the given identifier
        through the wallet, returning the submitted request.
        """
        identity = Identity(identifier=identifier)
        request = self.wallet.requestIdentity(identity,
                                              sender=self.wallet.defaultId)
        self.getClient().submitReqs(request)
        return request
| {
"content_hash": "9d55ac5afa5fe9cf57597c9b462b93c6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 29.375,
"alnum_prop": 0.6425531914893617,
"repo_name": "TechWritingWhiz/indy-node",
"id": "35ceb04c0e893548233954a32d5b90fa14605afd",
"size": "705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indy_client/agent/caching.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3131"
},
{
"name": "Groovy",
"bytes": "8886"
},
{
"name": "Makefile",
"bytes": "2073"
},
{
"name": "Python",
"bytes": "1283603"
},
{
"name": "Ruby",
"bytes": "65411"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "127911"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import operator
from datetime import datetime
from collections import defaultdict
import numpy as np
from .. import units as u, constants as const
from .. import _erfa as erfa
from ..units import UnitConversionError
from ..utils.decorators import lazyproperty
from ..utils import ShapedLikeNDArray
from ..utils.compat.misc import override__dir__
from ..utils.data_info import MixinInfo, data_info_factory
from ..utils.compat.numpy import broadcast_to
from ..extern import six
from ..extern.six.moves import zip
from .utils import day_frac
from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS,
TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime)
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # pylint: disable=W0611
__all__ = ['Time', 'TimeDelta', 'TIME_SCALES', 'TIME_DELTA_SCALES',
           'ScaleValueError', 'OperandTypeError']

# All supported time scales
TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')

# Intermediate scales to pass through when converting between the two
# scales of the key.  Keys are sorted alphabetically; the reversed path is
# derived at conversion time (see Time._set_scale).
MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'),
              ('tai', 'tcg'): ('tt',),
              ('tai', 'ut1'): ('utc',),
              ('tai', 'tdb'): ('tt',),
              ('tcb', 'tcg'): ('tdb', 'tt'),
              ('tcb', 'tt'): ('tdb',),
              ('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'),
              ('tcb', 'utc'): ('tdb', 'tt', 'tai'),
              ('tcg', 'tdb'): ('tt',),
              ('tcg', 'ut1'): ('tt', 'tai', 'utc'),
              ('tcg', 'utc'): ('tt', 'tai'),
              ('tdb', 'ut1'): ('tt', 'tai', 'utc'),
              ('tdb', 'utc'): ('tt', 'tai'),
              ('tt', 'ut1'): ('tai', 'utc'),
              ('tt', 'utc'): ('tai',),
              }

# Groups of scales used to classify TimeDelta instances
GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg')
BARYCENTRIC_SCALES = ('tcb', 'tdb')
ROTATIONAL_SCALES = ('ut1',)

# Maps each scale to the group of scales it belongs to
TIME_DELTA_TYPES = dict((scale, scales)
                        for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES,
                                       ROTATIONAL_SCALES) for scale in scales)
TIME_DELTA_SCALES = TIME_DELTA_TYPES.keys()

# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {('tt', 'tai'): None,
                 ('tai', 'tt'): None,
                 ('tcg', 'tt'): -erfa.ELG,
                 ('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG),
                 ('tcg', 'tai'): -erfa.ELG,
                 ('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG),
                 ('tcb', 'tdb'): -erfa.ELB,
                 ('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)}

# triple-level dictionary: kind -> model name -> {function, scales}
SIDEREAL_TIME_MODELS = {
    'mean': {
        'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')},
        'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')},
        'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}},
    'apparent': {
        'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')},
        'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')},
        'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)},
        'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}}
class TimeInfo(MixinInfo):
    """
    Container for meta information like name, description, format. This is
    required when the object is used as a mixin column within a table, but can
    be used as a general way to store meta information.
    """
    attrs_from_parent = set(['unit'])  # unit is read-only and None
    _supports_indexing = True
    # Attributes serialized when representing a Time as a dict
    _represent_as_dict_attrs = ('jd1', 'jd2', 'format', 'scale', 'precision',
                                'in_subfmt', 'out_subfmt', 'location',
                                '_delta_ut1_utc', '_delta_tdb_tt')

    @property
    def unit(self):
        # Time has no unit; kept for the mixin-column interface
        return None

    info_summary_stats = staticmethod(
        data_info_factory(names=MixinInfo._stats,
                          funcs=[getattr(np, stat) for stat in MixinInfo._stats]))
    # When Time has mean, std, min, max methods:
    # funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats])

    def _construct_from_dict(self, map):
        """Rebuild a Time from its dict representation (inverse of the
        serialization defined by ``_represent_as_dict_attrs``)."""
        format = map.pop('format')
        delta_ut1_utc = map.pop('_delta_ut1_utc', None)
        delta_tdb_tt = map.pop('_delta_tdb_tt', None)
        # Construct through the exact 'jd' format, then restore the
        # originally-requested format
        map['format'] = 'jd'
        map['val'] = map.pop('jd1')
        map['val2'] = map.pop('jd2')
        out = self._parent_cls(**map)
        out.format = format

        if delta_ut1_utc is not None:
            out._delta_ut1_utc = delta_ut1_utc
        if delta_tdb_tt is not None:
            out._delta_tdb_tt = delta_tdb_tt

        return out
class TimeDeltaInfo(TimeInfo):
    """
    Meta-information container for TimeDelta: like TimeInfo, but without
    the location and delta attributes in the dict representation.
    """
    _represent_as_dict_attrs = ('jd1', 'jd2', 'format', 'scale')

    def _construct_from_dict(self, map):
        # Rebuild through the exact 'jd' format, then restore the
        # originally-requested format
        fmt = map.pop('format')
        map['format'] = 'jd'
        map['val'] = map.pop('jd1')
        map['val2'] = map.pop('jd2')
        result = self._parent_cls(**map)
        result.format = fmt
        return result
class Time(ShapedLikeNDArray):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str',
'jyear_str']
Parameters
----------
val : sequence, str, number, or `~astropy.time.Time` object
Value(s) to initialize the time or times.
val2 : sequence, str, or number; optional
Value(s) to initialize the time or times.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Subformat for inputting string times
out_subfmt : str, optional
Subformat for outputting string times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as an tuple, it should be able to initialize an
an EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_SCALES
"""List of time scales"""

FORMATS = TIME_FORMATS
"""Dict of time formats"""

# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000

# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __new__(cls, val, val2=None, format=None, scale=None,
            precision=None, in_subfmt=None, out_subfmt=None,
            location=None, copy=False):
    """Create the instance; copy-construction replicates, otherwise
    a bare instance is created and ``__init__`` does the real setup."""
    if isinstance(val, cls):
        # Copy/convert an existing instance of this (sub)class
        return val.replicate(format=format, copy=copy)
    return super(Time, cls).__new__(cls)
def __getnewargs__(self):
    # Pickle support: arguments handed to __new__ when unpickling.
    # NOTE(review): presumably the internal TimeFormat object is accepted
    # as a valid `val` by __new__/__init__ — confirm against the format
    # machinery.
    return (self._time,)
def __init__(self, val, val2=None, format=None, scale=None,
             precision=None, in_subfmt=None, out_subfmt=None,
             location=None, copy=False):
    """
    Initialize the instance (see the class docstring for the parameters).
    Sets ``self.location`` and the internal ``_time`` representation.
    """
    if location is not None:
        from ..coordinates import EarthLocation
        if isinstance(location, EarthLocation):
            self.location = location
        else:
            # Let EarthLocation interpret a tuple of geocentric or
            # geodetic coordinates
            self.location = EarthLocation(*location)
    else:
        self.location = None

    if isinstance(val, Time):
        # Update _time formatting parameters if explicitly specified
        if precision is not None:
            self._time.precision = precision
        if in_subfmt is not None:
            self._time.in_subfmt = in_subfmt
        if out_subfmt is not None:
            self._time.out_subfmt = out_subfmt

        if scale is not None:
            self._set_scale(scale)
    else:
        self._init_from_vals(val, val2, format, scale, copy,
                             precision, in_subfmt, out_subfmt)

    # BUG FIX: the original used ``if self.location and (...)``; taking
    # the truth value of an EarthLocation array with more than one element
    # raises ValueError ("ambiguous truth value") — exactly in the
    # ``size > 1`` case the broadcast check is meant to handle.  Compare
    # against None explicitly instead.
    if self.location is not None and (self.location.size > 1 and
                                      self.location.shape != self.shape):
        try:
            # check the location can be broadcast to self's shape.
            self.location = broadcast_to(self.location, self.shape,
                                         subok=True)
        except Exception:
            raise ValueError('The location with shape {0} cannot be '
                             'broadcast against time with shape {1}. '
                             'Typically, either give a single location or '
                             'one for each time.'
                             .format(self.location.shape, self.shape))
def _init_from_vals(self, val, val2, format, scale, copy,
                    precision=None, in_subfmt=None, out_subfmt=None):
    """
    Set the internal _format, scale, and _time attrs from user
    inputs.  This handles coercion into the correct shapes and
    some basic input validation.
    """
    # Default formatting parameters
    if precision is None:
        precision = 3
    if in_subfmt is None:
        in_subfmt = '*'
    if out_subfmt is None:
        out_subfmt = '*'

    # Coerce val into an array
    val = _make_array(val, copy)

    # If val2 is not None, ensure consistency
    if val2 is not None:
        val2 = _make_array(val2, copy)
        try:
            np.broadcast(val, val2)
        except ValueError:
            raise ValueError('Input val and val2 have inconsistent shape; '
                             'they cannot be broadcast together.')

    if scale is not None:
        if not (isinstance(scale, six.string_types) and
                scale.lower() in self.SCALES):
            raise ScaleValueError("Scale {0} is not in the allowed scales "
                                  "{1}".format(repr(scale),
                                               sorted(self.SCALES)))

    # Parse / convert input values into internal jd1, jd2 based on format
    self._time = self._get_time_fmt(val, val2, format, scale,
                                    precision, in_subfmt, out_subfmt)
    # The canonical format name comes from the TimeFormat instance
    self._format = self._time.name
def _get_time_fmt(self, val, val2, format, scale,
                  precision, in_subfmt, out_subfmt):
    """
    Given the supplied val, val2, format and scale try to instantiate
    the corresponding TimeFormat class to convert the input values into
    the internal jd1 and jd2.

    If format is `None` and the input is a string-type or object array then
    guess available formats and stop when one matches.
    """
    if format is None and val.dtype.kind in ('S', 'U', 'O'):
        # No format given for string/object input: try every format whose
        # class supports guessing (subclasses of TimeUnique)
        formats = [(name, cls) for name, cls in self.FORMATS.items()
                   if issubclass(cls, TimeUnique)]
        err_msg = ('any of the formats where the format keyword is '
                   'optional {0}'.format([name for name, cls in formats]))
        # AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
        # but try to guess it at the end.
        formats.append(('astropy_time', TimeAstropyTime))
    elif not (isinstance(format, six.string_types) and
              format.lower() in self.FORMATS):
        if format is None:
            raise ValueError("No time format was given, and the input is "
                             "not unique")
        else:
            raise ValueError("Format {0} is not one of the allowed "
                             "formats {1}".format(repr(format),
                                                  sorted(self.FORMATS)))
    else:
        # A single, explicitly-requested format
        formats = [(format, self.FORMATS[format])]
        err_msg = 'the format class {0}'.format(format)

    for format, FormatClass in formats:
        try:
            return FormatClass(val, val2, scale, precision, in_subfmt, out_subfmt)
        except UnitConversionError:
            # A unit problem is a real error, not a wrong-format guess
            raise
        except (ValueError, TypeError):
            # This candidate format does not match; try the next one
            pass
    else:
        # for/else: no candidate format accepted the input
        raise ValueError('Input values did not match {0}'.format(err_msg))
@classmethod
def now(cls):
    """
    Creates a new object corresponding to the instant in time this
    method is called.

    .. note::
        "Now" is determined using the `~datetime.datetime.utcnow`
        function, so its accuracy and precision is determined by that
        function. Generally that means it is set by the accuracy of
        your system clock.

    Returns
    -------
    nowtime
        A new `Time` object (or a subclass of `Time` if this is called from
        such a subclass) at the current time.
    """
    # Read the clock first so the timestamp is as close to "now" as
    # possible, before any construction overhead
    current = datetime.utcnow()
    return cls(val=current, format='datetime', scale='utc')
# Descriptor supplying Table-mixin meta information (name, description,
# format, serialization) for Time columns
info = TimeInfo()
@property
def format(self):
    """
    Get or set time format.

    The format defines the way times are represented when accessed via the
    ``.value`` attribute.  By default it is the same as the format used for
    initializing the `Time` instance, but it can be set to any key of
    `Time.FORMATS`.
    """
    return self._format

@format.setter
def format(self, format):
    """Set time format"""
    if format not in self.FORMATS:
        raise ValueError('format must be one of {0}'
                         .format(list(self.FORMATS)))
    format_cls = self.FORMATS[format]

    # Reset the output subformat to the wildcard when the new format
    # does not provide the current one
    if hasattr(format_cls, 'subfmts'):
        valid_subfmts = set(subfmt[0] for subfmt in format_cls.subfmts)
        if self.out_subfmt not in valid_subfmts:
            self.out_subfmt = '*'

    # Rebuild the internal time representation in the new format
    self._time = format_cls(self._time.jd1, self._time.jd2,
                            self._time._scale, self.precision,
                            in_subfmt=self.in_subfmt,
                            out_subfmt=self.out_subfmt,
                            from_jd=True)
    self._format = format
def __repr__(self):
    """Unambiguous representation: scale, format and value(s)."""
    return ("<{0} object: scale='{1}' format='{2}' value={3}>"
            .format(type(self).__name__, self.scale, self.format,
                    getattr(self, self.format)))

def __str__(self):
    """The time value(s) in the current format, as a string."""
    return str(getattr(self, self.format))

@property
def scale(self):
    """Time scale"""
    return self._time.scale
def _set_scale(self, scale):
    """
    This is the key routine that actually does time scale conversions.
    This is not public and not connected to the read-only scale
    property.  ``scale`` must be one of ``self.SCALES``.
    """

    if scale == self.scale:
        # Already in the requested scale: nothing to do
        return
    if scale not in self.SCALES:
        raise ValueError("Scale {0} is not in the allowed scales {1}"
                         .format(repr(scale), sorted(self.SCALES)))

    # Determine the chain of scale transformations to get from the current
    # scale to the new scale.  MULTI_HOPS contains a dict of all
    # transformations (xforms) that require intermediate xforms.
    # The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
    xform = (self.scale, scale)
    xform_sort = tuple(sorted(xform))
    multi = MULTI_HOPS.get(xform_sort, ())
    xforms = xform_sort[:1] + multi + xform_sort[-1:]
    # If we made the reverse xform then reverse it now.
    if xform_sort != xform:
        xforms = tuple(reversed(xforms))

    # Transform the jd1,2 pairs through the chain of scale xforms.
    jd1, jd2 = self._time.jd1, self._time.jd2
    for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
        # Some xforms require an additional delta_ argument that is
        # provided through Time methods.  These values may be supplied by
        # the user or computed based on available approximations.  The
        # get_delta_ methods are available for only one combination of
        # sys1, sys2 though the property applies for both xform directions.
        args = [jd1, jd2]
        for sys12 in ((sys1, sys2), (sys2, sys1)):
            dt_method = '_get_delta_{0}_{1}'.format(*sys12)
            try:
                get_dt = getattr(self, dt_method)
            except AttributeError:
                pass
            else:
                args.append(get_dt(jd1, jd2))
                break

        # erfa conversion routines are named after the scale pair,
        # e.g. 'tai' + 'utc' -> erfa.taiutc
        conv_func = getattr(erfa, sys1 + sys2)
        jd1, jd2 = conv_func(*args)

    # Rebuild the internal time representation in the new scale
    self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision,
                                           self.in_subfmt, self.out_subfmt,
                                           from_jd=True)
@property
def precision(self):
    """
    Decimal precision when outputting seconds as floating point (int
    value between 0 and 9 inclusive).
    """
    return self._time.precision

@precision.setter
def precision(self, val):
    # Only integers in the supported digit range are accepted
    if not (isinstance(val, int) and 0 <= val <= 9):
        raise ValueError('precision attribute must be an int between '
                         '0 and 9')
    self._time.precision = val
    # Cached string representations are no longer valid
    del self.cache
@property
def in_subfmt(self):
    """
    Unix wildcard pattern selecting the subformats accepted when
    parsing string input times.
    """
    return self._time.in_subfmt

@in_subfmt.setter
def in_subfmt(self, val):
    # Only plain strings are valid wildcard patterns
    if not isinstance(val, six.string_types):
        raise ValueError('in_subfmt attribute must be a string')
    self._time.in_subfmt = val
    # Cached conversions are no longer valid
    del self.cache

@property
def out_subfmt(self):
    """
    Unix wildcard pattern selecting the subformats used when
    outputting times.
    """
    return self._time.out_subfmt

@out_subfmt.setter
def out_subfmt(self, val):
    # Only plain strings are valid wildcard patterns
    if not isinstance(val, six.string_types):
        raise ValueError('out_subfmt attribute must be a string')
    self._time.out_subfmt = val
    # Cached conversions are no longer valid
    del self.cache
@property
def shape(self):
    """The shape of the time instances.

    Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
    tuple.  Note that if different instances share some but not all
    underlying data, setting the shape of one instance can make the other
    instance unusable.  Hence, it is strongly recommended to get new,
    reshaped instances with the ``reshape`` method.

    Raises
    ------
    AttributeError
        If the shape of the ``jd1``, ``jd2``, ``location``,
        ``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
        without the arrays being copied.  For these cases, use the
        `Time.reshape` method (which copies any arrays that cannot be
        reshaped in-place).
    """
    # jd1 always carries the instance shape; all other attributes are
    # kept broadcastable against it
    return self._time.jd1.shape
@shape.setter
def shape(self, shape):
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
for attr in ('jd1', 'jd2', '_delta_ut1_utc', '_delta_tdb_tt',
'location'):
val = getattr(self, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
return value if self._time.jd1.shape else value.item()
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
return self._shaped_like_input(self._time.jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
return self._shaped_like_input(self._time.jd2)
@property
def value(self):
"""Time value(s) in current format"""
# The underlying way to get the time values for the current format is:
# self._shaped_like_input(self._time.to_value(parent=self))
# This is done in __getattr__. By calling getattr(self, self.format)
# the ``value`` attribute is cached.
return getattr(self, self.format)
    def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None):
        """Light travel time correction to the barycentre or heliocentre.
        The frame transformations used to calculate the location of the solar
        system barycentre and the heliocentre rely on the erfa routine epv00,
        which is consistent with the JPL DE405 ephemeris to an accuracy of
        11.2 km, corresponding to a light travel time of 4 microseconds.
        The routine assumes the source(s) are at large distance, i.e., neglects
        finite-distance effects.
        Parameters
        ----------
        skycoord : `~astropy.coordinates.SkyCoord`
            The sky location to calculate the correction for.
        kind : str, optional
            ``'barycentric'`` (default) or ``'heliocentric'``
        location : `~astropy.coordinates.EarthLocation`, optional
            The location of the observatory to calculate the correction for.
            If no location is given, the ``location`` attribute of the Time
            object is used
        ephemeris : str, optional
            Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
            use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
            For more information, see `~astropy.coordinates.solar_system_ephemeris`.
        Returns
        -------
        time_offset : `~astropy.time.TimeDelta`
            The time offset between the barycentre or Heliocentre and Earth,
            in TDB seconds. Should be added to the original time to get the
            time in the Solar system barycentre or the Heliocentre.
        """
        # Validate inputs before doing any (expensive) frame transformations.
        if kind.lower() not in ('barycentric', 'heliocentric'):
            raise ValueError("'kind' parameter must be one of 'heliocentric' "
                             "or 'barycentric'")
        if location is None:
            if self.location is None:
                raise ValueError('An EarthLocation needs to be set or passed '
                                 'in to calculate bary- or heliocentric '
                                 'corrections')
            location = self.location
        # Deferred import to avoid a circular dependency with coordinates.
        from ..coordinates import (UnitSphericalRepresentation, CartesianRepresentation,
                                   HCRS, ICRS, GCRS, solar_system_ephemeris)
        # ensure sky location is ICRS compatible
        if not skycoord.is_transformable_to(ICRS()):
            raise ValueError("Given skycoord is not transformable to the ICRS")
        # get location of observatory in ITRS coordinates at this Time
        try:
            itrs = location.get_itrs(obstime=self)
        except Exception:
            raise ValueError("Supplied location does not have a valid `get_itrs` method")
        with solar_system_ephemeris.set(ephemeris):
            if kind.lower() == 'heliocentric':
                # convert to heliocentric coordinates, aligned with ICRS
                cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
            else:
                # first we need to convert to GCRS coordinates with the correct
                # obstime, since ICRS coordinates have no frame time
                gcrs_coo = itrs.transform_to(GCRS(obstime=self))
                # convert to barycentric (BCRS) coordinates, aligned with ICRS
                cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
        # get unit ICRS vector to star
        spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation).
                represent_as(CartesianRepresentation).xyz)
        # Move X,Y,Z to last dimension, to enable possible broadcasting below.
        cpos = np.rollaxis(cpos, 0, cpos.ndim)
        spos = np.rollaxis(spos, 0, spos.ndim)
        # calculate light travel time correction (projection of the observer
        # position onto the unit vector to the source, divided by c)
        tcor_val = (spos * cpos).sum(axis=-1) / const.c
        return TimeDelta(tcor_val, scale='tdb')
    def sidereal_time(self, kind, longitude=None, model=None):
        """Calculate sidereal time.
        Parameters
        ----------
        kind : str
            ``'mean'`` or ``'apparent'``, i.e., accounting for precession
            only, or also for nutation.
        longitude : `~astropy.units.Quantity`, `str`, or `None`; optional
            The longitude on the Earth at which to compute the sidereal time.
            Can be given as a `~astropy.units.Quantity` with angular units
            (or an `~astropy.coordinates.Angle` or
            `~astropy.coordinates.Longitude`), or as a name of an
            observatory (currently, only ``'greenwich'`` is supported,
            equivalent to 0 deg). If `None` (default), the ``lon`` attribute of
            the Time object is used.
        model : str or `None`; optional
            Precession (and nutation) model to use. The available ones are:
            - {0}: {1}
            - {2}: {3}
            If `None` (default), the last (most recent) one from the appropriate
            list above is used.
        Returns
        -------
        sidereal time : `~astropy.coordinates.Longitude`
            Sidereal time as a quantity with units of hourangle
        """ # docstring is formatted below
        from ..coordinates import Longitude
        if kind.lower() not in SIDEREAL_TIME_MODELS.keys():
            raise ValueError('The kind of sidereal time has to be {0}'.format(
                ' or '.join(sorted(SIDEREAL_TIME_MODELS.keys()))))
        available_models = SIDEREAL_TIME_MODELS[kind.lower()]
        if model is None:
            # Default to the most recent model available for this kind.
            model = sorted(available_models.keys())[-1]
        else:
            if model.upper() not in available_models:
                raise ValueError(
                    'Model {0} not implemented for {1} sidereal time; '
                    'available models are {2}'
                    .format(model, kind, sorted(available_models.keys())))
        if longitude is None:
            if self.location is None:
                raise ValueError('No longitude is given but the location for '
                                 'the Time object is not set.')
            longitude = self.location.longitude
        elif longitude == 'greenwich':
            longitude = Longitude(0., u.degree,
                                  wrap_angle=180.*u.degree)
        else:
            # sanity check on input
            longitude = Longitude(longitude, u.degree,
                                  wrap_angle=180.*u.degree)
        gst = self._erfa_sidereal_time(available_models[model.upper()])
        return Longitude(gst + longitude, u.hourangle)
    # Fill the {0}-{3} placeholders above with the available model names
    # (guarded so it also works if docstrings are stripped with -OO).
    if isinstance(sidereal_time.__doc__, six.string_types):
        sidereal_time.__doc__ = sidereal_time.__doc__.format(
            'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()),
            'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys()))
def _erfa_sidereal_time(self, model):
"""Calculate a sidereal time using a IAU precession/nutation model."""
from ..coordinates import Longitude
erfa_function = model['function']
erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
for scale in model['scales']
for jd_part in ('jd1', 'jd2')]
sidereal_time = erfa_function(*erfa_parameters)
return Longitude(sidereal_time, u.radian).to(u.hourangle)
def copy(self, format=None):
"""
Return a fully independent copy the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply('copy', format=format)
def replicate(self, format=None, copy=False):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply('copy' if copy else 'replicate', format=format)
    def _apply(self, method, *args, **kwargs):
        """Create a new time object, possibly applying a method to the arrays.
        Parameters
        ----------
        method : str or callable
            If string, can be 'replicate' or the name of a relevant
            `~numpy.ndarray` method. In the former case, a new time instance
            with unchanged internal data is created, while in the latter the
            method is applied to the internal ``jd1`` and ``jd2`` arrays, as
            well as to possible ``location``, ``_delta_ut1_utc``, and
            ``_delta_tdb_tt`` arrays.
            If a callable, it is directly applied to the above arrays.
            Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
        args : tuple
            Any positional arguments for ``method``.
        kwargs : dict
            Any keyword arguments for ``method``.  If the ``format`` keyword
            argument is present, this will be used as the Time format of the
            replica.
        Examples
        --------
        Some ways this is used internally::
            copy : ``_apply('copy')``
            replicate : ``_apply('replicate')``
            reshape : ``_apply('reshape', new_shape)``
            index or slice : ``_apply('__getitem__', item)``
            broadcast : ``_apply(np.broadcast, shape=new_shape)``
        """
        new_format = kwargs.pop('format', None)
        if new_format is None:
            new_format = self.format
        if callable(method):
            apply_method = lambda array: method(array, *args, **kwargs)
        else:
            if method == 'replicate':
                # 'replicate' shares the underlying arrays unchanged.
                apply_method = None
            else:
                apply_method = operator.methodcaller(method, *args, **kwargs)
        jd1, jd2 = self._time.jd1, self._time.jd2
        if apply_method:
            jd1 = apply_method(jd1)
            jd2 = apply_method(jd2)
        # Create the new instance without running __init__; the internal
        # time is first built as plain JD and converted to the requested
        # format at the end.
        tm = super(Time, self.__class__).__new__(self.__class__)
        tm._time = TimeJD(jd1, jd2, self.scale, self.precision,
                          self.in_subfmt, self.out_subfmt, from_jd=True)
        # Optional ndarray attributes.
        for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location',
                     'precision', 'in_subfmt', 'out_subfmt'):
            try:
                val = getattr(self, attr)
            except AttributeError:
                continue
            if apply_method:
                # Apply the method to any value arrays (though skip if there is
                # only a single element and the method would return a view,
                # since in that case nothing would change).
                if getattr(val, 'size', 1) > 1:
                    val = apply_method(val)
                elif method == 'copy' or method == 'flatten':
                    # flatten should copy also for a single element array, but
                    # we cannot use it directly for array scalars, since it
                    # always returns a one-dimensional array. So, just copy.
                    val = copy.copy(val)
            setattr(tm, attr, val)
        # Copy other 'info' attr only if it has actually been defined.
        # See PR #3898 for further explanation and justification, along
        # with Quantity.__array_finalize__
        if 'info' in self.__dict__:
            tm.info = self.info
        # Make the new internal _time object corresponding to the format
        # in the copy.  If the format is unchanged this process is lightweight
        # and does not create any new arrays.
        if new_format not in tm.FORMATS:
            raise ValueError('format must be one of {0}'
                             .format(list(tm.FORMATS)))
        NewFormat = tm.FORMATS[new_format]
        tm._time = NewFormat(tm._time.jd1, tm._time.jd2,
                             tm._time._scale, tm.precision,
                             tm.in_subfmt, tm.out_subfmt,
                             from_jd=True)
        tm._format = new_format
        return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
    def _advanced_index(self, indices, axis=None, keepdims=False):
        """Turn argmin, argmax output into an advanced index.
        Argmin, argmax output contains indices along a given axis in an array
        shaped like the other dimensions. To use this to get values at the
        correct location, a list is constructed in which the other axes are
        indexed sequentially. For ``keepdims`` is ``True``, the net result is
        the same as constructing an index grid with ``np.ogrid`` and then
        replacing the ``axis`` item with ``indices`` with its shaped expanded
        at ``axis``. For ``keepdims`` is ``False``, the result is the same but
        with the ``axis`` dimension removed from all list entries.
        For ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
        Parameters
        ----------
        indices : array
            Output of argmin or argmax.
        axis : int or None
            axis along which argmin or argmax was used.
        keepdims : bool
            Whether to construct indices that keep or remove the axis along
            which argmin or argmax was used. Default: ``False``.
        Returns
        -------
        advanced_index : list of arrays
            Suitable for use as an advanced index.
        """
        if axis is None:
            return np.unravel_index(indices, self.shape)
        ndim = self.ndim
        if axis < 0:
            # Normalize negative axis to its positive equivalent.
            axis = axis + ndim
        if keepdims and indices.ndim < self.ndim:
            # Re-insert the reduced axis so ``indices`` broadcasts like an
            # ogrid entry would.
            indices = np.expand_dims(indices, axis)
        # For every axis other than ``axis``, build an arange reshaped so
        # that all index arrays broadcast against each other (ogrid-style).
        return [(indices if i == axis else np.arange(s).reshape(
            (1,)*(i if keepdims or i < axis else i-1) + (s,) +
            (1,)*(ndim-i-(1 if keepdims or i > axis else 2))))
                for i, s in enumerate(self.shape)]
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# first get the minimum at normal precision.
jd = self.jd1 + self.jd2
approx = jd.min(axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (self.jd1 - approx) + self.jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd = self.jd1 + self.jd2
approx = jd.max(axis, keepdims=True)
dt = (self.jd1 - approx) + self.jd2
return dt.argmax(axis, out)
    def argsort(self, axis=-1):
        """Returns the indices that would sort the time array.
        This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
        that the full precision given by the two doubles ``jd1`` and ``jd2``
        is used, and that corresponding attributes are copied. Internally,
        it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
        """
        # Split each time into an approximate jd and a tiny remainder.
        # In np.lexsort the *last* key is the primary one, so jd_approx
        # determines the order and jd_remainder only breaks ties at full
        # precision.
        jd_approx = self.jd
        jd_remainder = (self - self.__class__(jd_approx, format='jd')).jd
        if axis is None:
            return np.lexsort((jd_remainder.ravel(), jd_approx.ravel()))
        else:
            return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return (self.max(axis, keepdims=keepdims) -
self.min(axis, keepdims=keepdims))
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis,
keepdims=True)]
    @lazyproperty
    def cache(self):
        """
        Return the cache associated with this instance.
        """
        # Lazily-created two-level cache, keyed first by category (e.g.
        # 'scale', 'format' -- see __getattr__) and then by name.  The whole
        # cache is invalidated with ``del self.cache``, after which the
        # lazyproperty recreates it empty on next access.
        return defaultdict(dict)
    def __getattr__(self, attr):
        """
        Get dynamic attributes to output format or do timescale conversion.
        """
        if attr in self.SCALES and self.scale is not None:
            # Scale conversion (e.g. ``t.tai``); the converted instance is
            # cached so repeated access is cheap.
            cache = self.cache['scale']
            if attr not in cache:
                if attr == self.scale:
                    tm = self
                else:
                    tm = self.replicate()
                    tm._set_scale(attr)
                cache[attr] = tm
            return cache[attr]
        elif attr in self.FORMATS:
            # Format conversion (e.g. ``t.iso``); the computed value is
            # cached, which also backs the ``value`` property.
            cache = self.cache['format']
            if attr not in cache:
                if attr == self.format:
                    tm = self
                else:
                    tm = self.replicate(format=attr)
                value = tm._shaped_like_input(tm._time.to_value(parent=tm))
                cache[attr] = value
            return cache[attr]
        elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
            # A valid scale name that this instance cannot convert to.
            if self.scale is None:
                raise ScaleValueError("Cannot convert TimeDelta with "
                                      "undefined scale to any defined scale.")
            else:
                raise ScaleValueError("Cannot convert {0} with scale "
                                      "'{1}' to scale '{2}'"
                                      .format(self.__class__.__name__,
                                              self.scale, attr))
        else:
            # Should raise AttributeError
            return self.__getattribute__(attr)
@override__dir__
def __dir__(self):
result = set(self.SCALES)
result.update(self.FORMATS)
return result
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError('Attribute shape must match or be '
'broadcastable to that of Time object. '
'Typically, give either a single value or '
'one for each time.')
return val
    def get_delta_ut1_utc(self, iers_table=None, return_status=False):
        """Find UT1 - UTC differences by interpolating in IERS Table.
        Parameters
        ----------
        iers_table : ``astropy.utils.iers.IERS`` table, optional
            Table containing UT1-UTC differences from IERS Bulletins A
            and/or B. If `None`, use default version (see
            ``astropy.utils.iers``)
        return_status : bool
            Whether to return status values. If `False` (default), iers
            raises `IndexError` if any time is out of the range
            covered by the IERS table.
        Returns
        -------
        ut1_utc : float or float array
            UT1-UTC, interpolated in IERS Table
        status : int or int array
            Status values (if ``return_status=True``)::
            ``astropy.utils.iers.FROM_IERS_B``
            ``astropy.utils.iers.FROM_IERS_A``
            ``astropy.utils.iers.FROM_IERS_A_PREDICTION``
            ``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
            ``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
        Notes
        -----
        In normal usage, UT1-UTC differences are calculated automatically
        on the first instance ut1 is needed.
        Examples
        --------
        To check in code whether any times are before the IERS table range::
            >>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
            >>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
            >>> delta, status = t.get_delta_ut1_utc(return_status=True)
            >>> status == TIME_BEFORE_IERS_RANGE
            array([ True, False], dtype=bool)
        """
        if iers_table is None:
            # Fall back to the package-default IERS table (deferred import
            # to avoid circularity).
            from ..utils.iers import IERS
            iers_table = IERS.open()
        return iers_table.ut1_utc(self.utc, return_status=return_status)
    # Property for ERFA DUT arg = UT1 - UTC
    def _get_delta_ut1_utc(self, jd1=None, jd2=None):
        """
        Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
        jd2 args because it gets called that way when converting time scales.
        If delta_ut1_utc is not yet set, this will interpolate it from the
        IERS table.
        """
        # Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
        # seconds. It is obtained from tables published by the IERS.
        if not hasattr(self, '_delta_ut1_utc'):
            from ..utils.iers import IERS_Auto
            iers_table = IERS_Auto.open()
            # jd1, jd2 are normally set (see above), except if delta_ut1_utc
            # is accessed directly; ensure we behave as expected for that case
            if jd1 is None:
                self_utc = self.utc
                jd1, jd2 = self_utc.jd1, self_utc.jd2
                scale = 'utc'
            else:
                scale = self.scale
            # interpolate UT1-UTC in IERS table
            delta = iers_table.ut1_utc(jd1, jd2)
            # if we interpolated using UT1 jds, we may be off by one
            # second near leap seconds (and very slightly off elsewhere)
            if scale == 'ut1':
                # calculate UTC using the offset we got; the ERFA routine
                # is tolerant of leap seconds, so will do this right
                jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta)
                # calculate a better estimate using the nearly correct UTC
                delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
            self._set_delta_ut1_utc(delta)
        return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
val = self._match_shape(val)
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
self._delta_ut1_utc = val
del self.cache
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
    # Property for ERFA DTR arg = TDB - TT
    def _get_delta_tdb_tt(self, jd1=None, jd2=None):
        """
        Get ERFA DTR arg = TDB - TT.  Takes optional jd1/jd2 args because it
        is called that way during time scale conversions; when they are not
        given, the instance itself must be in the TT or TDB scale.
        """
        if not hasattr(self, '_delta_tdb_tt'):
            # If jd1 and jd2 are not provided (which is the case for property
            # attribute access) then require that the time scale is TT or TDB.
            # Otherwise the computations here are not correct.
            if jd1 is None or jd2 is None:
                if self.scale not in ('tt', 'tdb'):
                    raise ValueError('Accessing the delta_tdb_tt attribute '
                                     'is only possible for TT or TDB time '
                                     'scales')
                else:
                    jd1 = self._time.jd1
                    jd2 = self._time.jd2
            # First go from the current input time (which is either
            # TDB or TT) to an approximate UT1. Since TT and TDB are
            # pretty close (few msec?), assume TT. Similarly, since the
            # UT1 terms are very small, use UTC instead of UT1.
            njd1, njd2 = erfa.tttai(jd1, jd2)
            njd1, njd2 = erfa.taiutc(njd1, njd2)
            # subtract 0.5, so UT is fraction of the day from midnight
            ut = day_frac(njd1 - 0.5, njd2)[1]
            if self.location is None:
                # Default to geodetic (0., 0., 0.) when no location is set.
                from ..coordinates import EarthLocation
                location = EarthLocation.from_geodetic(0., 0., 0.)
            else:
                location = self.location
            # Geodetic params needed for d_tdb_tt()
            lon = location.longitude
            rxy = np.hypot(location.x, location.y)
            z = location.z
            self._delta_tdb_tt = erfa.dtdb(
                jd1, jd2, ut, lon.to(u.radian).value,
                rxy.to(u.km).value, z.to(u.km).value)
        return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
val = self._match_shape(val)
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
self._delta_tdb_tt = val
del self.cache
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
    def __sub__(self, other):
        """Subtract a Time or TimeDelta from this Time.
        ``Time - TimeDelta`` yields a Time; ``Time - Time`` yields a
        TimeDelta.  Non-Time operands are first coerced to TimeDelta.
        """
        if not isinstance(other, Time):
            try:
                other = TimeDelta(other)
            except Exception:
                raise OperandTypeError(self, other, '-')
        # Tdelta - something is dealt with in TimeDelta, so we have
        # T - Tdelta = T
        # T - T = Tdelta
        other_is_delta = isinstance(other, TimeDelta)
        # we need a constant scale to calculate, which is guaranteed for
        # TimeDelta, but not for Time (which can be UTC)
        if other_is_delta: # T - Tdelta
            out = self.replicate()
            if self.scale in other.SCALES:
                if other.scale not in (out.scale, None):
                    other = getattr(other, out.scale)
            else:
                # Fall back to a scale both sides can use (tai by default);
                # we convert back to self.scale at the end.
                out._set_scale(other.scale if other.scale is not None
                               else 'tai')
                # remove attributes that are invalidated by changing time
                for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
                    if hasattr(out, attr):
                        delattr(out, attr)
        else: # T - T
            self_time = (self._time if self.scale in TIME_DELTA_SCALES
                         else self.tai._time)
            # set up TimeDelta, subtraction to be done shortly
            out = TimeDelta(self_time.jd1, self_time.jd2, format='jd',
                            scale=self_time.scale)
            if other.scale != out.scale:
                other = getattr(other, out.scale)
        jd1 = out._time.jd1 - other._time.jd1
        jd2 = out._time.jd2 - other._time.jd2
        # Renormalize so jd2 stays a proper day fraction (keeps precision).
        out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
        if other_is_delta:
            # Go back to left-side scale if needed
            out._set_scale(self.scale)
        return out
    def __add__(self, other):
        """Add a TimeDelta to this Time, returning a Time.
        ``Time + Time`` is an error; non-Time operands are first coerced
        to TimeDelta.
        """
        if not isinstance(other, Time):
            try:
                other = TimeDelta(other)
            except Exception:
                raise OperandTypeError(self, other, '+')
        # Tdelta + something is dealt with in TimeDelta, so we have
        # T + Tdelta = T
        # T + T = error
        if not isinstance(other, TimeDelta):
            raise OperandTypeError(self, other, '+')
        # ideally, we calculate in the scale of the Time item, since that is
        # what we want the output in, but this may not be possible, since
        # TimeDelta cannot be converted arbitrarily
        out = self.replicate()
        if self.scale in other.SCALES:
            if other.scale not in (out.scale, None):
                other = getattr(other, out.scale)
        else:
            # Fall back to a scale both sides support (tai by default); we
            # convert back to self.scale at the end.
            out._set_scale(other.scale if other.scale is not None else 'tai')
            # remove attributes that are invalidated by changing time
            for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
                if hasattr(out, attr):
                    delattr(out, attr)
        jd1 = out._time.jd1 + other._time.jd1
        jd2 = out._time.jd2 + other._time.jd2
        # Renormalize so jd2 stays a proper day fraction (keeps precision).
        out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
        # Go back to left-side scale if needed
        out._set_scale(self.scale)
        return out
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
    def _time_difference(self, other, op=None):
        """If other is of same class as self, return difference in self.scale.
        Otherwise, raise OperandTypeError.
        ``op`` is the operator symbol, used only for the error message.
        """
        if other.__class__ is not self.__class__:
            try:
                # Try to coerce, e.g. a string or number, to our own class.
                other = self.__class__(other, scale=self.scale)
            except Exception:
                raise OperandTypeError(self, other, op)
        # The scales must be inter-convertible for a meaningful comparison.
        if(self.scale is not None and self.scale not in other.SCALES or
           other.scale is not None and other.scale not in self.SCALES):
            raise TypeError("Cannot compare TimeDelta instances with scales "
                            "'{0}' and '{1}'".format(self.scale, other.scale))
        if self.scale is not None and other.scale is not None:
            other = getattr(other, self.scale)
        # Difference computed at full two-double precision.
        return (self.jd1 - other.jd1) + (self.jd2 - other.jd2)
def __lt__(self, other):
return self._time_difference(other, '<') < 0.
def __le__(self, other):
return self._time_difference(other, '<=') <= 0.
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
try:
diff = self._time_difference(other)
except OperandTypeError:
return False
return diff == 0.
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
try:
diff = self._time_difference(other)
except OperandTypeError:
return True
return diff != 0.
def __gt__(self, other):
return self._time_difference(other, '>') > 0.
def __ge__(self, other):
return self._time_difference(other, '>=') >= 0.
    def to_datetime(self, timezone=None):
        # Convert via the 'datetime' format; ``timezone`` is forwarded to
        # TimeDatetime.to_value (presumably a tzinfo instance -- see the
        # docstring reused below for the authoritative contract).
        tm = self.replicate(format='datetime')
        return tm._shaped_like_input(tm._time.to_value(timezone))
    # Reuse the detailed docstring from the underlying format class.
    to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDelta(Time):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
Parameters
----------
val : numpy ndarray, list, str, number, or `~astropy.time.TimeDelta` object
Data to initialize table.
val2 : numpy ndarray, list, str, or number; optional
Data to initialize table.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
    def __init__(self, val, val2=None, format=None, scale=None, copy=False):
        if isinstance(val, TimeDelta):
            # NOTE(review): the internal jd1/jd2 appear to be set up earlier
            # (via the Time.__new__/copy machinery, presumably) -- here only
            # an optional scale conversion is applied; confirm against
            # Time.__new__.
            if scale is not None:
                self._set_scale(scale)
        else:
            if format is None:
                # Without an explicit format, only Quantities with time
                # units are accepted; convert to days and treat as 'jd'.
                try:
                    val = val.to(u.day)
                    if val2 is not None:
                        val2 = val2.to(u.day)
                except Exception:
                    raise ValueError('Only Quantities with Time units can '
                                     'be used to initiate {0} instances .'
                                     .format(self.__class__.__name__))
                format = 'jd'
            self._init_from_vals(val, val2, format, scale, copy)
            if scale is not None:
                # Restrict compatible scales to the group of the given scale
                # (geocentric / barycentric / rotational).
                self.SCALES = TIME_DELTA_TYPES[scale]
def replicate(self, *args, **kwargs):
out = super(TimeDelta, self).replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def _set_scale(self, scale):
    """
    This is the key routine that actually does time scale conversions.
    This is not public and not connected to the read-only scale property.
    """
    if scale == self.scale:
        return
    if scale not in self.SCALES:
        raise ValueError("Scale {0} is not in the allowed scales {1}"
                         .format(repr(scale), sorted(self.SCALES)))

    # For TimeDelta, there can only be a change in scale factor,
    # which is written as time2 - time1 = scale_offset * time1
    scale_offset = SCALE_OFFSETS[(self.scale, scale)]
    if scale_offset is None:
        # Same rate, different label: just rename the scale.
        self._time.scale = scale
    else:
        # Apply the rate change while preserving precision: day_frac keeps
        # the (jd1, jd2) two-double representation normalized.
        jd1, jd2 = self._time.jd1, self._time.jd2
        offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
        self._time = self.FORMATS[self.format](
            jd1 + offset1, jd2 + offset2, scale,
            self.precision, self.in_subfmt,
            self.out_subfmt, from_jd=True)
def __add__(self, other):
    """Add another `TimeDelta` (or something convertible to one)."""
    # only deal with TimeDelta + TimeDelta
    if isinstance(other, Time):
        if not isinstance(other, TimeDelta):
            # TimeDelta + Time: delegate to Time.__add__, which knows
            # how to add a delta to an absolute time.
            return other.__add__(self)
    else:
        try:
            other = TimeDelta(other)
        except Exception:
            raise OperandTypeError(self, other, '+')

    # the scales should be compatible (e.g., cannot convert TDB to TAI)
    if(self.scale is not None and self.scale not in other.SCALES or
       other.scale is not None and other.scale not in self.SCALES):
        raise TypeError("Cannot add TimeDelta instances with scales "
                        "'{0}' and '{1}'".format(self.scale, other.scale))

    # adjust the scale of other if the scale of self is set (or no scales)
    if self.scale is not None or other.scale is None:
        out = self.replicate()
        if other.scale is not None:
            other = getattr(other, self.scale)
    else:
        out = other.replicate()

    jd1 = self._time.jd1 + other._time.jd1
    jd2 = self._time.jd2 + other._time.jd2

    # Renormalize so jd2 stays a small fraction relative to jd1.
    out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)

    return out
def __sub__(self, other):
    """Subtract another `TimeDelta` (or something convertible to one)."""
    # only deal with TimeDelta - TimeDelta
    if isinstance(other, Time):
        if not isinstance(other, TimeDelta):
            # TimeDelta - Time is not meaningful.
            raise OperandTypeError(self, other, '-')
    else:
        try:
            other = TimeDelta(other)
        except Exception:
            raise OperandTypeError(self, other, '-')

    # the scales should be compatible (e.g., cannot convert TDB to TAI)
    if(self.scale is not None and self.scale not in other.SCALES or
       other.scale is not None and other.scale not in self.SCALES):
        raise TypeError("Cannot subtract TimeDelta instances with scales "
                        "'{0}' and '{1}'".format(self.scale, other.scale))

    # adjust the scale of other if the scale of self is set (or no scales)
    if self.scale is not None or other.scale is None:
        out = self.replicate()
        if other.scale is not None:
            other = getattr(other, self.scale)
    else:
        out = other.replicate()

    jd1 = self._time.jd1 - other._time.jd1
    jd2 = self._time.jd2 - other._time.jd2

    # Renormalize so jd2 stays a small fraction relative to jd1.
    out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)

    return out
def __neg__(self):
    """Negation of a `TimeDelta` object."""
    negated = self.copy()
    # Negate both doubles of the (jd1, jd2) pair independently.
    negated._time.jd1 = -self._time.jd1
    negated._time.jd2 = -self._time.jd2
    return negated
def __abs__(self):
    """Absolute value of a `TimeDelta` object."""
    jd1, jd2 = self._time.jd1, self._time.jd2
    # Flip the sign of both doubles wherever the total is negative.
    flip = jd1 + jd2 < 0
    result = self.copy()
    result._time.jd1 = np.where(flip, -jd1, jd1)
    result._time.jd2 = np.where(flip, -jd2, jd2)
    return result
def __mul__(self, other):
    """Multiplication of `TimeDelta` objects by numbers/arrays."""
    # check needed since otherwise the self.jd1 * other multiplication
    # would enter here again (via __rmul__)
    if isinstance(other, Time):
        raise OperandTypeError(self, other, '*')

    try:  # convert to straight float if dimensionless quantity
        other = other.to(1)
    except Exception:
        pass

    try:
        jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other)
        out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
    except Exception as err:  # try downgrading self to a quantity
        try:
            return self.to(u.day) * other
        except Exception:
            # Re-raise the original failure, not the downgrade failure.
            raise err

    if self.format != 'jd':
        out = out.replicate(format=self.format)
    return out
def __rmul__(self, other):
    """Multiplication of numbers/arrays with `TimeDelta` objects."""
    # Multiplication is commutative here, so reuse __mul__ directly.
    return self.__mul__(other)
def __div__(self, other):
    """Division of `TimeDelta` objects by numbers/arrays."""
    # Python 2 classic division: delegate to true division.
    return self.__truediv__(other)
def __rdiv__(self, other):
    """Division by `TimeDelta` objects of numbers/arrays."""
    # Python 2 classic division: delegate to true division.
    return self.__rtruediv__(other)
def __truediv__(self, other):
    """Division of `TimeDelta` objects by numbers/arrays."""
    # cannot do __mul__(1./other) as that loses precision
    try:  # convert to straight float if dimensionless quantity
        other = other.to(1)
    except Exception:
        pass

    try:
        jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other)
        out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
    except Exception as err:  # try downgrading self to a quantity
        try:
            return self.to(u.day) / other
        except Exception:
            # Re-raise the original failure, not the downgrade failure.
            raise err

    if self.format != 'jd':
        out = out.replicate(format=self.format)
    return out
def __rtruediv__(self, other):
    """Division by `TimeDelta` objects of numbers/arrays."""
    # Downgrade self to a Quantity in days and divide it into `other`.
    return other / self.to(u.day)
def to(self, *args, **kwargs):
    """Convert to a `Quantity` in the units given as arguments."""
    # Sum the two-double representation into a single day value, then
    # let Quantity.to handle the unit conversion.
    total_days = self._time.jd1 + self._time.jd2
    return u.Quantity(total_days, u.day).to(*args, **kwargs)
class ScaleValueError(Exception):
    # Exception type for invalid time scale values.
    pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
val = np.array(val, copy=copy, subok=True)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if not (val.dtype == np.float64 or val.dtype.kind in 'OSUa'):
val = np.asanyarray(val, dtype=np.float64)
return val
class OperandTypeError(TypeError):
    """Raised when a binary operation gets operands of unsupported types."""

    def __init__(self, left, right, op=None):
        # Optional " for <op>" suffix naming the failing operator.
        if op is None:
            op_string = ''
        else:
            op_string = ' for {0}'.format(op)
        message = ("Unsupported operand type(s){0}: "
                   "'{1}' and '{2}'".format(op_string,
                                            left.__class__.__name__,
                                            right.__class__.__name__))
        super(OperandTypeError, self).__init__(message)
| {
"content_hash": "5bfa4e300df40b2d0b31d69a3cf27972",
"timestamp": "",
"source": "github",
"line_count": 1726,
"max_line_length": 93,
"avg_line_length": 39.90961761297798,
"alnum_prop": 0.5627286452586958,
"repo_name": "tbabej/astropy",
"id": "d54d842f6ece686325f40e60d776790e5c3acf20",
"size": "68884",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/time/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366874"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "7610601"
},
{
"name": "Shell",
"bytes": "425"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
} |
# Deprecation shim: the real implementation moved to
# pants.backend.codegen.jaxb; this module re-exports it under the old path.
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

from pants.backend.codegen.jaxb.jaxb_library import JaxbLibrary
from pants.base.deprecated import deprecated_module

deprecated_module('1.5.0dev0', 'Use pants.backend.codegen.jaxb instead')

# Rebind so the name is an explicit module attribute (keeps old imports
# working and silences unused-import linting).
JaxbLibrary = JaxbLibrary
| {
"content_hash": "6a240d0698c808e40ca0c05aa264433d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 93,
"avg_line_length": 37.1,
"alnum_prop": 0.7520215633423181,
"repo_name": "lahosken/pants",
"id": "726d8be7917c0b4b0c11ca07edf36546b08873a0",
"size": "518",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/codegen/targets/jaxb_library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "78744"
},
{
"name": "Java",
"bytes": "464137"
},
{
"name": "JavaScript",
"bytes": "30784"
},
{
"name": "Protocol Buffer",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "5622705"
},
{
"name": "Rust",
"bytes": "168857"
},
{
"name": "Scala",
"bytes": "81298"
},
{
"name": "Shell",
"bytes": "66771"
},
{
"name": "Thrift",
"bytes": "2183"
}
],
"symlink_target": ""
} |
"""
Python implementation of the io module.
"""
from __future__ import (print_function, unicode_literals)
import os
import abc
import codecs
import warnings
import errno
# Import thread instead of threading to reduce startup cost
try:
from thread import allocate_lock as Lock
except ImportError:
from dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
# Make every class in this module new-style under Python 2.
__metaclass__ = type

# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes

# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):

    """Exception raised when I/O would block on a non-blocking I/O stream."""

    def __init__(self, errno, strerror, characters_written=0):
        # NOTE: deliberately calls IOError's *parent* initializer
        # (super(IOError, ...)), exactly as the original did.
        super(IOError, self).__init__(errno, strerror)
        if isinstance(characters_written, (int, long)):
            self.characters_written = characters_written
        else:
            raise TypeError("characters_written must be a integer")
def open(file, mode="r", buffering=-1,
         encoding=None, errors=None,
         newline=None, closefd=True):

    r"""Open file and return a stream.  Raise IOError upon failure.

    file is either a text or byte string giving the name (and the path
    if the file isn't in the current working directory) of the file to
    be opened or an integer file descriptor of the file to be
    wrapped. (If a file descriptor is given, it is closed when the
    returned I/O object is closed, unless closefd is set to False.)

    mode is an optional string that specifies the mode in which the file
    is opened. It defaults to 'r' which means open for reading in text
    mode.  Other common values are 'w' for writing (truncating the file if
    it already exists), and 'a' for appending (which on some Unix systems,
    means that all writes append to the end of the file regardless of the
    current seek position). In text mode, if encoding is not specified the
    encoding used is platform dependent. (For reading and writing raw
    bytes use binary mode and leave encoding unspecified.) The available
    modes are:

    ========= ===============================================================
    Character Meaning
    --------- ---------------------------------------------------------------
    'r'       open for reading (default)
    'w'       open for writing, truncating the file first
    'a'       open for writing, appending to the end of the file if it exists
    'b'       binary mode
    't'       text mode (default)
    '+'       open a disk file for updating (reading and writing)
    'U'       universal newline mode (for backwards compatibility; unneeded
              for new code)
    ========= ===============================================================

    The default mode is 'rt' (open for reading text). For binary random
    access, the mode 'w+b' opens and truncates the file to 0 bytes, while
    'r+b' opens the file without truncation.

    Python distinguishes between files opened in binary and text modes,
    even when the underlying operating system doesn't. Files opened in
    binary mode (appending 'b' to the mode argument) return contents as
    bytes objects without any decoding. In text mode (the default, or when
    't' is appended to the mode argument), the contents of the file are
    returned as strings, the bytes having been first decoded using a
    platform-dependent encoding or using the specified encoding if given.

    buffering is an optional integer used to set the buffering policy.
    Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
    line buffering (only usable in text mode), and an integer > 1 to indicate
    the size of a fixed-size chunk buffer.  When no buffering argument is
    given, the default buffering policy works as follows:

    * Binary files are buffered in fixed-size chunks; the size of the buffer
      is chosen using a heuristic trying to determine the underlying device's
      "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
      On many systems, the buffer will typically be 4096 or 8192 bytes long.

    * "Interactive" text files (files for which isatty() returns True)
      use line buffering.  Other text files use the policy described above
      for binary files.

    encoding is the name of the encoding used to decode or encode the
    file. This should only be used in text mode. The default encoding is
    platform dependent, but any encoding supported by Python can be
    passed.  See the codecs module for the list of supported encodings.

    errors is an optional string that specifies how encoding errors are to
    be handled---this argument should not be used in binary mode. Pass
    'strict' to raise a ValueError exception if there is an encoding error
    (the default of None has the same effect), or pass 'ignore' to ignore
    errors. (Note that ignoring encoding errors can lead to data loss.)
    See the documentation for codecs.register for a list of the permitted
    encoding error strings.

    newline controls how universal newlines works (it only applies to text
    mode). It can be None, '', '\n', '\r', and '\r\n'.  It works as
    follows:

    * On input, if newline is None, universal newlines mode is
      enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
      these are translated into '\n' before being returned to the
      caller. If it is '', universal newline mode is enabled, but line
      endings are returned to the caller untranslated. If it has any of
      the other legal values, input lines are only terminated by the given
      string, and the line ending is returned to the caller untranslated.

    * On output, if newline is None, any '\n' characters written are
      translated to the system default line separator, os.linesep. If
      newline is '', no translation takes place. If newline is any of the
      other legal values, any '\n' characters written are translated to
      the given string.

    If closefd is False, the underlying file descriptor will be kept open
    when the file is closed. This does not work when a file name is given
    and must be True in that case.

    open() returns a file object whose type depends on the mode, and
    through which the standard file operations such as reading and writing
    are performed. When open() is used to open a file in a text mode ('w',
    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
    a file in a binary mode, the returned class varies: in read binary
    mode, it returns a BufferedReader; in write binary and append binary
    modes, it returns a BufferedWriter, and in read/write mode, it returns
    a BufferedRandom.

    It is also possible to use a string or bytearray as a file for both
    reading and writing. For strings StringIO can be used like a file
    opened in a text mode, and for bytes a BytesIO can be used like a file
    opened in a binary mode.
    """
    # Argument type validation.
    if not isinstance(file, (basestring, int, long)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, basestring):
        raise TypeError("invalid mode: %r" % mode)
    if not isinstance(buffering, (int, long)):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, basestring):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, basestring):
        raise TypeError("invalid errors: %r" % errors)
    # Validate the mode string: only known flags, no duplicate characters.
    modes = set(mode)
    if modes - set("arwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        if writing or appending:
            raise ValueError("can't use U and writing mode at once")
        reading = True  # 'U' implies reading
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    # Layer 1: the raw, unbuffered file object.
    raw = FileIO(file,
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd)
    line_buffering = False
    if buffering == 1 or buffering < 0 and raw.isatty():
        buffering = -1
        line_buffering = True
    if buffering < 0:
        buffering = DEFAULT_BUFFER_SIZE
        try:
            # Prefer the filesystem's block size when the OS exposes it.
            bs = os.fstat(raw.fileno()).st_blksize
        except (os.error, AttributeError):
            pass
        else:
            if bs > 1:
                buffering = bs
    if buffering < 0:
        raise ValueError("invalid buffering size")
    if buffering == 0:
        if binary:
            return raw
        raise ValueError("can't have unbuffered text I/O")
    # Layer 2: buffering, chosen by access mode.
    if updating:
        buffer = BufferedRandom(raw, buffering)
    elif writing or appending:
        buffer = BufferedWriter(raw, buffering)
    elif reading:
        buffer = BufferedReader(raw, buffering)
    else:
        raise ValueError("unknown mode: %r" % mode)
    if binary:
        return buffer
    # Layer 3: text encoding/decoding and newline translation.
    text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
    text.mode = mode
    return text
class DocDescriptor:
    """Helper for builtins.open.__doc__
    """
    def __get__(self, obj, typ):
        # Prepend the signature line, then reuse open's own docstring.
        signature = ("open(file, mode='r', buffering=-1, encoding=None, "
                     "errors=None, newline=None, closefd=True)\n\n")
        return signature + open.__doc__
class OpenWrapper:
    """Wrapper for builtins.open

    Trick so that open won't become a bound method when stored
    as a class variable (as dbm.dumb does).

    See initstdio() in Python/pythonrun.c.
    """
    # Descriptor supplies the signature line plus open's docstring.
    __doc__ = DocDescriptor()

    def __new__(cls, *args, **kwargs):
        # Never actually instantiate: just forward to open().
        return open(*args, **kwargs)
class UnsupportedOperation(ValueError, IOError):
    # Raised (via IOBase._unsupported) when a stream is asked to perform
    # an operation it does not implement, e.g. writing to a read-only file.
    pass
class IOBase:
    __metaclass__ = abc.ABCMeta

    """The abstract base class for all I/O classes, acting on streams of
    bytes. There is no public constructor.

    This class provides dummy implementations for many methods that
    derived classes can override selectively; the default implementations
    represent a file that cannot be read, written or seeked.

    Even though IOBase does not declare read, readinto, or write because
    their signatures will vary, implementations and clients should
    consider those methods part of the interface. Also, implementations
    may raise a IOError when operations they do not support are called.

    The basic type used for binary data read from or written to a file is
    bytes. bytearrays are accepted too, and in some cases (such as
    readinto) needed. Text I/O classes work with str data.

    Note that calling any method (even inquiries) on a closed stream is
    undefined. Implementations may raise IOError in this case.

    IOBase (and its subclasses) support the iterator protocol, meaning
    that an IOBase object can be iterated over yielding the lines in a
    stream.

    IOBase also supports the :keyword:`with` statement. In this example,
    fp is closed after the suite of the with statement is complete:

    with open('spam.txt', 'r') as fp:
        fp.write('Spam and eggs!')
    """

    ### Internal ###

    def _unsupported(self, name):
        """Internal: raise an exception for unsupported operations."""
        raise UnsupportedOperation("%s.%s() not supported" %
                                   (self.__class__.__name__, name))

    ### Positioning ###

    def seek(self, pos, whence=0):
        """Change stream position.

        Change the stream position to byte offset offset. offset is
        interpreted relative to the position indicated by whence.  Values
        for whence are:

        * 0 -- start of stream (the default); offset should be zero or positive
        * 1 -- current stream position; offset may be negative
        * 2 -- end of stream; offset is usually negative

        Return the new absolute position.
        """
        self._unsupported("seek")

    def tell(self):
        """Return current stream position."""
        # Implemented as a no-op relative seek.
        return self.seek(0, 1)

    def truncate(self, pos=None):
        """Truncate file to size bytes.

        Size defaults to the current IO position as reported by tell().  Return
        the new size.
        """
        self._unsupported("truncate")

    ### Flush and close ###

    def flush(self):
        """Flush write buffers, if applicable.

        This is not implemented for read-only and non-blocking streams.
        """
        self._checkClosed()
        # XXX Should this return the number of bytes written???

    # Name-mangled so subclasses that define their own __closed don't clash.
    __closed = False

    def close(self):
        """Flush and close the IO object.

        This method has no effect if the file is already closed.
        """
        if not self.__closed:
            self.flush()
            self.__closed = True

    def __del__(self):
        """Destructor.  Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail.  Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    ### Inquiries ###

    def seekable(self):
        """Return whether object supports random access.

        If False, seek(), tell() and truncate() will raise IOError.
        This method may need to do a test seek().
        """
        return False

    def _checkSeekable(self, msg=None):
        """Internal: raise an IOError if file is not seekable
        """
        if not self.seekable():
            raise IOError("File or stream is not seekable."
                          if msg is None else msg)

    def readable(self):
        """Return whether object was opened for reading.

        If False, read() will raise IOError.
        """
        return False

    def _checkReadable(self, msg=None):
        """Internal: raise an IOError if file is not readable
        """
        if not self.readable():
            raise IOError("File or stream is not readable."
                          if msg is None else msg)

    def writable(self):
        """Return whether object was opened for writing.

        If False, write() and truncate() will raise IOError.
        """
        return False

    def _checkWritable(self, msg=None):
        """Internal: raise an IOError if file is not writable
        """
        if not self.writable():
            raise IOError("File or stream is not writable."
                          if msg is None else msg)

    @property
    def closed(self):
        """closed: bool.  True iff the file has been closed.

        For backwards compatibility, this is a property, not a predicate.
        """
        return self.__closed

    def _checkClosed(self, msg=None):
        """Internal: raise a ValueError if file is closed
        """
        if self.closed:
            raise ValueError("I/O operation on closed file."
                             if msg is None else msg)

    ### Context manager ###

    def __enter__(self):
        """Context management protocol.  Returns self."""
        self._checkClosed()
        return self

    def __exit__(self, *args):
        """Context management protocol.  Calls close()"""
        self.close()

    ### Lower-level APIs ###

    # XXX Should these be present even if unimplemented?

    def fileno(self):
        """Returns underlying file descriptor if one exists.

        An IOError is raised if the IO object does not use a file descriptor.
        """
        self._unsupported("fileno")

    def isatty(self):
        """Return whether this is an 'interactive' stream.

        Return False if it can't be determined.
        """
        self._checkClosed()
        return False

    ### Readline[s] and writelines ###

    def readline(self, limit=-1):
        r"""Read and return a line from the stream.

        If limit is specified, at most limit bytes will be read.

        The line terminator is always b'\n' for binary files; for text
        files, the newlines argument to open can be used to select the line
        terminator(s) recognized.
        """
        # For backwards compatibility, a (slowish) readline().
        if hasattr(self, "peek"):
            # With peek() we can look ahead for the newline and read up to
            # (and including) it in one call; otherwise read byte-by-byte.
            def nreadahead():
                readahead = self.peek(1)
                if not readahead:
                    return 1
                n = (readahead.find(b"\n") + 1) or len(readahead)
                if limit >= 0:
                    n = min(n, limit)
                return n
        else:
            def nreadahead():
                return 1
        if limit is None:
            limit = -1
        elif not isinstance(limit, (int, long)):
            raise TypeError("limit must be an integer")
        res = bytearray()
        while limit < 0 or len(res) < limit:
            b = self.read(nreadahead())
            if not b:
                break
            res += b
            if res.endswith(b"\n"):
                break
        return bytes(res)

    def __iter__(self):
        self._checkClosed()
        return self

    def next(self):
        # Python 2 iterator protocol: yield lines until EOF.
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def readlines(self, hint=None):
        """Return a list of lines from the stream.

        hint can be specified to control the number of lines read: no more
        lines will be read if the total size (in bytes/characters) of all
        lines so far exceeds hint.
        """
        if hint is not None and not isinstance(hint, (int, long)):
            raise TypeError("integer or None expected")
        if hint is None or hint <= 0:
            return list(self)
        n = 0
        lines = []
        for line in self:
            lines.append(line)
            n += len(line)
            if n >= hint:
                break
        return lines

    def writelines(self, lines):
        # No newline is added; lines are written verbatim.
        self._checkClosed()
        for line in lines:
            self.write(line)
io.IOBase.register(IOBase)  # register as virtual subclass of the official ABC
class RawIOBase(IOBase):

    """Base class for raw binary I/O."""

    # The read() method is implemented by calling readinto(); derived
    # classes that want to support read() only need to implement
    # readinto() as a primitive operation.  In general, readinto() can be
    # more efficient than read().

    # (It would be tempting to also provide an implementation of
    # readinto() in terms of read(), in case the latter is a more suitable
    # primitive operation, but that would lead to nasty recursion in case
    # a subclass doesn't implement either.)

    def read(self, n=-1):
        """Read and return up to n bytes.

        Returns an empty bytes object on EOF, or None if the object is
        set not to block and has no data to read.
        """
        if n is None:
            n = -1
        if n < 0:
            return self.readall()
        # Allocate exactly n bytes and let readinto() fill the buffer.
        b = bytearray(n.__index__())
        n = self.readinto(b)
        if n is None:
            return None
        del b[n:]  # trim to the number of bytes actually read
        return bytes(b)

    def readall(self):
        """Read until EOF, using multiple read() call."""
        res = bytearray()
        while True:
            data = self.read(DEFAULT_BUFFER_SIZE)
            if not data:
                break
            res += data
        if res:
            return bytes(res)
        else:
            # b'' or None -- propagate the EOF/would-block marker from read()
            return data

    def readinto(self, b):
        """Read up to len(b) bytes into b.

        Returns number of bytes read (0 for EOF), or None if the object
        is set not to block and has no data to read.
        """
        self._unsupported("readinto")

    def write(self, b):
        """Write the given buffer to the IO stream.

        Returns the number of bytes written, which may be less than len(b).
        """
        self._unsupported("write")
io.RawIOBase.register(RawIOBase)
# FileIO is implemented in C (_io); register it as a RawIOBase here so
# isinstance checks against this module's ABC work.
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):

    """Base class for buffered IO objects.

    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().

    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.

    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """

    def read(self, n=None):
        """Read and return up to n bytes.

        If the argument is omitted, None, or negative, reads and
        returns all data until EOF.

        If the argument is positive, and the underlying raw stream is
        not 'interactive', multiple raw reads may be issued to satisfy
        the byte count (unless EOF is reached first).  But for
        interactive raw streams (XXX and for pipes?), at most one raw
        read will be issued, and a short result does not imply that
        EOF is imminent.

        Returns an empty bytes array on EOF.

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        self._unsupported("read")

    def read1(self, n=None):
        """Read up to n bytes with at most one read() system call."""
        self._unsupported("read1")

    def readinto(self, b):
        """Read up to len(b) bytes into b.

        Like read(), this may issue multiple reads to the underlying raw
        stream, unless the latter is 'interactive'.

        Returns the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        # XXX This ought to work with anything that supports the buffer API
        data = self.read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError as err:
            # Fallback for array.array targets, which reject slice
            # assignment from bytes in Python 2.
            import array
            if not isinstance(b, array.array):
                raise err
            b[:n] = array.array(b'b', data)
        return n

    def write(self, b):
        """Write the given buffer to the IO stream.

        Return the number of bytes written, which is never less than
        len(b).

        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        self._unsupported("write")

    def detach(self):
        """
        Separate the underlying raw stream from the buffer and return it.

        After the raw stream has been detached, the buffer is in an unusable
        state.
        """
        self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)  # virtual subclass of the C ABC
class _BufferedIOMixin(BufferedIOBase):

    """A mixin implementation of BufferedIOBase with an underlying raw stream.

    This passes most requests on to the underlying raw stream.  It
    does *not* provide implementations of read(), readinto() or
    write().
    """

    def __init__(self, raw):
        # The wrapped raw stream; exposed read-only via the `raw` property.
        self._raw = raw

    ### Positioning ###

    def seek(self, pos, whence=0):
        new_position = self.raw.seek(pos, whence)
        if new_position < 0:
            raise IOError("seek() returned an invalid position")
        return new_position

    def tell(self):
        pos = self.raw.tell()
        if pos < 0:
            raise IOError("tell() returned an invalid position")
        return pos

    def truncate(self, pos=None):
        # Flush the stream.  We're mixing buffered I/O with lower-level I/O,
        # and a flush may be necessary to synch both views of the current
        # file state.
        self.flush()

        if pos is None:
            pos = self.tell()
        # XXX: Should seek() be used, instead of passing the position
        # XXX  directly to truncate?
        return self.raw.truncate(pos)

    ### Flush and close ###

    def flush(self):
        if self.closed:
            raise ValueError("flush of closed file")
        self.raw.flush()

    def close(self):
        if self.raw is not None and not self.closed:
            try:
                # may raise BlockingIOError or BrokenPipeError etc
                self.flush()
            finally:
                # Close the raw stream even if flushing failed.
                self.raw.close()

    def detach(self):
        if self.raw is None:
            raise ValueError("raw stream already detached")
        self.flush()
        raw = self._raw
        self._raw = None
        return raw

    ### Inquiries ###

    def seekable(self):
        return self.raw.seekable()

    def readable(self):
        return self.raw.readable()

    def writable(self):
        return self.raw.writable()

    @property
    def raw(self):
        return self._raw

    @property
    def closed(self):
        return self.raw.closed

    @property
    def name(self):
        return self.raw.name

    @property
    def mode(self):
        return self.raw.mode

    def __repr__(self):
        clsname = self.__class__.__name__
        try:
            name = self.name
        except AttributeError:
            # Raw streams without a name (e.g. wrapping a bare fd).
            return "<_pyio.{0}>".format(clsname)
        else:
            return "<_pyio.{0} name={1!r}>".format(clsname, name)

    ### Lower-level APIs ###

    def fileno(self):
        return self.raw.fileno()

    def isatty(self):
        return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf.extend(initial_bytes)
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if not isinstance(n, (int, long)):
raise TypeError("integer argument expected, got {0!r}".format(
type(n)))
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
    def readable(self):
        """Return True: an in-memory byte buffer is always readable."""
        return True
    def writable(self):
        """Return True: an in-memory byte buffer is always writable."""
        return True
    def seekable(self):
        """Return True: an in-memory byte buffer is always seekable."""
        return True
class BufferedReader(_BufferedIOMixin):
    """BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential BaseRawIO object.
    The constructor creates a BufferedReader for the given readable raw
    stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
    is used.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        """Create a new buffered reader using the given readable raw IO object.
        """
        if not raw.readable():
            raise IOError('"raw" argument must be readable.')
        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._reset_read_buf()
        # Serializes all access to _read_buf/_read_pos (read/peek/seek).
        self._read_lock = Lock()

    def _reset_read_buf(self):
        # _read_buf: bytes already fetched from the raw stream but not yet
        # consumed; _read_pos: index of the first unconsumed byte in it.
        self._read_buf = b""
        self._read_pos = 0

    def read(self, n=None):
        """Read n bytes.
        Returns exactly n bytes of data unless the underlying raw IO
        stream reaches EOF or if the call would block in non-blocking
        mode. If n is negative, read until EOF or until read() would
        block.
        """
        if n is not None and n < -1:
            raise ValueError("invalid number of bytes to read")
        with self._read_lock:
            return self._read_unlocked(n)

    def _read_unlocked(self, n=None):
        # nodata_val distinguishes EOF (b"") from "would block" (None), as
        # reported by a non-blocking raw stream.
        nodata_val = b""
        empty_values = (b"", None)
        buf = self._read_buf
        pos = self._read_pos
        # Special case for when the number of bytes to read is unspecified.
        if n is None or n == -1:
            self._reset_read_buf()
            chunks = [buf[pos:]]  # Strip the consumed bytes.
            current_size = 0
            while True:
                # Read until EOF or until read() would block.
                try:
                    chunk = self.raw.read()
                except IOError as e:
                    # Retry raw reads interrupted by a signal (EINTR).
                    if e.errno != EINTR:
                        raise
                    continue
                if chunk in empty_values:
                    nodata_val = chunk
                    break
                current_size += len(chunk)
                chunks.append(chunk)
            return b"".join(chunks) or nodata_val
        # The number of bytes to read is specified, return at most n bytes.
        avail = len(buf) - pos  # Length of the available buffered data.
        if n <= avail:
            # Fast path: the data to read is fully buffered.
            self._read_pos += n
            return buf[pos:pos+n]
        # Slow path: read from the stream until enough bytes are read,
        # or until an EOF occurs or until read() would block.
        chunks = [buf[pos:]]
        wanted = max(self.buffer_size, n)
        while avail < n:
            try:
                chunk = self.raw.read(wanted)
            except IOError as e:
                if e.errno != EINTR:
                    raise
                continue
            if chunk in empty_values:
                nodata_val = chunk
                break
            avail += len(chunk)
            chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
        # read() would have blocked.
        n = min(n, avail)
        out = b"".join(chunks)
        self._read_buf = out[n:]  # Save the extra data in the buffer.
        self._read_pos = 0
        return out[:n] if out else nodata_val

    def peek(self, n=0):
        """Returns buffered bytes without advancing the position.
        The argument indicates a desired minimal number of bytes; we
        do at most one raw read to satisfy it. We never return more
        than self.buffer_size.
        """
        with self._read_lock:
            return self._peek_unlocked(n)

    def _peek_unlocked(self, n=0):
        want = min(n, self.buffer_size)
        have = len(self._read_buf) - self._read_pos
        if have < want or have <= 0:
            # Top up the buffer with a single raw read, retried on EINTR.
            to_read = self.buffer_size - have
            while True:
                try:
                    current = self.raw.read(to_read)
                except IOError as e:
                    if e.errno != EINTR:
                        raise
                    continue
                break
            if current:
                self._read_buf = self._read_buf[self._read_pos:] + current
                self._read_pos = 0
        return self._read_buf[self._read_pos:]

    def read1(self, n):
        """Reads up to n bytes, with at most one read() system call."""
        # Returns up to n bytes. If at least one byte is buffered, we
        # only return buffered bytes. Otherwise, we do one raw read.
        if n < 0:
            raise ValueError("number of bytes to read must be positive")
        if n == 0:
            return b""
        with self._read_lock:
            self._peek_unlocked(1)
            return self._read_unlocked(
                min(n, len(self._read_buf) - self._read_pos))

    def tell(self):
        # The raw stream is ahead of the logical position by the amount of
        # read-ahead data still sitting unconsumed in the buffer.
        return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos

    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence value")
        with self._read_lock:
            if whence == 1:
                # Adjust a current-relative seek for buffered read-ahead.
                pos -= len(self._read_buf) - self._read_pos
            pos = _BufferedIOMixin.seek(self, pos, whence)
            self._reset_read_buf()
            return pos
class BufferedWriter(_BufferedIOMixin):
    """A buffer for a writeable sequential RawIO object.
    The constructor creates a BufferedWriter for the given writeable raw
    stream. If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    # Stack depth passed to warnings.warn() so the deprecation warning
    # points at the caller; BufferedRandom overrides this (its __init__
    # adds one more frame).
    _warning_stack_offset = 2

    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        if not raw.writable():
            raise IOError('"raw" argument must be writable.')
        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        if max_buffer_size is not None:
            warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
                          self._warning_stack_offset)
        self.buffer_size = buffer_size
        self._write_buf = bytearray()
        # Serializes access to _write_buf so writes and flushes don't
        # interleave.
        self._write_lock = Lock()

    def write(self, b):
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, unicode):
            raise TypeError("can't write unicode to binary stream")
        with self._write_lock:
            # XXX we can implement some more tricks to try and avoid
            # partial writes
            if len(self._write_buf) > self.buffer_size:
                # We're full, so let's pre-flush the buffer. (This may
                # raise BlockingIOError with characters_written == 0.)
                self._flush_unlocked()
            before = len(self._write_buf)
            self._write_buf.extend(b)
            written = len(self._write_buf) - before
            if len(self._write_buf) > self.buffer_size:
                try:
                    self._flush_unlocked()
                except BlockingIOError as e:
                    if len(self._write_buf) > self.buffer_size:
                        # We've hit the buffer_size. We have to accept a
                        # partial write and cut back our buffer.
                        overage = len(self._write_buf) - self.buffer_size
                        written -= overage
                        self._write_buf = self._write_buf[:self.buffer_size]
                    # Re-raise with the number of bytes actually accepted.
                    raise BlockingIOError(e.errno, e.strerror, written)
        return written

    def truncate(self, pos=None):
        with self._write_lock:
            # Flush first so the raw stream's size reflects buffered data.
            self._flush_unlocked()
            if pos is None:
                pos = self.raw.tell()
            return self.raw.truncate(pos)

    def flush(self):
        with self._write_lock:
            self._flush_unlocked()

    def _flush_unlocked(self):
        # Caller must hold _write_lock.
        if self.closed:
            raise ValueError("flush of closed file")
        while self._write_buf:
            try:
                n = self.raw.write(self._write_buf)
            except BlockingIOError:
                raise RuntimeError("self.raw should implement RawIOBase: it "
                                   "should not raise BlockingIOError")
            except IOError as e:
                # Retry raw writes interrupted by a signal (EINTR).
                if e.errno != EINTR:
                    raise
                continue
            if n is None:
                # RawIOBase signals "would block" by returning None.
                raise BlockingIOError(
                    errno.EAGAIN,
                    "write could not complete without blocking", 0)
            if n > len(self._write_buf) or n < 0:
                raise IOError("write() returned incorrect number of bytes")
            del self._write_buf[:n]

    def tell(self):
        # The logical position is ahead of the raw stream by the amount of
        # data still pending in the write buffer.
        return _BufferedIOMixin.tell(self) + len(self._write_buf)

    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence")
        with self._write_lock:
            # Flush before seeking so buffered data lands at the old position.
            self._flush_unlocked()
            return _BufferedIOMixin.seek(self, pos, whence)
class BufferedRWPair(BufferedIOBase):
    """A buffered reader and writer object together.
    A buffered reader object and buffered writer object put together to
    form a sequential IO object that can read and write. This is typically
    used with a socket or two-way pipe.
    reader and writer are RawIOBase objects that are readable and
    writeable respectively. If the buffer_size is omitted it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    # XXX The usefulness of this (compared to having two separate IO
    # objects) is questionable.

    def __init__(self, reader, writer,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        """Wrap two RawIO instances in buffered reader/writer objects."""
        if max_buffer_size is not None:
            warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
        if not reader.readable():
            raise IOError('"reader" argument must be readable.')
        if not writer.writable():
            raise IOError('"writer" argument must be writable.')
        self.reader = BufferedReader(reader, buffer_size)
        self.writer = BufferedWriter(writer, buffer_size)

    # --- Reading side: delegated to the wrapped BufferedReader. ---

    def read(self, n=None):
        return self.reader.read(-1 if n is None else n)

    def readinto(self, b):
        return self.reader.readinto(b)

    def peek(self, n=0):
        return self.reader.peek(n)

    def read1(self, n):
        return self.reader.read1(n)

    def readable(self):
        return self.reader.readable()

    # --- Writing side: delegated to the wrapped BufferedWriter. ---

    def write(self, b):
        return self.writer.write(b)

    def writable(self):
        return self.writer.writable()

    def flush(self):
        return self.writer.flush()

    # --- Operations touching both sides. ---

    def close(self):
        # Close the writer first so pending output is flushed out before
        # the reading side goes away.
        self.writer.close()
        self.reader.close()

    def isatty(self):
        return self.reader.isatty() or self.writer.isatty()

    @property
    def closed(self):
        return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
    """A buffered interface to random access streams.
    The constructor creates a reader and writer for a seekable stream,
    raw, given in the first argument. If the buffer_size is omitted it
    defaults to DEFAULT_BUFFER_SIZE.
    """

    # One stack frame deeper than BufferedWriter, because our __init__
    # delegates to BufferedWriter.__init__.
    _warning_stack_offset = 3

    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        raw._checkSeekable()
        BufferedReader.__init__(self, raw, buffer_size)
        BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)

    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence")
        # Flush pending writes before moving the raw stream.
        self.flush()
        if self._read_buf:
            # Undo read ahead.
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
        # First do the raw seek, then empty the read buffer, so that
        # if the raw seek fails, we don't lose buffered data forever.
        pos = self.raw.seek(pos, whence)
        with self._read_lock:
            self._reset_read_buf()
        if pos < 0:
            raise IOError("seek() returned invalid position")
        return pos

    def tell(self):
        # When there is pending write data the write-side position is
        # authoritative; otherwise use the read-side position.
        if self._write_buf:
            return BufferedWriter.tell(self)
        else:
            return BufferedReader.tell(self)

    def truncate(self, pos=None):
        if pos is None:
            pos = self.tell()
        # Use seek to flush the read buffer.
        return BufferedWriter.truncate(self, pos)

    def read(self, n=None):
        if n is None:
            n = -1
        # Flush pending writes so the read observes them.
        self.flush()
        return BufferedReader.read(self, n)

    def readinto(self, b):
        self.flush()
        return BufferedReader.readinto(self, b)

    def peek(self, n=0):
        self.flush()
        return BufferedReader.peek(self, n)

    def read1(self, n):
        self.flush()
        return BufferedReader.read1(self, n)

    def write(self, b):
        if self._read_buf:
            # Undo readahead
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
                self._reset_read_buf()
        return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
    """Base class for text I/O.
    This class provides a character and line based interface to stream
    I/O. There is no readinto method because Python's character strings
    are immutable. There is no public constructor.
    """

    # Every operation below is abstract: it reports "unsupported" unless a
    # subclass overrides it.

    def read(self, n=-1):
        """Read at most n characters from stream.
        Read from underlying buffer until we have n characters or we hit EOF.
        If n is negative or omitted, read until EOF.
        """
        self._unsupported("read")

    def write(self, s):
        """Write string s to stream."""
        self._unsupported("write")

    def truncate(self, pos=None):
        """Truncate size to pos."""
        self._unsupported("truncate")

    def readline(self):
        """Read until newline or EOF.
        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")

    def detach(self):
        """
        Separate the underlying buffer from the TextIOBase and return it.
        After the underlying buffer has been detached, the TextIO is in an
        unusable state.
        """
        self._unsupported("detach")

    @property
    def encoding(self):
        """Subclasses should override."""
        return None

    @property
    def newlines(self):
        """Line endings translated so far.
        Only line endings translated during reading are considered.
        Subclasses should override.
        """
        return None

    @property
    def errors(self):
        """Error setting of the decoder or encoder.
        Subclasses should override."""
        return None

# Register as a virtual subclass of the C-accelerated io.TextIOBase so
# isinstance() checks work across both implementations.
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    r"""Incremental decoder used when reading a file in universal newlines
    mode.  It wraps another incremental decoder, translating \r\n and \r
    into \n and recording the types of newlines encountered.  When used
    with translate=False, it still ensures that a \r\n sequence is always
    returned in one piece.
    """

    # Bit flags accumulated in self.seennl.
    _LF = 1
    _CR = 2
    _CRLF = 4

    def __init__(self, decoder, translate, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors=errors)
        self.translate = translate
        self.decoder = decoder
        self.seennl = 0
        # True when a trailing '\r' from the previous chunk is being held
        # back until we know whether a '\n' follows it.
        self.pendingcr = False

    def decode(self, input, final=False):
        """Decode input, translating/recording newlines incrementally."""
        # Run the wrapped decoder first (pass through when there is none).
        if self.decoder is None:
            text = input
        else:
            text = self.decoder.decode(input, final=final)
        # Re-attach the '\r' held back by the previous call.
        if self.pendingcr and (text or final):
            text = "\r" + text
            self.pendingcr = False
        # Hold back a trailing '\r' unless this is the final chunk: it may
        # be the first half of a '\r\n' pair, and readline() relies on
        # seeing '\r\n' in one piece.
        if text.endswith("\r") and not final:
            text = text[:-1]
            self.pendingcr = True
        # Record which newline styles appeared in this chunk.
        num_crlf = text.count('\r\n')
        num_cr = text.count('\r') - num_crlf
        num_lf = text.count('\n') - num_crlf
        if num_lf:
            self.seennl |= self._LF
        if num_cr:
            self.seennl |= self._CR
        if num_crlf:
            self.seennl |= self._CRLF
        # Optionally normalize everything to '\n'.
        if self.translate:
            if num_crlf:
                text = text.replace("\r\n", "\n")
            if num_cr:
                text = text.replace("\r", "\n")
        return text

    def getstate(self):
        # The low bit of the flag word carries pendingcr; the wrapped
        # decoder's flags occupy the remaining bits, shifted up by one.
        if self.decoder is None:
            buf, flag = b"", 0
        else:
            buf, flag = self.decoder.getstate()
        flag <<= 1
        if self.pendingcr:
            flag |= 1
        return buf, flag

    def setstate(self, state):
        buf, flag = state
        self.pendingcr = bool(flag & 1)
        if self.decoder is not None:
            self.decoder.setstate((buf, flag >> 1))

    def reset(self):
        self.seennl = 0
        self.pendingcr = False
        if self.decoder is not None:
            self.decoder.reset()

    @property
    def newlines(self):
        # Map the seennl bitmask onto the value reported by file.newlines.
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n")
                )[self.seennl]
class TextIOWrapper(TextIOBase):
    r"""Character and line based layer over a BufferedIOBase object, buffer.
    encoding gives the name of the encoding that the stream will be
    decoded or encoded with. It defaults to locale.getpreferredencoding.
    errors determines the strictness of encoding and decoding (see the
    codecs.register) and defaults to "strict".
    newline can be None, '', '\n', '\r', or '\r\n'. It controls the
    handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the lines endings '\n', '\r',
    or '\r\n' are translated to '\n' before being returned to the
    caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
    legal values, that newline becomes the newline when the file is read
    and it is returned untranslated. On output, '\n' is converted to the
    newline.
    If line_buffering is True, a call to flush is implied when a call to
    write contains a newline character.
    """

    # Number of bytes fed to the decoder per chunked read.
    _CHUNK_SIZE = 2048

    def __init__(self, buffer, encoding=None, errors=None, newline=None,
                 line_buffering=False):
        if newline is not None and not isinstance(newline, basestring):
            raise TypeError("illegal newline type: %r" % (type(newline),))
        if newline not in (None, "", "\n", "\r", "\r\n"):
            raise ValueError("illegal newline value: %r" % (newline,))
        if encoding is None:
            try:
                import locale
            except ImportError:
                # Importing locale may fail if Python is being built
                encoding = "ascii"
            else:
                encoding = locale.getpreferredencoding()
        if not isinstance(encoding, basestring):
            raise ValueError("invalid encoding: %r" % encoding)
        if errors is None:
            errors = "strict"
        else:
            if not isinstance(errors, basestring):
                raise ValueError("invalid errors: %r" % errors)
        self._buffer = buffer
        self._line_buffering = line_buffering
        self._encoding = encoding
        self._errors = errors
        # Newline-handling flags derived from the newline argument.
        self._readuniversal = not newline
        self._readtranslate = newline is None
        self._readnl = newline
        self._writetranslate = newline != ''
        self._writenl = newline or os.linesep
        self._encoder = None
        self._decoder = None
        self._decoded_chars = ''  # buffer for text returned from decoder
        self._decoded_chars_used = 0  # offset into _decoded_chars for read()
        self._snapshot = None  # info for reconstructing decoder state
        self._seekable = self._telling = self.buffer.seekable()
        if self._seekable and self.writable():
            position = self.buffer.tell()
            if position != 0:
                try:
                    # Tell the encoder we are not at stream start so it
                    # doesn't emit a BOM mid-file.
                    self._get_encoder().setstate(0)
                except LookupError:
                    # Sometimes the encoder doesn't exist
                    pass

    # self._snapshot is either None, or a tuple (dec_flags, next_input)
    # where dec_flags is the second (integer) item of the decoder state
    # and next_input is the chunk of input bytes that comes next after the
    # snapshot point. We use this to reconstruct decoder states in tell().

    # Naming convention:
    #   - "bytes_..." for integer variables that count input bytes
    #   - "chars_..." for integer variables that count decoded characters

    def __repr__(self):
        try:
            name = self.name
        except AttributeError:
            return "<_pyio.TextIOWrapper encoding='{0}'>".format(self.encoding)
        else:
            return "<_pyio.TextIOWrapper name={0!r} encoding='{1}'>".format(
                name, self.encoding)

    @property
    def encoding(self):
        return self._encoding

    @property
    def errors(self):
        return self._errors

    @property
    def line_buffering(self):
        return self._line_buffering

    @property
    def buffer(self):
        return self._buffer

    def seekable(self):
        return self._seekable

    def readable(self):
        return self.buffer.readable()

    def writable(self):
        return self.buffer.writable()

    def flush(self):
        self.buffer.flush()
        # Flushing re-enables tell() after it was disabled by next().
        self._telling = self._seekable

    def close(self):
        if self.buffer is not None and not self.closed:
            self.flush()
            self.buffer.close()

    @property
    def closed(self):
        return self.buffer.closed

    @property
    def name(self):
        return self.buffer.name

    def fileno(self):
        return self.buffer.fileno()

    def isatty(self):
        return self.buffer.isatty()

    def write(self, s):
        if self.closed:
            raise ValueError("write to closed file")
        if not isinstance(s, unicode):
            raise TypeError("can't write %s to text stream" %
                            s.__class__.__name__)
        length = len(s)
        haslf = (self._writetranslate or self._line_buffering) and "\n" in s
        if haslf and self._writetranslate and self._writenl != "\n":
            s = s.replace("\n", self._writenl)
        encoder = self._encoder or self._get_encoder()
        # XXX What if we were just reading?
        b = encoder.encode(s)
        self.buffer.write(b)
        if self._line_buffering and (haslf or "\r" in s):
            self.flush()
        # Writing invalidates any decoder snapshot used by tell().
        self._snapshot = None
        if self._decoder:
            self._decoder.reset()
        return length

    def _get_encoder(self):
        # Lazily create (and cache) the incremental encoder.
        make_encoder = codecs.getincrementalencoder(self._encoding)
        self._encoder = make_encoder(self._errors)
        return self._encoder

    def _get_decoder(self):
        # Lazily create (and cache) the incremental decoder, wrapped in the
        # newline decoder when universal-newlines mode is on.
        make_decoder = codecs.getincrementaldecoder(self._encoding)
        decoder = make_decoder(self._errors)
        if self._readuniversal:
            decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
        self._decoder = decoder
        return decoder

    # The following three methods implement an ADT for _decoded_chars.
    # Text returned from the decoder is buffered here until the client
    # requests it by calling our read() or readline() method.

    def _set_decoded_chars(self, chars):
        """Set the _decoded_chars buffer."""
        self._decoded_chars = chars
        self._decoded_chars_used = 0

    def _get_decoded_chars(self, n=None):
        """Advance into the _decoded_chars buffer."""
        offset = self._decoded_chars_used
        if n is None:
            chars = self._decoded_chars[offset:]
        else:
            chars = self._decoded_chars[offset:offset + n]
        self._decoded_chars_used += len(chars)
        return chars

    def _rewind_decoded_chars(self, n):
        """Rewind the _decoded_chars buffer."""
        if self._decoded_chars_used < n:
            raise AssertionError("rewind decoded_chars out of bounds")
        self._decoded_chars_used -= n

    def _read_chunk(self):
        """
        Read and decode the next chunk of data from the BufferedReader.
        """
        # The return value is True unless EOF was reached. The decoded
        # string is placed in self._decoded_chars (replacing its previous
        # value). The entire input chunk is sent to the decoder, though
        # some of it may remain buffered in the decoder, yet to be
        # converted.
        if self._decoder is None:
            raise ValueError("no decoder")
        if self._telling:
            # To prepare for tell(), we need to snapshot a point in the
            # file where the decoder's input buffer is empty.
            dec_buffer, dec_flags = self._decoder.getstate()
            # Given this, we know there was a valid snapshot point
            # len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
        # Read a chunk, decode it, and put the result in self._decoded_chars.
        input_chunk = self.buffer.read1(self._CHUNK_SIZE)
        eof = not input_chunk
        self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
        if self._telling:
            # At the snapshot point, len(dec_buffer) bytes before the read,
            # the next input to be decoded is dec_buffer + input_chunk.
            self._snapshot = (dec_flags, dec_buffer + input_chunk)
        return not eof

    def _pack_cookie(self, position, dec_flags=0,
                     bytes_to_feed=0, need_eof=0, chars_to_skip=0):
        # The meaning of a tell() cookie is: seek to position, set the
        # decoder flags to dec_flags, read bytes_to_feed bytes, feed them
        # into the decoder with need_eof as the EOF flag, then skip
        # chars_to_skip characters of the decoded result. For most simple
        # decoders, tell() will often just give a byte offset in the file.
        # The fields are packed into disjoint 64-bit lanes of one big int.
        return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
               (chars_to_skip<<192) | bool(need_eof)<<256)

    def _unpack_cookie(self, bigint):
        # Inverse of _pack_cookie: peel off one 64-bit lane at a time.
        rest, position = divmod(bigint, 1<<64)
        rest, dec_flags = divmod(rest, 1<<64)
        rest, bytes_to_feed = divmod(rest, 1<<64)
        need_eof, chars_to_skip = divmod(rest, 1<<64)
        return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip

    def tell(self):
        if not self._seekable:
            raise IOError("underlying stream is not seekable")
        if not self._telling:
            raise IOError("telling position disabled by next() call")
        self.flush()
        position = self.buffer.tell()
        decoder = self._decoder
        if decoder is None or self._snapshot is None:
            if self._decoded_chars:
                # This should never happen.
                raise AssertionError("pending decoded text")
            return position
        # Skip backward to the snapshot point (see _read_chunk).
        dec_flags, next_input = self._snapshot
        position -= len(next_input)
        # How many decoded characters have been used up since the snapshot?
        chars_to_skip = self._decoded_chars_used
        if chars_to_skip == 0:
            # We haven't moved from the snapshot point.
            return self._pack_cookie(position, dec_flags)
        # Starting from the snapshot position, we will walk the decoder
        # forward until it gives us enough decoded characters.
        saved_state = decoder.getstate()
        try:
            # Note our initial start point.
            decoder.setstate((b'', dec_flags))
            start_pos = position
            start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
            need_eof = 0
            # Feed the decoder one byte at a time. As we go, note the
            # nearest "safe start point" before the current location
            # (a point where the decoder has nothing buffered, so seek()
            # can safely start from there and advance to this location).
            for next_byte in next_input:
                bytes_fed += 1
                chars_decoded += len(decoder.decode(next_byte))
                dec_buffer, dec_flags = decoder.getstate()
                if not dec_buffer and chars_decoded <= chars_to_skip:
                    # Decoder buffer is empty, so this is a safe start point.
                    start_pos += bytes_fed
                    chars_to_skip -= chars_decoded
                    start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
                if chars_decoded >= chars_to_skip:
                    break
            else:
                # We didn't get enough decoded data; signal EOF to get more.
                chars_decoded += len(decoder.decode(b'', final=True))
                need_eof = 1
                if chars_decoded < chars_to_skip:
                    raise IOError("can't reconstruct logical file position")
            # The returned cookie corresponds to the last safe start point.
            return self._pack_cookie(
                start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
        finally:
            decoder.setstate(saved_state)

    def truncate(self, pos=None):
        self.flush()
        if pos is None:
            pos = self.tell()
        return self.buffer.truncate(pos)

    def detach(self):
        if self.buffer is None:
            raise ValueError("buffer is already detached")
        self.flush()
        buffer = self._buffer
        self._buffer = None
        return buffer

    def seek(self, cookie, whence=0):
        if self.closed:
            # NOTE(review): message says "tell" although this is seek() —
            # matches upstream _pyio of this vintage; left unchanged here.
            raise ValueError("tell on closed file")
        if not self._seekable:
            raise IOError("underlying stream is not seekable")
        if whence == 1:  # seek relative to current position
            if cookie != 0:
                raise IOError("can't do nonzero cur-relative seeks")
            # Seeking to the current position should attempt to
            # sync the underlying buffer with the current position.
            whence = 0
            cookie = self.tell()
        if whence == 2:  # seek relative to end of file
            if cookie != 0:
                raise IOError("can't do nonzero end-relative seeks")
            self.flush()
            position = self.buffer.seek(0, 2)
            self._set_decoded_chars('')
            self._snapshot = None
            if self._decoder:
                self._decoder.reset()
            return position
        if whence != 0:
            raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
                             (whence,))
        if cookie < 0:
            raise ValueError("negative seek position %r" % (cookie,))
        self.flush()
        # The strategy of seek() is to go back to the safe start point
        # and replay the effect of read(chars_to_skip) from there.
        start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
            self._unpack_cookie(cookie)
        # Seek back to the safe start point.
        self.buffer.seek(start_pos)
        self._set_decoded_chars('')
        self._snapshot = None
        # Restore the decoder to its state from the safe start point.
        if cookie == 0 and self._decoder:
            self._decoder.reset()
        elif self._decoder or dec_flags or chars_to_skip:
            self._decoder = self._decoder or self._get_decoder()
            self._decoder.setstate((b'', dec_flags))
            self._snapshot = (dec_flags, b'')
        if chars_to_skip:
            # Just like _read_chunk, feed the decoder and save a snapshot.
            input_chunk = self.buffer.read(bytes_to_feed)
            self._set_decoded_chars(
                self._decoder.decode(input_chunk, need_eof))
            self._snapshot = (dec_flags, input_chunk)
            # Skip chars_to_skip of the decoded characters.
            if len(self._decoded_chars) < chars_to_skip:
                raise IOError("can't restore logical file position")
            self._decoded_chars_used = chars_to_skip
        # Finally, reset the encoder (merely useful for proper BOM handling)
        try:
            encoder = self._encoder or self._get_encoder()
        except LookupError:
            # Sometimes the encoder doesn't exist
            pass
        else:
            if cookie != 0:
                encoder.setstate(0)
            else:
                encoder.reset()
        return cookie

    def read(self, n=None):
        self._checkReadable()
        if n is None:
            n = -1
        decoder = self._decoder or self._get_decoder()
        try:
            n.__index__
        except AttributeError:
            raise TypeError("an integer is required")
        if n < 0:
            # Read everything.
            result = (self._get_decoded_chars() +
                      decoder.decode(self.buffer.read(), final=True))
            self._set_decoded_chars('')
            self._snapshot = None
            return result
        else:
            # Keep reading chunks until we have n characters to return.
            eof = False
            result = self._get_decoded_chars(n)
            while len(result) < n and not eof:
                eof = not self._read_chunk()
                result += self._get_decoded_chars(n - len(result))
            return result

    def next(self):
        # Iteration disables tell() (see _read_chunk snapshot handling);
        # it is re-enabled on EOF or by an explicit flush().
        self._telling = False
        line = self.readline()
        if not line:
            self._snapshot = None
            self._telling = self._seekable
            raise StopIteration
        return line

    def readline(self, limit=None):
        if self.closed:
            raise ValueError("read from closed file")
        if limit is None:
            limit = -1
        elif not isinstance(limit, (int, long)):
            raise TypeError("limit must be an integer")
        # Grab all the decoded text (we will rewind any extra bits later).
        line = self._get_decoded_chars()
        start = 0
        # Make the decoder if it doesn't already exist.
        if not self._decoder:
            self._get_decoder()
        pos = endpos = None
        while True:
            if self._readtranslate:
                # Newlines are already translated, only search for \n
                pos = line.find('\n', start)
                if pos >= 0:
                    endpos = pos + 1
                    break
                else:
                    start = len(line)
            elif self._readuniversal:
                # Universal newline search. Find any of \r, \r\n, \n
                # The decoder ensures that \r\n are not split in two pieces
                # In C we'd look for these in parallel of course.
                nlpos = line.find("\n", start)
                crpos = line.find("\r", start)
                if crpos == -1:
                    if nlpos == -1:
                        # Nothing found
                        start = len(line)
                    else:
                        # Found \n
                        endpos = nlpos + 1
                        break
                elif nlpos == -1:
                    # Found lone \r
                    endpos = crpos + 1
                    break
                elif nlpos < crpos:
                    # Found \n
                    endpos = nlpos + 1
                    break
                elif nlpos == crpos + 1:
                    # Found \r\n
                    endpos = crpos + 2
                    break
                else:
                    # Found \r
                    endpos = crpos + 1
                    break
            else:
                # non-universal
                pos = line.find(self._readnl)
                if pos >= 0:
                    endpos = pos + len(self._readnl)
                    break
            if limit >= 0 and len(line) >= limit:
                endpos = limit  # reached length limit
                break
            # No line ending seen yet - get more data
            while self._read_chunk():
                if self._decoded_chars:
                    break
            if self._decoded_chars:
                line += self._get_decoded_chars()
            else:
                # end of file
                self._set_decoded_chars('')
                self._snapshot = None
                return line
        if limit >= 0 and endpos > limit:
            endpos = limit  # don't exceed limit
        # Rewind _decoded_chars to just after the line ending we found.
        self._rewind_decoded_chars(len(line) - endpos)
        return line[:endpos]

    @property
    def newlines(self):
        return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
    """Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the value of object. The newline
    argument is like the one of TextIOWrapper's constructor.
    """

    def __init__(self, initial_value="", newline="\n"):
        # Back the text stream with an in-memory bytes buffer; the utf-8
        # round-trip is an internal implementation detail (see encoding
        # property below).
        super(StringIO, self).__init__(BytesIO(),
                                       encoding="utf-8",
                                       errors="strict",
                                       newline=newline)
        # Issue #5645: make universal newlines semantics the same as in the
        # C version, even under Windows.
        if newline is None:
            self._writetranslate = False
        if initial_value:
            if not isinstance(initial_value, unicode):
                initial_value = unicode(initial_value)
            self.write(initial_value)
            self.seek(0)

    def getvalue(self):
        # Flush pending writes, then decode the underlying bytes back into
        # text.
        self.flush()
        return self.buffer.getvalue().decode(self._encoding, self._errors)

    def __repr__(self):
        # TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's an implementation detail.
        return object.__repr__(self)

    @property
    def errors(self):
        # Hide the internal codec's error mode.
        return None

    @property
    def encoding(self):
        # Hide the internal utf-8 encoding (see __repr__).
        return None

    def detach(self):
        # This doesn't make sense on StringIO.
        self._unsupported("detach")
| {
"content_hash": "6d48824b59c874c7c12f51ff70b0a499",
"timestamp": "",
"source": "github",
"line_count": 2005,
"max_line_length": 81,
"avg_line_length": 34.12369077306733,
"alnum_prop": 0.5762810956181121,
"repo_name": "azoft-dev-team/imagrium",
"id": "d5114338fad37808883881cd63658569fd3da36a",
"size": "68418",
"binary": false,
"copies": "3",
"ref": "refs/heads/win",
"path": "env/Lib/_pyio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22116"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111703"
},
{
"name": "Java",
"bytes": "448343"
},
{
"name": "Python",
"bytes": "14076342"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ruby",
"bytes": "5269"
},
{
"name": "Shell",
"bytes": "3193"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Hardware smoke test: polls a Grove button on D4 and sounds a buzzer on D8
# while the button is pressed.  (Python 2 print syntax.)
# Connect the Grove Button to digital Port 4.
button = ARD_D4  # This is the D4 pin.
buzzer = ARD_D8  # This is the D8 pin.
jetduino.pinMode(button, INPUT_PIN)
jetduino.pinMode(buzzer, OUTPUT_PIN)
print "GrovePi Basic Hardware Test."
# NOTE(review): the setup text says "Grove LED" on D8 but the code treats the
# peripheral as a buzzer -- confirm which device is intended.
print "Setup: Connect the button sensor to port D4. Connect a Grove LED to port D8."
print "Press the button and the buzzer will buzz!"
while True:
    try:
        butt_val = jetduino.digitalRead(button) # Each time we go through the loop, we read D4.
        print (butt_val) # Print the value of D4.
        if butt_val > 0:
            # Button pressed: sound the buzzer for one second.
            jetduino.digitalWrite(buzzer, HIGH)
            print ('start')
            time.sleep(1)
        else:
            # Button released: silence the buzzer, poll twice a second.
            jetduino.digitalWrite(buzzer, LOW)
            time.sleep(.5)
    except IOError:
        # I2C read/write hiccups are non-fatal; report and keep polling.
        print ("Error")
| {
"content_hash": "1738e65f0d8ce1348610f793fa542ab6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 103,
"avg_line_length": 36.898305084745765,
"alnum_prop": 0.7629765732659624,
"repo_name": "NeuroRoboticTech/Jetduino",
"id": "6c5f804b3b5e947c187546e684130bc4cda8fe05",
"size": "2547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Software/Python/jetduino_hardware_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "37042"
},
{
"name": "C",
"bytes": "38867"
},
{
"name": "C#",
"bytes": "33014"
},
{
"name": "C++",
"bytes": "101883"
},
{
"name": "CMake",
"bytes": "3553"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "JavaScript",
"bytes": "30142"
},
{
"name": "Python",
"bytes": "568027"
},
{
"name": "Shell",
"bytes": "17661"
}
],
"symlink_target": ""
} |
import time
from pymongo import MongoClient
#from subprocess import Popen, PIPE
import FindVid as fv
from sys import argv, exit
import hashlib
import os
import subprocess, shlex
def hashFile(filename, blocksize=65536):
    """Return the SHA-1 hex digest of *filename*, read in *blocksize* chunks.

    A missing file raises the usual IOError/OSError from open().
    """
    digest = hashlib.sha1()
    with open(filename, 'rb') as stream:
        chunk = stream.read(blocksize)
        while len(chunk) > 0:
            digest.update(chunk)
            chunk = stream.read(blocksize)
    return str(digest.hexdigest())
def config(db="findvid", collection="videos", config=None):
    """Load the application configuration document from MongoDB.

    db/collection: names of the database and collection holding the config.
    config: query used to locate the config document; defaults to
        {"_id": "config"}.  The default lives in the body to avoid a shared
        mutable default argument.

    Returns a (videos_collection, videopath, thumbnailpath) tuple; the two
    paths are built by prefixing the stored relative paths with "abspath".
    (The old comment claimed a dictionary was returned, which was wrong.)
    """
    if config is None:
        config = {"_id": "config"}
    client = MongoClient(port=8099)
    db = client[db]
    videos = db[collection]
    config = videos.find_one(config)
    videopath = config["abspath"] + config["videopath"]
    thumbnailpath = config["abspath"] + config["thumbnailpath"]
    return (videos, videopath, thumbnailpath)
def transcode_video(srcVideo, dstVideo, quiet=False, forceTranscode=True):
    """Transcode srcVideo to H.264 (libx264) at dstVideo using ffmpeg.

    quiet: suppress both our command echo and ffmpeg's own log output.
    forceTranscode: overwrite an existing dstVideo (-y); otherwise never
        overwrite (-n).

    Returns the ffmpeg process exit code (0 on success).
    """
    quietText = ""
    if quiet:
        quietText = " -loglevel quiet"
    if forceTranscode:
        opt = "-y"
    else:
        opt = "-n"
    cmd = "ffmpeg " + opt + " -i " + srcVideo + " -c:v libx264" + quietText + " -preset veryslow -threads 1 " + dstVideo
    # The old code echoed the command a second time unconditionally (a bare
    # Python 2 `print cmd`), defeating the quiet flag; echo only when noisy.
    if not quiet:
        print (cmd)
    # NOTE(review): shell=True with filenames interpolated into the command
    # string is shell-injection prone for untrusted paths; consider passing
    # an argument list with shell=False.
    return subprocess.call(cmd, shell=True)
#Index the given videofile (rel. path), create thumbnails in designated folder or given alternative
def index_video(database, collection, fileHash, videofile, searchable=True, uploaded=False, thumbpath = None):
    """Index *videofile* (path relative to the configured videopath).

    database/collection: names forwarded to config() to obtain the videos
        collection plus the configured video/thumbnail directories.
    fileHash: content hash of the file, used as the MongoDB document _id.
    searchable: whether the video participates in searches.
    uploaded: marks the document as a user upload.
    thumbpath: alternative thumbnail directory; defaults to the configured one.

    Returns fileHash when the video was indexed (or resurrected), None when
    an active document with the same hash already exists.
    """
    videos, videopath, thumbnailpath = config(db=database, collection=collection)
    #retrieve absolute path
    vidpath = os.path.join(videopath, videofile);
    #Check if this exact video exists already
    video = videos.find_one({'_id': fileHash})
    if (video):
        # A soft-deleted duplicate is resurrected instead of re-analyzed.
        if video['removed']:
            videos.update({'_id': fileHash}, {'$set': {'removed': False}})
            videos.update({'_id': fileHash}, {'$set': {'searchable': searchable}})
            return fileHash
        else:
            return None
    print "Get cuts of " , vidpath
    #Use C-Lib to get cuts in the video
    cuts = fv.getCuts(vidpath)
    #Heuristic approach: Suitable keyframe between 2 cuts
    keyframes = [(cuts[i-1] + cuts[i])/2 for i in range(1, len(cuts))]
    #extract features from videofile given the keyframes array, use the middle keyframe as videothumb and save to default folder
    if (thumbpath == None):
        thumbpath = thumbnailpath # use default
    features = fv.getFeatures(vidpath, fileHash, keyframes[len(keyframes)/2], keyframes, thumbpath)
    scenes = [] # features of scenes as list
    # One scene document per detected cut interval; features[i] is the
    # (tinyimg, edges, colorhist) triple produced by fv.getFeatures above.
    for i, c in enumerate(cuts[1:]):
        scene = {} # scene document
        scene["_id"] = i
        scene["tinyimg"] = features[i][0]
        scene["edges"] = features[i][1]
        scene["colorhist"] = features[i][2]
        scenes.append(scene)
    video = {}
    #General video information
    video["_id"] = fileHash
    video["filename"] = videofile
    video["uploadtime"] = time.time()
    fps = fv.getFramerate(vidpath)
    video["fps"] = fps
    video["removed"] = False
    video["cuts"] = cuts
    video["scenes"] = scenes
    video["upload"] = uploaded
    video["searchable"] = searchable
    #The momentous step of inserting into the database
    #This is done on a single document(Or is it?) and therefore atomic, according to the documentation
    #therefore, user induced process abortion should not leave anything to be cleaned up
    videos.insert(video)
    return fileHash
if __name__ == "__main__":
if len(argv) < 2:
print "ERROR: file missing!" + "\n"
exit(1)
#Get PyMongo client
client = MongoClient(port=8099)
db = client["findvid"]
videos = db["videos"]
videofile = argv[1]
index_video(videos, videofile)
| {
"content_hash": "7dd9b3ebbd52fb0ce57a2fecdae3efaa",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 125,
"avg_line_length": 28.806451612903224,
"alnum_prop": 0.7015677491601344,
"repo_name": "findvid/main",
"id": "00358b9493cc1d6a492778ac8f9ce7a79372e012",
"size": "3594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/src/indexing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "99227"
},
{
"name": "C++",
"bytes": "1215880"
},
{
"name": "CSS",
"bytes": "21924"
},
{
"name": "JavaScript",
"bytes": "13536"
},
{
"name": "Makefile",
"bytes": "7042"
},
{
"name": "Python",
"bytes": "68421"
},
{
"name": "Shell",
"bytes": "244"
}
],
"symlink_target": ""
} |
"""
Created on Aug 23, 2015
@author: jrm

Example entry point: installs the enamlx widget extensions and launches the
tree_view example inside a Qt application.
"""
import enaml
from enaml.qt.qt_application import QtApplication
import enamlx
if __name__ == "__main__":
    # Register enamlx widgets before any .enaml files are imported.
    enamlx.install()
    with enaml.imports():
        from tree_view import Main
    app = QtApplication()
    view = Main()
    view.show()
    # Enter the Qt event loop; blocks until the application exits.
    app.start()
| {
"content_hash": "8a958a6d4d10f16fe30ef5ea83073c4e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 16.36842105263158,
"alnum_prop": 0.6237942122186495,
"repo_name": "frmdstryr/enamlx",
"id": "1a1cc1bca5b11f75456907c6076d9cd035612545",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tree_view/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "306"
},
{
"name": "Python",
"bytes": "169953"
}
],
"symlink_target": ""
} |
# Re-export the trace API message types from the .trace submodule so they can
# be imported directly from this package.
from .trace import (
    GetTraceRequest,
    ListTracesRequest,
    ListTracesResponse,
    PatchTracesRequest,
    Trace,
    Traces,
    TraceSpan,
)
# Explicit public API of this package.
__all__ = (
    "GetTraceRequest",
    "ListTracesRequest",
    "ListTracesResponse",
    "PatchTracesRequest",
    "Trace",
    "Traces",
    "TraceSpan",
)
| {
"content_hash": "0f72a5cd352b32c0688b30b796fe204f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 25,
"avg_line_length": 16.42105263157895,
"alnum_prop": 0.6217948717948718,
"repo_name": "googleapis/python-trace",
"id": "ea3865326a7f45f3143bf91eab99494032d70604",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/trace_v1/types/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "404724"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
"""Django settings for tests."""
import os
import django
# Repository root, two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
SECRET_KEY = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
INTERNAL_IPS = ['127.0.0.1']
LANGUAGE_CODE = 'en'
LANGUAGES = (
    ('en', 'English'),
)
USE_TZ = True
# Third-party / contrib apps, kept separate from this project's own apps.
EXTERNAL_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
INTERNAL_APPS = [
    'tickets',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
MEDIA_URL = '/media/' # Avoids https://code.djangoproject.com/ticket/21451
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name, which
# matches the old Django versions these tests target.
MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
STATIC_ROOT = os.path.join(BASE_DIR, 'tests', 'static')
STATIC_URL = '/static/'
# Second entry exercises the ("prefix", path) STATICFILES_DIRS form.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'tests', 'additional_static'),
    ("prefix", os.path.join(BASE_DIR, 'tests', 'additional_static')),
]
# Cache and database
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
# In-memory SQLite (no NAME) keeps the test run self-contained.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
    }
}
# Django < 1.6 lacks the modern test discovery runner; use the backport.
if django.VERSION[:2] < (1, 6):
    TEST_RUNNER = 'discover_runner.DiscoverRunner'
| {
"content_hash": "ab6dff87b5672e572613ee17338f5c94",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 21.84,
"alnum_prop": 0.6843711843711844,
"repo_name": "Christophe31/django-tickets",
"id": "5fc40bcbbfe369a87cb1c2ea0298dd2d7c07b8f3",
"size": "1638",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tickets/tests/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1233"
},
{
"name": "Makefile",
"bytes": "762"
},
{
"name": "Python",
"bytes": "13220"
}
],
"symlink_target": ""
} |
import win32api, win32con
import actionFunctions
#An Example GameAI with sample values; a template for bot modules.
#All Values should be replaced and defined for a real game.
"""
All coordinates assume a screen resolution of 1280x1024, and Chrome
maximized with the Bookmarks Toolbar enabled.
Down key has been hit 4 times to center play area in browser.
"""
# Top-left corner and size of the game's play area, in screen pixels.
x_pad = 156
y_pad = 100
x_size = 800
y_size = 800
#The frequency at which gameIO should capture the screen for processing
#Represented in seconds
sampleRate = .1
#Failure state, defined as pixel location(x,y) and pixel color
#In this example: the run has failed when the pixel at (345, 400) has an
#r value of 65, a g value of 100, and a b value of 137.
failure = (345, 400, 65, 100, 137)
#A Tuple containing the bounds of our game: (left, top, right, bottom).
bounds = (x_pad, y_pad, x_pad+x_size, y_pad+y_size)
#performs a set of actions to start the game from the main menu
def start():
    actionFunctions.leftClick()
#Performs a set of functions to reset the game from a failure state
#Should be called from GameIO when gameIO detects a failure
def reset():
    actionFunctions.leftClick()
#Determines and performs the action to take given the captured input image.
#Not doing anything should always be an option!
def chooseAction(image):
    actionFunctions.doNothing()
| {
"content_hash": "eff1623a989d14609e125e02851b3880",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 99,
"avg_line_length": 27.844444444444445,
"alnum_prop": 0.7525937749401437,
"repo_name": "cmsc471-swagbag/ScoreAttackBot",
"id": "f4bf5e93fd16d1c0345c92078fcfc0a58bacc3dd",
"size": "1253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exampleAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3463"
}
],
"symlink_target": ""
} |
import json
import requests
class KeyardClient(object):
    """Thin HTTP client for a keyard service registry.

    Every call targets <keyard_host>/keyard; GET requests send query
    parameters, all other verbs send a JSON body.
    """

    def __init__(self, keyard_host):
        """keyard_host: base URL of the keyard server, e.g. 'http://host:port'."""
        self.keyard_host = keyard_host + "/keyard"

    def _make_request(self, method, data):
        """Issue an HTTP *method* request carrying *data* and return the response."""
        requests_method = getattr(requests, method.lower())
        kwargs = {
            'headers': {'content-type': 'application/json'}
        }
        if method == "GET":
            kwargs.update({'params': data})
        else:
            kwargs.update({'data': json.dumps(data)})
        return requests_method(self.keyard_host, **kwargs)

    def _service_request(self, method, name, version, location):
        """Shared body of register/unregister/health_check (was triplicated).

        Returns True on HTTP 200; raises Exception(response text) otherwise.
        """
        data = {'service_name': name, 'version': version, 'location': location}
        body = self._make_request(method, data)
        if body.status_code == 200:
            return True
        raise Exception(body.text)

    def register(self, name, version, location):
        """Register a service instance with the registry."""
        return self._service_request("POST", name, version, location)

    def unregister(self, name, version, location):
        """Remove a service instance from the registry."""
        return self._service_request("DELETE", name, version, location)

    def health_check(self, name, version, location):
        """Report the service instance as alive."""
        return self._service_request("PUT", name, version, location)

    def get_service(self, name, version=None):
        """Look up a service, optionally pinned to a specific version.

        Returns the decoded JSON payload on HTTP 200; raises
        Exception(response text) otherwise.
        """
        method = "GET"
        data = {'service_name': name}
        if version:
            data.update({'version': version})
        body = self._make_request(method, data)
        if body.status_code == 200:
            return json.loads(body.text)
        raise Exception(body.text)
| {
"content_hash": "7468d1a7ebfd101b3fb198226b28d370",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 32.35087719298246,
"alnum_prop": 0.5629067245119306,
"repo_name": "rzanluchi/keyard-client",
"id": "6596d61fd26867051fbba55235ee9c16ff2c1a6c",
"size": "1868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keyard_client/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8282"
}
],
"symlink_target": ""
} |
import os, sys
#===================================================================================================
# ToStr
#===================================================================================================
def ToStr(o):
    """Render *o* through its ToStr() method when it defines one, else str()."""
    return o.ToStr() if hasattr(o, 'ToStr') else str(o)
def Contains(sub, s):
    """Return True when *sub* occurs anywhere in string *s*."""
    return sub in s
def NotContains(sub, s):
    """Return True when *sub* does not occur anywhere in string *s*."""
    return sub not in s
#===================================================================================================
# Container
#===================================================================================================
class Container:
    """Base node: maps child names to Class entries and knows how to indent."""

    def __init__(self):
        self.contents = {}

    def get(self, s):
        """Return the child named *s*, creating an empty Class on first access."""
        if s not in self.contents:
            self.contents[s] = Class(s)
        return self.contents[s]

    def indent(self, s):
        """Return *s* with every line shifted right by four spaces."""
        return '\n'.join('    ' + line for line in s.splitlines())
#===================================================================================================
# Class
#===================================================================================================
class Class(Container):
    """A class entry that renders itself and its sorted children as source."""
    def __init__(self, name):
        self.name = name
        Container.__init__(self)
    def ToStr(self):
        """Render a 'class <name>:' block with children indented.

        An empty class body is rendered as 'pass' so the output stays
        syntactically valid Python.
        """
        str_contents = []
        # iteritems: this module targets Python 2 / Jython.
        for _key, content in sorted(self.contents.iteritems()):
            str_contents.append(self.indent(ToStr(content)))
        if not str_contents:
            str_contents.append(self.indent('pass'))
        return \
'''
class %s:
%s
''' % (self.name, '\n'.join(str_contents))
    def __str__(self):
        return self.ToStr()
#===================================================================================================
# Method
#===================================================================================================
class Method(Container):
    """A method entry: normalizes a QScintilla .api signature into Python source."""
    def __init__(self, name, signature):
        self.name = name
        self.signature = signature
        Container.__init__(self)
    def ToStr(self):
        """Render the method as a 'def name(sig):' stub with an @type docstring.

        The raw .api signature may carry C++-style parameter types, default
        values and a '-> return' annotation; types are moved into the
        docstring and names are mangled so the result parses as Python.
        """
        signature = self.signature
        ret = ''
        has_self = False
        docstring = ''
        # Strip the opening parenthesis and a leading 'self, ' (remembered
        # so it can be re-added after the rewrite below).
        if signature.startswith('('):
            signature = signature[1:]
        if signature.startswith('self, '):
            signature = signature[len('self, '):]
            has_self = True
        # Split off a '-> return type' annotation, handled at the end.
        if Contains('->', signature):
            signature, ret = signature.split('->')
            signature = signature.strip()
            ret = ret.strip()
        if NotContains(' ', signature):
            # Single token without spaces: only default-value/dotted-name
            # fixups are needed.
            if Contains('()', signature):
                signature= 'param='+signature
            elif Contains('.', signature):
                if NotContains('=', signature):
                    signature = 'param='+signature
                else:
                    split = signature.split('=')
                    if Contains('.', split[0]):
                        split[0] = split[0].replace('.', '_')
                    signature = '='.join(split)
            signature_rep = '('+signature
        else:
            # General case: rebuild the signature one parameter at a time.
            signature_rep = ''
            splitted = signature.split(', ')
            size = len(splitted)
            i = -1
            for s in splitted:
                i += 1
                found_after_equals = None
                # Peel off a '=default' suffix; re-attached after cleanup.
                if Contains('=', s):
                    split_equals = s.split('=')
                    s = split_equals[0]
                    found_after_equals = '='+'='.join(split_equals[1:])
                # Everything before the last space is the declared type.
                parts = s.split(' ')
                type, s = ' '.join(parts[:-1]), parts[-1]
                if not s.strip():
                    s = 'param'
                # Dotted names are not valid parameter names in Python.
                if Contains('.', s):
                    if found_after_equals is None:
                        s = 'param='+s
                    else:
                        split = s.split('=')
                        if Contains('.', split[0]):
                            split[0] = split[0].replace('.', '_')
                        s = '='.join(split)
                if found_after_equals is not None:
                    s += found_after_equals
                s = s.strip()
                # Drop stray call parentheses left over from the .api text.
                if found_after_equals is None:
                    if s.endswith('()'):
                        s=s[:-2]
                    if s.endswith('()):'):
                        s=s[:-4]+'):'
                    elif s.endswith('())'):
                        s=s[:-3]+')'
                # The last parameter must close the parameter list.
                if i == size-1:
                    if not s.endswith(')'):
                        s += ')'
                if type.endswith(')'):
                    type = type[:-1]
                if type:
                    #Add to docstring
                    param = s.strip()
                    if Contains('=', param):
                        param = param.split('=')[0].strip()
                    if param.endswith(':'):
                        param = param[:-1]
                    if param.endswith(')'):
                        param = param[:-1]
                    docstring+= '\n    @type %s: %s' % (param, type)
                if signature_rep and not signature_rep.strip().endswith(','):
                    signature_rep += ', '
                elif signature_rep.endswith(','):
                    signature_rep += ' '
                signature_rep += s
            if has_self:
                signature_rep = '(self, '+signature_rep
            else:
                signature_rep = '('+signature_rep
        # Return annotations may contain spaces/commas; squash them into
        # underscore-joined identifiers so 'return <ret>' stays parseable.
        if ret:
            if Contains(',', ret):
                splitted = ret.split(',')
                new = ''
                for s in splitted:
                    if new:
                        new += ', '
                    new += s.strip().replace(' ', '_')
                ret = new
            elif Contains(' ', ret.strip()):
                ret = ret.strip().replace(' ', '_')
        string_rep = \
"""
def %s%s:
    '''
    %s
    '''
""" % (self.name, signature_rep, docstring)
        if ret:
            string_rep += 'return '+ret
        # Final keyword/token mangling so reserved words and C++ leftovers
        # do not break the generated Python.
        val = string_rep.replace('from,', 'from_,').\
            replace('from=', 'from_=').\
            replace('exec(', 'exec_(').\
            replace('in)', 'in_)').\
            replace('in,', 'in_,').\
            replace('...', '___').\
            replace('(1)', '(one)')
        return val
    def __str__(self):
        return self.ToStr()
#===================================================================================================
# Attribute
#===================================================================================================
class Attribute(Container):
    """Leaf entry rendered as a '<name> = <type>' assignment."""

    def __init__(self, name, type):
        self.type = type
        self.name = name
        Container.__init__(self)

    def ToStr(self):
        """Render the attribute as a Python assignment line."""
        return '{} = {}'.format(self.name, self.type)

    def __str__(self):
        return self.ToStr()
#===================================================================================================
# Module
#===================================================================================================
class Module(Container):
    """Root of the parsed tree; one Module per generated .pypredef file."""
    def __init__(self):
        self.contents = {}
    def AddString(self, before, after):
        """Insert one .api entry into the tree.

        before: dotted path of the symbol (module prefix already stripped).
        after: the .api kind/signature code found after the '?' separator.
        """
        splitted = before.split('.')
        prev = self
        # Walk/create the chain of enclosing classes for the dotted path.
        for part in splitted[:-1]:
            prev = prev.get(part)
        if after == '10': #integer constant
            prev.contents.setdefault(splitted[-1], Attribute(splitted[-1], 'int'))
        elif after.startswith('4('): #4 means method
            after = after[1:]
            prev.contents.setdefault(splitted[-1], Method(splitted[-1], after))
        elif after.startswith('1('): #1 means constructor
            after = after[1:]
            if not after.startswith('(self'):
                return #Constructor method (ignore as we only want the __init__)
            #setdefault because we don't want to override it if one declaration is already there
            prev.contents.setdefault(splitted[-1], Method(splitted[-1], after))
        elif after.startswith('7'): #7: ignored marker, e.g. PyQt4.QtCore.QObject.staticMetaObject?7
            pass #occurrence: PyQt4.QtCore.QObject.staticMetaObject?7
        else:
            raise AssertionError('Not treated: '+before+after)
    def ToStr(self):
        """Render all top-level entries, sorted by name (py2 iteritems)."""
        ret = []
        for _key, content in sorted(self.contents.iteritems()):
            ret.append(ToStr(content))
        return '\n'.join(ret)
    def __str__(self):
        return self.ToStr()
#===================================================================================================
# Convert
#===================================================================================================
def Convert(api_file, parts_for_module, cancel_monitor, lines=None, output_stream=None):
    """Convert a QScintilla .api file into .pypredef files, one per module.

    api_file: path to the .api file; read from disk unless *lines* is given,
        and always named in the generated header.
    parts_for_module: minimum number of dotted parts for a line to count
        towards a module.
    cancel_monitor: object with isCanceled()/worked()/setTaskName(), e.g.
        CancelMonitor, used for progress reporting and cancellation.
    lines: optional pre-read list of .api lines.
    output_stream: optional stream; when given, contents are written there
        instead of to <module>.pypredef files next to api_file.
    """
    cancel_monitor.setTaskName('Opening: '+api_file)
    if lines is None:
        f = open(api_file, 'r')
        try:
            lines = f.readlines()
        finally:
            f.close()
    directory = os.path.dirname(api_file)
    cancel_monitor.setTaskName('Parsing: '+api_file)
    if cancel_monitor.isCanceled():
        return
    cancel_monitor.worked(1)
    found = {}
    for line in lines:
        contents = line.split('.')
        if len(contents) >= parts_for_module:
            # NOTE(review): the module key is always the first two dotted
            # parts regardless of parts_for_module -- confirm intended.
            found['.'.join(contents[:2])] = ''
    for handle_module in sorted(found.iterkeys()):
        cancel_monitor.setTaskName('Handling: '+handle_module)
        cancel_monitor.worked(1)
        if cancel_monitor.isCanceled():
            return
        module = Module()
        for line in lines:
            if line.startswith(handle_module+'.'):
                line = line[len(handle_module+'.'):].strip()
                line = line.replace('::', '.')
                before, after = line.split('?')
                module.AddString(before, after)
        # Interpolate the source filename into the header (the old code
        # never applied the % operator, so a literal '%s' leaked into every
        # generated file).
        final_contents = '''"""Automatically generated file from %s (QScintilla API file)
Note that the generated code must be Python 3.0 compatible to be used in a .pypredef file.
Imports should not be used (if a class is used from another module,
it should be completely redeclared in a .pypredef file)
The name of the file should be a direct representation of the module name
(i.e.: a PyQt4.QtCore.pypredef file represents a PyQt4.QtCore module)
"""
''' % (api_file,)
        target = handle_module+'.pypredef'
        print('Writing contents for: %s to: %s' % (handle_module, target))
        final_contents += ToStr(module)
        if output_stream is None:
            f = open(os.path.join(directory, target), 'w')
            try:
                # 'None' cannot appear as a name in Python source.
                f.write(final_contents.replace('None =', 'None_ =').replace('.None', '.None_'))
            finally:
                f.close()
        else:
            output_stream.write(final_contents)
#===================================================================================================
# CancelMonitor
#===================================================================================================
class CancelMonitor:
    """No-op progress monitor exposing the IProgressMonitor surface used here."""

    def isCanceled(self): #Match IProgressMonitor.isCanceled.
        """Never report cancellation."""
        return 0

    def worked(self, val):
        """Discard progress increments."""
        return None

    def setTaskName(self, name):
        """Discard task-name updates."""
        return None
#===================================================================================================
# main
#===================================================================================================
if __name__ == '__main__':
    # Command-line use: python convert_api_to_pypredef.py <api_file> <parts>.
    args = sys.argv[1:]
    if len(args) < 2:
        print(
            'Expected the first parameter to be the QScintilla .api file\n'
            'and the second parameter is the number of strings which \n'
            'define a module\n\n')
        print('E.g.: python convert_api_to_pypredef.py PyQt4.api 2')
    else:
        api_file = args[0]
        assert os.path.exists(api_file), 'File: %s does not exist.' % (api_file,)
        parts_for_module = int(args[1])
        assert parts_for_module >= 1, 'At least the 1st part must define a module.'
        cancel_monitor = CancelMonitor()
        Convert(api_file, parts_for_module, cancel_monitor)
else:
    # Imported (Jython scripting): run only when the caller pre-populated
    # api_file/parts_for_module in this namespace before the import.
    try:
        api_file
        parts_for_module
    except:
        pass
    else:
        # A caller-supplied cancel_monitor is honored; default to a no-op.
        try:
            cancel_monitor
        except:
            cancel_monitor = CancelMonitor()
        #Available in the namespace (jython scripting calling it)
        assert os.path.exists(api_file), 'File: %s does not exist.' % (api_file,)
        parts_for_module = int(parts_for_module)
        assert parts_for_module >= 1, 'At least the 1st part must define a module.'
        Convert(api_file, parts_for_module, cancel_monitor)
        print 'SUCCESS'
| {
"content_hash": "e86b00056fe78c25a6f7982db0ece9a7",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 100,
"avg_line_length": 33.966921119592875,
"alnum_prop": 0.39718330961120685,
"repo_name": "ArcherSys/ArcherSys",
"id": "cff6ed1175aedbd0d75a1456593b313ac1242b62",
"size": "13349",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "eclipse/plugins/org.python.pydev.jython_4.5.5.201603221110/jysrc/convert_api_to_pypredef.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
""" Managed variant
For potentially causative variants that are not yet in ClinVar
and have yet not been marked causative in any existing case.
"""
from datetime import datetime
from scout.utils.md5 import generate_md5_key
class ManagedVariant(dict):
    """Managed variant document (a dict subclass stored in MongoDB).

    For potentially causative variants that are not yet in ClinVar and have
    not been marked causative in any existing case.

    # required primary fields
    chromosome=str, position=int, end=int, reference=str, alternative=str,
    build=str ("37"/"38", default "37"), date=datetime.datetime
    # required derived fields
    managed_variant_id (all defining fields joined with "_"),
    display_id (chrom_pos_ref_alt), variant_id (md5 key, no md5 in display)
    # optional fields
    maintainer=list(user_id), institute=institute_id,
    category=str ('sv', 'snv', 'str', 'cancer', 'cancer_sv'),
    sub_category=str ('snv', 'indel', 'del', 'ins', 'dup', 'inv', 'cnv',
    'bnd', 'str'), description=str
    """

    def __init__(
        self,
        chromosome,
        position,
        end,
        reference,
        alternative,
        institute,
        maintainer=None,  # was a mutable default list, shared between calls
        build="37",
        date=None,
        category="snv",
        sub_category="snv",
        description=None,
    ):
        super(ManagedVariant, self).__init__()
        self["chromosome"] = str(chromosome)
        self["position"] = position
        self["end"] = end
        self["reference"] = reference
        self["alternative"] = alternative
        self["build"] = build
        # Human-readable unique key built from all defining fields.
        self["managed_variant_id"] = "_".join(
            [
                str(part)
                for part in (
                    chromosome,
                    position,
                    reference,
                    alternative,
                    category,
                    sub_category,
                    build,
                )
            ]
        )
        # Short display form without category/sub_category/build.
        self["display_id"] = "_".join(
            [str(part) for part in (chromosome, position, reference, alternative)]
        )
        # md5-based id matching the corresponding clinical variant documents.
        self["variant_id"] = generate_md5_key(
            [str(part) for part in (chromosome, position, reference, alternative, "clinical")]
        )
        self["date"] = date or datetime.now()
        self["institute"] = institute or None
        self["maintainer"] = maintainer or []
        self["category"] = category
        self["sub_category"] = sub_category
        self["description"] = description
| {
"content_hash": "21c50d1299925d4883bb751169f3f035",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 98,
"avg_line_length": 29.03448275862069,
"alnum_prop": 0.5435471100554236,
"repo_name": "Clinical-Genomics/scout",
"id": "d9277ea1f79de8f391fc962fee015ed13aa1cce4",
"size": "2526",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scout/models/managed_variant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12516"
},
{
"name": "Dockerfile",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "911931"
},
{
"name": "JavaScript",
"bytes": "32692"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "2419990"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .. import QROOT
from ..decorators import snake_case_methods
from .base import Plottable
from ..base import NameOnlyObject
import six
__all__ = [
'F1',
'F2',
'F3',
]
class BaseFunction(object):
    """Mixin shared by F1/F2/F3 adding indexed parameter access.

    fcn[i] or fcn["name"] returns a ParProxy wrapping one TFn parameter.
    """
    class ParProxy(object):
        """Read/write view of a single function parameter (name/value/error/limits)."""
        def __init__(self, fcn, idx):
            self.fcn_ = fcn
            self.idx_ = idx
        @property
        def index(self):
            """Index of this parameter inside the wrapped function."""
            return self.idx_
        @property
        def name(self):
            return self.fcn_.GetParName(self.idx_)
        @name.setter
        def name(self, val):
            return self.fcn_.SetParName(self.idx_, val)
        @property
        def value(self):
            return self.fcn_.GetParameter(self.idx_)
        @value.setter
        def value(self, val):
            self.fcn_.SetParameter(self.idx_, val)
        @property
        def error(self):
            return self.fcn_.GetParError(self.idx_)
        @error.setter
        def error(self, val):
            return self.fcn_.SetParError(self.idx_, val)
        @property
        def limits(self):
            # GetParLimits fills the two out-parameters in place.
            m = QROOT.Double()
            M = QROOT.Double()
            self.fcn_.GetParLimits(self.idx_, m, M)
            return float(m), float(M)
        @limits.setter
        def limits(self, val):
            # Fixed: was 'and', which raised TypeError for unsized values
            # and silently skipped validation for sized values of the
            # wrong length.
            if not hasattr(val, '__len__') or len(val) != 2:
                raise RuntimeError('Function limits must be a tuple size 2')
            self.fcn_.SetParLimits(self.idx_, val[0], val[1])
    def __getitem__(self, value):
        """Return a ParProxy for a parameter index (int) or name (str)."""
        if isinstance(value, six.string_types):
            idx = self.GetParNumber(value)
        elif isinstance(value, six.integer_types):
            idx = value
        else:
            raise ValueError('Function index must be a integer or a string')
        return BaseFunction.ParProxy(self, idx)
@snake_case_methods
class F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1):
    """1-D function: ROOT TF1 with rootpy naming, plotting and parameter access."""
    _ROOT = QROOT.TF1
    def __init__(self, *args, **kwargs):
        # 'name' is consumed here; remaining kwargs go to Plottable._post_init.
        name = kwargs.pop('name', None)
        super(F1, self).__init__(*args, name=name)
        self._post_init(**kwargs)
@snake_case_methods
class F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2):
    """2-D function: ROOT TF2 with rootpy naming, plotting and parameter access."""
    _ROOT = QROOT.TF2
    def __init__(self, *args, **kwargs):
        # 'name' is consumed here; remaining kwargs go to Plottable._post_init.
        name = kwargs.pop('name', None)
        super(F2, self).__init__(*args, name=name)
        self._post_init(**kwargs)
@snake_case_methods
class F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3):
    """3-D function: ROOT TF3 with rootpy naming, plotting and parameter access."""
    _ROOT = QROOT.TF3
    def __init__(self, *args, **kwargs):
        # 'name' is consumed here; remaining kwargs go to Plottable._post_init.
        name = kwargs.pop('name', None)
        super(F3, self).__init__(*args, name=name)
        self._post_init(**kwargs)
| {
"content_hash": "ed99d5be318a766be1b4f86769ee1915",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 76,
"avg_line_length": 26.939393939393938,
"alnum_prop": 0.567679040119985,
"repo_name": "rootpy/rootpy",
"id": "87478457e9d7f6474522534745cb909e6f9d643d",
"size": "2667",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rootpy/plotting/func.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "109"
},
{
"name": "Makefile",
"bytes": "2778"
},
{
"name": "Python",
"bytes": "861605"
},
{
"name": "Shell",
"bytes": "3089"
}
],
"symlink_target": ""
} |
"""The tests for generic camera component."""
import unittest
from unittest import mock
import requests_mock
from werkzeug.test import EnvironBuilder
from homeassistant.bootstrap import setup_component
from homeassistant.components.http import request_class
from tests.common import get_test_home_assistant
class TestGenericCamera(unittest.TestCase):
    """Test the generic camera platform."""
    def setUp(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Replace the wsgi app so the registered camera view can be captured
        # from mock_calls instead of starting a real HTTP server.
        self.hass.wsgi = mock.MagicMock()
        self.hass.config.components.append('http')
    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()
    @requests_mock.Mocker()
    def test_fetching_url(self, m):
        """Test that it fetches the given url."""
        self.hass.wsgi = mock.MagicMock()
        m.get('http://example.com', text='hello world')
        assert setup_component(self.hass, 'camera', {
            'camera': {
                'name': 'config_test',
                'platform': 'generic',
                'still_image_url': 'http://example.com',
                'username': 'user',
                'password': 'pass'
            }})
        # The camera image view is the first object registered on wsgi.
        image_view = self.hass.wsgi.mock_calls[0][1][0]
        builder = EnvironBuilder(method='GET')
        Request = request_class()
        request = Request(builder.get_environ())
        request.authenticated = True
        resp = image_view.get(request, 'camera.config_test')
        assert m.call_count == 1
        assert resp.status_code == 200, resp.response
        assert resp.response[0].decode('utf-8') == 'hello world'
        # A second request triggers a second fetch (no refetch limiting).
        image_view.get(request, 'camera.config_test')
        assert m.call_count == 2
    @requests_mock.Mocker()
    def test_limit_refetch(self, m):
        """Test that limit_refetch_to_url_change only refetches on a new URL."""
        self.hass.wsgi = mock.MagicMock()
        from requests.exceptions import Timeout
        m.get('http://example.com/5a', text='hello world')
        m.get('http://example.com/10a', text='hello world')
        m.get('http://example.com/15a', text='hello planet')
        m.get('http://example.com/20a', status_code=404)
        # The URL is templated on sensor.temp so changing the state below
        # changes the fetched URL.
        assert setup_component(self.hass, 'camera', {
            'camera': {
                'name': 'config_test',
                'platform': 'generic',
                'still_image_url':
                'http://example.com/{{ states.sensor.temp.state + "a" }}',
                'limit_refetch_to_url_change': True,
            }})
        image_view = self.hass.wsgi.mock_calls[0][1][0]
        builder = EnvironBuilder(method='GET')
        Request = request_class()
        request = Request(builder.get_environ())
        request.authenticated = True
        self.hass.states.set('sensor.temp', '5')
        # A network timeout yields a 500 without counting as a fetch.
        with mock.patch('requests.get', side_effect=Timeout()):
            resp = image_view.get(request, 'camera.config_test')
            assert m.call_count == 0
            assert resp.status_code == 500, resp.response
        self.hass.states.set('sensor.temp', '10')
        resp = image_view.get(request, 'camera.config_test')
        assert m.call_count == 1
        assert resp.status_code == 200, resp.response
        assert resp.response[0].decode('utf-8') == 'hello world'
        # Same URL again: cached image served, no new fetch.
        resp = image_view.get(request, 'camera.config_test')
        assert m.call_count == 1
        assert resp.status_code == 200, resp.response
        assert resp.response[0].decode('utf-8') == 'hello world'
        self.hass.states.set('sensor.temp', '15')
        # Url change = fetch new image
        resp = image_view.get(request, 'camera.config_test')
        assert m.call_count == 2
        assert resp.status_code == 200, resp.response
        assert resp.response[0].decode('utf-8') == 'hello planet'
        # Cause a template render error
        self.hass.states.remove('sensor.temp')
        resp = image_view.get(request, 'camera.config_test')
        assert m.call_count == 2
        assert resp.status_code == 200, resp.response
        assert resp.response[0].decode('utf-8') == 'hello planet'
| {
"content_hash": "0385f8299597f44c3ea3fcf3bf83b900",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 74,
"avg_line_length": 35.87826086956522,
"alnum_prop": 0.5935530780416869,
"repo_name": "leoc/home-assistant",
"id": "df80b48e36bc8c6c07bc1ad8bd77776735f6b6cf",
"size": "4126",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/camera/test_generic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1366220"
},
{
"name": "Python",
"bytes": "3636900"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
"""Script to create demonstration sets from our trained policies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top,g-bad-import-order
import platform
if int(platform.python_version_tuple()[0]) < 3:
import cPickle as pickle
else:
import _pickle as pickle
import os
import random
from absl import app
from absl import flags
from absl import logging
from common import Actor
import gym
import numpy as np
from replay_buffer import ReplayBuffer
import tensorflow.compat.v1 as tf
from utils import do_rollout
from tensorflow.contrib.eager.python import tfe as contrib_eager_python_tfe
# pylint: enable=g-import-not-at-top,g-bad-import-order
# Command-line interface; values are parsed by absl when app.run() starts.
FLAGS = flags.FLAGS
flags.DEFINE_float('exploration_noise', 0.1,
                   'Scale of noise used for exploration.')
flags.DEFINE_integer('random_actions', int(1e4),
                     'Number of random actions to sample to replay buffer '
                     'before sampling policy actions.')
flags.DEFINE_integer('training_steps', int(1e6), 'Number of training steps.')
flags.DEFINE_string('env', 'Hopper-v1',
                    'Environment for training/evaluation.')
flags.DEFINE_string('expert_dir', '', 'Directory to load the expert model.')
flags.DEFINE_integer('num_expert_trajectories', 100,
                     'Number of trajectories taken from the expert.')
flags.DEFINE_string('save_dir', '', 'Directory to save models.')
flags.DEFINE_integer('seed', 42, 'Fixed random seed for training.')
# Fix: the help text was copy-pasted from an unrelated flag ('Directory to
# write TensorBoard summaries.') and did not describe this boolean at all.
flags.DEFINE_boolean('use_gpu', False,
                     'Whether to place computation on a GPU device.')
flags.DEFINE_string('master', 'local', 'Location of the session.')
flags.DEFINE_integer('ps_tasks', 0, 'Number of Parameter Server tasks.')
flags.DEFINE_integer('task_id', 0, 'Id of the current TF task.')
def main(_):
  """Select the best expert checkpoint and record demonstration rollouts.

  Evaluates every checkpoint in FLAGS.expert_dir, restores the
  highest-reward one, rolls it out for FLAGS.num_expert_trajectories
  episodes into a replay buffer, and saves the pickled buffer under
  FLAGS.save_dir.  (The original docstring said "Run td3/ddpg training",
  which does not match what this function does.)
  """
  contrib_eager_python_tfe.enable_eager_execution()
  if FLAGS.use_gpu:
    # Enter the device scope for the rest of the process without a `with`.
    tf.device('/device:GPU:0').__enter__()
  # Guard against loading an expert trained on a different environment.
  if FLAGS.expert_dir.find(FLAGS.env) == -1:
    raise ValueError('Expert directory must contain the environment name')
  # Seed every RNG source for reproducible rollouts.
  tf.set_random_seed(FLAGS.seed)
  np.random.seed(FLAGS.seed)
  random.seed(FLAGS.seed)
  env = gym.make(FLAGS.env)
  env.seed(FLAGS.seed)
  obs_shape = env.observation_space.shape
  act_shape = env.action_space.shape
  # The replay buffer is persisted as a pickled string inside a TF variable.
  expert_replay_buffer_var = contrib_eager_python_tfe.Variable(
      '', name='expert_replay_buffer')
  saver = contrib_eager_python_tfe.Saver([expert_replay_buffer_var])
  tf.gfile.MakeDirs(FLAGS.save_dir)
  with tf.variable_scope('actor'):
    actor = Actor(obs_shape[0], act_shape[0])
  expert_saver = contrib_eager_python_tfe.Saver(actor.variables)
  # Evaluate each saved checkpoint (10 noise-free rollouts) and keep the best.
  best_checkpoint = None
  best_reward = float('-inf')
  checkpoint_state = tf.train.get_checkpoint_state(FLAGS.expert_dir)
  for checkpoint in checkpoint_state.all_model_checkpoint_paths:
    expert_saver.restore(checkpoint)
    expert_reward, _ = do_rollout(
        env, actor, replay_buffer=None, noise_scale=0.0, num_trajectories=10)
    if expert_reward > best_reward:
      best_reward = expert_reward
      best_checkpoint = checkpoint
  expert_saver.restore(best_checkpoint)
  # Collect the demonstration set with the best policy, again without noise.
  expert_replay_buffer = ReplayBuffer()
  expert_reward, _ = do_rollout(
      env,
      actor,
      replay_buffer=expert_replay_buffer,
      noise_scale=0.0,
      num_trajectories=FLAGS.num_expert_trajectories)
  logging.info('Expert reward %f', expert_reward)
  print('Expert reward {}'.format(expert_reward))
  # Serialize the buffer into the variable and checkpoint it to disk.
  expert_replay_buffer_var.assign(pickle.dumps(expert_replay_buffer))
  saver.save(os.path.join(FLAGS.save_dir, 'expert_replay_buffer'))
if __name__ == '__main__':
  app.run(main)
| {
"content_hash": "b17cefa30aeecabdd16f5349400246cb",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 77,
"avg_line_length": 32.95575221238938,
"alnum_prop": 0.7027389903329753,
"repo_name": "google-research/google-research",
"id": "209d2714798b3d22b297f957fdaaeef707fdf647",
"size": "4332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dac/create_demos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from mpi4py import MPI

# Worker half of a dynamic-process (spawn) example: attach to the master
# that spawned this process, merge into a single intracommunicator, report
# our rank, then tear everything down.
parent_comm = MPI.Comm.Get_parent()
merged = parent_comm.Merge()
rank, size = merged.Get_rank(), merged.Get_size()
print('ouvrier: %d/%d' % (rank, size))
# Disconnect in reverse order of creation.
merged.Disconnect()
parent_comm.Disconnect()
| {
"content_hash": "c49e027a138de590f875ed66c291e71b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 12.666666666666666,
"alnum_prop": 0.6684210526315789,
"repo_name": "rboman/progs",
"id": "d34e253b0cfe1cf9ed33c5ccbd3168ba83f552b5",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/mpi/test08_slave.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "15571"
},
{
"name": "C",
"bytes": "166004"
},
{
"name": "C#",
"bytes": "2021"
},
{
"name": "C++",
"bytes": "1063256"
},
{
"name": "CMake",
"bytes": "211806"
},
{
"name": "Eiffel",
"bytes": "5484041"
},
{
"name": "Fortran",
"bytes": "576316"
},
{
"name": "GLSL",
"bytes": "3366"
},
{
"name": "HTML",
"bytes": "7199"
},
{
"name": "Java",
"bytes": "21330"
},
{
"name": "JavaScript",
"bytes": "28"
},
{
"name": "Julia",
"bytes": "1730"
},
{
"name": "Lua",
"bytes": "10474"
},
{
"name": "M",
"bytes": "143"
},
{
"name": "MATLAB",
"bytes": "7915698"
},
{
"name": "Makefile",
"bytes": "251"
},
{
"name": "Objective-C++",
"bytes": "183"
},
{
"name": "PHP",
"bytes": "10089"
},
{
"name": "PostScript",
"bytes": "450068"
},
{
"name": "Processing",
"bytes": "2358"
},
{
"name": "Python",
"bytes": "1107870"
},
{
"name": "QMake",
"bytes": "3608"
},
{
"name": "SWIG",
"bytes": "14104"
},
{
"name": "Shell",
"bytes": "52373"
},
{
"name": "TeX",
"bytes": "166564"
}
],
"symlink_target": ""
} |
"""Environment variables, to track the state of libpb and the ports."""
from __future__ import absolute_import
import os
__all__ = [
    "CPUS", "CONFIG", "METHOD", "MODE", "PKG_MGMT", "STAGE", "TARGET",
    "env", "master", "flags",
]
# NOTE: __all__ previously exported "DEPEND", a name that is never defined in
# this module, which made `from libpb.env import *` raise AttributeError.
# The METHOD tuple (defined below) is exported in its place.
# Number of online CPUs, used to size concurrent work.
CPUS = os.sysconf("SC_NPROCESSORS_ONLN")
PORTSDIR = "/usr/ports"
PKG_CACHEDIR = "/var/cache/pkg"
# User-supplied make(1) environment overrides.
env = {}
master = {
    "PORTSDIR" : PORTSDIR, # Ports directory
    "PKG_CACHEDIR" : PKG_CACHEDIR # Local cache of remote repositories
}
###############################################################################
# LIBPB STATE FLAGS
###############################################################################
# buildstatus - The minimum install stage required before a port will be build.
#   This impacts when a dependency is considered resolved.
#
# chroot - The chroot directory to use.  If blank then the current root
#   (i.e. /) is used.  A mixture of `chroot' and direct file inspection is
#   used when an actual chroot is specified.
#
# config - The criteria required before prompting the user with configuring a
#   port.  The currently supported options are:
#               none    - never prompt (use the currently set options)
#               changed - only prompt if the options have changed
#               newer   - only prompt if the port is newer than when the port
#                         was last configured
#               all     - always prompt
#   (The code uses the literal "all" for this option; an older revision of
#   this comment called it "always".)
#
# debug - Collect and display extra debugging information about when a slot
#   was connected and when a signal was emitted.  Results in slower
#   performance and higher memory usage.
#
# fetch_only - Only fetch a port's distfiles.
#
# log_dir - Directory where the log files, of the port build, and for
#   portbuilder, are stored.
#
# log_file - The log file for portbuilder
#
# method - The methods used to resolve a dependency.  Multiple methods may be
#   specified in a sequence but a method may only be used once.  Currently
#   supported methods are:
#               build   - build the dependency from a port
#               package - install the dependency from the local package
#                         repository (${PKGREPOSITORY})
#               repo    - install the dependency from a repository
#
# mode - The current mode of operation.  The currently supported modes are:
#               install   - act when all port's direct dependencies are resolved
#               recursive - act when all port's direct and indirect dependencies
#                           are resolved
#               clean     - only cleanup of ports are allowed (used for early
#                           program termination)
#
# no_op - Do not do anything (and behave as if the command was successful).
#
# no_op_print - When no_op is True, print the commands that would have been
#   executed.
#
# pkg_mgmt - The package management tools used.  The currently supported tools
#   are:
#               pkg   - The package tools shipped with FreeBSD base
#               pkgng - The next generation package tools shipped with ports
#
# target - The dependency targets when building a port required by a dependant.
#   The currently supported targets are:
#               install - install the port
#               package - package the port
#               clean   - clean the port, may be specified before and/or
#                         after the install/package target indicating that the
#                         port should cleaned before or after, respectively.
CONFIG = ("none", "changed", "newer", "all")
METHOD = ("build", "package", "repo")
MODE = ("install", "recursive", "clean")
PKG_MGMT = ("pkg", "pkgng")
STAGE = (0, 1, 2, 3)
TARGET = ("clean", "install", "package")
flags = {
    "buildstatus" : 0, # The minimum level for build
    "chroot" : "", # Chroot directory of system
    "config" : "changed", # Configure ports based on criteria
    "debug" : True, # Print extra debug messages
    "fetch_only" : False, # Only fetch ports
    "log_dir" : "/tmp/portbuilder", # Directory for logging information
    "log_file" : "portbuilder", # General log file
    "method" : ["build"], # Resolve dependencies methods
    "mode" : "install", # Mode of operation
    "no_op" : False, # Do nothing
    "no_op_print" : False, # Print commands instead of execution
    "pkg_mgmt" : "pkg", # The package system used ('pkg(ng)?')
    "target" : ["install", "clean"] # Dependency target (aka DEPENDS_TARGET)
}
| {
"content_hash": "8e84c2ef088a9cf376b116171e4dd5a1",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 80,
"avg_line_length": 45.05769230769231,
"alnum_prop": 0.5840802390098164,
"repo_name": "DragonSA/portbuilder",
"id": "cab36ff4982c0ad9f8c41af60e920a22f28a09b5",
"size": "4686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libpb/env.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "146688"
},
{
"name": "Shell",
"bytes": "971"
}
],
"symlink_target": ""
} |
"""
Tts API
Description # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from telestream_cloud_tts.api_client import ApiClient
from telestream_cloud_tts.exceptions import (
ApiTypeError,
ApiValueError
)
class TtsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_corpus(self, project_id, name, body, **kwargs): # noqa: E501
"""Creates a new Corpus # noqa: E501
Creates a new Corpus # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_corpus(project_id, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param str name: Corpus name (required)
:param str body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_corpus_with_http_info(project_id, name, body, **kwargs) # noqa: E501
    def create_corpus_with_http_info(self, project_id, name, body, **kwargs):  # noqa: E501
        """Creates a new Corpus  # noqa: E501
        Creates a new Corpus  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_corpus_with_http_info(project_id, name, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str name: Corpus name (required)
        :param str body: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        # Whitelist of accepted keyword arguments for this endpoint.
        all_params = ['project_id', 'name', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any unexpected keyword argument before building the request.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_corpus" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `create_corpus`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `create_corpus`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_corpus`")  # noqa: E501
        collection_formats = {}
        # Substitutions for the {projectID} and {name} path template slots.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The raw corpus text is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['text/plain'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        return self.api_client.call_api(
            '/projects/{projectID}/corpora/{name}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def create_job(self, project_id, job, **kwargs): # noqa: E501
"""Creates a new Job # noqa: E501
Creates a new Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_job(project_id, job, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param Job job: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_job_with_http_info(project_id, job, **kwargs) # noqa: E501
    def create_job_with_http_info(self, project_id, job, **kwargs):  # noqa: E501
        """Creates a new Job  # noqa: E501
        Creates a new Job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_job_with_http_info(project_id, job, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param Job job: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(Job, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        # Whitelist of accepted keyword arguments for this endpoint.
        all_params = ['project_id', 'job']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any unexpected keyword argument before building the request.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `create_job`")  # noqa: E501
        # verify the required parameter 'job' is set
        if self.api_client.client_side_validation and ('job' not in local_var_params or  # noqa: E501
                                                        local_var_params['job'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `job` when calling `create_job`")  # noqa: E501
        collection_formats = {}
        # Substitution for the {projectID} path template slot.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The Job model is serialized as the JSON request body.
        body_params = None
        if 'job' in local_var_params:
            body_params = local_var_params['job']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        return self.api_client.call_api(
            '/projects/{projectID}/jobs', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Job',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def create_project(self, project, **kwargs): # noqa: E501
"""Creates a new Project # noqa: E501
Creates a new Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project(project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Project project: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_project_with_http_info(project, **kwargs) # noqa: E501
    def create_project_with_http_info(self, project, **kwargs):  # noqa: E501
        """Creates a new Project  # noqa: E501
        Creates a new Project  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_project_with_http_info(project, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param Project project: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(Project, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        # Whitelist of accepted keyword arguments for this endpoint.
        all_params = ['project']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any unexpected keyword argument before building the request.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_project" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project' is set
        if self.api_client.client_side_validation and ('project' not in local_var_params or  # noqa: E501
                                                        local_var_params['project'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project` when calling `create_project`")  # noqa: E501
        collection_formats = {}
        # This endpoint takes no path parameters.
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The Project model is serialized as the JSON request body.
        body_params = None
        if 'project' in local_var_params:
            body_params = local_var_params['project']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        return self.api_client.call_api(
            '/projects', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Project',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def delete_corpus(self, project_id, name, **kwargs):  # noqa: E501
        """Deletes the Corpus  # noqa: E501
        Deletes the Corpus  # noqa: E501
        (Docstring corrected: it previously read "Creates a new Corpus", a
        copy-paste error — the request below is an HTTP DELETE.)
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_corpus(project_id, name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str name: Corpus name (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.delete_corpus_with_http_info(project_id, name, **kwargs)  # noqa: E501
    def delete_corpus_with_http_info(self, project_id, name, **kwargs):  # noqa: E501
        """Deletes the Corpus  # noqa: E501
        Deletes the Corpus  # noqa: E501
        (Docstring corrected: it previously read "Creates a new Corpus", a
        copy-paste error — the request below is an HTTP DELETE.)
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_corpus_with_http_info(project_id, name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str name: Corpus name (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        # Whitelist of accepted keyword arguments for this endpoint.
        all_params = ['project_id', 'name']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any unexpected keyword argument before building the request.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_corpus" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `delete_corpus`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_corpus`")  # noqa: E501
        collection_formats = {}
        # Substitutions for the {projectID} and {name} path template slots.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # DELETE carries no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        return self.api_client.call_api(
            '/projects/{projectID}/corpora/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_job(self, project_id, job_id, **kwargs): # noqa: E501
"""Deletes the Job # noqa: E501
Deletes the Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_job(project_id, job_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param str job_id: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_job_with_http_info(project_id, job_id, **kwargs) # noqa: E501
    def delete_job_with_http_info(self, project_id, job_id, **kwargs):  # noqa: E501
        """Deletes the Job  # noqa: E501
        Deletes the Job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_job_with_http_info(project_id, job_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str job_id: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, job_id, kwargs).
        local_var_params = locals()
        all_params = ['project_id', 'job_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `delete_job`")  # noqa: E501
        # verify the required parameter 'job_id' is set
        if self.api_client.client_side_validation and ('job_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['job_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `job_id` when calling `delete_job`")  # noqa: E501
        collection_formats = {}
        # Values substituted into the {projectID}/{jobID} path placeholders.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        if 'job_id' in local_var_params:
            path_params['jobID'] = local_var_params['job_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; response_type=None means the body is not parsed.
        return self.api_client.call_api(
            '/projects/{projectID}/jobs/{jobID}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_project(self, project_id, **kwargs): # noqa: E501
"""Deletes the Project # noqa: E501
Deletes the Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_project_with_http_info(project_id, **kwargs) # noqa: E501
    def delete_project_with_http_info(self, project_id, **kwargs):  # noqa: E501
        """Deletes the Project  # noqa: E501
        Deletes the Project  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_project_with_http_info(project_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, kwargs).
        local_var_params = locals()
        all_params = ['project_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_project" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `delete_project`")  # noqa: E501
        collection_formats = {}
        # Value substituted into the {projectID} path placeholder.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; response_type=None means the body is not parsed.
        return self.api_client.call_api(
            '/projects/{projectID}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_job(self, project_id, job_id, **kwargs): # noqa: E501
"""Returns the Job # noqa: E501
Returns the Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_job(project_id, job_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param str job_id: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_job_with_http_info(project_id, job_id, **kwargs) # noqa: E501
    def get_job_with_http_info(self, project_id, job_id, **kwargs):  # noqa: E501
        """Returns the Job  # noqa: E501
        Returns the Job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_job_with_http_info(project_id, job_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str job_id: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(Job, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, job_id, kwargs).
        local_var_params = locals()
        all_params = ['project_id', 'job_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `get_job`")  # noqa: E501
        # verify the required parameter 'job_id' is set
        if self.api_client.client_side_validation and ('job_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['job_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `job_id` when calling `get_job`")  # noqa: E501
        collection_formats = {}
        # Values substituted into the {projectID}/{jobID} path placeholders.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        if 'job_id' in local_var_params:
            path_params['jobID'] = local_var_params['job_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; the JSON body is deserialized into a Job.
        return self.api_client.call_api(
            '/projects/{projectID}/jobs/{jobID}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Job',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_project(self, project_id, **kwargs): # noqa: E501
"""Returns the Project # noqa: E501
Returns the Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_project_with_http_info(project_id, **kwargs) # noqa: E501
    def get_project_with_http_info(self, project_id, **kwargs):  # noqa: E501
        """Returns the Project  # noqa: E501
        Returns the Project  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_project_with_http_info(project_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(Project, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, kwargs).
        local_var_params = locals()
        all_params = ['project_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_project" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `get_project`")  # noqa: E501
        collection_formats = {}
        # Value substituted into the {projectID} path placeholder.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; the JSON body is deserialized into a Project.
        return self.api_client.call_api(
            '/projects/{projectID}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Project',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def getget_corpus(self, project_id, name, **kwargs): # noqa: E501
"""Returns the Corpus # noqa: E501
Returns the Corpus # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.getget_corpus(project_id, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param str name: Corpus name (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Corpus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.getget_corpus_with_http_info(project_id, name, **kwargs) # noqa: E501
    def getget_corpus_with_http_info(self, project_id, name, **kwargs):  # noqa: E501
        """Returns the Corpus  # noqa: E501
        Returns the Corpus  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.getget_corpus_with_http_info(project_id, name, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str name: Corpus name (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(Corpus, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, name, kwargs).
        local_var_params = locals()
        all_params = ['project_id', 'name']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method getget_corpus" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `getget_corpus`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `getget_corpus`")  # noqa: E501
        collection_formats = {}
        # Values substituted into the {projectID}/{name} path placeholders.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; the JSON body is deserialized into a Corpus.
        return self.api_client.call_api(
            '/projects/{projectID}/corpora/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Corpus',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def job_outputs(self, project_id, job_id, **kwargs): # noqa: E501
"""Returns the Job Outputs # noqa: E501
Returns the Job Outputs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.job_outputs(project_id, job_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param str job_id: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[JobOutput]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.job_outputs_with_http_info(project_id, job_id, **kwargs) # noqa: E501
    def job_outputs_with_http_info(self, project_id, job_id, **kwargs):  # noqa: E501
        """Returns the Job Outputs  # noqa: E501
        Returns the Job Outputs  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.job_outputs_with_http_info(project_id, job_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str job_id: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(list[JobOutput], status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, job_id, kwargs).
        local_var_params = locals()
        all_params = ['project_id', 'job_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method job_outputs" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `job_outputs`")  # noqa: E501
        # verify the required parameter 'job_id' is set
        if self.api_client.client_side_validation and ('job_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['job_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `job_id` when calling `job_outputs`")  # noqa: E501
        collection_formats = {}
        # Values substituted into the {projectID}/{jobID} path placeholders.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        if 'job_id' in local_var_params:
            path_params['jobID'] = local_var_params['job_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; the JSON body becomes a list of JobOutput items.
        return self.api_client.call_api(
            '/projects/{projectID}/jobs/{jobID}/outputs', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[JobOutput]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def job_result(self, project_id, job_id, **kwargs): # noqa: E501
"""Returns the Job Result # noqa: E501
Returns the Job Result # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.job_result(project_id, job_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param str job_id: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: JobResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.job_result_with_http_info(project_id, job_id, **kwargs) # noqa: E501
    def job_result_with_http_info(self, project_id, job_id, **kwargs):  # noqa: E501
        """Returns the Job Result  # noqa: E501
        Returns the Job Result  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.job_result_with_http_info(project_id, job_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param str job_id: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(JobResult, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, job_id, kwargs).
        local_var_params = locals()
        all_params = ['project_id', 'job_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method job_result" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `job_result`")  # noqa: E501
        # verify the required parameter 'job_id' is set
        if self.api_client.client_side_validation and ('job_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['job_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `job_id` when calling `job_result`")  # noqa: E501
        collection_formats = {}
        # Values substituted into the {projectID}/{jobID} path placeholders.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        if 'job_id' in local_var_params:
            path_params['jobID'] = local_var_params['job_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; the JSON body is deserialized into a JobResult.
        return self.api_client.call_api(
            '/projects/{projectID}/jobs/{jobID}/result', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='JobResult',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_corpora(self, project_id, **kwargs): # noqa: E501
"""Returns a collection of Corpora # noqa: E501
Returns a collection of Corpora # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_corpora(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CorporaCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_corpora_with_http_info(project_id, **kwargs) # noqa: E501
    def list_corpora_with_http_info(self, project_id, **kwargs):  # noqa: E501
        """Returns a collection of Corpora  # noqa: E501
        Returns a collection of Corpora  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_corpora_with_http_info(project_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str project_id: ID of the Project (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(CorporaCollection, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is evaluated before any other assignment so it
        # captures exactly the call arguments (project_id, kwargs).
        local_var_params = locals()
        all_params = ['project_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Fold the caller's kwargs into the parameter map, rejecting any
        # keyword that is not a known operation or transport option.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_corpora" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'project_id' is set
        if self.api_client.client_side_validation and ('project_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['project_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project_id` when calling `list_corpora`")  # noqa: E501
        collection_formats = {}
        # Value substituted into the {projectID} path placeholder.
        path_params = {}
        if 'project_id' in local_var_params:
            path_params['projectID'] = local_var_params['project_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiKey']  # noqa: E501
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient; the JSON body becomes a CorporaCollection.
        return self.api_client.call_api(
            '/projects/{projectID}/corpora', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CorporaCollection',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_jobs(self, project_id, **kwargs): # noqa: E501
"""Returns a collection of Jobs # noqa: E501
Returns a collection of Jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_jobs(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param int page: page number
:param int per_page: number of records per page
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: JobsCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_jobs_with_http_info(project_id, **kwargs) # noqa: E501
def list_jobs_with_http_info(self, project_id, **kwargs): # noqa: E501
"""Returns a collection of Jobs # noqa: E501
Returns a collection of Jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_jobs_with_http_info(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param int page: page number
:param int per_page: number of records per page
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(JobsCollection, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['project_id', 'page', 'per_page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_jobs" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'project_id' is set
if self.api_client.client_side_validation and ('project_id' not in local_var_params or # noqa: E501
local_var_params['project_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_id` when calling `list_jobs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['projectID'] = local_var_params['project_id'] # noqa: E501
query_params = []
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'per_page' in local_var_params and local_var_params['per_page'] is not None: # noqa: E501
query_params.append(('per_page', local_var_params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey'] # noqa: E501
return self.api_client.call_api(
'/projects/{projectID}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='JobsCollection', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_projects(self, **kwargs): # noqa: E501
"""Returns a collection of Projects # noqa: E501
Returns a collection of Projects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_projects(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ProjectsCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_projects_with_http_info(**kwargs) # noqa: E501
def list_projects_with_http_info(self, **kwargs): # noqa: E501
"""Returns a collection of Projects # noqa: E501
Returns a collection of Projects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_projects_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ProjectsCollection, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_projects" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey'] # noqa: E501
return self.api_client.call_api(
'/projects', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProjectsCollection', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def train_project(self, project_id, **kwargs): # noqa: E501
"""Queues training # noqa: E501
Queues training # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.train_project(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.train_project_with_http_info(project_id, **kwargs) # noqa: E501
def train_project_with_http_info(self, project_id, **kwargs): # noqa: E501
"""Queues training # noqa: E501
Queues training # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.train_project_with_http_info(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['project_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method train_project" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'project_id' is set
if self.api_client.client_side_validation and ('project_id' not in local_var_params or # noqa: E501
local_var_params['project_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_id` when calling `train_project`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['projectID'] = local_var_params['project_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey'] # noqa: E501
return self.api_client.call_api(
'/projects/{projectID}/train', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def update_project(self, project_id, project, **kwargs): # noqa: E501
"""Updates an existing Project # noqa: E501
Updates an existing Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project(project_id, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param Project project: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_project_with_http_info(project_id, project, **kwargs) # noqa: E501
def update_project_with_http_info(self, project_id, project, **kwargs): # noqa: E501
"""Updates an existing Project # noqa: E501
Updates an existing Project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project_with_http_info(project_id, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project_id: ID of the Project (required)
:param Project project: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Project, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['project_id', 'project'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_project" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'project_id' is set
if self.api_client.client_side_validation and ('project_id' not in local_var_params or # noqa: E501
local_var_params['project_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project_id` when calling `update_project`") # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and ('project' not in local_var_params or # noqa: E501
local_var_params['project'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project` when calling `update_project`") # noqa: E501
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['projectID'] = local_var_params['project_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'project' in local_var_params:
body_params = local_var_params['project']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey'] # noqa: E501
return self.api_client.call_api(
'/projects/{projectID}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Project', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| {
"content_hash": "2e8e3ebd709d136bcd1f39b6c92925c2",
"timestamp": "",
"source": "github",
"line_count": 1852,
"max_line_length": 122,
"avg_line_length": 46.56425485961123,
"alnum_prop": 0.5668564537263587,
"repo_name": "Telestream/telestream-cloud-python-sdk",
"id": "455b8837344252df1ab044d7b8e2a2f6493b1ef4",
"size": "86254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telestream_cloud_tts_sdk/telestream_cloud_tts/api/tts_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1339719"
},
{
"name": "Shell",
"bytes": "6712"
}
],
"symlink_target": ""
} |
'''@file trainer.py
neural network trainer environment'''
import os
from abc import ABCMeta, abstractmethod
from time import time, sleep
import tensorflow as tf
import numpy as np
class Trainer(object):
    '''General class outlining the training environment of a classifier.'''

    # Python 2 style abstract-base-class marker (works with the abc module).
    __metaclass__ = ABCMeta

    def __init__(self,
                 conf,
                 decoder,
                 classifier,
                 input_dim,
                 reconstruction_dim,
                 dispenser,
                 val_reader,
                 val_targets,
                 expdir,
                 server,
                 task_index):
        '''
        NnetTrainer constructor, creates the training graph

        Args:
            classifier: the neural net classifier that will be trained
            conf: the trainer config
            decoder: a callable that will create a decoder
            input_dim: the input dimension to the nnnetgraph
            reconstruction_dim: dimension of the reconstruction targets
            dispenser: a Batchdispenser object
            val_reader: the feature reader for the validation data if None
                validation will not be used
            val_targets: a dictionary containing the targets of the validation
                set
            expdir: directory where the summaries will be written
            server: optional server to be used for distributed training
            task_index: optional index of the worker task in the cluster
        '''

        self.conf = conf
        self.dispenser = dispenser

        # total number of parameter-update steps: each step aggregates
        # numbatches_to_aggregate batches (at least one)
        self.num_steps = int(dispenser.num_batches*int(conf['num_epochs'])
                             /max(1, int(conf['numbatches_to_aggregate'])))
        self.val_reader = val_reader
        self.val_targets = val_targets

        self.expdir = expdir
        self.server = server
        cluster = tf.train.ClusterSpec(server.server_def.cluster)

        #save the max lengths
        self.max_target_length1, self.max_target_length2 =\
            dispenser.max_target_length
        self.max_input_length = dispenser.max_input_length

        # save the boolean that holds if doing learning rate adaptation
        # (config values are strings, hence the explicit 'True'/'False' check)
        if 'learning_rate_adaptation' in conf:
            if conf['learning_rate_adaptation'] == 'True':
                self.learning_rate_adaptation = True
            elif conf['learning_rate_adaptation'] == 'False':
                self.learning_rate_adaptation = False
            else:
                raise Exception('wrong kind of info in \
                    learning_rate_adaptation')
        else:
            # if not specified, assume learning rate adaptation is not needed
            self.learning_rate_adaptation = False

        #create the graph
        self.graph = tf.Graph()

        if 'local' in cluster.as_dict():
            num_replicas = 1
        else:
            #distributed training
            num_replicas = len(cluster.as_dict()['worker'])

        # worker 0 is the chief: it checkpoints and saves the final model
        self.is_chief = task_index == 0

        device = tf.train.replica_device_setter(
            cluster=cluster,
            worker_device='/job:worker/task:%d' % task_index)

        #define the placeholders in the graph
        with self.graph.as_default():
            with tf.device(device):

                #create the inputs placeholder
                self.inputs = tf.placeholder(
                    dtype=tf.float32,
                    shape=[dispenser.size, self.max_input_length,
                           input_dim],
                    name='inputs')

                #the first part of the tupple of targets (text targets)
                targets1 = tf.placeholder(
                    dtype=tf.int32,
                    shape=[dispenser.size, self.max_target_length1],
                    name='targets1')

                #second part of the tupple of targets
                #(audio samples or input features)
                targets2 = tf.placeholder(
                    dtype=tf.float32,
                    shape=[dispenser.size, self.max_target_length2,
                           reconstruction_dim],
                    name='targets2')

                # the targets are passed together as a tupple
                self.targets = (targets1, targets2)

                #the length of all the input sequences
                self.input_seq_length = tf.placeholder(
                    dtype=tf.int32,
                    shape=[dispenser.size],
                    name='input_seq_length')

                #length of all the output sequences (first from target tuple)
                target_seq_length1 = tf.placeholder(
                    dtype=tf.int32,
                    shape=[dispenser.size],
                    name='output_seq_length1')

                #length of the sequences of the second element of target tuple
                target_seq_length2 = tf.placeholder(
                    dtype=tf.int32,
                    shape=[dispenser.size],
                    name='output_seq_length2')

                # last two placeholders are passed together as one argument
                self.target_seq_length = \
                    (target_seq_length1, target_seq_length2)

                #a placeholder to set the position
                self.pos_in = tf.placeholder(
                    dtype=tf.int32,
                    shape=[],
                    name='pos_in')

                # placeholder used to feed a freshly computed validation loss
                self.val_loss_in = tf.placeholder(
                    dtype=tf.float32,
                    shape=[],
                    name='val_loss_in')

                #compute the training outputs of the classifier
                trainlogits, logit_seq_length = classifier(
                    inputs=self.inputs,
                    input_seq_length=self.input_seq_length,
                    targets=self.targets,
                    target_seq_length=self.target_seq_length,
                    is_training=True)

                #create a decoder object for validation
                if self.conf['validation_mode'] == 'decode':
                    self.decoder = decoder()
                elif self.conf['validation_mode'] == 'loss':
                    # build a non-training copy of the model and compute its
                    # loss; used by compute_val_loss during validation
                    vallogits, val_logit_seq_length = classifier(
                        inputs=self.inputs,
                        input_seq_length=self.input_seq_length,
                        targets=self.targets,
                        target_seq_length=self.target_seq_length,
                        is_training=False)

                    self.decoder_loss = self.compute_loss(
                        self.targets, vallogits, val_logit_seq_length,
                        self.target_seq_length)
                else:
                    raise Exception('unknown validation mode %s' %
                                    self.conf['validation_mode'])

                #a variable to hold the amount of steps already taken
                self.global_step = tf.get_variable(
                    name='global_step',
                    shape=[],
                    dtype=tf.int32,
                    initializer=tf.constant_initializer(0),
                    trainable=False)

                with tf.variable_scope('train'):

                    #a variable that indicates if features are being read
                    # (acts as a lock between workers sharing the reader)
                    self.reading = tf.get_variable(
                        name='reading',
                        shape=[],
                        dtype=tf.bool,
                        initializer=tf.constant_initializer(False),
                        trainable=False)

                    #the position in the feature reader
                    self.pos = tf.get_variable(
                        name='position',
                        shape=[],
                        dtype=tf.int32,
                        initializer=tf.constant_initializer(0),
                        trainable=False)

                    #the current validation loss
                    # (initialized near float max so the first validation
                    # always improves on it)
                    self.val_loss = tf.get_variable(
                        name='validation_loss',
                        shape=[],
                        dtype=tf.float32,
                        initializer=tf.constant_initializer(1.79e+308),
                        trainable=False)

                    #a variable that specifies when the model was last validated
                    # (starts at -valid_frequency so validation runs at step 0)
                    self.validated_step = tf.get_variable(
                        name='validated_step',
                        shape=[],
                        dtype=tf.int32,
                        initializer=tf.constant_initializer(
                            -int(conf['valid_frequency'])),
                        trainable=False)

                    #operation to start reading
                    self.block_reader = self.reading.assign(True).op

                    #operation to release the reader
                    self.release_reader = self.reading.assign(False).op

                    #operation to set the position
                    self.set_pos = self.pos.assign(self.pos_in).op

                    #operation to update the validated steps
                    self.set_val_step = self.validated_step.assign(
                        self.global_step).op

                    #operation to set the validation loss
                    self.set_val_loss = self.val_loss.assign(
                        self.val_loss_in).op

                    #a variable to scale the learning rate (used to reduce the
                    #learning rate in case validation performance drops)
                    learning_rate_fact = tf.get_variable(
                        name='learning_rate_fact',
                        shape=[],
                        initializer=tf.constant_initializer(1.0),
                        trainable=False)

                    #operation to half the learning rate
                    self.halve_learningrate_op = learning_rate_fact.assign(
                        learning_rate_fact/2).op

                    #factor to scale the learning rate according to how many
                    # of the elements in a batch are equal to zero, if we are
                    # using scaled learning rate
                    if self.learning_rate_adaptation:
                        empty_factor = tf.get_variable(
                            name='empty_targets_factor',
                            shape=[],
                            initializer=tf.constant_initializer(1.0),
                            trainable=False)

                        # fraction of utterances with a non-empty text target
                        empty_targets = tf.equal(self.target_seq_length[0], 0)
                        ones = tf.ones(dispenser.size)
                        zeros = tf.zeros(dispenser.size)
                        binary = tf.where(empty_targets, zeros, ones)
                        how_many_not_empty = tf.reduce_sum(binary)
                        empty_factor_new = how_many_not_empty/dispenser.size
                        self.update_emptyfactor_op = empty_factor.assign(
                            empty_factor_new).op
                    else:
                        empty_factor = 1

                    #compute the learning rate with exponential decay and scale
                    #with the learning rate factor
                    self.learning_rate = (tf.train.exponential_decay(
                        learning_rate=float(conf['initial_learning_rate']),
                        global_step=self.global_step,
                        decay_steps=self.num_steps,
                        decay_rate=float(conf['learning_rate_decay']))
                                          * learning_rate_fact * empty_factor)

                    #create the optimizer
                    if 'optimizer' in conf:
                        # we can explicitly specify to use gradient descent
                        if conf['optimizer'] == 'gradient_descent':
                            optimizer = tf.train.GradientDescentOptimizer(
                                self.learning_rate)
                        elif conf['optimizer'] == 'adam':
                            # or to use adam
                            if 'beta1' in conf and 'beta2' in conf:
                                # in which case we can also adapt the params
                                optimizer = tf.train.AdamOptimizer(
                                    learning_rate=self.learning_rate,
                                    beta1=float(conf['beta1']),
                                    beta2=float(conf['beta2'])
                                    )
                            else:
                                # if params not specified, use default
                                optimizer = tf.train.AdamOptimizer(
                                    learning_rate=self.learning_rate)
                        else:
                            raise Exception('The trainer ' + conf['optimizer'] \
                                + ' is not defined.')
                    else:
                        # default is adam with standard params
                        optimizer = tf.train.AdamOptimizer(self.learning_rate)

                    #create an optimizer that aggregates gradients
                    if int(conf['numbatches_to_aggregate']) > 0:
                        optimizer = tf.train.SyncReplicasOptimizer(
                            opt=optimizer,
                            replicas_to_aggregate=int(
                                conf['numbatches_to_aggregate']),
                            total_num_replicas=num_replicas)

                    #compute the loss
                    self.loss = self.compute_loss(
                        self.targets, trainlogits, logit_seq_length,
                        self.target_seq_length)

                    #compute the gradients
                    grads = optimizer.compute_gradients(self.loss)

                    with tf.variable_scope('clip'):
                        #clip the gradients to [-1, 1] to avoid exploding
                        #gradients
                        grads = [(tf.clip_by_value(grad, -1., 1.), var)
                                 for grad, var in grads]

                    #opperation to apply the gradients
                    apply_gradients_op = optimizer.apply_gradients(
                        grads_and_vars=grads,
                        global_step=self.global_step,
                        name='apply_gradients')

                    #all remaining operations with the UPDATE_OPS GraphKeys
                    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

                    #create an operation to update the gradients, the batch_loss
                    #and do all other update ops
                    self.update_op = tf.group(
                        *([apply_gradients_op] + update_ops),
                        name='update')

                #create the summaries for visualisation
                tf.summary.scalar('validation loss', self.val_loss)
                tf.summary.scalar('learning rate', self.learning_rate)

                #create a histogram for all trainable parameters
                for param in tf.trainable_variables():
                    tf.summary.histogram(param.name, param)

            #create the schaffold
            self.scaffold = tf.train.Scaffold()
    @abstractmethod
    def compute_loss(self, targets, logits, logit_seq_length,
                     target_seq_length):
        '''
        Compute the loss

        Creates the operation to compute the loss, this is specific to each
        trainer. Concrete Trainer subclasses must override this method; it is
        called during graph construction in __init__.

        Args:
            targets: a tupple of targets, the first one being a
                [batch_size, max_target_length] tensor containing the real text
                targets, the second one being a [batch_size, max_length x dim]
                tensor containing the reconstruction features.
            logits: a tuple of [batch_size, max_logit_length, dim] tensors
                containing the logits for the text and the reconstruction
            logit_seq_length: the length of all the logit sequences as a tuple
                of [batch_size] vectors
            target_seq_length: the length of all the target sequences as a
                tuple of two [batch_size] vectors, both for one of the elements
                in the targets tuple

        Returns:
            a scalar value containing the total loss
        '''

        raise NotImplementedError('Abstract method')
    def train(self):
        '''train the model

        Runs the training loop inside a MonitoredTrainingSession until
        num_steps updates have been applied, validating every
        valid_frequency steps.'''

        #look for the master if distributed training is done
        master = self.server.target

        #start the session and standart servises
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        #config.log_device_placement = True

        #create a hook for saving the final model
        save_hook = SaveAtEnd(os.path.join(self.expdir, 'model',
                                           'network.ckpt'))

        with self.graph.as_default():
            with tf.train.MonitoredTrainingSession(
                master=master,
                is_chief=self.is_chief,
                checkpoint_dir=os.path.join(self.expdir, 'logdir'),
                scaffold=self.scaffold,
                chief_only_hooks=[save_hook],
                config=config) as sess:

                #set the reading flag to false
                sess.run(self.release_reader)

                #start the training loop
                #pylint: disable=E1101
                while (not sess.should_stop()
                       and self.global_step.eval(sess) < self.num_steps):

                    #check if validation is due
                    [step, val_step] = sess.run(
                        [self.global_step, self.validated_step])
                    if (step - val_step >= int(self.conf['valid_frequency'])
                            and int(self.conf['valid_frequency']) > 0):
                        self.validate(sess)

                    #start time
                    start = time()

                    #wait until the reader is free
                    # (polling the shared 'reading' lock variable)
                    #pylint: disable=E1101
                    while self.reading.eval(sess):
                        sleep(1)

                    #block the reader
                    sess.run(self.block_reader)

                    #read a batch of data
                    #batch_target_tupples is a list of tupples
                    #pylint: disable=E1101
                    batch_data, batch_labels = self.dispenser.get_batch(
                        self.pos.eval(sess))

                    #update the position
                    self.set_pos.run(
                        session=sess,
                        feed_dict={self.pos_in:self.dispenser.pos})

                    #release the reader
                    sess.run(self.release_reader)

                    #update the model
                    loss, lr = self.update(batch_data, batch_labels, sess)

                    print(('step %d/%d loss: %f, learning rate: %f, '
                           'time elapsed: %f sec')
                          %(self.global_step.eval(sess), self.num_steps,
                            loss, lr, time()-start))

                #the chief will create the final model
                # NOTE(review): the 'model' directory must exist before the
                # SaveAtEnd hook fires at session close — confirm ordering
                if self.is_chief:
                    if not os.path.isdir(os.path.join(self.expdir, 'model')):
                        os.mkdir(os.path.join(self.expdir, 'model'))
    def update(self, inputs, targets, sess):
        '''
        update the neural model with a batch or training data

        Args:
            inputs: the inputs to the neural net, this should be a list
                containing an NxF matrix for each utterance in the batch where
                N is the number of frames in the utterance
            targets: the targets for neural net, should be a list of tuples,
                each tuple containing two N-dimensional vectors for one
                utterance
            sess: the session

        Returns:
            a pair containing:
                - the loss at this step
                - the learning rate used at this step
        '''

        # go from a list of tupples to two seperate lists
        targets1 = [t[0] for t in targets]
        targets2 = [t[1] for t in targets]

        #get a list of sequence lengths (true lengths before padding)
        input_seq_length = [i.shape[0] for i in inputs]
        target_seq_length1 = [t1.shape[0] for t1 in targets1]
        target_seq_length2 = [t2.shape[0] for t2 in targets2]

        #pad the inputs and targets untill the maximum lengths
        padded_inputs = np.array(pad(inputs, self.max_input_length))
        padded_targets1 = np.array(pad(targets1, self.max_target_length1))
        padded_targets2 = np.array(pad(targets2, self.max_target_length2))

        # first do an update of the emptyness factor
        # (recomputes the learning-rate scaling from how many utterances in
        # this batch have a non-empty text target)
        if self.learning_rate_adaptation:
            _ = sess.run(
                fetches=[self.update_emptyfactor_op],
                feed_dict={self.inputs:padded_inputs,
                           self.targets[0]:padded_targets1,
                           self.targets[1]:padded_targets2,
                           self.input_seq_length:input_seq_length,
                           self.target_seq_length[0]:target_seq_length1,
                           self.target_seq_length[1]:target_seq_length2})

        # apply one optimization step and fetch the loss and effective lr
        _, loss, lr = sess.run(
            fetches=[self.update_op,
                     self.loss,
                     self.learning_rate],
            feed_dict={self.inputs:padded_inputs,
                       self.targets[0]:padded_targets1,
                       self.targets[1]:padded_targets2,
                       self.input_seq_length:input_seq_length,
                       self.target_seq_length[0]:target_seq_length1,
                       self.target_seq_length[1]:target_seq_length2})

        return loss, lr
    def validate(self, sess):
        '''
        Evaluate the performance of the neural net and halves the learning rate
        if it is worse

        Depending on conf['validation_mode'] the score is either a decoding
        score ('decode') or the model loss on the validation set ('loss').

        Args:
            inputs: the inputs to the neural net, this should be a list
                containing NxF matrices for each utterance in the batch where
                N is the number of frames in the utterance
            targets: the one-hot encoded targets for neural net, this should be
                a list containing an NxO matrix for each utterance where O is
                the output dimension of the neural net
        '''

        #update the validated step
        sess.run([self.set_val_step])

        if self.conf['validation_mode'] == 'decode':
            outputs = self.decoder.decode(self.val_reader, sess)

            #when decoding, we want the targets to be only the text targets
            val_text_targets = dict()
            for utt_id in self.val_targets:
                val_text_targets[utt_id] = self.val_targets[utt_id][0]

            val_loss = self.decoder.score(outputs, val_text_targets)
        elif self.conf['validation_mode'] == 'loss':
            val_loss = self.compute_val_loss(self.val_reader, self.val_targets,
                                             sess)
        else:
            raise Exception(self.conf['validation_mode']+' is not a correct\
                choice for the validation mode')

        print 'validation loss: %f' % val_loss

        # halve the learning rate when validation got worse (only if
        # valid_adapt is enabled in the config)
        #pylint: disable=E1101
        if (val_loss > self.val_loss.eval(session=sess)
                and self.conf['valid_adapt'] == 'True'):

            print 'halving learning rate'
            sess.run([self.halve_learningrate_op])

        # remember this validation loss for the next comparison
        sess.run(self.set_val_loss, feed_dict={self.val_loss_in:val_loss})
def compute_val_loss(self, reader, targets, sess):
'''compute the validation loss on a set in the reader
Args:
reader: a reader to read the data
targets: the ground truth targets as a dictionary
sess: a tensorflow session
Returns:
the loss'''
looped = False
avrg_loss = 0.0
total_elements = 0
total_steps = int(np.ceil(float(reader.num_utt)/\
float(self.dispenser.size)))
step = 1
while not looped:
inputs = []
labels = []
for _ in range(self.dispenser.size):
#read a batch of data
(utt_id, inp, looped) = reader.get_utt()
inputs.append(inp)
labels.append(targets[utt_id])
if looped:
break
num_elements = len(inputs)
#add empty elements to the inputs to get a full batch
feat_dim = inputs[0].shape[1]
if labels[0][1] is not None:
rec_dim = labels[0][1].shape[1]
else:
rec_dim = 1
inputs += [np.zeros([0, feat_dim])]*(
self.dispenser.size-len(inputs))
labels += [np.zeros([0]), np.zeros([0, rec_dim])]*(
self.dispenser.size-len(labels))
#get the sequence length
input_seq_length = [inp.shape[0] for inp in inputs]
label_seq_length1 = [lab[0].shape[0] for lab in labels]
label_seq_length2 = [lab[1].shape[0] if lab[1] is not None \
else 0 for lab in labels]
#pad and put in a tensor
input_tensor = np.array([np.append(
inp, np.zeros([self.max_input_length-inp.shape[0],
inp.shape[1]]), 0) for inp in inputs])
label_tensor1 = np.array([np.append(
lab[0], np.zeros([self.max_target_length1-lab[0].shape[0]]), 0)
for lab in labels])
if labels[0][1] is not None:
label_tensor2 = np.array([np.append(
lab[1], np.zeros([self.max_target_length2-lab[1].shape[0],
lab[1].shape[1]]), 0)
for lab in labels])
else:
label_tensor2 = np.zeros([self.dispenser.size, 1, 1])
print 'Doing validation, step %d/%d' %(step, total_steps)
loss = sess.run(
self.decoder_loss,
feed_dict={self.inputs:input_tensor,
self.input_seq_length:input_seq_length,
self.targets[0]:label_tensor1,
self.target_seq_length[0]:label_seq_length1,
self.targets[1]:label_tensor2,
self.target_seq_length[1]:label_seq_length2})
avrg_loss = ((total_elements*avrg_loss + num_elements*loss)/
(num_elements + total_elements))
total_elements += num_elements
step = step+1
return avrg_loss
def pad(inputs, length):
    '''
    Zero-pad every tensor in inputs along the time axis.

    Args:
        inputs: a list of time major tensors
        length: the target length of the first (time) dimension

    Returns:
        a list with the zero padded tensors
    '''
    padded = []
    for tensor in inputs:
        missing = length - tensor.shape[0]
        filler = np.zeros([missing] + list(tensor.shape[1:]))
        padded.append(np.append(tensor, filler, 0))
    return padded
class SaveAtEnd(tf.train.SessionRunHook):
    '''a training hook for saving the final model when the session closes'''
    def __init__(self, filename):
        '''hook constructor

        Args:
            filename: path where the final model checkpoint will be saved'''
        self.filename = filename
    def begin(self):
        '''this will be run at session creation'''
        #pylint: disable=W0201
        # The saver is created here rather than in __init__ so it picks up
        # the trainable variables of the graph current at session creation.
        self._saver = tf.train.Saver(tf.trainable_variables(), sharded=True)
    def end(self, session):
        '''this will be run at session closing'''
        # Persist the final values of all trainable variables.
        self._saver.save(session, self.filename)
| {
"content_hash": "02d50423841715826c5889f82fbca677",
"timestamp": "",
"source": "github",
"line_count": 687,
"max_line_length": 80,
"avg_line_length": 40.48471615720524,
"alnum_prop": 0.5031460108582317,
"repo_name": "JeroenBosmans/nabu",
"id": "6777ab2b583e63a3a3e59cf58ccbb3d1faa945b6",
"size": "27813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabu/neuralnetworks/trainers/trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "395778"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_secret
short_description: Manage docker secrets.
version_added: "2.4"
description:
- Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
- Adds to the metadata of new secrets 'ansible_key', an encrypted hash representation of the data, which is then used
    in future runs to test if a secret has changed. If 'ansible_key' is not present, then a secret will not be updated
unless the C(force) option is set.
- Updates to secrets are performed by removing the secret and creating it again.
options:
data:
description:
- The value of the secret. Required when state is C(present).
type: str
data_is_b64:
description:
- If set to C(true), the data is assumed to be Base64 encoded and will be
decoded before being used.
- To use binary C(data), it is better to keep it Base64 encoded and let it
be decoded by this option.
type: bool
default: no
version_added: "2.8"
labels:
description:
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
type: dict
force:
description:
- Use with state C(present) to always remove and recreate an existing secret.
- If I(true), an existing secret will be replaced, even if it has not changed.
type: bool
default: no
name:
description:
- The name of the secret.
type: str
required: yes
state:
description:
- Set to C(present), if the secret should exist, and C(absent), if it should not.
type: str
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
- docker.docker_py_2_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
- "Docker API >= 1.25"
author:
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Create secret foo (from a file on the control machine)
docker_secret:
name: foo
# If the file is JSON or binary, Ansible might modify it (because
# it is first decoded and later re-encoded). Base64-encoding the
# file directly after reading it prevents this to happen.
data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
data_is_b64: true
state: present
- name: Change the secret data
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the secret
two: '2'
state: present
- name: No change
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing secret
state: present
- name: Update an existing label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the secret
one: '1'
state: present
- name: Force the removal/creation of the secret
docker_secret:
name: foo
data: Goodnight everyone!
force: yes
state: present
- name: Remove secret foo
docker_secret:
name: foo
state: absent
'''
RETURN = '''
secret_id:
description:
- The ID assigned by Docker to the secret object.
returned: success and C(state == "present")
type: str
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
'''
import base64
import hashlib
try:
from docker.errors import APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.docker.common import AnsibleDockerClient, DockerBaseClass, compare_generic
from ansible.module_utils._text import to_native, to_bytes
class SecretManager(DockerBaseClass):
    '''Reconciles a single Docker secret with the requested module state.

    Reads its parameters from ``client.module.params`` and records the
    outcome (``changed``, ``secret_id``) in the shared ``results`` dict.
    '''

    def __init__(self, client, results):
        super(SecretManager, self).__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        params = self.client.module.params
        self.name = params.get('name')
        self.state = params.get('state')
        self.labels = params.get('labels')
        self.force = params.get('force')

        # The secret payload is handled as bytes; it may arrive Base64 encoded.
        self.data = params.get('data')
        if self.data is not None:
            if params.get('data_is_b64'):
                self.data = base64.b64decode(self.data)
            else:
                self.data = to_bytes(self.data)

        # Hash of the data, stored in the 'ansible_key' label for
        # idempotency checks; only computed when state == 'present'.
        self.data_key = None

    def __call__(self):
        '''Dispatch to the handler for the requested state.'''
        if self.state == 'absent':
            self.absent()
        elif self.state == 'present':
            self.data_key = hashlib.sha224(self.data).hexdigest()
            self.present()

    def get_secret(self):
        '''Return the existing secret with exactly this name, or None.'''
        try:
            # The name filter may match more than one secret, so check
            # each candidate for an exact name match below.
            candidates = self.client.secrets(filters={'name': self.name})
        except APIError as exc:
            self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))

        for candidate in candidates:
            if candidate['Spec']['Name'] == self.name:
                return candidate
        return None

    def create_secret(self):
        '''Create the secret and return its ID (None in check mode).'''
        # The secret data cannot be read back from Docker, so a hash of it
        # is stored in the 'ansible_key' label for later comparison.
        labels = {'ansible_key': self.data_key}
        if self.labels:
            labels.update(self.labels)

        secret_id = None
        try:
            if not self.check_mode:
                secret_id = self.client.create_secret(self.name, self.data, labels=labels)
        except APIError as exc:
            self.client.fail("Error creating secret: %s" % to_native(exc))

        if isinstance(secret_id, dict):
            secret_id = secret_id['ID']
        return secret_id

    def present(self):
        '''Ensure the secret exists, recreating it when it has changed.'''
        existing = self.get_secret()
        if not existing:
            self.results['changed'] = True
            self.results['secret_id'] = self.create_secret()
            return

        self.results['secret_id'] = existing['ID']
        spec = existing.get('Spec', {})
        stored_key = spec.get('Labels', {}).get('ansible_key')
        # Without a stored 'ansible_key' the data cannot be compared, so
        # only 'force' triggers an update in that case.
        data_changed = bool(stored_key) and stored_key != self.data_key
        labels_changed = not compare_generic(self.labels, spec.get('Labels'),
                                             'allow_more_present', 'dict')
        if data_changed or labels_changed or self.force:
            # Secrets are updated by removing and re-creating them.
            self.absent()
            self.results['secret_id'] = self.create_secret()
            self.results['changed'] = True

    def absent(self):
        '''Ensure the secret does not exist.'''
        existing = self.get_secret()
        if not existing:
            return
        try:
            if not self.check_mode:
                self.client.remove_secret(existing['ID'])
        except APIError as exc:
            self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
        self.results['changed'] = True
def main():
    '''Module entry point: build the Docker client, reconcile, report.'''
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        data=dict(type='str', no_log=True),
        data_is_b64=dict(type='bool', default=False),
        labels=dict(type='dict'),
        force=dict(type='bool', default=False)
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # 'data' is only required (and only meaningful) when creating.
        required_if=[('state', 'present', ['data'])],
        min_docker_version='2.1.0',
        min_docker_api_version='1.25',
    )

    results = dict(changed=False, secret_id='')
    SecretManager(client, results)()
    client.module.exit_json(**results)


if __name__ == '__main__':
    main()
| {
"content_hash": "d4f458fd3925377aa6ccfa45de4cac15",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 137,
"avg_line_length": 29.94463667820069,
"alnum_prop": 0.6067714351744858,
"repo_name": "SergeyCherepanov/ansible",
"id": "2554d90ac0143bd13315f59ef7050b43f4581262",
"size": "8803",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/docker/docker_secret.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``layout.newshape.fillcolor`` property."""

    def __init__(
        self, plotly_name="fillcolor", parent_name="layout.newshape", **kwargs
    ):
        # Default edit_type 'none' — presumably no replot is required when
        # this property changes; confirm against the plotly schema.
        edit_type = kwargs.pop("edit_type", "none")
        super(FillcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "93e98eb96f783d3896ef006e7a980401",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 32.61538461538461,
"alnum_prop": 0.6108490566037735,
"repo_name": "plotly/plotly.py",
"id": "8ccefafa0b710bac093b9f9a7a856982b0dc35e5",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/newshape/_fillcolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import click
@click.command()
def main(args=None):
    """Console script for langevin_dynamics"""
    # NOTE(review): 'args' is unused — click injects nothing here;
    # presumably kept for console_script compatibility. Confirm.
    # The docstring above doubles as the --help text shown by click,
    # so it must not be changed casually.
    click.echo("Replace this message by putting your code into "
               "langevin_dynamics.cli.main")
    click.echo("See click documentation at http://click.pocoo.org/")
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "621ec0e480d46e9055dcae325bcc3ee1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 26.583333333333332,
"alnum_prop": 0.6394984326018809,
"repo_name": "arosenstein/langevin_dynamics",
"id": "82c0bffb576215a33686840459cf5e188dececf2",
"size": "344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "langevin_dynamics/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10244"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import typer
from typer.testing import CliRunner
from docs_src.parameter_types.bool import tutorial004 as mod
# In-process CLI runner used by all tests below to invoke the app and
# capture its exit code and output.
runner = CliRunner()
# Wrap the tutorial's main() in a Typer app so it can be invoked.
app = typer.Typer()
app.command()(mod.main)
def test_help():
    """The help text advertises both the short and the long demo flag."""
    res = runner.invoke(app, ["--help"])
    assert res.exit_code == 0
    for flag in ("-d", "--demo"):
        assert flag in res.output
def test_main():
    """Without any flag the app runs in production mode."""
    res = runner.invoke(app)
    assert res.exit_code == 0
    assert "Running in production" in res.output
def test_demo():
    """The long --demo flag switches the app into demo mode."""
    res = runner.invoke(app, ["--demo"])
    assert res.exit_code == 0
    assert "Running demo" in res.output
def test_short_demo():
    """The short -d flag behaves exactly like --demo."""
    res = runner.invoke(app, ["-d"])
    assert res.exit_code == 0
    assert "Running demo" in res.output
def test_script():
    """The tutorial module is runnable as a script (under coverage)."""
    cmd = [sys.executable, "-m", "coverage", "run", mod.__file__, "--help"]
    completed = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    assert "Usage" in completed.stdout
| {
"content_hash": "f28e63815dd4c009e359e82d9ab50aa3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 22.27659574468085,
"alnum_prop": 0.6494746895893028,
"repo_name": "tiangolo/typer",
"id": "a01034d6ebc9ea421fead77236236e55734a7ad9",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tutorial/test_parameter_types/test_bool/test_tutorial004.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "380062"
},
{
"name": "Shell",
"bytes": "2257"
}
],
"symlink_target": ""
} |
"""Tests for the album art fetchers."""
import _common
from _common import unittest
from beetsplug import fetchart
from beets.autotag import AlbumInfo
from beets import library
from beets import importer
import os
import shutil
import StringIO
class MockHeaders(object):
    """Minimal stand-in for urllib response headers."""
    def __init__(self, typeval):
        # Content type string reported by gettype().
        self.typeval = typeval
    def gettype(self):
        """Return the stored content type string."""
        return self.typeval
class MockUrlRetrieve(object):
    """Fake urllib.urlretrieve that records the last requested URL."""
    def __init__(self, pathval, typeval):
        # Path and headers handed back from every call.
        self.pathval = pathval
        self.headers = MockHeaders(typeval)
        # URL of the most recent fetch; None until first called.
        self.fetched = None
    def __call__(self, url):
        self.fetched = url
        return self.pathval, self.headers
class AmazonArtTest(unittest.TestCase):
    """Checks for the Amazon ASIN-based art fetcher."""
    def test_invalid_type_returns_none(self):
        # A non-image content type must not produce an art path.
        fetchart.urllib.urlretrieve = MockUrlRetrieve('path', '')
        self.assertEqual(fetchart.art_for_asin('xxxx'), None)
    def test_jpeg_type_returns_path(self):
        # A JPEG content type yields the downloaded file's path.
        fetchart.urllib.urlretrieve = MockUrlRetrieve('somepath', 'image/jpeg')
        self.assertEqual(fetchart.art_for_asin('xxxx'), 'somepath')
class FSArtTest(unittest.TestCase):
    """Checks for finding cover art on the local filesystem."""
    def setUp(self):
        # Fresh scratch directory for every test.
        self.dpath = os.path.join(_common.RSRC, 'arttest')
        os.mkdir(self.dpath)
    def tearDown(self):
        shutil.rmtree(self.dpath)
    def test_finds_jpg_in_directory(self):
        jpg = os.path.join(self.dpath, 'a.jpg')
        _common.touch(jpg)
        self.assertEqual(fetchart.art_in_path(self.dpath), jpg)
    def test_appropriately_named_file_takes_precedence(self):
        for name in ('a.jpg', 'cover.jpg'):
            _common.touch(os.path.join(self.dpath, name))
        # 'cover.jpg' wins over other image files in the directory.
        self.assertEqual(fetchart.art_in_path(self.dpath),
                         os.path.join(self.dpath, 'cover.jpg'))
    def test_non_image_file_not_identified(self):
        _common.touch(os.path.join(self.dpath, 'a.txt'))
        self.assertEqual(fetchart.art_in_path(self.dpath), None)
class CombinedTest(unittest.TestCase):
    """End-to-end checks of art_for_album across the filesystem, Amazon,
    AAO and Cover Art Archive sources, with network access stubbed out."""
    def setUp(self):
        self.dpath = os.path.join(_common.RSRC, 'arttest')
        os.mkdir(self.dpath)
        # Replace urlopen so no real HTTP request is ever made; restored
        # in tearDown.
        self.old_urlopen = fetchart.urllib.urlopen
        fetchart.urllib.urlopen = self._urlopen
        self.page_text = ""
        self.urlopen_called = False
    def tearDown(self):
        shutil.rmtree(self.dpath)
        fetchart.urllib.urlopen = self.old_urlopen
    def _urlopen(self, url):
        # Fake urlopen: record that (and what) was fetched, then serve
        # self.page_text as the response body.
        self.urlopen_called = True
        self.fetched_url = url
        return StringIO.StringIO(self.page_text)
    def test_main_interface_returns_amazon_art(self):
        fetchart.urllib.urlretrieve = \
            MockUrlRetrieve('anotherpath', 'image/jpeg')
        album = _common.Bag(asin='xxxx')
        artpath = fetchart.art_for_album(album, None)
        self.assertEqual(artpath, 'anotherpath')
    def test_main_interface_returns_none_for_missing_asin_and_path(self):
        album = _common.Bag()
        artpath = fetchart.art_for_album(album, None)
        self.assertEqual(artpath, None)
    def test_main_interface_gives_precedence_to_fs_art(self):
        # A local image beats a (mocked) successful Amazon download.
        _common.touch(os.path.join(self.dpath, 'a.jpg'))
        fetchart.urllib.urlretrieve = \
            MockUrlRetrieve('anotherpath', 'image/jpeg')
        album = _common.Bag(asin='xxxx')
        artpath = fetchart.art_for_album(album, self.dpath)
        self.assertEqual(artpath, os.path.join(self.dpath, 'a.jpg'))
    def test_main_interface_falls_back_to_amazon(self):
        fetchart.urllib.urlretrieve = \
            MockUrlRetrieve('anotherpath', 'image/jpeg')
        album = _common.Bag(asin='xxxx')
        artpath = fetchart.art_for_album(album, self.dpath)
        self.assertEqual(artpath, 'anotherpath')
    def test_main_interface_tries_amazon_before_aao(self):
        # When Amazon succeeds, the AAO scraper (urlopen) is never hit.
        fetchart.urllib.urlretrieve = \
            MockUrlRetrieve('anotherpath', 'image/jpeg')
        album = _common.Bag(asin='xxxx')
        fetchart.art_for_album(album, self.dpath)
        self.assertFalse(self.urlopen_called)
    def test_main_interface_falls_back_to_aao(self):
        # A non-image Amazon response triggers the AAO scraper.
        fetchart.urllib.urlretrieve = \
            MockUrlRetrieve('anotherpath', 'text/html')
        album = _common.Bag(asin='xxxx')
        fetchart.art_for_album(album, self.dpath)
        self.assertTrue(self.urlopen_called)
    def test_main_interface_uses_caa_when_mbid_available(self):
        mock_retrieve = MockUrlRetrieve('anotherpath', 'image/jpeg')
        fetchart.urllib.urlretrieve = mock_retrieve
        album = _common.Bag(mb_albumid='releaseid', asin='xxxx')
        artpath = fetchart.art_for_album(album, None)
        self.assertEqual(artpath, 'anotherpath')
        self.assertTrue('coverartarchive.org' in mock_retrieve.fetched)
    def test_local_only_does_not_access_network(self):
        mock_retrieve = MockUrlRetrieve('anotherpath', 'image/jpeg')
        fetchart.urllib.urlretrieve = mock_retrieve
        album = _common.Bag(mb_albumid='releaseid', asin='xxxx')
        artpath = fetchart.art_for_album(album, self.dpath, local_only=True)
        self.assertEqual(artpath, None)
        self.assertFalse(self.urlopen_called)
        self.assertFalse(mock_retrieve.fetched)
    def test_local_only_gets_fs_image(self):
        _common.touch(os.path.join(self.dpath, 'a.jpg'))
        mock_retrieve = MockUrlRetrieve('anotherpath', 'image/jpeg')
        fetchart.urllib.urlretrieve = mock_retrieve
        album = _common.Bag(mb_albumid='releaseid', asin='xxxx')
        artpath = fetchart.art_for_album(album, self.dpath, local_only=True)
        self.assertEqual(artpath, os.path.join(self.dpath, 'a.jpg'))
        self.assertFalse(self.urlopen_called)
        self.assertFalse(mock_retrieve.fetched)
class AAOTest(unittest.TestCase):
    """Checks for the albumart.org (AAO) scraper."""
    def setUp(self):
        # Stub both urlopen (page fetch) and urlretrieve (image download);
        # both are restored in tearDown.
        self.old_urlopen = fetchart.urllib.urlopen
        self.old_urlretrieve = fetchart.urllib.urlretrieve
        fetchart.urllib.urlopen = self._urlopen
        self.retriever = MockUrlRetrieve('somepath', 'image/jpeg')
        fetchart.urllib.urlretrieve = self.retriever
        self.page_text = ''
    def tearDown(self):
        fetchart.urllib.urlopen = self.old_urlopen
        fetchart.urllib.urlretrieve = self.old_urlretrieve
    def _urlopen(self, url):
        # Serve self.page_text as the scraped HTML page.
        return StringIO.StringIO(self.page_text)
    def test_aao_scraper_finds_image(self):
        self.page_text = """
        <br />
        <a href="TARGET_URL" title="View larger image" class="thickbox" style="color: #7E9DA2; text-decoration:none;">
        <img src="http://www.albumart.org/images/zoom-icon.jpg" alt="View larger image" width="17" height="15" border="0"/></a>
        """
        res = fetchart.aao_art('x')
        self.assertEqual(self.retriever.fetched, 'TARGET_URL')
        self.assertEqual(res, 'somepath')
    def test_aao_scraper_returns_none_when_no_image_present(self):
        self.page_text = "blah blah"
        res = fetchart.aao_art('x')
        self.assertEqual(self.retriever.fetched, None)
        self.assertEqual(res, None)
class ArtImporterTest(unittest.TestCase, _common.ExtraAsserts):
    """Checks for the fetchart plugin's importer integration."""
    def setUp(self):
        # Mock the album art fetcher to always return our test file.
        self.art_file = os.path.join(_common.RSRC, 'tmpcover.jpg')
        _common.touch(self.art_file)
        self.old_afa = fetchart.art_for_album
        self.afa_response = self.art_file
        def art_for_album(i, p, local_only=False):
            return self.afa_response
        fetchart.art_for_album = art_for_album
        # Test library.
        self.libpath = os.path.join(_common.RSRC, 'tmplib.blb')
        self.libdir = os.path.join(_common.RSRC, 'tmplib')
        os.mkdir(self.libdir)
        os.mkdir(os.path.join(self.libdir, 'album'))
        itempath = os.path.join(self.libdir, 'album', 'test.mp3')
        shutil.copyfile(os.path.join(_common.RSRC, 'full.mp3'), itempath)
        self.lib = library.Library(self.libpath)
        self.i = _common.item()
        self.i.path = itempath
        self.album = self.lib.add_album([self.i])
        self.lib._connection().commit()
        # The plugin and import configuration.
        self.plugin = fetchart.FetchArtPlugin()
        self.config = _common.iconfig(self.lib)
        # Import task for the coroutine.
        self.task = importer.ImportTask(None, None, [self.i])
        self.task.is_album = True
        self.task.album_id = self.album.id
        info = AlbumInfo(
            album = 'some album',
            album_id = 'albumid',
            artist = 'some artist',
            artist_id = 'artistid',
            tracks = [],
        )
        self.task.set_choice((info, [self.i]))
    def tearDown(self):
        # Restore the patched fetcher and remove all temp artifacts.
        fetchart.art_for_album = self.old_afa
        if os.path.exists(self.art_file):
            os.remove(self.art_file)
        if os.path.exists(self.libpath):
            os.remove(self.libpath)
        if os.path.exists(self.libdir):
            shutil.rmtree(self.libdir)
    def _fetch_art(self, should_exist):
        """Execute the fetch_art coroutine for the task and return the
        album's resulting artpath. ``should_exist`` specifies whether to
        assert that art path was set (to the correct value) or that
        the path was not set.
        """
        # Execute the two relevant parts of the importer.
        self.plugin.fetch_art(self.config, self.task)
        self.plugin.assign_art(self.config, self.task)
        artpath = self.lib.albums()[0].artpath
        if should_exist:
            self.assertEqual(artpath,
                os.path.join(os.path.dirname(self.i.path), 'cover.jpg'))
            self.assertExists(artpath)
        else:
            self.assertEqual(artpath, None)
        return artpath
    def test_fetch_art(self):
        assert not self.lib.albums()[0].artpath
        self._fetch_art(True)
    def test_art_not_found(self):
        self.afa_response = None
        self._fetch_art(False)
    def test_no_art_for_singleton(self):
        self.task.is_album = False
        self._fetch_art(False)
    def test_leave_original_file_in_place(self):
        self._fetch_art(True)
        self.assertExists(self.art_file)
    def test_delete_original_file(self):
        self.config.delete = True
        self._fetch_art(True)
        self.assertNotExists(self.art_file)
    def test_move_original_file(self):
        self.config.move = True
        self._fetch_art(True)
        self.assertNotExists(self.art_file)
    def test_do_not_delete_original_if_already_in_place(self):
        # When the source is already at the destination, it must survive.
        artdest = os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
        shutil.copyfile(self.art_file, artdest)
        self.afa_response = artdest
        self._fetch_art(True)
def suite():
    """Collect this module's tests for use by external test runners."""
    return unittest.TestLoader().loadTestsFromName(__name__)
# Run the suite when invoked directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| {
"content_hash": "a6d08d4fe49024efa1616d7de37bd778",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 128,
"avg_line_length": 38.54416961130742,
"alnum_prop": 0.6365969930326366,
"repo_name": "aspidites/beets",
"id": "37ffc176fb606d0b82bb6233b2ba0e9e51f059e9",
"size": "11555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_art.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "85314"
},
{
"name": "Python",
"bytes": "672147"
}
],
"symlink_target": ""
} |
r"""Run the Rust ELO calculator on all original images.
Filters out evaluations for each of the original images
and runs the Rust ELO calulator on each original image
to create ELO results for all distortions per original.
"""
import argparse
import json
import os
import subprocess
import sys
import tempfile
parser = argparse.ArgumentParser()
parser.add_argument("-i",
"--input_json",
dest="input_json",
help="Read evaluations from this file")
parser.add_argument("-o",
"--output_directory",
dest="output_directory",
help="Write the ELO results in this directory")
args = parser.parse_args()
if args.input_json is None or args.output_directory is None:
parser.print_help()
sys.exit(1)
with open(args.input_json) as f:
evaluations = json.loads(f.read())
originals = {}
for evaluation in evaluations:
if evaluation["image"] not in originals:
originals[evaluation["image"]] = []
originals[evaluation["image"]].append(evaluation)
for original in originals:
with tempfile.NamedTemporaryFile() as filtered_file:
# Repeat the matches to get more stable results.
# Arbitrarily all matches get ~ original number of matches for ELO
# calculation purposes.
repeated_matches = (originals[original] *
int(len(evaluations) / len(originals[original])))
filtered_file.write(json.dumps(repeated_matches).encode("utf-8"))
filtered_file.flush()
p = subprocess.Popen(
f"cargo run --release {filtered_file.name}",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
out, _ = p.communicate()
outdir = os.path.join(args.output_directory, original)
os.makedirs(outdir, exist_ok=True)
outfilename = os.path.join(outdir, "elos")
with open(outfilename, "w") as outfile:
outfile.write(out.decode("utf-8"))
print(f"Saved ELOs for all distortions on {original} in {outfilename}")
| {
"content_hash": "d1864660f0b13a2d24d240501781aee8",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 34.33898305084746,
"alnum_prop": 0.6692991115498519,
"repo_name": "google-research/google-research",
"id": "5584098aef9eeb4015b5c16ea3f612acdc00132d",
"size": "2657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mucped22/scripts/elo/create_elos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import argparse
import fcntl
import logging
import os
import subprocess
import sys
import time
# Root directory holding the per-phase <phase>.d script directories; may
# be overridden via the OS_REFRESH_CONFIG_BASE_DIR environment variable.
BASE_DIR = os.environ.get('OS_REFRESH_CONFIG_BASE_DIR',
                          '/opt/stack/os-config-refresh')
# Ordered list of phases; each must complete before the next is run.
PHASES = ['pre-configure',
          'configure',
          'migration',
          'post-configure']
def main(argv=sys.argv):
    """Run every configuration phase's scripts via dib-run-parts.

    Returns 0 on success, 1 when a phase script fails, or the errno of a
    failed lock acquisition when another copy is already running.
    """
    parser = argparse.ArgumentParser(
        description="""Runs through all of the phases to ensure
        configuration is applied and enabled on a machine. Will exit with
        an error if any phase has a problem. Scripts should not depend on
        eachother having worked properly. Set OS_REFRESH_CONFIG_BASE_DIR
        environment variable to override the default
        """)
    parser.add_argument('--print-base', default=False, action='store_true',
                        help='Print base dir and exit')
    parser.add_argument('--print-phases', default=False, action='store_true',
                        help='Print phases (tab separated) and exit')
    parser.add_argument('--log-level', default='INFO',
                        choices=['ERROR', 'WARN', 'CRITICAL', 'INFO', 'DEBUG'])
    parser.add_argument('--lockfile',
                        default='/var/run/os-refresh-config.lock',
                        help='Lock file to prevent multiple running copies.')
    options = parser.parse_args(argv[1:])
    # Informational modes: print and exit without touching the lock.
    if options.print_base:
        print(BASE_DIR)
        return 0
    if options.print_phases:
        print("\t".join(PHASES))
        return 0
    log = logging.getLogger('os-refresh-config')
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(
        logging.Formatter(
            '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
    log.addHandler(handler)
    log.setLevel(options.log_level)
    # Keep open (and thus, locked) for duration of program
    lock = open(options.lockfile, 'a')
    try:
        # Non-blocking exclusive lock: fail fast if another copy runs.
        fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        log.error('Could not lock %s. %s' % (options.lockfile, e))
        return e.errno
    lock.truncate(0)
    lock.write("Locked by pid==%d at %s\n" % (os.getpid(), time.localtime()))
    for phase in PHASES:
        phase_dir = os.path.join(BASE_DIR, '%s.d' % phase)
        log.debug('Checking %s' % phase_dir)
        if os.path.exists(phase_dir):
            args = ['dib-run-parts']
            args.append(phase_dir)
            try:
                log.info('Starting phase %s' % phase)
                log.debug('Running %s' % args)
                subprocess.check_call(args, close_fds=True)
                # Flush so our own output interleaves sanely with the
                # phase scripts' output.
                sys.stdout.flush()
                sys.stderr.flush()
                log.info('Completed phase %s' % phase)
            except subprocess.CalledProcessError as e:
                # Abort on the first failing phase; later phases must not
                # run on a half-configured machine.
                log.error("during %s phase. [%s]\n" % (phase, e))
                log.error("Aborting...")
                return 1
        else:
            log.debug('No dir for phase %s' % phase)
    # Empty the lock file again; closing releases the flock.
    lock.truncate(0)
    lock.close()
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| {
"content_hash": "13b0df82eb6cf21c0ef2f9f739b70d01",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 34.93181818181818,
"alnum_prop": 0.5757970071567989,
"repo_name": "ccrouch/os-refresh-config",
"id": "d2393edfd938fdb7d4b8e73181815afc1c8af513",
"size": "3721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os_refresh_config/os_refresh_config.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import os
import re
import socket
import stat
import time
from ironic.common import exception
from ironic.common import utils
from ironic.openstack.common import excutils
from ironic.openstack.common import log as logging
from ironic.openstack.common import processutils
LOG = logging.getLogger(__name__)
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
def discovery(portal_address, portal_port):
    """Perform iSCSI sendtargets discovery against a portal."""
    cmd = ['iscsiadm', '-m', 'discovery', '-t', 'st',
           '-p', '%s:%s' % (portal_address, portal_port)]
    utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
def login_iscsi(portal_address, portal_port, target_iqn):
    """Log in to an iSCSI target and wait for the login to settle."""
    cmd = ['iscsiadm', '-m', 'node',
           '-p', '%s:%s' % (portal_address, portal_port),
           '-T', target_iqn,
           '--login']
    utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
    # Ensure the login complete
    time.sleep(3)
def logout_iscsi(portal_address, portal_port, target_iqn):
    """Log out from an iSCSI target."""
    cmd = ['iscsiadm', '-m', 'node',
           '-p', '%s:%s' % (portal_address, portal_port),
           '-T', target_iqn,
           '--logout']
    utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
def delete_iscsi(portal_address, portal_port, target_iqn):
    """Delete the iSCSI target's node record."""
    cmd = ['iscsiadm', '-m', 'node',
           '-p', '%s:%s' % (portal_address, portal_port),
           '-T', target_iqn,
           '-o', 'delete']
    utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
def make_partitions(dev, root_mb, swap_mb, ephemeral_mb):
    """Create partitions for root and swap on a disk device.

    :param dev: Path of the block device to partition.
    :param root_mb: Size of the root partition in MiB.
    :param swap_mb: Size of the swap partition in MiB.
    :param ephemeral_mb: Size of the ephemeral partition in MiB; when
        truthy, the partition order becomes ephemeral, swap, root.
    """
    # Accumulates 'mkpart' arguments for a single parted invocation.
    cmd = []
    def add_partition(start, size, fs_type=''):
        # Append a primary partition from start to start+size (MiB) and
        # return the new offset for the next partition.
        end = start + size
        cmd.extend(['mkpart', 'primary', fs_type, str(start), str(end)])
        return end
    # Start at 1 MiB to leave room for the partition table/alignment.
    offset = 1
    if ephemeral_mb:
        offset = add_partition(offset, ephemeral_mb)
        offset = add_partition(offset, swap_mb, fs_type='linux-swap')
        offset = add_partition(offset, root_mb)
    else:
        offset = add_partition(offset, root_mb)
        offset = add_partition(offset, swap_mb, fs_type='linux-swap')
    # One parted call creates the msdos label and all partitions at once.
    utils.execute('parted', '-a', 'optimal', '-s', dev, '--', 'mklabel',
                  'msdos', 'unit', 'MiB', *cmd, run_as_root=True, attempts=3,
                  check_exit_code=[0])
    # avoid "device is busy"
    time.sleep(3)
def is_block_device(dev):
    """Return True when the given path refers to a block device node."""
    return stat.S_ISBLK(os.stat(dev).st_mode)
def dd(src, dst):
    """Copy src onto dst with dd, using 1 MiB blocks and direct I/O."""
    cmd = ['dd',
           'if=%s' % src,
           'of=%s' % dst,
           'bs=1M',
           'oflag=direct']
    utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
def mkswap(dev, label='swap1'):
    """Create a swap filesystem with the given label on a device."""
    utils.mkfs('swap', dev, label)
def mkfs_ephemeral(dev, ephemeral_format, label="ephemeral0"):
    """Create a filesystem of *ephemeral_format* on *dev*, labelled *label*."""
    utils.mkfs(ephemeral_format, dev, label)
def block_uuid(dev):
    """Return the filesystem UUID of block device *dev* (via blkid)."""
    stdout, _stderr = utils.execute('blkid', '-s', 'UUID', '-o', 'value',
                                    dev,
                                    run_as_root=True,
                                    check_exit_code=[0])
    return stdout.strip()
def switch_pxe_config(path, root_uuid):
    """Switch a PXE config from deployment mode to service mode.

    Replaces every ``{{ ROOT }}`` placeholder with ``UUID=<root_uuid>``
    and rewrites any ``default ...`` line to ``default boot``, in place.
    """
    with open(path) as cfg:
        original_lines = cfg.readlines()
    root_ref = 'UUID=%s' % root_uuid
    placeholder_re = re.compile(r'\{\{ ROOT \}\}')
    default_re = re.compile('^default .*$')
    rewritten = []
    for line in original_lines:
        line = placeholder_re.sub(root_ref, line)
        rewritten.append(default_re.sub('default boot', line))
    with open(path, 'w') as cfg:
        cfg.writelines(rewritten)
def notify(address, port):
    """Tell the node at (address, port) that it may reboot.

    Sends the literal payload 'done' over a plain TCP connection; the
    socket is always closed, even if the connect/send fails.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((address, port))
        # NOTE(review): a str payload assumes Python 2 sockets; Python 3
        # would require bytes here.
        sock.send('done')
    finally:
        sock.close()
def get_dev(address, port, iqn, lun):
    """Return the /dev/disk/by-path node for an iSCSI LUN."""
    return ('/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s'
            % (address, port, iqn, lun))
def get_image_mb(image_path):
    """Return the size of the file at *image_path*, rounded up to whole MiB.

    :param image_path: path of the image file to measure.
    :returns: size in mebibytes, as an int (0 for an empty file).
    """
    mb = 1024 * 1024
    image_byte = os.path.getsize(image_path)
    # Integer ceiling division.  The original used int((b + mb - 1) / mb),
    # which under Python 3 goes through float division and can lose
    # precision for very large files (> 2**53 bytes); // is exact.
    return (image_byte + mb - 1) // mb
def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format,
                 image_path, preserve_ephemeral=False):
    """Create partitions and copy an image to the root partition.
    :param dev: Path for the device to work on.
    :param root_mb: Size of the root partition in megabytes.
    :param swap_mb: Size of the swap partition in megabytes.
    :param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
        no ephemeral partition will be created.
    :param ephemeral_format: The type of file system to format the ephemeral
        partition.
    :param image_path: Path for the instance's disk image.
    :param preserve_ephemeral: If True, no filesystem is written to the
        ephemeral block device, preserving whatever content it had (if the
        partition table has not changed).
    :returns: the UUID of the root partition's filesystem.
    """
    # NOTE(lucasagomes): When there's an ephemeral partition we want
    # root to be last because that would allow root to resize and make it
    # safer to do takeovernode with slightly larger images
    # Partition node names follow the kernel's "<dev>-partN" convention
    # and must match the ordering produced by make_partitions().
    if ephemeral_mb:
        ephemeral_part = "%s-part1" % dev
        swap_part = "%s-part2" % dev
        root_part = "%s-part3" % dev
    else:
        root_part = "%s-part1" % dev
        swap_part = "%s-part2" % dev
    # Sanity-check the parent device before touching the partition table.
    if not is_block_device(dev):
        raise exception.InstanceDeployFailure(_("Parent device '%s' not found")
                                              % dev)
    make_partitions(dev, root_mb, swap_mb, ephemeral_mb)
    # Verify every partition we expect actually showed up as a block device.
    if not is_block_device(root_part):
        raise exception.InstanceDeployFailure(_("Root device '%s' not found")
                                              % root_part)
    if not is_block_device(swap_part):
        raise exception.InstanceDeployFailure(_("Swap device '%s' not found")
                                              % swap_part)
    if ephemeral_mb and not is_block_device(ephemeral_part):
        raise exception.InstanceDeployFailure(
            _("Ephemeral device '%s' not found") % ephemeral_part)
    # Write the image onto root, then (re)create the swap and, unless the
    # caller asked to preserve its contents, the ephemeral filesystem.
    dd(image_path, root_part)
    mkswap(swap_part)
    if ephemeral_mb and not preserve_ephemeral:
        mkfs_ephemeral(ephemeral_part, ephemeral_format)
    # The root partition's UUID is what the PXE config will boot from.
    try:
        root_uuid = block_uuid(root_part)
    except processutils.ProcessExecutionError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Failed to detect root device UUID."))
    return root_uuid
def deploy(address, port, iqn, lun, image_path, pxe_config_path,
           root_mb, swap_mb, ephemeral_mb, ephemeral_format,
           preserve_ephemeral=False):
    """All-in-one function to deploy a node.
    :param address: The iSCSI IP address.
    :param port: The iSCSI port number.
    :param iqn: The iSCSI qualified name.
    :param lun: The iSCSI logical unit number.
    :param image_path: Path for the instance's disk image.
    :param pxe_config_path: Path for the instance PXE config file.
    :param root_mb: Size of the root partition in megabytes.
    :param swap_mb: Size of the swap partition in megabytes.
    :param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
        no ephemeral partition will be created.
    :param ephemeral_format: The type of file system to format the ephemeral
        partition.
    :param preserve_ephemeral: If True, no filesystem is written to the
        ephemeral block device, preserving whatever content it had (if the
        partition table has not changed).
    """
    dev = get_dev(address, port, iqn, lun)
    image_mb = get_image_mb(image_path)
    # Grow the root partition if the image would not otherwise fit.
    if image_mb > root_mb:
        root_mb = image_mb
    # Attach the node's disk over iSCSI; always detach again afterwards,
    # whether or not the disk work succeeded.
    discovery(address, port)
    login_iscsi(address, port, iqn)
    try:
        root_uuid = work_on_disk(dev, root_mb, swap_mb, ephemeral_mb,
                                 ephemeral_format, image_path,
                                 preserve_ephemeral)
    except processutils.ProcessExecutionError as err:
        # save_and_reraise_exception re-raises after logging, so the code
        # after this try block only runs on success.
        with excutils.save_and_reraise_exception():
            LOG.error(_("Deploy to address %s failed.") % address)
            LOG.error(_("Command: %s") % err.cmd)
            LOG.error(_("StdOut: %r") % err.stdout)
            LOG.error(_("StdErr: %r") % err.stderr)
    except exception.InstanceDeployFailure as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Deploy to address %s failed.") % address)
            LOG.error(e)
    finally:
        logout_iscsi(address, port, iqn)
        delete_iscsi(address, port, iqn)
    # Only reached when work_on_disk succeeded, so root_uuid is bound here.
    switch_pxe_config(pxe_config_path, root_uuid)
    # Ensure the node started netcat on the port after POST the request.
    time.sleep(3)
    notify(address, 10000)
| {
"content_hash": "917ee4acea4767435b49678b4dd53cee",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 79,
"avg_line_length": 35.04089219330855,
"alnum_prop": 0.5854020793549756,
"repo_name": "JioCloud/ironic",
"id": "bad64584774f9095451b3d2052fb0948781ceaed",
"size": "10063",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/deploy_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1640165"
}
],
"symlink_target": ""
} |
from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
    """Engine for orchestating and interacting with the app."""
    def set_up(self):
        """Ensure virtualenv present, then run all services."""
        # Build (or reuse) the requested Python version and install the
        # project's local requirements into it.
        python_package = hitchpython.PythonPackage(
            python_version=self.preconditions['python_version']
        )
        python_package.build()
        python_package.verify()
        call([
            python_package.pip, "install", "-r",
            path.join(PROJECT_DIRECTORY, "requirements/local.txt")
        ])
        # Build and verify the backing-service binaries.
        postgres_package = hitchpostgres.PostgresPackage(
            version=self.settings["postgres_version"],
        )
        postgres_package.build()
        postgres_package.verify()
        redis_package = hitchredis.RedisPackage(version="2.8.4")
        redis_package.build()
        redis_package.verify()
        # All services are managed as one bundle so they start and stop
        # together with shared timeouts.
        self.services = hitchserve.ServiceBundle(
            project_directory=PROJECT_DIRECTORY,
            startup_timeout=float(self.settings["startup_timeout"]),
            shutdown_timeout=5.0,
        )
        postgres_user = hitchpostgres.PostgresUser("de_pedia_ba", "password")
        self.services['Postgres'] = hitchpostgres.PostgresService(
            postgres_package=postgres_package,
            users=[postgres_user, ],
            databases=[hitchpostgres.PostgresDatabase("de_pedia_ba", postgres_user), ]
        )
        # Local SMTP sink so tests can assert on outgoing mail.
        self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
        # The Django app waits for Postgres to be up first (needs=...).
        self.services['Django'] = hitchpython.DjangoService(
            python=python_package.python,
            port=8000,
            version=str(self.settings.get("django_version")),
            settings="config.settings.local",
            needs=[self.services['Postgres'], ],
            env_vars=self.settings['environment_variables'],
        )
        self.services['Redis'] = hitchredis.RedisService(
            redis_package=redis_package,
            port=16379,
        )
        self.services['Firefox'] = hitchselenium.SeleniumService(
            xvfb=self.settings.get("quiet", False),
            no_libfaketime=True,
        )
        # import hitchcron
        # self.services['Cron'] = hitchcron.CronService(
        #     run=self.services['Django'].manage("trigger").command,
        #     every=1,
        #     needs=[ self.services['Django'], ],
        # )
        self.services.startup(interactive=False)
        # Configure selenium driver
        # NOTE(review): Selenium's set_window_size takes (width, height),
        # but 'height' is passed first here -- confirm whether the swap
        # is intentional.
        self.driver = self.services['Firefox'].driver
        self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
        self.driver.set_window_position(0, 0)
        self.driver.implicitly_wait(2.0)
        self.driver.accept_next_alert = True
    def pause(self, message=None):
        """Stop. IPython time."""
        if hasattr(self, 'services'):
            self.services.start_interactive_mode()
        self.ipython(message)
        if hasattr(self, 'services'):
            self.services.stop_interactive_mode()
    def load_website(self):
        """Navigate to website in Firefox."""
        self.driver.get(self.services['Django'].url())
    def click(self, on):
        """Click on HTML id."""
        self.driver.find_element_by_id(on).click()
    def fill_form(self, **kwargs):
        """Fill in a form with id=value."""
        for element, text in kwargs.items():
            self.driver.find_element_by_id(element).send_keys(text)
    def click_submit(self):
        """Click on a submit button if it exists."""
        self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
    def confirm_emails_sent(self, number):
        """Count number of emails sent by app."""
        assert len(self.services['HitchSMTP'].logs.json()) == int(number)
    def wait_for_email(self, containing=None):
        """Wait for, and return email."""
        # Blocks (up to 25s) until an email whose payload or subject
        # contains the given text arrives at the SMTP sink.
        self.services['HitchSMTP'].logs.out.tail.until_json(
            lambda email: containing in email['payload'] or containing in email['subject'],
            timeout=25,
            lines_back=1,
        )
    def time_travel(self, days=""):
        """Make all services think that time has skipped forward."""
        self.services.time_travel(days=int(days))
    def on_failure(self):
        """Stop and IPython."""
        if not self.settings['quiet']:
            if self.settings.get("pause_on_failure", False):
                self.pause(message=self.stacktrace.to_template())
    def on_success(self):
        """Pause on success if enabled."""
        if self.settings.get("pause_on_success", False):
            self.pause(message="SUCCESS")
    def tear_down(self):
        """Shut down services required to run your test."""
        if hasattr(self, 'services'):
            self.services.shutdown()
| {
"content_hash": "fa5f9d722c09b224659b4e62739c7deb",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 114,
"avg_line_length": 34.36486486486486,
"alnum_prop": 0.6112858828155722,
"repo_name": "reuf/de_pedia_ba",
"id": "f9c99424d06b775048c0fb3ae5ebefd6157b700c",
"size": "5086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/engine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1768"
},
{
"name": "HTML",
"bytes": "20363"
},
{
"name": "JavaScript",
"bytes": "3150"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "39256"
},
{
"name": "Shell",
"bytes": "4572"
}
],
"symlink_target": ""
} |
from config import config
import pycurl, cStringIO
buf = cStringIO.StringIO()
fields = {
'token': config['api_token'],
'content': 'file',
'action': 'export',
'record': 'f21a3ffd37fc0b3c',
'field': 'file_upload',
'event': 'event_1_arm_1'
}
ch = pycurl.Curl()
ch.setopt(ch.URL, config['api_url'])
ch.setopt(ch.HTTPPOST, fields.items())
ch.setopt(ch.WRITEFUNCTION, buf.write)
ch.perform()
ch.close()
print buf.getvalue()
buf.close()
| {
"content_hash": "09cf8c6d3ddd36c4cc2a5955d23f0ede",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 38,
"avg_line_length": 19.91304347826087,
"alnum_prop": 0.6550218340611353,
"repo_name": "KevinDufendach/VandAID",
"id": "87dc5d82b74fbf99da21a088f93479a89de98394",
"size": "481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resources/redcap-api-examples-python/export_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5597"
},
{
"name": "HTML",
"bytes": "20946"
},
{
"name": "JavaScript",
"bytes": "51797"
},
{
"name": "Python",
"bytes": "19167"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Convert the primary keys of the resource models to BigAutoField."""

    dependencies = [
        ('resource', '0001_initial'),
    ]

    # The identical AlterField is applied to every model, so the
    # operations list is generated from the model names.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True,
                                      serialize=False, verbose_name='ID'),
        )
        for model in (
            'logicalresource',
            'physicalresource',
            'resourceorder',
            'resourceorderitem',
        )
    ]
| {
"content_hash": "495ad86f9065a703532abd3fb4e19665",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 111,
"avg_line_length": 33.903225806451616,
"alnum_prop": 0.5946717411988582,
"repo_name": "Semprini/cbe",
"id": "274d7d36c98ac73ac5fd69bcd40a2f4763939af6",
"size": "1100",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cbe/cbe/resource/migrations/0002_auto_20210617_2350.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2292"
},
{
"name": "HTML",
"bytes": "3112"
},
{
"name": "PowerShell",
"bytes": "20448"
},
{
"name": "Python",
"bytes": "241197"
}
],
"symlink_target": ""
} |
"""accounts user forms."""
from django import forms
from django.contrib import auth
from accounts.models import CustomUser
class CustomUserCreationForm(auth.forms.UserCreationForm):
    """Form for registering a new CustomUser account."""

    def clean_username(self):
        """Reject usernames that are already taken.

        The unique constraint on CustomUser.username would catch this
        anyway, but validating here produces a nicer error message than
        the ORM's (see Django ticket #13147 and
        https://groups.google.com/forum/?fromgroups=#!topic/django-users/kOVEy9zYn5c).
        """
        candidate = self.cleaned_data["username"]
        try:
            CustomUser._default_manager.get(username=candidate)
        except CustomUser.DoesNotExist:
            return candidate
        raise forms.ValidationError(self.error_messages['duplicate_username'])

    class Meta(auth.forms.UserCreationForm.Meta):
        model = CustomUser
class CustomUserChangeForm(auth.forms.UserChangeForm):
    """A form to update existing CustomUser instances."""
    class Meta(auth.forms.UserChangeForm.Meta):
        # Point the stock UserChangeForm at the project's custom user model.
        model = CustomUser
| {
"content_hash": "bfa6fdcbb678da930515e0df86679b3d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 81,
"avg_line_length": 29.939393939393938,
"alnum_prop": 0.6973684210526315,
"repo_name": "juliotrigo/django-accounts",
"id": "82d79f3ba6eed84d3cbdddf0bf4f971baa51736a",
"size": "1013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/forms/users.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7441"
}
],
"symlink_target": ""
} |
"""The tests for the Netgear Arlo sensors."""
from collections import namedtuple
from unittest.mock import patch
import pytest
from homeassistant.components.arlo import DATA_ARLO, sensor as arlo
from homeassistant.const import (
ATTR_ATTRIBUTION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
)
def _get_named_tuple(input_dict):
return namedtuple("Struct", input_dict.keys())(*input_dict.values())
def _get_sensor(name="Last", sensor_type="last_capture", data=None):
    """Construct an ArloSensor, defaulting to an empty data payload."""
    return arlo.ArloSensor(name, {} if data is None else data, sensor_type)
@pytest.fixture()
def default_sensor():
    """Create an ArloSensor with default values."""
    # Defaults come from _get_sensor: name="Last", type="last_capture".
    return _get_sensor()
@pytest.fixture()
def battery_sensor():
    """Create an ArloSensor reporting a 50% battery level."""
    return _get_sensor(
        "Battery Level", "battery_level", _get_named_tuple({"battery_level": 50})
    )
@pytest.fixture()
def temperature_sensor():
    """Create a temperature ArloSensor."""
    # No device data: tests using this only check icon/device_class.
    return _get_sensor("Temperature", "temperature")
@pytest.fixture()
def humidity_sensor():
    """Create a humidity ArloSensor."""
    # No device data: tests using this only check device_class.
    return _get_sensor("Humidity", "humidity")
@pytest.fixture()
def cameras_sensor():
    """Create a total-cameras ArloSensor backed by two camera entries."""
    return _get_sensor(
        "Arlo Cameras", "total_cameras", _get_named_tuple({"cameras": [0, 0]})
    )
@pytest.fixture()
def captured_sensor():
    """Create a captured-today ArloSensor with five capture entries."""
    return _get_sensor(
        "Captured Today",
        "captured_today",
        _get_named_tuple({"captured_today": [0, 0, 0, 0, 0]}),
    )
class PlatformSetupFixture:
    """Fixture for testing platform setup call to add_entities()."""
    def __init__(self):
        """Instantiate the platform setup fixture."""
        # Both stay at their sentinel values until add_entities() is called.
        self.sensors = None
        self.update = False
    def add_entities(self, sensors, update):
        """Mock method for adding devices."""
        # Record what the platform under test passed in, for later asserts.
        self.sensors = sensors
        self.update = update
@pytest.fixture()
def platform_setup():
    """Create an instance of the PlatformSetupFixture class."""
    # Fresh instance per test so recorded sensors don't leak between tests.
    return PlatformSetupFixture()
@pytest.fixture()
def sensor_with_hass_data(default_sensor, hass):
    """Create a sensor with async_dispatcher_connected mocked."""
    # Reset hass.data and attach hass so async_added_to_hass can run.
    hass.data = {}
    default_sensor.hass = hass
    return default_sensor
@pytest.fixture()
def mock_dispatch():
    """Mock the dispatcher connect method."""
    # Patched for the duration of the test; the mock is yielded so tests
    # can assert on its call args.
    target = "homeassistant.components.arlo.sensor.async_dispatcher_connect"
    with patch(target) as _mock:
        yield _mock
def test_setup_with_no_data(platform_setup, hass):
    """Test setup_platform with no data."""
    # With no DATA_ARLO entry in hass.data, no sensors should be added.
    arlo.setup_platform(hass, None, platform_setup.add_entities)
    assert platform_setup.sensors is None
    assert not platform_setup.update
def test_setup_with_valid_data(platform_setup, hass):
    """Test setup_platform with valid data."""
    monitored = [
        "last_capture",
        "total_cameras",
        "captured_today",
        "battery_level",
        "signal_strength",
        "temperature",
        "humidity",
        "air_quality",
    ]
    config = {"monitored_conditions": monitored}
    camera = _get_named_tuple({"name": "Camera", "model_id": "ABC1000"})
    base_station = _get_named_tuple(
        {"name": "Base Station", "model_id": "ABC1000"})
    hass.data[DATA_ARLO] = _get_named_tuple(
        {"cameras": [camera], "base_stations": [base_station]}
    )
    arlo.setup_platform(hass, config, platform_setup.add_entities)
    # One sensor per monitored condition is expected.
    assert len(platform_setup.sensors) == 8
    assert platform_setup.update
def test_sensor_name(default_sensor):
    """Test the name property."""
    # The default fixture is built with name="Last".
    assert default_sensor.name == "Last"
async def test_async_added_to_hass(sensor_with_hass_data, mock_dispatch):
    """The dispatcher must be connected exactly once when the entity is added."""
    await sensor_with_hass_data.async_added_to_hass()
    assert len(mock_dispatch.mock_calls) == 1
    args, kwargs = mock_dispatch.call_args
    assert len(args) == 3
    assert args[0] == sensor_with_hass_data.hass
    assert args[1] == "arlo_update"
    assert not kwargs
def test_sensor_state_default(default_sensor):
    """Test the state property."""
    # A freshly-created sensor has no state until update() runs.
    assert default_sensor.state is None
def test_sensor_icon_battery(battery_sensor):
    """Test the battery icon."""
    # The fixture reports battery_level=50, which maps to this mdi icon.
    assert battery_sensor.icon == "mdi:battery-50"
def test_sensor_icon(temperature_sensor):
    """Test the icon property."""
    assert temperature_sensor.icon == "mdi:thermometer"
def test_unit_of_measure(default_sensor, battery_sensor):
    """Test the unit_of_measurement property."""
    # Only measurement-type sensors carry a unit; battery reports percent.
    assert default_sensor.unit_of_measurement is None
    assert battery_sensor.unit_of_measurement == PERCENTAGE
def test_device_class(default_sensor, temperature_sensor, humidity_sensor):
    """Test the device_class property."""
    assert default_sensor.device_class is None
    assert temperature_sensor.device_class == DEVICE_CLASS_TEMPERATURE
    assert humidity_sensor.device_class == DEVICE_CLASS_HUMIDITY
def test_update_total_cameras(cameras_sensor):
    """Test update method for total_cameras sensor type."""
    # The fixture's data lists two cameras.
    cameras_sensor.update()
    assert cameras_sensor.state == 2
def test_update_captured_today(captured_sensor):
    """Test update method for captured_today sensor type."""
    # The fixture's data lists five captures.
    captured_sensor.update()
    assert captured_sensor.state == 5
def _test_attributes(sensor_type):
    """Assert the shared attribute contract for a camera-backed sensor type."""
    device_data = _get_named_tuple({"model_id": "TEST123"})
    sensor = _get_sensor("test", sensor_type, device_data)
    attributes = sensor.extra_state_attributes
    assert attributes.get(ATTR_ATTRIBUTION) == "Data provided by arlo.netgear.com"
    assert attributes.get("brand") == "Netgear Arlo"
    assert attributes.get("model") == "TEST123"
def test_state_attributes():
    """Test attributes for camera sensor types."""
    for sensor_type in (
        "battery_level",
        "signal_strength",
        "temperature",
        "humidity",
        "air_quality",
    ):
        _test_attributes(sensor_type)
def test_attributes_total_cameras(cameras_sensor):
    """Test attributes for total cameras sensor type."""
    attrs = cameras_sensor.extra_state_attributes
    assert attrs.get(ATTR_ATTRIBUTION) == "Data provided by arlo.netgear.com"
    assert attrs.get("brand") == "Netgear Arlo"
    # The aggregate sensor has no single backing device, hence no model.
    assert attrs.get("model") is None
def _test_update(sensor_type, key, value):
    """Run update() on a sensor fed {key: value} and check the state sticks."""
    sensor = _get_sensor("test", sensor_type, _get_named_tuple({key: value}))
    sensor.update()
    assert sensor.state == value
def test_update():
    """Test update method for direct transcription sensor types."""
    cases = (
        ("battery_level", "battery_level", 100),
        ("signal_strength", "signal_strength", 100),
        ("temperature", "ambient_temperature", 21.4),
        ("humidity", "ambient_humidity", 45.1),
        ("air_quality", "ambient_air_quality", 14.2),
    )
    for sensor_type, key, value in cases:
        _test_update(sensor_type, key, value)
| {
"content_hash": "2e2fa85a1f4b167d9b268a0baf508bed",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 85,
"avg_line_length": 29.8068669527897,
"alnum_prop": 0.666234701223902,
"repo_name": "kennedyshead/home-assistant",
"id": "b8389d1903fb8019d472f95695e80bdfb69349c3",
"size": "6945",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/arlo/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import os
import logging
from google.appengine.dist import use_library
use_library('django', '1.1')
# Absolute path of the directory containing this config module.
APP_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
# Stashboard version
VERSION = "1.1.4"
# If we're debugging, turn the cache off, etc.
# Set to true if we want to have our webapp print stack traces, etc
# True when running under the App Engine dev server (SERVER_SOFTWARE
# starts with "Dev"); production servers report a different value.
DEBUG = os.environ['SERVER_SOFTWARE'].startswith('Dev')
logging.info("Starting application in DEBUG mode: %s", DEBUG)
# Site-wide presentation and contact settings consumed by the templates.
SITE = {
    "html_type": "text/html",
    "charset": "utf-8",
    "title": "Stashboard",
    "author": "Kyle Conroy",
    # This must be the email address of a registered administrator for the
    # application due to mail api restrictions.
    "email": "kyle.j.conroy@gmail.com",
    "description": "A RESTful Status Tracker on top of App Engine.",
    "root_url": "http://stashboard.appspot.com",
    "template_path": os.path.join(APP_ROOT_DIR, "views/default"),
    "rich_client": True, #If false, the website will go into a simplified read-only view
}
| {
"content_hash": "2eb53ba5fe418d55ee1ab5aa70416dc4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 88,
"avg_line_length": 34.206896551724135,
"alnum_prop": 0.6875,
"repo_name": "progrium/stashboard",
"id": "ab3d48f33f5c8dd57d1c69c3b986e5ca1b34fd56",
"size": "2086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""KNL Spirits Spider implementation.
Example:
$ scrapy runspider knl_spirits_spider.py -o output.json
"""
import datetime
import logging
import scrapy
class KnLSpiritsSpider(scrapy.Spider):
    """Crawl the K&L Wines spirits listing pages and yield, for every
    product entry, a dict with its display name and numeric catalogue id.
    """
    name = "knlSpiritsSpider"
    download_delay = 1
    custom_settings = {
        'COOKIES_ENABLED': 'false',
        'DOWNLOAD_DELAY': str(download_delay)
    }

    # Visual separator framing the per-page log lines.
    _BANNER = '=' * 64

    def _log_page(self, url):
        """Emit a banner-framed log entry for the page about to be scraped."""
        # Lazy %-style args avoid building the message unless it is logged.
        logging.info(self._BANNER)
        logging.info('scraping %s', url)
        logging.info(self._BANNER)

    def start_requests(self):
        """Kick off the crawl at the all-spirits listing page."""
        # BUG FIX: the original called logging.getLogger(__name__) and
        # discarded the result; the no-op calls were removed and the
        # repeated banner logging was factored into _log_page().
        urls = [
            'http://www.klwines.com/Products/r?r=0+4294967191&d=1&t=&o=8&z=False'  # url for all spirits
        ]
        for url in urls:
            self._log_page(url)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Yield {'name', 'id'} for each product, then follow pagination."""
        if response is not None:
            for entry in response.css('div.result-desc'):
                if entry is not None:
                    # Product link href; the numeric id follows a
                    # 7-character prefix — TODO confirm against the
                    # current site markup.
                    href = ''.join(entry.xpath('./a/@href').extract()).strip()
                    product_id = href[7:]
                    product_name = ''.join(
                        entry.xpath('./a/text()').extract()).strip()
                    # Filter out store-location links and ad copy that share
                    # the same markup as real product entries.
                    if product_name not in ("Redwood City", "Hollywood",
                                            "San Francisco") \
                            and 'Read More ' not in product_name:
                        yield {
                            'name': product_name,
                            'id': int(product_id)
                        }
        links = response.xpath(
            '//div[@class="floatLeft"]/a[contains(text(),"next")]/@href').extract()
        next_page = links[0] if links else None
        if next_page is not None:
            next_page = response.urljoin(next_page)
            self._log_page(next_page)
            yield scrapy.Request(next_page, callback=self.parse, dont_filter=True)
"content_hash": "fbd06c28e271d219ece425312f1cb1c5",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 104,
"avg_line_length": 31.22093023255814,
"alnum_prop": 0.4465549348230912,
"repo_name": "wriggityWrecked/WebScraping",
"id": "2300cd9cf6ef539256d8f6424b876f2386070612",
"size": "2685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spiders/knl_spirits_spider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "99042"
},
{
"name": "Shell",
"bytes": "138"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from suite.models import *
# Register your models here.
#class TestModelAdmin(admin.ModelAdmin):
# pass
#admin.site.register(TestModel, TestModelAdmin)
# user
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Admin page for User; default ModelAdmin options."""
# account
@admin.register(Account)
class AccountAdmin(admin.ModelAdmin):
    """Admin page for Account; default ModelAdmin options."""
# club
@admin.register(Club)
class ClubAdmin(admin.ModelAdmin):
    """Admin page for Club; default ModelAdmin options."""
# event
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
    """Admin page for Event; default ModelAdmin options."""
# receipt
@admin.register(Receipt)
class ReceiptAdmin(admin.ModelAdmin):
    """Admin page for Receipt; default ModelAdmin options."""
# budget
@admin.register(Budget)
class BudgetAdmin(admin.ModelAdmin):
    """Admin page for Budget; default ModelAdmin options."""
# division
@admin.register(Division)
class DivisionAdmin(admin.ModelAdmin):
    """Admin page for Division; default ModelAdmin options."""
# event_sign_in
@admin.register(EventSignIn)
class EventSignInAdmin(admin.ModelAdmin):
    """Admin page for EventSignIn; default ModelAdmin options."""
| {
"content_hash": "6c2994d9e3fa0e84b7cad0bc6d6ce83a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 50,
"avg_line_length": 17.660714285714285,
"alnum_prop": 0.7735085945399394,
"repo_name": "fsxfreak/club-suite",
"id": "deaed7df77a22d8b9dc575f785a225b8c35aa553",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clubsuite/suite/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63477"
},
{
"name": "HTML",
"bytes": "77966"
},
{
"name": "JavaScript",
"bytes": "2779"
},
{
"name": "Python",
"bytes": "165393"
}
],
"symlink_target": ""
} |
import os
import cgi
import utilities
import ujson as json
from datetime import datetime
def lookupFiles(urlIdentifier):
    """Resolve a meeting's URL identifier to its related-file rows.

    First maps urlIdentifier -> globalId via meetingRegistry, then
    returns all relatedFiles rows for that globalId ordered by fileName.
    """
    sqlCmd = """select
        globalId from meetingRegistry
        where urlIdentifier = %s"""
    # NOTE(review): parentheses alone do not make a tuple -- sqlData is
    # the bare string here (and again below). utilities.dbExecution
    # evidently accepts that; confirm before changing to (urlIdentifier,).
    sqlData = (urlIdentifier)
    resultList = utilities.dbExecution(sqlCmd, sqlData)
    # dbExecution's result: index 2 holds the row set; first row, first
    # column is the meeting's globalId.
    globalId = resultList[2][0][0]
    globalId = str(globalId)
    sqlCmd = """select
        fileId,
        fileName,
        mimeType,
        webViewLink,
        thumbnailLink,
        pageIndex from relatedFiles
        where globalId = %s
        order by fileName ASC"""
    sqlData = (globalId)
    resultList = utilities.dbExecution(sqlCmd, sqlData)
    return resultList[2]
def main():
    """CGI entry point: print a JSON map of a meeting's related files.

    Output keys are zero-padded page indexes; values are dicts with the
    file's id, name, MIME type, and Drive view/thumbnail links. Any
    failure yields {"error": "Error"} instead.
    """
    errorFound = False
    passedArgs = cgi.FieldStorage()
    try:
        urlIdentifier = passedArgs["urlIdentifier"].value
        resultObj = lookupFiles(urlIdentifier)
    # NOTE(review): deliberately broad -- any failure (missing query
    # parameter, DB error) is converted into the generic JSON error below.
    except:
        meetingId = None
        errorFound = True
        resultObj = None
    try:
        outputObj = {}
        for eachResult in resultObj:
            # Zero-pad the page index so string keys sort in page order.
            pageIndex = eachResult[5].zfill(2)
            fileObj = {}
            fileObj["fileId"] = eachResult[0]
            fileObj["fileName"] = eachResult[1]
            fileObj["fileType"] = eachResult[2]
            fileObj["webViewLink"] = eachResult[3]
            fileObj["thumbnailLink"] = eachResult[4]
            outputObj[pageIndex] = fileObj
    except:
        errorFound = True
    if errorFound is True:
        outputObj = {}
        outputObj["error"] = "Error"
    # Python 2 CGI response: header line, blank line, then the JSON body.
    print "Content-Type: application/json\n"
    print json.dumps(outputObj)
if __name__ == "__main__":
main() | {
"content_hash": "97d4f2cb61255480d437dd243e50eb57",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 52,
"avg_line_length": 20.246376811594203,
"alnum_prop": 0.7072297780959198,
"repo_name": "google/gov-meetings-made-searchable",
"id": "d5513e26c972487ed0f61a76204a0005077f0254",
"size": "2108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app-engine-front-end/ep-relatedFiles.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8777"
},
{
"name": "HTML",
"bytes": "55658"
},
{
"name": "Python",
"bytes": "104426"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AbstractUser
class SalesRep(AbstractUser):
    """Sales representative account based on Django's standard user model."""
    # NOTE(review): 'username' is already AbstractUser's default
    # USERNAME_FIELD; this restates it explicitly.
    USERNAME_FIELD = 'username'
| {
"content_hash": "c3ffde4366778c0cef4a557270282f4b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 51,
"avg_line_length": 23.2,
"alnum_prop": 0.7844827586206896,
"repo_name": "PreppyLLC-opensource/django-advanced-filters",
"id": "b1b7eadf180e30975f796bcc894dde900c500fb0",
"size": "116",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/reps/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9571"
},
{
"name": "HTML",
"bytes": "6767"
},
{
"name": "JavaScript",
"bytes": "16231"
},
{
"name": "Python",
"bytes": "79999"
}
],
"symlink_target": ""
} |
from copy import copy, deepcopy
import pytest
from pandas import MultiIndex
import pandas.util.testing as tm
def assert_multiindex_copied(copy, original):
    """Assert that *copy* is a proper (at least shallow) copy of the
    MultiIndex *original*: equal levels/codes/names/sortorder, with codes
    and names held in distinct objects.

    NOTE(review): the parameter name ``copy`` shadows the module-level
    ``copy`` import; kept for caller compatibility.
    """
    # Levels should be (at least, shallow copied)
    tm.assert_copy(copy.levels, original.levels)
    # Codes must be equal in value but must not be the same object.
    # (The original asserted codes-equality twice; the duplicate check
    # has been removed.)
    tm.assert_almost_equal(copy.codes, original.codes)
    assert copy.codes is not original.codes
    # Names compare equal but are distinct containers.
    assert copy.names == original.names
    assert copy.names is not original.names
    # Sort order should be copied verbatim.
    assert copy.sortorder == original.sortorder
def test_copy(idx):
    """MultiIndex.copy() must produce an independent but equal index."""
    i_copy = idx.copy()
    assert_multiindex_copied(i_copy, idx)
def test_shallow_copy(idx):
    """_shallow_copy() must satisfy the same copy contract as copy()."""
    i_copy = idx._shallow_copy()
    assert_multiindex_copied(i_copy, idx)
def test_labels_deprecated(idx):
    """copy(labels=...) must raise a FutureWarning (labels -> codes rename)."""
    # GH23752
    with tm.assert_produces_warning(FutureWarning):
        idx.copy(labels=idx.codes)
def test_view(idx):
    """view() must satisfy the same copy contract as copy()."""
    i_view = idx.view()
    assert_multiindex_copied(i_view, idx)
@pytest.mark.parametrize('func', [copy, deepcopy])
def test_copy_and_deepcopy(func):
    """copy.copy and copy.deepcopy both yield an equal, distinct index."""
    original = MultiIndex(
        levels=[['foo', 'bar'], ['fizz', 'buzz']],
        codes=[[0, 0, 0, 1], [0, 0, 1, 1]],
        names=['first', 'second']
    )
    duplicate = func(original)
    assert duplicate is not original
    assert duplicate.equals(original)
@pytest.mark.parametrize('deep', [True, False])
def test_copy_method(deep):
    """MultiIndex.copy must preserve equality for deep and shallow copies."""
    original = MultiIndex(
        levels=[['foo', 'bar'], ['fizz', 'buzz']],
        codes=[[0, 0, 0, 1], [0, 0, 1, 1]],
        names=['first', 'second']
    )
    assert original.copy(deep=deep).equals(original)
@pytest.mark.parametrize('deep', [True, False])
@pytest.mark.parametrize('kwarg, value', [
    ('names', ['third', 'fourth']),
    ('levels', [['foo2', 'bar2'], ['fizz2', 'buzz2']]),
    ('codes', [[1, 0, 0, 0], [1, 1, 0, 0]])
])
def test_copy_method_kwargs(deep, kwarg, value):
    # gh-12309: Check that the "name" argument as well other kwargs are honored
    idx = MultiIndex(
        levels=[['foo', 'bar'], ['fizz', 'buzz']],
        codes=[[0, 0, 0, 1], [0, 0, 1, 1]],
        names=['first', 'second']
    )
    # BUG FIX: a stray ``return`` here previously exited before any
    # assertion ran, silently turning this test into a no-op.  (Also fixed
    # the 'thrid' typo in the parametrized names above.)
    idx_copy = idx.copy(**{kwarg: value, 'deep': deep})
    if kwarg == 'names':
        assert getattr(idx_copy, kwarg) == value
    else:
        assert [list(i) for i in getattr(idx_copy, kwarg)] == value
| {
"content_hash": "5db325d212602f025e6309ff26d9f594",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 27.043956043956044,
"alnum_prop": 0.6156034132466477,
"repo_name": "cbertinato/pandas",
"id": "17e81a348f186571af612955dde5ffa397da2722",
"size": "2461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/multi/test_copy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394466"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "15010333"
},
{
"name": "Shell",
"bytes": "27209"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
def adaptive_start_index(index, input_size, output_size):
    """First input coordinate covered by adaptive-pooling output cell ``index``."""
    scaled = index * input_size / output_size
    return int(np.floor(scaled))
def adaptive_end_index(index, input_size, output_size):
    """One-past-last input coordinate for adaptive-pooling output cell ``index``."""
    scaled = (index + 1) * input_size / output_size
    return int(np.ceil(scaled))
def max_pool2D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=True,
                             adaptive=False,
                             data_type=np.float32):
    """Reference max-pool2d over an NCHW array, written with plain loops.

    ``exclusive`` and ``data_type`` are accepted only for signature parity
    with the avg-pool reference; max pooling does not use them.
    """
    N, C, H, W = x.shape
    if global_pool == 1:
        ksize = [H, W]
    if adaptive:
        H_out, W_out = ksize
    else:
        span_h = H - ksize[0] + 2 * paddings[0]
        span_w = W - ksize[1] + 2 * paddings[1]
        if ceil_mode:
            # Round the output extent up instead of down.
            H_out = (span_h + strides[0] - 1) // strides[0] + 1
            W_out = (span_w + strides[1] - 1) // strides[1] + 1
        else:
            H_out = span_h // strides[0] + 1
            W_out = span_w // strides[1] + 1
    out = np.zeros((N, C, H_out, W_out))
    for oh in range(H_out):
        for ow in range(W_out):
            if adaptive:
                h_lo = adaptive_start_index(oh, H, ksize[0])
                h_hi = adaptive_end_index(oh, H, ksize[0])
                w_lo = adaptive_start_index(ow, W, ksize[1])
                w_hi = adaptive_end_index(ow, W, ksize[1])
            else:
                # Clip the window to the valid (unpadded) input region.
                h_lo = max(oh * strides[0] - paddings[0], 0)
                h_hi = min(oh * strides[0] + ksize[0] - paddings[0], H)
                w_lo = max(ow * strides[1] - paddings[1], 0)
                w_hi = min(ow * strides[1] + ksize[1] - paddings[1], W)
            window = x[:, :, h_lo:h_hi, w_lo:w_hi]
            out[:, :, oh, ow] = window.max(axis=(2, 3))
    return out
def avg_pool2D_forward_naive(x,
                             ksize,
                             strides,
                             paddings,
                             global_pool=0,
                             ceil_mode=False,
                             exclusive=True,
                             adaptive=False,
                             data_type=np.float32):
    """Reference avg-pool2d over an NCHW array, written with plain loops.

    ``exclusive`` divides each window sum by the number of in-bounds
    elements; otherwise the nominal window size ksize[0]*ksize[1] is used.
    Integer ``data_type`` values (int8/uint8) round the mean to nearest.
    """
    N, C, H, W = x.shape
    if global_pool == 1:
        ksize = [H, W]
    if adaptive:
        H_out, W_out = ksize
    else:
        span_h = H - ksize[0] + 2 * paddings[0]
        span_w = W - ksize[1] + 2 * paddings[1]
        if ceil_mode:
            H_out = (span_h + strides[0] - 1) // strides[0] + 1
            W_out = (span_w + strides[1] - 1) // strides[1] + 1
        else:
            H_out = span_h // strides[0] + 1
            W_out = span_w // strides[1] + 1
    out = np.zeros((N, C, H_out, W_out))
    for oh in range(H_out):
        for ow in range(W_out):
            if adaptive:
                h_lo = adaptive_start_index(oh, H, ksize[0])
                h_hi = adaptive_end_index(oh, H, ksize[0])
                w_lo = adaptive_start_index(ow, W, ksize[1])
                w_hi = adaptive_end_index(ow, W, ksize[1])
            else:
                h_lo = max(oh * strides[0] - paddings[0], 0)
                h_hi = min(oh * strides[0] + ksize[0] - paddings[0], H)
                w_lo = max(ow * strides[1] - paddings[1], 0)
                w_hi = min(ow * strides[1] + ksize[1] - paddings[1], W)
            window = x[:, :, h_lo:h_hi, w_lo:w_hi]
            if exclusive or adaptive:
                field_size = (h_hi - h_lo) * (w_hi - w_lo)
            else:
                field_size = ksize[0] * ksize[1]
            mean = window.sum(axis=(2, 3)) / field_size
            if data_type == np.int8 or data_type == np.uint8:
                # Integer outputs are rounded to nearest, matching the op.
                out[:, :, oh, ow] = np.rint(mean).astype(data_type)
            else:
                out[:, :, oh, ow] = mean.astype(data_type)
    return out
def pool2D_forward_naive(x,
                         ksize,
                         strides,
                         paddings,
                         global_pool=0,
                         ceil_mode=False,
                         exclusive=True,
                         adaptive=False,
                         data_format='NCHW',
                         pool_type="max",
                         padding_algorithm="EXPLICIT"):
    """Reference pool2d supporting NCHW/NHWC layouts, symmetric (2-element)
    or asymmetric (4-element, [up, down, left, right]) paddings, and the
    SAME / VALID / EXPLICIT padding algorithms.
    """

    # update paddings
    def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
        # "SAME": pad so that out_size == ceil(in_size / stride); the total
        # pad per axis is split into a (possibly unequal) before/after pair.
        padding = []
        for input_size, filter_size, stride_size in zip(input_shape, pool_size,
                                                        pool_stride):
            out_size = int((input_size + stride_size - 1) / stride_size)
            pad_sum = np.max((
                (out_size - 1) * stride_size + filter_size - input_size, 0))
            pad_0 = int(pad_sum / 2)
            pad_1 = int(pad_sum - pad_0)
            padding.append(pad_0)
            padding.append(pad_1)
        return padding

    if isinstance(padding_algorithm, str):
        padding_algorithm = padding_algorithm.upper()
        if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
            raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
                             "It can only be 'SAME' or 'VALID'." %
                             str(padding_algorithm))

        if padding_algorithm == "VALID":
            # VALID ignores any caller-supplied paddings.
            paddings = [0, 0, 0, 0]
            if ceil_mode != False:
                raise ValueError(
                    "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)"
                    " must be False. "
                    "Received ceil_mode: True.")
        elif padding_algorithm == "SAME":
            input_data_shape = []
            if data_format == "NCHW":
                input_data_shape = x.shape[2:4]
            elif data_format == "NHWC":
                input_data_shape = x.shape[1:3]
            paddings = _get_padding_with_SAME(input_data_shape, ksize, strides)

    assert len(paddings) == 2 or len(paddings) == 4
    # is_sys: symmetric 2-element padding form.
    is_sys = True if len(paddings) == 2 else False

    N = x.shape[0]
    C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \
        else [x.shape[3], x.shape[1], x.shape[2]]

    if global_pool == 1:
        # Global pooling: one window covering the whole spatial extent.
        ksize = [H, W]
        paddings = [0 for _ in range(len(paddings))]

    # Expand to explicit per-side paddings.
    pad_h_up = paddings[0] if is_sys else paddings[0]
    pad_h_down = paddings[0] if is_sys else paddings[1]
    pad_w_left = paddings[1] if is_sys else paddings[2]
    pad_w_right = paddings[1] if is_sys else paddings[3]

    if adaptive:
        # Adaptive pooling: ksize IS the output spatial size.
        H_out, W_out = ksize
    else:
        H_out = (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) // strides[0] + 1 \
            if ceil_mode else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1
        W_out = (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) // strides[1] + 1 \
            if ceil_mode else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1

    out = np.zeros((N, C, H_out, W_out)) if data_format=='NCHW' \
        else np.zeros((N, H_out, W_out, C))
    for i in range(H_out):
        if adaptive:
            in_h_start = adaptive_start_index(i, H, ksize[0])
            in_h_end = adaptive_end_index(i, H, ksize[0])
        else:
            # Clip the window to the valid (unpadded) input region.
            in_h_start = np.max((i * strides[0] - pad_h_up, 0))
            in_h_end = np.min((i * strides[0] + ksize[0] - pad_h_up, H))

        for j in range(W_out):
            if adaptive:
                in_w_start = adaptive_start_index(j, W, ksize[1])
                in_w_end = adaptive_end_index(j, W, ksize[1])
            else:
                in_w_start = np.max((j * strides[1] - pad_w_left, 0))
                in_w_end = np.min((j * strides[1] + ksize[1] - pad_w_left, W))

            if data_format == 'NCHW':
                x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end]
                if pool_type == 'avg':
                    # Exclusive/adaptive: divide by in-bounds count only.
                    field_size = ((in_h_end - in_h_start) * (in_w_end - in_w_start)) \
                        if (exclusive or adaptive) else (ksize[0] * ksize[1])
                    out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size
                elif pool_type == 'max':
                    out[:, :, i, j] = np.max(x_masked, axis=(2, 3))
            elif data_format == 'NHWC':
                x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :]
                if pool_type == 'avg':
                    field_size = ((in_h_end - in_h_start) * (in_w_end - in_w_start)) \
                        if (exclusive or adaptive) else (ksize[0] * ksize[1])
                    out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size
                elif pool_type == 'max':
                    out[:, i, j, :] = np.max(x_masked, axis=(1, 2))
    return out
class TestPool2D_Op(OpTest):
    """Base correctness test for the pool2d operator.

    Subclasses customize behaviour only through the ``init_*`` hooks;
    ``setUp`` builds the op inputs/attrs and computes the expected output
    with ``pool2D_forward_naive``.
    """

    def setUp(self):
        self.op_type = "pool2d"
        self.use_cudnn = False
        self.init_kernel_type()
        self.use_mkldnn = False
        self.init_data_type()
        self.init_test_case()
        self.padding_algorithm = "EXPLICIT"
        self.init_paddings()
        self.init_global_pool()
        # Deliberately called a second time: cuDNN fp16 subclasses set
        # self.dtype inside init_kernel_type(), and init_data_type() above
        # clobbers it back to float32, so the hook must be re-applied here.
        self.init_kernel_type()
        self.init_pool_type()
        self.init_ceil_mode()
        self.init_exclusive()
        self.init_adaptive()
        self.init_data_format()
        self.init_shape()

        # Renamed from ``input`` to avoid shadowing the builtin.
        x_np = np.random.random(self.shape).astype(self.dtype)
        output = pool2D_forward_naive(
            x_np, self.ksize, self.strides, self.paddings, self.global_pool,
            self.ceil_mode, self.exclusive, self.adaptive, self.data_format,
            self.pool_type, self.padding_algorithm).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}

        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'pooling_type': self.pool_type,
            'global_pooling': self.global_pool,
            'use_cudnn': self.use_cudnn,
            'use_mkldnn': self.use_mkldnn,
            'ceil_mode': self.ceil_mode,
            'data_format': self.data_format,
            'exclusive': self.exclusive,
            'adaptive': self.adaptive,
            "padding_algorithm": self.padding_algorithm,
        }

        self.outputs = {'Out': output}

    def has_cudnn(self):
        # cuDNN path needs both a CUDA build and the use_cudnn flag.
        return core.is_compiled_with_cuda() and self.use_cudnn

    def test_check_output(self):
        if self.has_cudnn():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        # Gradients are only checked for avg pooling (max is non-smooth).
        if self.has_cudnn() and self.pool_type != "max":
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, set(['X']), 'Out', max_relative_error=0.07)
        elif self.pool_type != "max":
            self.check_grad(set(['X']), 'Out', max_relative_error=0.07)

    def init_data_format(self):
        self.data_format = "NCHW"

    def init_shape(self):
        self.shape = [2, 3, 5, 5]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]

    def init_paddings(self):
        self.paddings = [0, 0]
        self.padding_algorithm = "EXPLICIT"

    def init_kernel_type(self):
        self.use_cudnn = False

    def init_data_type(self):
        self.dtype = np.float32

    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive

    def init_global_pool(self):
        self.global_pool = True

    def init_ceil_mode(self):
        self.ceil_mode = False

    def init_exclusive(self):
        self.exclusive = True

    def init_adaptive(self):
        self.adaptive = False
class TestCase1(TestPool2D_Op):
    # Non-global avg pooling, no padding, 7x7 feature map.
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]

    def init_paddings(self):
        self.paddings = [0, 0]

    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive

    def init_global_pool(self):
        self.global_pool = False

    def init_shape(self):
        self.shape = [2, 3, 7, 7]


class TestCase2(TestPool2D_Op):
    # Same as TestCase1 but with symmetric padding of 1.
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]

    def init_paddings(self):
        self.paddings = [1, 1]

    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive

    def init_global_pool(self):
        self.global_pool = False

    def init_shape(self):
        self.shape = [2, 3, 7, 7]


class TestCase3(TestPool2D_Op):
    # Max-pool variant of the base case.
    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive


class TestCase4(TestCase1):
    # Max-pool variant of TestCase1.
    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive


class TestCase5(TestCase2):
    # Max-pool variant of TestCase2.
    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive
#--------------------test pool2d cudnn--------------------
def create_test_cudnn_class(parent):
    """Register a cuDNN-enabled twin of *parent* (skipped without CUDA)."""

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

    TestCUDNNCase.__name__ = "%s_%s" % (parent.__name__, "CUDNNOp")
    globals()[TestCUDNNCase.__name__] = TestCUDNNCase
# Instantiate cuDNN variants of every base case defined above.
create_test_cudnn_class(TestPool2D_Op)
create_test_cudnn_class(TestCase1)
create_test_cudnn_class(TestCase2)
create_test_cudnn_class(TestCase3)
create_test_cudnn_class(TestCase4)
create_test_cudnn_class(TestCase5)
#--------------------test pool2d cudnn_fp16--------------------
def create_test_cudnn_fp16_class(parent, check_grad=True):
    """Register a float16 cuDNN twin of *parent* (skipped without CUDA).

    Pass ``check_grad=False`` where fp16 gradients are too inaccurate.
    """

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNFp16Case(parent):
        def init_kernel_type(self):
            self.use_cudnn = True
            self.dtype = np.float16

        def test_check_output(self):
            # Only runs on CUDA devices that actually support fp16.
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place, atol=1e-3)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(
                    place) and self.pool_type != "max" and check_grad:
                self.check_grad_with_place(
                    place, set(['X']), 'Out', max_relative_error=0.07)

    TestCUDNNFp16Case.__name__ = "%s_%s" % (parent.__name__, "CUDNNFp16Op")
    globals()[TestCUDNNFp16Case.__name__] = TestCUDNNFp16Case
# Instantiate cuDNN fp16 variants of every base case.
create_test_cudnn_fp16_class(TestPool2D_Op)
create_test_cudnn_fp16_class(TestCase1, check_grad=False)
create_test_cudnn_fp16_class(TestCase2)
create_test_cudnn_fp16_class(TestCase3)
create_test_cudnn_fp16_class(TestCase4)
create_test_cudnn_fp16_class(TestCase5)
#--------------------test pool2d use ceil mode--------------------
def create_test_cudnn_use_ceil_class(parent):
    """Register a cuDNN twin of *parent* that pools with ceil_mode=True."""

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestPool2DUseCeilCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

        def init_ceil_mode(self):
            # Output extents are rounded up instead of down.
            self.ceil_mode = True

    TestPool2DUseCeilCase.__name__ = "%s_%s" % (parent.__name__,
                                                "CUDNNOpCeilMode")
    globals()[TestPool2DUseCeilCase.__name__] = TestPool2DUseCeilCase
# cuDNN ceil-mode variants for the base and first case.
create_test_cudnn_use_ceil_class(TestPool2D_Op)
create_test_cudnn_use_ceil_class(TestCase1)
def create_test_use_ceil_class(parent):
    """Register a CPU variant of *parent* that pools with ceil_mode=True."""

    class TestPool2DUseCeilCase(parent):
        def init_ceil_mode(self):
            self.ceil_mode = True

    TestPool2DUseCeilCase.__name__ = "%s_%s" % (parent.__name__,
                                                "CeilModeCast")
    globals()[TestPool2DUseCeilCase.__name__] = TestPool2DUseCeilCase
create_test_use_ceil_class(TestCase1)
create_test_use_ceil_class(TestCase2)


class TestAvgInclude(TestCase2):
    # Average pooling that counts padded cells in the divisor.
    def init_exclusive(self):
        self.exclusive = False


class TestCUDNNAvgInclude(TestCase2):
    # Same as TestAvgInclude but exercised through the cuDNN kernel.
    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False


class TestAvgPoolAdaptive(TestCase1):
    # Adaptive average pooling: ksize is the output spatial size.
    def init_adaptive(self):
        self.adaptive = True
#-------test pool2d with asymmetric padding-----
class TestPool2D_AsyPadding(TestPool2D_Op):
    # Asymmetric [up, down, left, right] padding variants of the base cases.
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 2]

    def init_shape(self):
        self.shape = [2, 3, 5, 5]


class TestCase1_AsyPadding(TestCase1):
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 0]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]


class TestCase2_AsyPadding(TestCase2):
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 2, 1, 2]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]


class TestCase3_AsyPadding(TestCase3):
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 2]

    def init_shape(self):
        self.shape = [2, 3, 5, 5]


class TestCase4_AsyPadding(TestCase4):
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 0]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]


class TestCase5_AsyPadding((TestCase5)):
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [2, 2, 1, 2]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]
# cuDNN / fp16 / ceil-mode variants for the asymmetric-padding cases.
create_test_cudnn_class(TestPool2D_AsyPadding)
create_test_cudnn_class(TestCase1_AsyPadding)
create_test_cudnn_class(TestCase2_AsyPadding)
create_test_cudnn_class(TestCase3_AsyPadding)
create_test_cudnn_class(TestCase4_AsyPadding)
create_test_cudnn_class(TestCase5_AsyPadding)

create_test_cudnn_fp16_class(TestPool2D_AsyPadding)
create_test_cudnn_fp16_class(TestCase1_AsyPadding, check_grad=False)
create_test_cudnn_fp16_class(TestCase2_AsyPadding)
create_test_cudnn_fp16_class(TestCase3_AsyPadding)
create_test_cudnn_fp16_class(TestCase4_AsyPadding)
create_test_cudnn_fp16_class(TestCase5_AsyPadding)

create_test_cudnn_use_ceil_class(TestPool2D_AsyPadding)
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding)

create_test_use_ceil_class(TestCase1_AsyPadding)
create_test_use_ceil_class(TestCase2_AsyPadding)
class TestAvgInclude_AsyPadding(TestCase2):
    # Inclusive (padding-counting) avg pooling with asymmetric padding.
    def init_exclusive(self):
        self.exclusive = False

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 2, 1, 2]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]


class TestCUDNNAvgInclude_AsyPadding(TestCase2):
    # cuDNN flavour of the inclusive avg pooling case above.
    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [2, 1, 1, 1]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]


class TestAvgPoolAdaptive_AsyPadding(TestCase1):
    # Adaptive avg pooling; the asymmetric paddings are carried but unused
    # by the adaptive path (ksize is the output size).
    def init_adaptive(self):
        self.adaptive = True

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 1, 0, 2]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]
#----------- test channel_last --------------
class TestPool2D_channel_last(TestPool2D_Op):
    # NHWC (channel-last) layout variants of the base cases.
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 5, 5, 3]


class TestCase1_channel_last(TestCase1):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestCase2_channel_last(TestCase2):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestCase3_channel_last(TestCase3):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 5, 5, 3]


class TestCase4_channel_last(TestCase4):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestCase5_channel_last(TestCase5):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]
# cuDNN / fp16 / ceil-mode variants for the channel-last cases.
create_test_cudnn_class(TestPool2D_channel_last)
create_test_cudnn_class(TestCase1_channel_last)
create_test_cudnn_class(TestCase2_channel_last)
create_test_cudnn_class(TestCase3_channel_last)
create_test_cudnn_class(TestCase4_channel_last)
create_test_cudnn_class(TestCase5_channel_last)

create_test_cudnn_fp16_class(TestPool2D_channel_last)
create_test_cudnn_fp16_class(TestCase1_channel_last, check_grad=False)
create_test_cudnn_fp16_class(TestCase2_channel_last)
create_test_cudnn_fp16_class(TestCase3_channel_last)
create_test_cudnn_fp16_class(TestCase4_channel_last)
create_test_cudnn_fp16_class(TestCase5_channel_last)

create_test_cudnn_use_ceil_class(TestPool2D_channel_last)
create_test_cudnn_use_ceil_class(TestCase1_channel_last)

create_test_use_ceil_class(TestCase1_channel_last)
create_test_use_ceil_class(TestCase2_channel_last)
class TestCase5_Max(TestCase2):
    # Max pooling WITH a gradient check (unlike the base class, which
    # skips gradients for max pooling); uses a loose tolerance.
    def init_pool_type(self):
        self.pool_type = "max"

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if self.has_cudnn() and self.pool_type == "max":
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, set(['X']), 'Out', max_relative_error=1.00)
        elif self.pool_type == "max":
            self.check_grad(set(['X']), 'Out', max_relative_error=1.00)


class TestCase5_channel_last_Max(TestCase5_Max):
    # NHWC flavour of the max-pool gradient check.
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


create_test_cudnn_class(TestCase5_Max)
create_test_cudnn_class(TestCase5_channel_last_Max)
class TestAvgInclude_channel_last(TestCase2_channel_last):
    # Inclusive avg pooling in NHWC layout.
    def init_exclusive(self):
        self.exclusive = False


class TestCUDNNAvgInclude_channel_last(TestCase2_channel_last):
    # cuDNN flavour of the NHWC inclusive avg pooling case.
    def init_kernel_type(self):
        self.use_cudnn = True

    def init_exclusive(self):
        self.exclusive = False


class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last):
    # Adaptive avg pooling in NHWC layout.
    def init_adaptive(self):
        self.adaptive = True
class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding):
    # Asymmetric-padding cases replayed in NHWC layout.
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 5, 5, 3]


class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 5, 5, 3]


class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]
# cuDNN / fp16 / ceil-mode variants for asymmetric-padding NHWC cases.
create_test_cudnn_class(TestPool2D_AsyPadding_channel_last)
create_test_cudnn_class(TestCase1_AsyPadding_channel_last)
create_test_cudnn_class(TestCase2_AsyPadding_channel_last)
create_test_cudnn_class(TestCase3_AsyPadding_channel_last)
create_test_cudnn_class(TestCase4_AsyPadding_channel_last)
create_test_cudnn_class(TestCase5_AsyPadding_channel_last)

create_test_cudnn_fp16_class(TestPool2D_AsyPadding_channel_last)
create_test_cudnn_fp16_class(
    TestCase1_AsyPadding_channel_last, check_grad=False)
create_test_cudnn_fp16_class(TestCase2_AsyPadding_channel_last)
create_test_cudnn_fp16_class(TestCase3_AsyPadding_channel_last)
create_test_cudnn_fp16_class(TestCase4_AsyPadding_channel_last)
create_test_cudnn_fp16_class(TestCase5_AsyPadding_channel_last)

create_test_cudnn_use_ceil_class(TestPool2D_AsyPadding_channel_last)
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding_channel_last)

create_test_use_ceil_class(TestCase1_AsyPadding_channel_last)
create_test_use_ceil_class(TestCase2_AsyPadding_channel_last)
class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding):
    # Inclusive avg pooling, asymmetric padding, NHWC layout.
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestCUDNNAvgInclude_AsyPadding_channel_last(
        TestCUDNNAvgInclude_AsyPadding):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]


class TestAvgPoolAdaptive_AsyPadding_channel_last(
        TestAvgPoolAdaptive_AsyPadding):
    def init_data_format(self):
        self.data_format = "NHWC"

    def init_shape(self):
        self.shape = [2, 7, 7, 3]
# test paddings: SAME VALID
def create_test_padding_SAME_class(parent):
    """Register a twin of *parent* that uses the "SAME" padding algorithm."""

    class TestPaddingSMAECase(parent):
        def init_paddings(self):
            # Concrete padding values are ignored once the algorithm
            # is "SAME"; they are recomputed from the input shape.
            self.paddings = [0, 0]
            self.padding_algorithm = "SAME"

    TestPaddingSMAECase.__name__ = "%s_%s" % (parent.__name__,
                                              "PaddingSAMEOp")
    globals()[TestPaddingSMAECase.__name__] = TestPaddingSMAECase
# "SAME" padding variants for NCHW and NHWC base cases.
create_test_padding_SAME_class(TestPool2D_Op)
create_test_padding_SAME_class(TestCase1)
create_test_padding_SAME_class(TestCase2)
create_test_padding_SAME_class(TestCase3)
create_test_padding_SAME_class(TestCase4)
create_test_padding_SAME_class(TestCase5)

create_test_padding_SAME_class(TestPool2D_channel_last)
create_test_padding_SAME_class(TestCase1_channel_last)
create_test_padding_SAME_class(TestCase2_channel_last)
create_test_padding_SAME_class(TestCase3_channel_last)
create_test_padding_SAME_class(TestCase4_channel_last)
create_test_padding_SAME_class(TestCase5_channel_last)
def create_test_cudnn_padding_SAME_class(parent):
    """Register a cuDNN twin of *parent* using "SAME" padding (needs CUDA)."""

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNPaddingSMAECase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

        def init_paddings(self):
            self.paddings = [1, 1]
            self.padding_algorithm = "SAME"

    TestCUDNNPaddingSMAECase.__name__ = "%s_%s" % (parent.__name__,
                                                   "CudnnPaddingSAMEOp")
    globals()[TestCUDNNPaddingSMAECase.__name__] = TestCUDNNPaddingSMAECase
# cuDNN "SAME" padding variants for NCHW and NHWC base cases.
create_test_cudnn_padding_SAME_class(TestPool2D_Op)
create_test_cudnn_padding_SAME_class(TestCase1)
create_test_cudnn_padding_SAME_class(TestCase2)
create_test_cudnn_padding_SAME_class(TestCase3)
create_test_cudnn_padding_SAME_class(TestCase4)
create_test_cudnn_padding_SAME_class(TestCase5)

create_test_cudnn_padding_SAME_class(TestPool2D_channel_last)
create_test_cudnn_padding_SAME_class(TestCase1_channel_last)
create_test_cudnn_padding_SAME_class(TestCase2_channel_last)
create_test_cudnn_padding_SAME_class(TestCase3_channel_last)
create_test_cudnn_padding_SAME_class(TestCase4_channel_last)
create_test_cudnn_padding_SAME_class(TestCase5_channel_last)
def create_test_padding_VALID_class(parent):
    """Register a twin of *parent* that uses the "VALID" padding algorithm."""

    class TestPaddingVALIDCase(parent):
        def init_paddings(self):
            # Concrete padding values are ignored for "VALID".
            self.paddings = [1, 1]
            self.padding_algorithm = "VALID"

    TestPaddingVALIDCase.__name__ = "%s_%s" % (parent.__name__,
                                               "PaddingVALIDOp")
    globals()[TestPaddingVALIDCase.__name__] = TestPaddingVALIDCase
# "VALID" padding variants for NCHW and NHWC base cases.
create_test_padding_VALID_class(TestPool2D_Op)
create_test_padding_VALID_class(TestCase1)
create_test_padding_VALID_class(TestCase2)
create_test_padding_VALID_class(TestCase3)
create_test_padding_VALID_class(TestCase4)
create_test_padding_VALID_class(TestCase5)

create_test_padding_VALID_class(TestPool2D_channel_last)
create_test_padding_VALID_class(TestCase1_channel_last)
create_test_padding_VALID_class(TestCase2_channel_last)
create_test_padding_VALID_class(TestCase3_channel_last)
create_test_padding_VALID_class(TestCase4_channel_last)
create_test_padding_VALID_class(TestCase5_channel_last)
def create_test_cudnn_padding_VALID_class(parent):
    """Register a cuDNN twin of *parent* using "VALID" padding (needs CUDA)."""

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNPaddingVALIDCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

        def init_paddings(self):
            self.paddings = [1, 1]
            self.padding_algorithm = "VALID"

    TestCUDNNPaddingVALIDCase.__name__ = "%s_%s" % (parent.__name__,
                                                    "CudnnPaddingVALIDOp")
    globals()[TestCUDNNPaddingVALIDCase.__name__] = TestCUDNNPaddingVALIDCase
# cuDNN "VALID" padding variants for NCHW and NHWC base cases.
create_test_cudnn_padding_VALID_class(TestPool2D_Op)
create_test_cudnn_padding_VALID_class(TestCase1)
create_test_cudnn_padding_VALID_class(TestCase2)
create_test_cudnn_padding_VALID_class(TestCase3)
create_test_cudnn_padding_VALID_class(TestCase4)
create_test_cudnn_padding_VALID_class(TestCase5)

create_test_cudnn_padding_VALID_class(TestPool2D_channel_last)
create_test_cudnn_padding_VALID_class(TestCase1_channel_last)
create_test_cudnn_padding_VALID_class(TestCase2_channel_last)
create_test_cudnn_padding_VALID_class(TestCase3_channel_last)
create_test_cudnn_padding_VALID_class(TestCase4_channel_last)
create_test_cudnn_padding_VALID_class(TestCase5_channel_last)
# ----- test API
class TestPool2dAPI(OpTest):
    """API-level test of ``fluid.layers.pool2d``: builds outputs for both
    layouts, list/str paddings and negative (dynamic) shapes, runs them on
    CPU and compares against ``pool2D_forward_naive``."""

    def test_api(self):
        x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32")
        x_NCHW = np.random.random([2, 3, 5, 5]).astype("float32")

        input_NHWC = fluid.layers.data(
            name="input_NHWC",
            shape=[2, 5, 5, 3],
            append_batch_size=False,
            dtype="float32")
        input_NCHW = fluid.layers.data(
            name="input_NCHW",
            shape=[2, 3, 5, 5],
            append_batch_size=False,
            dtype="float32")
        # Inputs with -1 (unknown) dims exercise shape inference.
        input_NHWC_negetive = fluid.layers.data(
            name="input_NHWC_negetive",
            shape=[2, -1, 5, 3],
            append_batch_size=False,
            dtype="float32")
        input_NCHW_negetive = fluid.layers.data(
            name="input_NCHW_negetive",
            shape=[2, 3, -1, -1],
            append_batch_size=False,
            dtype="float32")

        ksize = [3, 3]
        out_1 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=ksize,
            pool_type="max",
            pool_padding=[1, 1],
            use_cudnn=False,
            data_format="NHWC")

        out_2 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
            use_cudnn=False,
            data_format="NHWC")

        out_3 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]],
            use_cudnn=False,
            data_format="NCHW")

        out_4 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[1, 2, 1, 0],
            use_cudnn=False,
            data_format="NCHW")
        # test VALID
        out_5 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=ksize,
            pool_type="avg",
            pool_padding="VALID",
            use_cudnn=False,
            data_format="NCHW")

        out_6 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=ksize,
            pool_type="max",
            pool_padding="VALID",
            use_cudnn=False,
            data_format="NHWC")

        # test SAME
        out_7 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=[4, 4],
            pool_type="avg",
            pool_padding="SAME",
            use_cudnn=False,
            data_format="NCHW")

        out_8 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=[4, 4],
            pool_type="max",
            pool_padding="SAME",
            use_cudnn=False,
            data_format="NHWC")

        # test negetive
        out_9 = fluid.layers.pool2d(
            input=input_NHWC_negetive,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[0, 0],
            use_cudnn=False,
            data_format="NHWC")
        assert out_9.shape == (2, -1, 3, 3)

        out_10 = fluid.layers.pool2d(
            input=input_NCHW_negetive,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[0, 0],
            use_cudnn=False,
            data_format="NCHW")
        assert out_10.shape == (2, 3, -1, -1)

        exe = fluid.Executor(place=fluid.CPUPlace())
        [res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8] = exe.run(
            fluid.default_main_program(),
            feed={
                "input_NHWC": x_NHWC,
                "input_NCHW": x_NCHW,
                "input_NHWC_negetive": x_NHWC,
                "input_NCHW_negetive": x_NCHW
            },
            fetch_list=[
                out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8
            ])

        assert np.allclose(
            res_1,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=ksize,
                pool_type="max",
                strides=[1, 1],
                paddings=[1, 1],
                data_format="NHWC"))

        assert np.allclose(
            res_2,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[1, 1, 1, 1],
                data_format="NHWC"))
        assert np.allclose(
            res_3,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[1, 1, 1, 1],
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)

        assert np.allclose(
            res_4,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[1, 2, 1, 0],
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)

        # VALID
        assert np.allclose(
            res_5,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[10, 20],  # any ele is ok
                padding_algorithm="VALID",
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)
        assert np.allclose(
            res_6,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=ksize,
                pool_type="max",
                strides=[1, 1],
                paddings=[10, 20],
                padding_algorithm="VALID",
                data_format="NHWC"))
        # SAME
        assert np.allclose(
            res_7,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=[4, 4],
                pool_type="avg",
                strides=[1, 1],
                paddings=[10, 20],
                padding_algorithm="SAME",
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)

        assert np.allclose(
            res_8,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=[4, 4],
                pool_type="max",
                strides=[1, 1],
                paddings=[10, 20],
                padding_algorithm="SAME",
                data_format="NHWC"))
class TestPool2dAPI_Error(OpTest):
    """Error-path tests: invalid pool2d arguments must raise ValueError."""

    def test_api(self):
        input_NHWC = fluid.layers.data(
            name="input_NHWC",
            shape=[2, 5, 5, 3],
            append_batch_size=False,
            dtype="float32")
        ksize = [3, 3]

        # cudnn value error
        def run_1():
            out_1 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding=[1, 1],
                use_cudnn=[0],
                data_format="NHWC")

        self.assertRaises(ValueError, run_1)

        # data_format value error
        def run_2():
            out_2 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding=[1, 1],
                use_cudnn=False,
                data_format="NHWCC")

        self.assertRaises(ValueError, run_2)

        # padding str value error
        def run_3():
            out_3 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding="VALIDSAME",
                use_cudnn=False,
                data_format="NHWC")

        self.assertRaises(ValueError, run_3)

        # padding str valid and ceil_mode value error
        def run_4():
            out_4 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding="VALID",
                use_cudnn=False,
                ceil_mode=True,
                data_format="NHWC")

        self.assertRaises(ValueError, run_4)

        # padding with 8 ele. value error
        def run_5():
            out_5 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1]],
                use_cudnn=False,
                data_format="NHWC")

        self.assertRaises(ValueError, run_5)
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e3ef5ee5fedc5aea97522090b8090e78",
"timestamp": "",
"source": "github",
"line_count": 1231,
"max_line_length": 94,
"avg_line_length": 31.751421608448418,
"alnum_prop": 0.5600214910709717,
"repo_name": "chengduoZH/Paddle",
"id": "d5cc142b2a856cce93c8f8ef8f1bdf47f3a8e9d7",
"size": "39699",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_pool2d_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10146609"
},
{
"name": "CMake",
"bytes": "291349"
},
{
"name": "Cuda",
"bytes": "1192566"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7124331"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
} |
import os
from datetime import datetime
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.forms.formsets import BaseFormSet, formset_factory
from django.utils.translation import ugettext, ugettext_lazy as _, ungettext
import six
import waffle
from six.moves.urllib_parse import urlsplit
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.activity.models import ActivityLog
from olympia.addons import tasks as addons_tasks
from olympia.addons.models import (
Addon, AddonCategory, Category, DeniedSlug, Persona)
from olympia.addons.widgets import CategoriesSelectMultiple, IconTypeSelect
from olympia.addons.utils import verify_mozilla_trademark
from olympia.amo.fields import (
ColorField, HttpHttpsOnlyURLField, ReCaptchaField)
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import (
remove_icons, slug_validator, slugify, sorted_groupby)
from olympia.amo.validators import OneOrMoreLetterOrNumberCharacterValidator
from olympia.devhub import tasks as devhub_tasks
from olympia.devhub.utils import (
fetch_existing_translations_from_addon, get_addon_akismet_reports)
from olympia.tags.models import Tag
from olympia.translations import LOCALES
from olympia.translations.fields import TransField, TransTextarea
from olympia.translations.forms import TranslationFormMixin
from olympia.translations.models import Translation
from olympia.translations.utils import transfield_changed
from olympia.users.models import UserEmailField
from olympia.versions.models import Version
# Module-level logger for add-on form handling.
log = olympia.core.logger.getLogger('z.addons')
def clean_addon_slug(slug, instance):
    """Validate a candidate slug for an add-on and return it unchanged.

    Keeping the instance's current slug is always allowed.  Otherwise the
    slug must be well-formed, not used by any other add-on, and not on the
    denied-slug list; a forms.ValidationError is raised when any of these
    checks fails.
    """
    slug_validator(slug, lower=False)
    # Unchanged slug: nothing further to verify.
    if slug == instance.slug:
        return slug
    if Addon.objects.filter(slug=slug).exists():
        raise forms.ValidationError(ugettext(
            'This slug is already in use. Please choose another.'))
    if DeniedSlug.blocked(slug):
        message = ugettext(u'The slug cannot be "%(slug)s". '
                           u'Please choose another.')
        raise forms.ValidationError(message % {'slug': slug})
    return slug
def clean_tags(request, tags):
    """Validate a comma-separated tag string and return the set of slugs.

    Raises forms.ValidationError for denied tags, reserved tags used by
    non-admins, too many tags, or tags outside the allowed length range.
    """
    # Normalize: slugify each entry and drop empties/duplicates.
    target = [slugify(t, spaces=True, lower=True) for t in tags.split(',')]
    target = set(filter(None, target))
    min_len = amo.MIN_TAG_LENGTH
    max_len = Tag._meta.get_field('tag_text').max_length
    max_tags = amo.MAX_TAGS
    total = len(target)
    denied = (Tag.objects.values_list('tag_text', flat=True)
              .filter(tag_text__in=target, denied=True))
    if denied:
        # L10n: {0} is a single tag or a comma-separated list of tags.
        msg = ungettext('Invalid tag: {0}', 'Invalid tags: {0}',
                        len(denied)).format(', '.join(denied))
        raise forms.ValidationError(msg)
    restricted = (Tag.objects.values_list('tag_text', flat=True)
                  .filter(tag_text__in=target, restricted=True))
    if not acl.action_allowed(request, amo.permissions.ADDONS_EDIT):
        # Regular users may not use restricted tags at all.
        if restricted:
            # L10n: {0} is a single tag or a comma-separated list of tags.
            msg = ungettext('"{0}" is a reserved tag and cannot be used.',
                            '"{0}" are reserved tags and cannot be used.',
                            len(restricted)).format('", "'.join(restricted))
            raise forms.ValidationError(msg)
    else:
        # Admin's restricted tags don't count towards the limit.
        total = len(target - set(restricted))
    if total > max_tags:
        num = total - max_tags
        msg = ungettext('You have {0} too many tags.',
                        'You have {0} too many tags.', num).format(num)
        raise forms.ValidationError(msg)
    if any(t for t in target if len(t) > max_len):
        raise forms.ValidationError(
            ugettext(
                'All tags must be %s characters or less after invalid '
                'characters are removed.' % max_len))
    if any(t for t in target if len(t) < min_len):
        msg = ungettext('All tags must be at least {0} character.',
                        'All tags must be at least {0} characters.',
                        min_len).format(min_len)
        raise forms.ValidationError(msg)
    return target
class AkismetSpamCheckFormMixin(object):
    """Form mixin that runs selected cleaned fields through Akismet.

    Subclasses list field names in ``fields_to_akismet_comment_check``;
    when the ``akismet-addon-action`` waffle switch is on, fields whose
    report comes back as spam get a validation error attached.
    """
    # Field names (from cleaned_data) to submit for spam checking.
    fields_to_akismet_comment_check = []
    def clean(self):
        """Spam-check the configured fields, then delegate to super().clean()."""
        data = {
            prop: value for prop, value in self.cleaned_data.items()
            if prop in self.fields_to_akismet_comment_check}
        request_meta = getattr(self.request, 'META', {})
        # Find out if there is existing metadata that's been spam checked.
        addon_listed_versions = self.instance.versions.filter(
            channel=amo.RELEASE_CHANNEL_LISTED)
        if self.version:
            # If this is in the submission flow, exclude version in progress.
            addon_listed_versions = addon_listed_versions.exclude(
                id=self.version.id)
        existing_data = (
            fetch_existing_translations_from_addon(
                self.instance, self.fields_to_akismet_comment_check)
            if addon_listed_versions.exists() else ())
        reports = get_addon_akismet_reports(
            user=getattr(self.request, 'user', None),
            user_agent=request_meta.get('HTTP_USER_AGENT'),
            referrer=request_meta.get('HTTP_REFERER'),
            addon=self.instance,
            data=data,
            existing_data=existing_data)
        error_msg = ugettext('The text entered has been flagged as spam.')
        # Only enforce (vs. just record) when the waffle switch is active.
        error_if_spam = waffle.switch_is_active('akismet-addon-action')
        for prop, report in reports:
            is_spam = report.is_spam
            if error_if_spam and is_spam:
                self.add_error(prop, forms.ValidationError(error_msg))
        return super(AkismetSpamCheckFormMixin, self).clean()
class AddonFormBase(TranslationFormMixin, forms.ModelForm):
    """Common base for add-on edit forms.

    Pops ``request`` (required) and ``version`` (optional) from the kwargs
    and shares slug/name/tags cleaning across subclasses.
    """
    def __init__(self, *args, **kw):
        self.request = kw.pop('request')
        self.version = kw.pop('version', None)
        super(AddonFormBase, self).__init__(*args, **kw)
        # Require at least one letter/number in name and summary, when
        # those fields exist on the concrete subclass.
        for field in ('name', 'summary'):
            if field in self.fields:
                self.fields[field].validators.append(
                    OneOrMoreLetterOrNumberCharacterValidator())
    class Meta:
        # NOTE(review): ``models`` looks like a typo for ``model`` -- as
        # written it is ignored by ModelForm's metaclass, so this base
        # class generates no model-derived fields.  Confirm intent before
        # changing: subclasses declare their own Meta with ``model``.
        models = Addon
        fields = ('name', 'slug', 'summary', 'tags')
    def clean_slug(self):
        return clean_addon_slug(self.cleaned_data['slug'], self.instance)
    def clean_name(self):
        # Enforce Mozilla trademark rules on the chosen name.
        user = getattr(self.request, 'user', None)
        name = verify_mozilla_trademark(
            self.cleaned_data['name'], user,
            form=self)
        return name
    def clean_tags(self):
        return clean_tags(self.request, self.cleaned_data['tags'])
    def get_tags(self, addon):
        """Return the addon's tag texts; non-admins don't see restricted ones."""
        if acl.action_allowed(self.request, amo.permissions.ADDONS_EDIT):
            return list(addon.tags.values_list('tag_text', flat=True))
        else:
            return list(addon.tags.filter(restricted=False)
                        .values_list('tag_text', flat=True))
class CategoryForm(forms.Form):
    """Category selection for a single application of an add-on."""
    application = forms.TypedChoiceField(amo.APPS_CHOICES, coerce=int,
                                         widget=forms.HiddenInput,
                                         required=True)
    categories = forms.ModelMultipleChoiceField(
        queryset=Category.objects.all(), widget=CategoriesSelectMultiple)
    def save(self, addon):
        """Sync the addon's categories for this application with the form."""
        application = self.cleaned_data.get('application')
        categories_new = [c.id for c in self.cleaned_data['categories']]
        categories_old = [
            c.id for c in
            addon.app_categories.get(amo.APP_IDS[application].short, [])]
        # Add new categories.
        for c_id in set(categories_new) - set(categories_old):
            AddonCategory(addon=addon, category_id=c_id).save()
        # Remove old categories.
        for c_id in set(categories_old) - set(categories_new):
            AddonCategory.objects.filter(
                addon=addon, category_id=c_id).delete()
        # Remove old, outdated categories cache on the model.
        del addon.all_categories
        # Make sure the add-on is properly re-indexed
        addons_tasks.index_addons.delay([addon.id])
    def clean_categories(self):
        """Validate the selected categories against count and misc rules."""
        categories = self.cleaned_data['categories']
        total = categories.count()
        max_cat = amo.MAX_CATEGORIES
        if getattr(self, 'disabled', False) and total:
            raise forms.ValidationError(ugettext(
                'Categories cannot be changed while your add-on is featured '
                'for this application.'))
        if total > max_cat:
            # L10n: {0} is the number of categories.
            raise forms.ValidationError(ungettext(
                'You can have only {0} category.',
                'You can have only {0} categories.',
                max_cat).format(max_cat))
        # Bug fix: the previous ``filter(lambda x: x.misc, categories)``
        # returns a lazy iterator on Python 3, which is *always* truthy, so
        # the misc-category error fired for any multi-category selection.
        # any() actually evaluates the predicate on both Python 2 and 3.
        has_misc = any(category.misc for category in categories)
        if has_misc and total > 1:
            raise forms.ValidationError(ugettext(
                'The miscellaneous category cannot be combined with '
                'additional categories.'))
        return categories
class BaseCategoryFormSet(BaseFormSet):
    """Formset of CategoryForms, one per application the add-on supports.

    Builds the initial data and per-form category choices from the addon's
    compatible apps; apps without matching categories are dropped.
    """
    def __init__(self, *args, **kw):
        self.addon = kw.pop('addon')
        self.request = kw.pop('request', None)
        super(BaseCategoryFormSet, self).__init__(*args, **kw)
        self.initial = []
        apps = sorted(self.addon.compatible_apps.keys(), key=lambda x: x.id)
        # Drop any apps that don't have appropriate categories.
        qs = Category.objects.filter(type=self.addon.type)
        app_cats = {k: list(v) for k, v in sorted_groupby(qs, 'application')}
        # Iterate over a copy since we mutate `apps` while looping.
        for app in list(apps):
            if app and not app_cats.get(app.id):
                apps.remove(app)
        if not app_cats:
            apps = []
        for app in apps:
            cats = self.addon.app_categories.get(app.short, [])
            self.initial.append({'categories': [c.id for c in cats]})
        # Wire each form to its application and restrict its choices.
        for app, form in zip(apps, self.forms):
            key = app.id if app else None
            form.request = self.request
            form.initial['application'] = key
            form.app = app
            cats = sorted(app_cats[key], key=lambda x: x.name)
            form.fields['categories'].choices = [(c.id, c.name) for c in cats]
            # If this add-on is featured for this application, category
            # changes are forbidden.
            if not acl.action_allowed(self.request,
                                      amo.permissions.ADDONS_EDIT):
                form.disabled = (app and self.addon.is_featured(app))
    def save(self):
        """Persist every per-application CategoryForm against the addon."""
        for f in self.forms:
            f.save(self.addon)
# One CategoryForm per compatible application; extra=0 because the forms
# come entirely from the initial data built in BaseCategoryFormSet.
CategoryFormSet = formset_factory(form=CategoryForm,
                                  formset=BaseCategoryFormSet, extra=0)
def icons():
    """
    Generates a list of tuples for the default icons for add-ons,
    in the format (pseudo-mime-type, description).
    """
    icons = [('image/jpeg', 'jpeg'), ('image/png', 'png'), ('', 'default')]
    dirs, files = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
    for fname in files:
        # Only the 32px variants are listed; the generic default is skipped.
        # NOTE(review): the b'' prefixes assume listdir() yields bytes
        # filenames -- on a backend returning str this raises TypeError.
        # Confirm against the configured storage backend.
        if b'32' in fname and b'default' not in fname:
            icon_name = fname.split(b'-')[0]
            icons.append(('icon/%s' % icon_name, icon_name))
    return sorted(icons)
class AddonFormMedia(AddonFormBase):
    """Edit form for an add-on's icon (type selection or custom upload)."""
    icon_type = forms.CharField(widget=IconTypeSelect(
        choices=[]), required=False)
    # Hash identifying a previously-uploaded temporary icon file.
    icon_upload_hash = forms.CharField(required=False)
    class Meta:
        model = Addon
        fields = ('icon_upload_hash', 'icon_type')
    def __init__(self, *args, **kwargs):
        super(AddonFormMedia, self).__init__(*args, **kwargs)
        # Add icons here so we only read the directory when
        # AddonFormMedia is actually being used.
        self.fields['icon_type'].widget.choices = icons()
    def save(self, addon, commit=True):
        """Save the form; if an icon was uploaded, resize it asynchronously."""
        if self.cleaned_data['icon_upload_hash']:
            upload_hash = self.cleaned_data['icon_upload_hash']
            upload_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
            dirname = addon.get_icon_dir()
            destination = os.path.join(dirname, '%s' % addon.id)
            # Drop the old icon files before the async resize writes new ones.
            remove_icons(destination)
            devhub_tasks.resize_icon.delay(
                upload_path, destination, amo.ADDON_ICON_SIZES,
                set_modified_on=addon.serializable_reference())
        return super(AddonFormMedia, self).save(commit)
class AdditionalDetailsForm(AddonFormBase):
    """Edit form for locale, homepage, tags and contribution URL."""
    default_locale = forms.TypedChoiceField(choices=LOCALES)
    homepage = TransField.adapt(HttpHttpsOnlyURLField)(required=False)
    tags = forms.CharField(required=False)
    contributions = HttpHttpsOnlyURLField(required=False, max_length=255)
    class Meta:
        model = Addon
        fields = ('default_locale', 'homepage', 'tags', 'contributions')
    def __init__(self, *args, **kw):
        super(AdditionalDetailsForm, self).__init__(*args, **kw)
        # Pre-fill the tags field from the addon's current tags.
        if self.fields.get('tags'):
            self.fields['tags'].initial = ', '.join(
                self.get_tags(self.instance))
    def clean_contributions(self):
        """Restrict contribution URLs to the approved donation domains."""
        if self.cleaned_data['contributions']:
            hostname = urlsplit(self.cleaned_data['contributions']).hostname
            # str.endswith accepts a tuple of allowed suffixes, which also
            # lets subdomains of the approved hosts through.
            if not hostname.endswith(amo.VALID_CONTRIBUTION_DOMAINS):
                raise forms.ValidationError(ugettext(
                    'URL domain must be one of [%s], or a subdomain.'
                ) % ', '.join(amo.VALID_CONTRIBUTION_DOMAINS))
        return self.cleaned_data['contributions']
    def clean(self):
        # Make sure we have the required translations in the new locale.
        required = 'name', 'summary', 'description'
        if not self.errors and 'default_locale' in self.changed_data:
            fields = dict((k, getattr(self.instance, k + '_id'))
                          for k in required)
            locale = self.cleaned_data['default_locale']
            ids = filter(None, fields.values())
            qs = (Translation.objects.filter(locale=locale, id__in=ids,
                                             localized_string__isnull=False)
                  .values_list('id', flat=True))
            missing = [k for k, v in fields.items() if v not in qs]
            if missing:
                raise forms.ValidationError(ugettext(
                    'Before changing your default locale you must have a '
                    'name, summary, and description in that locale. '
                    'You are missing %s.') % ', '.join(map(repr, missing)))
        return super(AdditionalDetailsForm, self).clean()
    def save(self, addon, commit=False):
        """Persist the form and sync the addon's tag set with the input."""
        if self.fields.get('tags'):
            tags_new = self.cleaned_data['tags']
            tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]
            # Add new tags.
            for t in set(tags_new) - set(tags_old):
                Tag(tag_text=t).save_tag(addon)
            # Remove old tags.
            for t in set(tags_old) - set(tags_new):
                Tag(tag_text=t).remove_tag(addon)
        # We ignore `commit`, since we need it to be `False` so we can save
        # the ManyToMany fields on our own.
        addonform = super(AdditionalDetailsForm, self).save(commit=False)
        addonform.save()
        return addonform
class AdditionalDetailsFormUnlisted(AdditionalDetailsForm):
    # We want the same fields as the listed version. In particular,
    # default_locale is referenced in the template and needs to exist.
    pass
class AddonFormTechnical(AddonFormBase):
    """Technical-details edit form for listed add-ons."""
    developer_comments = TransField(widget=TransTextarea, required=False)
    class Meta:
        model = Addon
        fields = ('developer_comments', 'view_source', 'public_stats')
class AddonFormTechnicalUnlisted(AddonFormBase):
    """Technical-details form for unlisted add-ons: no editable fields."""
    class Meta:
        model = Addon
        fields = ()
class AbuseForm(forms.Form):
    """Abuse-report form with a captcha for anonymous submitters."""
    recaptcha = ReCaptchaField(label='')
    text = forms.CharField(required=True,
                           label='',
                           widget=forms.Textarea())
    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request')
        super(AbuseForm, self).__init__(*args, **kwargs)
        # Skip the captcha for authenticated users, or when no reCAPTCHA
        # key is configured (e.g. in development).
        if (not self.request.user.is_anonymous or
                not settings.NOBOT_RECAPTCHA_PRIVATE_KEY):
            del self.fields['recaptcha']
class ThemeFormBase(AddonFormBase):
    """Base form for theme (Persona) submission.

    Populates the category choices and wires the header field's widget
    to the persona upload endpoint.
    """
    def __init__(self, *args, **kwargs):
        super(ThemeFormBase, self).__init__(*args, **kwargs)
        # Only non-hidden (weight >= 0) persona categories are offered.
        cats = Category.objects.filter(type=amo.ADDON_PERSONA, weight__gte=0)
        cats = sorted(cats, key=lambda x: x.name)
        self.fields['category'].choices = [(c.id, c.name) for c in cats]
        for field in ('header', ):
            self.fields[field].widget.attrs = {
                'data-upload-url': reverse('devhub.personas.upload_persona',
                                           args=['persona_%s' % field]),
                'data-allowed-types': amo.SUPPORTED_IMAGE_TYPES
            }
class ThemeForm(ThemeFormBase):
    """Submission form that creates a new theme (Addon + Persona pair)."""
    name = forms.CharField(max_length=50)
    slug = forms.CharField(max_length=30)
    category = forms.ModelChoiceField(queryset=Category.objects.all(),
                                      widget=forms.widgets.RadioSelect)
    description = forms.CharField(widget=forms.Textarea(attrs={'rows': 4}),
                                  max_length=500, required=False)
    tags = forms.CharField(required=False)
    license = forms.TypedChoiceField(
        choices=amo.PERSONA_LICENSES_CHOICES,
        coerce=int, empty_value=None, widget=forms.HiddenInput,
        error_messages={'required': _(u'A license must be selected.')})
    header = forms.FileField(required=False)
    header_hash = forms.CharField(widget=forms.HiddenInput)
    # Native color picker doesn't allow real time tracking of user input
    # and empty values, thus force the JavaScript color picker for now.
    # See bugs 1005206 and 1003575.
    accentcolor = ColorField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'color-picker'}),
    )
    textcolor = ColorField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'color-picker'}),
    )
    # Submitter must accept the agreement.
    agreed = forms.BooleanField()
    # This lets us POST the data URIs of the unsaved previews so we can still
    # show them if there were form errors. It's really clever.
    unsaved_data = forms.CharField(required=False, widget=forms.HiddenInput)
    class Meta:
        model = Addon
        fields = ('name', 'slug', 'description', 'tags')
    def save(self, commit=False):
        """Create the Addon + Persona records and kick off image processing.

        Returns the newly-created Addon (always saved; `commit` is unused).
        """
        data = self.cleaned_data
        addon = Addon.objects.create(
            slug=data.get('slug'),
            status=amo.STATUS_PENDING, type=amo.ADDON_PERSONA)
        addon.name = {'en-US': data['name']}
        if data.get('description'):
            addon.description = data['description']
        addon._current_version = Version.objects.create(addon=addon,
                                                        version='0')
        addon.save()
        # Create Persona instance.
        p = Persona()
        p.persona_id = 0
        p.addon = addon
        p.header = 'header.png'
        # Colors are stored without the leading '#'.
        if data['accentcolor']:
            p.accentcolor = data['accentcolor'].lstrip('#')
        if data['textcolor']:
            p.textcolor = data['textcolor'].lstrip('#')
        p.license = data['license']
        p.submit = datetime.now()
        user = self.request.user
        p.author = user.username
        p.display_username = user.name
        p.save()
        # Save header and preview images.
        addons_tasks.save_theme.delay(data['header_hash'], addon.pk)
        # Save user info.
        addon.addonuser_set.create(user=user, role=amo.AUTHOR_ROLE_OWNER)
        # Save tags.
        for t in data['tags']:
            Tag(tag_text=t).save_tag(addon)
        # Save categories.
        AddonCategory(addon=addon, category=data['category']).save()
        return addon
class EditThemeForm(AddonFormBase):
    """Edit form for an existing theme (Addon + Persona pair)."""
    name = TransField(max_length=50, label=_('Give Your Theme a Name.'))
    slug = forms.CharField(max_length=30)
    category = forms.ModelChoiceField(queryset=Category.objects.all(),
                                      widget=forms.widgets.RadioSelect)
    description = TransField(
        widget=TransTextarea(attrs={'rows': 4}),
        max_length=500, required=False, label=_('Describe your Theme.'))
    tags = forms.CharField(required=False)
    accentcolor = ColorField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'color-picker'}),
    )
    textcolor = ColorField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'color-picker'}),
    )
    license = forms.TypedChoiceField(
        choices=amo.PERSONA_LICENSES_CHOICES, coerce=int, empty_value=None,
        widget=forms.HiddenInput,
        error_messages={'required': _(u'A license must be selected.')})
    # Theme re-upload.
    header = forms.FileField(required=False)
    header_hash = forms.CharField(widget=forms.HiddenInput, required=False)
    class Meta:
        model = Addon
        fields = ('name', 'slug', 'description', 'tags')
    def __init__(self, *args, **kw):
        self.request = kw.pop('request')
        # Deliberately skips AddonFormBase.__init__ (which pops 'version'
        # and adds name/summary validators) and goes straight to ModelForm.
        super(AddonFormBase, self).__init__(*args, **kw)
        addon = Addon.objects.get(id=self.instance.id)
        persona = addon.persona
        # Allow theme artists to localize Name and Description.
        for trans in Translation.objects.filter(id=self.initial['name']):
            self.initial['name_' + trans.locale.lower()] = trans
        for trans in Translation.objects.filter(
                id=self.initial['description']):
            self.initial['description_' + trans.locale.lower()] = trans
        self.old_tags = self.get_tags(addon)
        self.initial['tags'] = ', '.join(self.old_tags)
        # Colors are stored without '#'; the widget expects it prefixed.
        if persona.accentcolor:
            self.initial['accentcolor'] = '#' + persona.accentcolor
        if persona.textcolor:
            self.initial['textcolor'] = '#' + persona.textcolor
        self.initial['license'] = persona.license
        cats = sorted(Category.objects.filter(type=amo.ADDON_PERSONA,
                                              weight__gte=0),
                      key=lambda x: x.name)
        self.fields['category'].choices = [(c.id, c.name) for c in cats]
        try:
            self.initial['category'] = addon.categories.values_list(
                'id', flat=True)[0]
        except IndexError:
            # Theme has no category yet; leave the field blank.
            pass
        for field in ('header', ):
            self.fields[field].widget.attrs = {
                'data-upload-url': reverse('devhub.personas.reupload_persona',
                                           args=[addon.slug,
                                                 'persona_%s' % field]),
                'data-allowed-types': amo.SUPPORTED_IMAGE_TYPES
            }
    def clean_slug(self):
        return clean_addon_slug(self.cleaned_data['slug'], self.instance)
    def save(self):
        """Persist Persona- and Addon-level changes; returns cleaned_data."""
        addon = self.instance
        persona = addon.persona
        data = self.cleaned_data
        # Update Persona-specific data.
        persona_data = {
            'license': int(data['license']),
            'accentcolor': data['accentcolor'].lstrip('#'),
            'textcolor': data['textcolor'].lstrip('#'),
            'author': self.request.user.username,
            'display_username': self.request.user.name
        }
        changed = False
        for k, v in six.iteritems(persona_data):
            if v != getattr(persona, k):
                changed = True
                setattr(persona, k, v)
        if changed:
            persona.save()
        if self.changed_data:
            ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
        self.instance.modified = datetime.now()
        # Update Addon-specific data.
        changed = (
            set(self.old_tags) != data['tags'] or  # Check if tags changed.
            self.initial['slug'] != data['slug'] or  # Check if slug changed.
            transfield_changed('description', self.initial, data) or
            transfield_changed('name', self.initial, data))
        if changed:
            # Only save if addon data changed.
            super(EditThemeForm, self).save()
        # Update tags.
        tags_new = data['tags']
        tags_old = [slugify(t, spaces=True) for t in self.old_tags]
        # Add new tags.
        for t in set(tags_new) - set(tags_old):
            Tag(tag_text=t).save_tag(addon)
        # Remove old tags.
        for t in set(tags_old) - set(tags_new):
            Tag(tag_text=t).remove_tag(addon)
        # Update category.
        if data['category'].id != self.initial['category']:
            addon_cat = addon.addoncategory_set.all()[0]
            addon_cat.category = data['category']
            addon_cat.save()
        # Theme reupload.
        if not addon.is_pending():
            if data['header_hash']:
                addons_tasks.save_theme_reupload.delay(
                    data['header_hash'], addon.pk)
        return data
class EditThemeOwnerForm(forms.Form):
    """Transfer a theme to a new owner identified by email address."""
    owner = UserEmailField()
    def __init__(self, *args, **kw):
        self.instance = kw.pop('instance')
        super(EditThemeOwnerForm, self).__init__(*args, **kw)
        addon = self.instance
        self.fields['owner'].widget.attrs['placeholder'] = _(
            "Enter a new author's email address")
        try:
            self.instance_addonuser = addon.addonuser_set.all()[0]
            self.initial['owner'] = self.instance_addonuser.user.email
        except IndexError:
            # If there was never an author before, then don't require one now.
            self.instance_addonuser = None
            self.fields['owner'].required = False
    def save(self):
        """Apply the ownership change (Persona author + AddonUser record)."""
        data = self.cleaned_data
        if data.get('owner'):
            # NOTE(review): this compares an AddonUser against a user object
            # from the email field -- presumably intended as
            # `self.instance_addonuser.user != data['owner']`; as written it
            # looks always unequal. Confirm before changing.
            changed = (not self.instance_addonuser or
                       self.instance_addonuser != data['owner'])
            if changed:
                # Update Persona-specific data.
                persona = self.instance.persona
                persona.author = data['owner'].username
                persona.display_username = data['owner'].name
                persona.save()
            if not self.instance_addonuser:
                # If there was previously no owner at all, create one.
                self.instance.addonuser_set.create(user=data['owner'],
                                                   role=amo.AUTHOR_ROLE_OWNER)
            elif self.instance_addonuser != data['owner']:
                # If the owner has changed, update the `AddonUser` object.
                self.instance_addonuser.user = data['owner']
                self.instance_addonuser.role = amo.AUTHOR_ROLE_OWNER
                self.instance_addonuser.save()
            self.instance.modified = datetime.now()
            self.instance.save()
        return data
| {
"content_hash": "47d3ded6332ce93f22c9b0169f5c9f7f",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 78,
"avg_line_length": 38.44569816643159,
"alnum_prop": 0.5974759703573262,
"repo_name": "aviarypl/mozilla-l10n-addons-server",
"id": "762a1b2a72791ff8f220a3b9eb73a60bab9588d4",
"size": "27258",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/olympia/addons/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "809734"
},
{
"name": "Dockerfile",
"bytes": "2898"
},
{
"name": "HTML",
"bytes": "515798"
},
{
"name": "JavaScript",
"bytes": "1070508"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "10596"
},
{
"name": "Python",
"bytes": "5462821"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "8821"
},
{
"name": "Smarty",
"bytes": "1388"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Organization.slug unique (follow-up to 0007_organization_slug)."""
    dependencies = [
        ('core', '0007_organization_slug'),
    ]
    operations = [
        migrations.AlterField(
            model_name='organization',
            name='slug',
            field=models.SlugField(unique=True),
        ),
    ]
| {
"content_hash": "f8384f64345ba8da06d64d00617abcbf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 20.9375,
"alnum_prop": 0.573134328358209,
"repo_name": "CobwebOrg/cobweb-django",
"id": "1381a2ba075f79902e9149f929273bda80fd3170",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/migrations/0008_auto_20180907_1631.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212767"
},
{
"name": "Dockerfile",
"bytes": "760"
},
{
"name": "HTML",
"bytes": "48752"
},
{
"name": "JavaScript",
"bytes": "17323"
},
{
"name": "Python",
"bytes": "282001"
},
{
"name": "Shell",
"bytes": "1692"
}
],
"symlink_target": ""
} |
import os
import yaml
# All paths below are resolved relative to the ESP-IDF checkout; fail fast
# when the environment variable is not set.
IDF_PATH = os.getenv("IDF_PATH")
if not IDF_PATH:
    print("Please set IDF_PATH before running this script")
    raise SystemExit(-1)
# Reuse the already-validated IDF_PATH instead of re-reading the environment.
GITLAB_CONFIG_FILE = os.path.join(IDF_PATH, ".gitlab-ci.yml")
def check_artifacts_expire_time():
    """Exit with -2 if any GitLab CI job defines artifacts without expire_in.

    Template entries (names starting with ".") are skipped, as are entries
    that are not jobs or that have no artifacts section.
    """
    with open(GITLAB_CONFIG_FILE, "r") as f:
        # safe_load: the CI config is plain data, and yaml.load without an
        # explicit Loader is deprecated and can construct arbitrary objects.
        config = yaml.safe_load(f)
    errors = []
    print("expire time for jobs:")
    # sorted() over the mapping iterates job names alphabetically.
    for job_name in sorted(config):
        if job_name.startswith("."):
            # skip ignored jobs
            continue
        try:
            if "expire_in" not in config[job_name]["artifacts"]:
                errors.append(job_name)
            else:
                print("{}: {}".format(job_name, config[job_name]["artifacts"]["expire_in"]))
        except (KeyError, TypeError):
            # this is not job, or the job does not have artifacts
            pass
    if errors:
        print("\n\nThe following jobs did not set expire time for its artifacts")
        for error in errors:
            print(error)
        raise SystemExit(-2)
# Run the CI-config check when invoked as a script.
if __name__ == '__main__':
    check_artifacts_expire_time()
| {
"content_hash": "805c5e86a5e4e335484112af23147a07",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 92,
"avg_line_length": 25.02127659574468,
"alnum_prop": 0.5756802721088435,
"repo_name": "krzychb/rtd-test-bed",
"id": "3eb06c177c6ec5fd7af2425e4fceca51b9cc131f",
"size": "1268",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/ci/check_artifacts_expire_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "248929"
},
{
"name": "Batchfile",
"bytes": "9428"
},
{
"name": "C",
"bytes": "42611901"
},
{
"name": "C++",
"bytes": "10437923"
},
{
"name": "CMake",
"bytes": "316611"
},
{
"name": "CSS",
"bytes": "1340"
},
{
"name": "Dockerfile",
"bytes": "4319"
},
{
"name": "GDB",
"bytes": "2764"
},
{
"name": "Go",
"bytes": "146670"
},
{
"name": "HCL",
"bytes": "468"
},
{
"name": "HTML",
"bytes": "115431"
},
{
"name": "Inno Setup",
"bytes": "14977"
},
{
"name": "Lex",
"bytes": "7273"
},
{
"name": "M4",
"bytes": "189150"
},
{
"name": "Makefile",
"bytes": "439631"
},
{
"name": "Objective-C",
"bytes": "133538"
},
{
"name": "PHP",
"bytes": "498"
},
{
"name": "Pawn",
"bytes": "151052"
},
{
"name": "Perl",
"bytes": "141532"
},
{
"name": "Python",
"bytes": "1868534"
},
{
"name": "Roff",
"bytes": "102712"
},
{
"name": "Ruby",
"bytes": "206821"
},
{
"name": "Shell",
"bytes": "625528"
},
{
"name": "Smarty",
"bytes": "5972"
},
{
"name": "Tcl",
"bytes": "110"
},
{
"name": "TeX",
"bytes": "1961"
},
{
"name": "Visual Basic",
"bytes": "294"
},
{
"name": "XSLT",
"bytes": "80335"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
} |
"""Loads all local server plugins."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
| {
"content_hash": "bd224fd349fea05b0d33454a171a93fd",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 30,
"alnum_prop": 0.74,
"repo_name": "dunkhong/grr",
"id": "490487f27a0bc08bac264d774170733aa3615c6d",
"size": "172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/local/registry_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
import argparse
def main():
    """Parse the command line and write the given contents to the named file."""
    parser = argparse.ArgumentParser(description='Creates a file and writes into it.')
    parser.add_argument('file_name', help='File name to be written to.')
    parser.add_argument('contents', help='A string to be written into the file.')
    known, _unknown = parser.parse_known_args()
    with open(known.file_name, "w") as handle:
        handle.write(known.contents)
# Script entry point.
if __name__=="__main__":
    main()
| {
"content_hash": "5b7d67b217f176b3e144d521891fa8a1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 86,
"avg_line_length": 28.933333333333334,
"alnum_prop": 0.6589861751152074,
"repo_name": "BD2KGenomics/slugflow",
"id": "4fbdb045f49b9b03a0b83597af2517bb73ffb94a",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/toil/test/utils/ABCWorkflowDebug/mkFile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4074"
},
{
"name": "Python",
"bytes": "618803"
},
{
"name": "Shell",
"bytes": "19115"
}
],
"symlink_target": ""
} |
"""Utilities and helper functions."""
import threading
from oslo_config import cfg
from oslo_utils import timeutils
# Default rootwrap config path; overridden at runtime via setup_root_helper().
ROOTWRAP_CONF = "/etc/ceilometer/rootwrap.conf"
# oslo.config options registered by this module.
OPTS = [
    cfg.StrOpt('rootwrap_config',
               default=ROOTWRAP_CONF,
               help='Path to the rootwrap configuration file to '
                    'use for running commands as root'),
]
def _get_root_helper():
    """Build the rootwrap command line from the configured conf path."""
    # Read-only access: no ``global`` declaration is needed for a module read.
    return 'sudo ceilometer-rootwrap %s' % ROOTWRAP_CONF
def setup_root_helper(conf):
    """Point the module-level rootwrap config path at the value from conf.

    Must run before _get_root_helper() for the 'rootwrap_config' option to
    take effect; otherwise the compiled-in default path is used.
    """
    global ROOTWRAP_CONF
    ROOTWRAP_CONF = conf.rootwrap_config
def spawn_thread(target, *args, **kwargs):
    """Run *target* with the given arguments in a daemon thread.

    :param target: callable to execute in the new thread.
    :returns: the started :class:`threading.Thread`, daemonized so it will
        not block interpreter shutdown.
    """
    worker = threading.Thread(target=target, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    return worker
def isotime(at=None):
    """Format a datetime as an ISO 8601 string.

    :param at: datetime to format; defaults to the current UTC time.
    :returns: the timestamp suffixed with 'Z' for UTC, or with the
        timezone name otherwise.
    """
    at = at or timeutils.utcnow()
    stamp = at.strftime("%Y-%m-%dT%H:%M:%S")
    zone = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    return stamp + ('Z' if zone == 'UTC' else zone)
| {
"content_hash": "785ce59174caeecc4e2809651f91d0c6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 65,
"avg_line_length": 23.8,
"alnum_prop": 0.6395891690009337,
"repo_name": "openstack/ceilometer",
"id": "a86ebeee0b68b235fe77e34d143a4050e5035e33",
"size": "1842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1333367"
},
{
"name": "Shell",
"bytes": "18703"
}
],
"symlink_target": ""
} |
"""Install Pegasus."""
import setuptools
# Get the long description from the README file.
with open('README.md') as fp:
    _LONG_DESCRIPTION = fp.read()
setuptools.setup(
    name='pegasus',
    version='0.0.1',
    description='Pretraining with Extracted Gap Sentences for Abstractive Summarization with Sequence-to-sequence model',
    long_description=_LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author='Google Inc.',
    author_email='no-reply@google.com',
    url='http://github.com/google-research/pegasus',
    license='Apache 2.0',
    packages=setuptools.find_packages(),
    package_data={},
    scripts=[],
    # Runtime dependencies; TensorFlow is pinned to 1.15, presumably to match
    # the tensorflow-text / tensor2tensor pins below -- confirm before bumping.
    install_requires=[
        'absl-py',
        'mock',
        'numpy',
        'rouge-score',
        'sacrebleu',
        'sentencepiece',
        'tensorflow==1.15',
        'tensorflow-text==1.15.0rc0',
        'tfds-nightly',
        'tensor2tensor==1.15.0',
    ],
    extras_require={
        'tensorflow': ['tensorflow==1.15'],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    keywords='deeplearning machinelearning nlp summarization transformer pretraining',
)
| {
"content_hash": "454f4c135d344576b990a3f66c005aa8",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 121,
"avg_line_length": 30.57777777777778,
"alnum_prop": 0.6293604651162791,
"repo_name": "google-research/pegasus",
"id": "918d4b5c70679a372ff5b37c8706670c7b38b9bd",
"size": "1962",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "112333"
},
{
"name": "HTML",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "673550"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import stat
import sys
import tempfile
from django.core.management import execute_from_command_line
from django.test.simple import DjangoTestSuiteRunner
import nose
try:
import cProfile as profile
except ImportError:
import profile
try:
# Make sure to pre-load all the image handlers. If we do this later during
# unit tests, we don't seem to always get our list, causing tests to fail.
from PIL import Image
Image.init()
except ImportError:
try:
import Image
Image.init()
except ImportError:
pass
from django.conf import settings
from djblets.cache.serials import generate_media_serial
class RBTestRunner(DjangoTestSuiteRunner):
    """Django test runner that drives the test suite through nose.

    Sets up an isolated settings/media environment, forwards relevant
    command-line flags (coverage, profiling, extra nose arguments) to nose,
    and tears the temporary media directories back down afterwards.
    """
    def setup_test_environment(self, *args, **kwargs):
        """Prepare Django settings and temporary media dirs for testing."""
        super(RBTestRunner, self).setup_test_environment(*args, **kwargs)
        # Default to testing in a non-subdir install.
        settings.SITE_ROOT = "/"
        # Fixed serials keep generated media/AJAX URLs stable across runs.
        settings.AJAX_SERIAL = 123
        settings.TEMPLATE_SERIAL = 123
        settings.STATIC_URL = settings.SITE_ROOT + 'static/'
        settings.MEDIA_URL = settings.SITE_ROOT + 'media/'
        # SHA1 hashing is cheaper than the default hasher, speeding up
        # fixtures that create many users.
        settings.PASSWORD_HASHERS = (
            'django.contrib.auth.hashers.SHA1PasswordHasher',
        )
        settings.RUNNING_TEST = True
        # NOTE(review): byte-string key/value looks like a Python 2-era
        # idiom -- confirm against the supported Python versions.
        os.environ[b'RB_RUNNING_TESTS'] = b'1'
        self._setup_media_dirs()
    def teardown_test_environment(self, *args, **kwargs):
        """Remove temporary media dirs, then restore the environment."""
        self._destroy_media_dirs()
        super(RBTestRunner, self).teardown_test_environment(*args, **kwargs)
    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """Run the suite under nose.

        Returns a shell-style exit code: 0 when all tests passed, 1
        otherwise.
        """
        self.setup_test_environment()
        old_config = self.setup_databases()
        # Base nose arguments; extended below based on sys.argv flags.
        self.nose_argv = [
            sys.argv[0],
            '-v',
            '--match=^test',
            '--with-doctest',
            '--doctest-extension=.txt',
        ]
        if '--with-coverage' in sys.argv:
            self.nose_argv += ['--with-coverage',
                               '--cover-package=reviewboard']
            sys.argv.remove('--with-coverage')
        for package in settings.TEST_PACKAGES:
            self.nose_argv.append('--where=%s' % package)
        if '--with-profiling' in sys.argv:
            sys.argv.remove('--with-profiling')
            profiling = True
        else:
            profiling = False
        # If the test files are executable on the file system, nose will need
        # the --exe argument to run them
        known_file = os.path.join(os.path.dirname(__file__), 'settings.py')
        if (os.path.exists(known_file) and
            os.stat(known_file).st_mode & stat.S_IXUSR):
            self.nose_argv.append('--exe')
        # manage.py captures everything before "--"
        if len(sys.argv) > 2 and '--' in sys.argv:
            self.nose_argv += sys.argv[(sys.argv.index("--") + 1):]
        if profiling:
            # Profile the whole nose run, writing stats to ./tests.profile.
            profile.runctx('run_nose()',
                           {'run_nose': self.run_nose},
                           {},
                           os.path.join(os.getcwd(), 'tests.profile'))
        else:
            self.run_nose()
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        if self.result.success:
            return 0
        else:
            return 1
    def run_nose(self):
        """Invoke nose with the accumulated arguments, storing the result."""
        self.result = nose.main(argv=self.nose_argv, exit=False)
    def _setup_media_dirs(self):
        """Create temporary static/media/data dirs and collect static files."""
        self.tempdir = tempfile.mkdtemp(prefix='rb-tests-')
        # Don't go through Pipeline for everything, since we're not
        # triggering pipelining of our media.
        settings.STATICFILES_STORAGE = \
            'django.contrib.staticfiles.storage.StaticFilesStorage'
        if os.path.exists(self.tempdir):
            self._destroy_media_dirs()
        settings.STATIC_ROOT = os.path.join(self.tempdir, 'static')
        settings.MEDIA_ROOT = os.path.join(self.tempdir, 'media')
        settings.SITE_DATA_DIR = os.path.join(self.tempdir, 'data')
        images_dir = os.path.join(settings.MEDIA_ROOT, "uploaded", "images")
        legacy_extensions_media = os.path.join(settings.MEDIA_ROOT, 'ext')
        extensions_media = os.path.join(settings.STATIC_ROOT, 'ext')
        for dirname in (images_dir, legacy_extensions_media, extensions_media):
            if not os.path.exists(dirname):
                os.makedirs(dirname)
        # Collect all static media needed for tests, including web-based tests.
        execute_from_command_line([
            __file__, 'collectstatic', '--noinput', '-v', '0',
        ])
        generate_media_serial()
    def _destroy_media_dirs(self):
        """Recursively delete the temporary media tree created above."""
        for root, dirs, files in os.walk(self.tempdir, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                path = os.path.join(root, name)
                # Symlinked dirs must be unlinked, not rmdir'd.
                if os.path.islink(path):
                    os.remove(path)
                else:
                    os.rmdir(path)
        os.rmdir(self.tempdir)
| {
"content_hash": "ba0853ef914a1bd640edf9013bde8df0",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 32.73376623376623,
"alnum_prop": 0.5863915889704424,
"repo_name": "bkochendorfer/reviewboard",
"id": "b684ed5f5227ecb33d070ee9eaea80f3d6d7bbfb",
"size": "6177",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "reviewboard/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212721"
},
{
"name": "HTML",
"bytes": "179427"
},
{
"name": "JavaScript",
"bytes": "1463002"
},
{
"name": "Python",
"bytes": "3686542"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
"""Various helpers to handle config entry and api schema migrations."""
import logging
from aiohue import HueBridgeV2
from aiohue.discovery import is_v2_bridge
from aiohue.v2.models.device import DeviceArchetypes
from aiohue.v2.models.resource import ResourceTypes
from homeassistant import core
from homeassistant.components.binary_sensor import DEVICE_CLASS_MOTION
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_USERNAME,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.device_registry import (
async_entries_for_config_entry as devices_for_config_entries,
async_get as async_get_device_registry,
)
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry as entities_for_config_entry,
async_entries_for_device,
async_get as async_get_entity_registry,
)
from .const import CONF_API_VERSION, DOMAIN
LOGGER = logging.getLogger(__name__)
async def check_migration(hass: core.HomeAssistant, entry: ConfigEntry) -> None:
    """Check if config entry needs any migration actions.

    Renames the legacy CONF_USERNAME key to CONF_API_KEY, probes the bridge
    for V2 API support when no (or a V1) api version is stored, runs the
    entity/device migration when upgrading from V1 to V2, and records the
    supported api version in the entry data.
    """
    host = entry.data[CONF_HOST]
    # migrate CONF_USERNAME --> CONF_API_KEY
    if CONF_USERNAME in entry.data:
        LOGGER.info("Migrate %s to %s in schema", CONF_USERNAME, CONF_API_KEY)
        data = dict(entry.data)
        data[CONF_API_KEY] = data.pop(CONF_USERNAME)
        hass.config_entries.async_update_entry(entry, data=data)
    conf_api_version = entry.data.get(CONF_API_VERSION, 1)
    if conf_api_version == 1:
        # a bridge might have upgraded firmware since last run so
        # we discover its capabilities at every startup
        websession = aiohttp_client.async_get_clientsession(hass)
        if await is_v2_bridge(host, websession):
            supported_api_version = 2
        else:
            supported_api_version = 1
    else:
        # BUGFIX: `supported_api_version` was previously only assigned in the
        # V1 branch, so an entry that already stored version 2 hit a
        # NameError on the references below. A stored V2 bridge needs no
        # re-discovery.
        supported_api_version = conf_api_version
    LOGGER.debug(
        "Configured api version is %s and supported api version %s for bridge %s",
        conf_api_version,
        supported_api_version,
        host,
    )
    # the call to `is_v2_bridge` returns (silently) False even on connection error
    # so if a migration is needed it will be done on next startup
    if conf_api_version == 1 and supported_api_version == 2:
        # run entity/device schema migration for v2
        await handle_v2_migration(hass, entry)
    # store api version in entry data
    if (
        CONF_API_VERSION not in entry.data
        or conf_api_version != supported_api_version
    ):
        data = dict(entry.data)
        data[CONF_API_VERSION] = supported_api_version
        hass.config_entries.async_update_entry(entry, data=data)
async def handle_v2_migration(hass: core.HomeAssistant, entry: ConfigEntry) -> None:
    """Perform migration of devices and entities to V2 Id's.

    Re-keys the HA device registry entries (identified by V1 mac-address
    identifiers) to Hue API v2 resource ids, then rewrites the unique ids
    of the attached light/sensor entities and of group entities.
    """
    host = entry.data[CONF_HOST]
    api_key = entry.data[CONF_API_KEY]
    websession = aiohttp_client.async_get_clientsession(hass)
    dev_reg = async_get_device_registry(hass)
    ent_reg = async_get_entity_registry(hass)
    LOGGER.info("Start of migration of devices and entities to support API schema 2")
    # Create mapping of mac address to HA device id's.
    # Identifier in dev reg should be mac-address,
    # but in some cases it has a postfix like `-0b` or `-01`.
    dev_ids = {}
    for hass_dev in devices_for_config_entries(dev_reg, entry.entry_id):
        for domain, mac in hass_dev.identifiers:
            if domain != DOMAIN:
                continue
            # Strip any postfix so the bare mac is the lookup key.
            normalized_mac = mac.split("-")[0]
            dev_ids[normalized_mac] = hass_dev.id
    # initialize bridge connection just for the migration
    async with HueBridgeV2(host, api_key, websession) as api:
        # Maps a HA sensor device class onto the v2 resource type that
        # provides it, used to match sensor entities below.
        sensor_class_mapping = {
            DEVICE_CLASS_BATTERY: ResourceTypes.DEVICE_POWER,
            DEVICE_CLASS_MOTION: ResourceTypes.MOTION,
            DEVICE_CLASS_ILLUMINANCE: ResourceTypes.LIGHT_LEVEL,
            DEVICE_CLASS_TEMPERATURE: ResourceTypes.TEMPERATURE,
        }
        # migrate entities attached to a device
        for hue_dev in api.devices:
            zigbee = api.devices.get_zigbee_connectivity(hue_dev.id)
            if not zigbee or not zigbee.mac_address:
                # not a zigbee device or invalid mac
                continue
            # get existing device by V1 identifier (mac address)
            if hue_dev.product_data.product_archetype == DeviceArchetypes.BRIDGE_V2:
                # The bridge itself is registered under its bridge id.
                hass_dev_id = dev_ids.get(api.config.bridge_id.upper())
            else:
                hass_dev_id = dev_ids.get(zigbee.mac_address)
            if hass_dev_id is None:
                # can be safely ignored, this device does not exist in current config
                LOGGER.debug(
                    "Ignoring device %s (%s) as it does not (yet) exist in the device registry",
                    hue_dev.metadata.name,
                    hue_dev.id,
                )
                continue
            dev_reg.async_update_device(
                hass_dev_id, new_identifiers={(DOMAIN, hue_dev.id)}
            )
            LOGGER.info("Migrated device %s (%s)", hue_dev.metadata.name, hass_dev_id)
            # loop through all entities for device and find match
            for ent in async_entries_for_device(ent_reg, hass_dev_id, True):
                if ent.entity_id.startswith("light"):
                    # migrate light
                    # should always return one lightid here
                    new_unique_id = next(iter(hue_dev.lights), None)
                else:
                    # migrate sensors
                    matched_dev_class = sensor_class_mapping.get(
                        ent.original_device_class or "unknown"
                    )
                    new_unique_id = next(
                        (
                            sensor.id
                            for sensor in api.devices.get_sensors(hue_dev.id)
                            if sensor.type == matched_dev_class
                        ),
                        None,
                    )
                if new_unique_id is None:
                    # this may happen if we're looking at orphaned or unsupported entity
                    LOGGER.warning(
                        "Skip migration of %s because it no longer exists on the bridge",
                        ent.entity_id,
                    )
                    continue
                try:
                    ent_reg.async_update_entity(
                        ent.entity_id, new_unique_id=new_unique_id
                    )
                except ValueError:
                    # assume edge case where the entity was already migrated in a previous run
                    # which got aborted somehow and we do not want
                    # to crash the entire integration init
                    LOGGER.warning(
                        "Skip migration of %s because it already exists",
                        ent.entity_id,
                    )
                else:
                    LOGGER.info(
                        "Migrated entity %s from unique id %s to %s",
                        ent.entity_id,
                        ent.unique_id,
                        new_unique_id,
                    )
        # migrate entities that are not connected to a device (groups)
        for ent in entities_for_config_entry(ent_reg, entry.entry_id):
            if ent.device_id is not None:
                continue
            if "-" in ent.unique_id:
                # handle case where unique id is v2-id of group/zone
                hue_group = api.groups.get(ent.unique_id)
            else:
                # handle case where the unique id is just the v1 id
                v1_id = f"/groups/{ent.unique_id}"
                hue_group = api.groups.room.get_by_v1_id(
                    v1_id
                ) or api.groups.zone.get_by_v1_id(v1_id)
            if hue_group is None or hue_group.grouped_light is None:
                # this may happen if we're looking at some orphaned entity
                LOGGER.warning(
                    "Skip migration of %s because it no longer exist on the bridge",
                    ent.entity_id,
                )
                continue
            new_unique_id = hue_group.grouped_light
            LOGGER.info(
                "Migrating %s from unique id %s to %s ",
                ent.entity_id,
                ent.unique_id,
                new_unique_id,
            )
            try:
                ent_reg.async_update_entity(ent.entity_id, new_unique_id=new_unique_id)
            except ValueError:
                # assume edge case where the entity was already migrated in a previous run
                # which got aborted somehow and we do not want
                # to crash the entire integration init
                LOGGER.warning(
                    "Skip migration of %s because it already exists",
                    ent.entity_id,
                )
    LOGGER.info("Migration of devices and entities to support API schema 2 finished")
| {
"content_hash": "a87f8c73613140ae5f6b6c96d30d86c2",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 96,
"avg_line_length": 41.806306306306304,
"alnum_prop": 0.5691197069281327,
"repo_name": "jawilson/home-assistant",
"id": "9891cc65b0cc308c0162dc1b066ec4d94eb7a56d",
"size": "9281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/hue/migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""
Generic GeoRSS events service.
Retrieves current events (typically incidents or alerts) in GeoRSS format, and
shows information on events filtered by distance to the HA instance's location
and grouped by category.
"""
from datetime import timedelta
import logging
from georss_client import UPDATE_OK, UPDATE_OK_NO_DATA
from georss_client.generic_feed import GenericFeed
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
CONF_UNIT_OF_MEASUREMENT,
CONF_URL,
LENGTH_KILOMETERS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Keys used in the sensor's state attributes.
ATTR_CATEGORY = "category"
ATTR_DISTANCE = "distance"
ATTR_TITLE = "title"
CONF_CATEGORIES = "categories"
DEFAULT_ICON = "mdi:alert"
DEFAULT_NAME = "Event Service"
# Default radius (km) around home used to filter feed entries.
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = "Events"
DOMAIN = "geo_rss_events"
# The feed is polled every five minutes.
SCAN_INTERVAL = timedelta(minutes=5)
# Platform configuration: URL is required; location defaults to the HA
# instance's home coordinates when latitude/longitude are omitted.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_URL): cv.string,
        vol.Optional(CONF_LATITUDE): cv.latitude,
        vol.Optional(CONF_LONGITUDE): cv.longitude,
        vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_CATEGORIES, default=[]): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(
            CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT_OF_MEASUREMENT
        ): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the GeoRSS component."""
    # Fall back to the HA instance's home coordinates when not configured.
    latitude = config.get(CONF_LATITUDE, hass.config.latitude)
    longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
    url = config.get(CONF_URL)
    radius_in_km = config.get(CONF_RADIUS)
    name = config.get(CONF_NAME)
    categories = config.get(CONF_CATEGORIES)
    unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
    _LOGGER.debug(
        "latitude=%s, longitude=%s, url=%s, radius=%s",
        latitude,
        longitude,
        url,
        radius_in_km,
    )
    # Create all sensors based on categories.
    devices = []
    if not categories:
        # No categories configured: a single sensor covering all events.
        device = GeoRssServiceSensor(
            (latitude, longitude), url, radius_in_km, None, name, unit_of_measurement
        )
        devices.append(device)
    else:
        # One sensor per configured category.
        for category in categories:
            device = GeoRssServiceSensor(
                (latitude, longitude),
                url,
                radius_in_km,
                category,
                name,
                unit_of_measurement,
            )
            devices.append(device)
    add_entities(devices, True)
class GeoRssServiceSensor(Entity):
    """Representation of a Sensor.

    State is the number of feed entries matching the distance/category
    filters; entry titles and distances are exposed as state attributes.
    """
    def __init__(
        self, coordinates, url, radius, category, service_name, unit_of_measurement
    ):
        """Initialize the sensor."""
        self._category = category
        self._service_name = service_name
        self._state = None
        self._state_attributes = None
        self._unit_of_measurement = unit_of_measurement
        # The feed client applies the radius/category filtering; a None
        # category means no category filter at all.
        self._feed = GenericFeed(
            coordinates,
            url,
            filter_radius=radius,
            filter_categories=None if not category else [category],
        )
    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._service_name} {'Any' if self._category is None else self._category}"
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement
    @property
    def icon(self):
        """Return the default icon to use in the frontend."""
        return DEFAULT_ICON
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._state_attributes
    def update(self):
        """Update this sensor from the GeoRSS service."""
        status, feed_entries = self._feed.update()
        if status == UPDATE_OK:
            _LOGGER.debug(
                "Adding events to sensor %s: %s", self.entity_id, feed_entries
            )
            self._state = len(feed_entries)
            # And now compute the attributes from the filtered events.
            matrix = {}
            for entry in feed_entries:
                matrix[entry.title] = f"{entry.distance_to_home:.0f}{LENGTH_KILOMETERS}"
            self._state_attributes = matrix
        elif status == UPDATE_OK_NO_DATA:
            _LOGGER.debug("Update successful, but no data received from %s", self._feed)
            # Don't change the state or state attributes.
        else:
            _LOGGER.warning(
                "Update not successful, no data received from %s", self._feed
            )
            # If no events were found due to an error then just set state to
            # zero.
            self._state = 0
            self._state_attributes = {}
| {
"content_hash": "31e53b5200c40d4e552a4153ecd750a3",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 92,
"avg_line_length": 31.011904761904763,
"alnum_prop": 0.6214971209213052,
"repo_name": "pschmitt/home-assistant",
"id": "5a11136fd43131165e3be702ec9b4391e1ff7211",
"size": "5210",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/geo_rss_events/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
import os
import vision_batch_annotate_files
# Directory containing test fixture files, located next to this test module.
RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
def test_sample_batch_annotate_files(capsys):
    """Smoke-tests batch annotation of a local PDF, checking printed output."""
    file_path = os.path.join(RESOURCES, "kafka.pdf")
    vision_batch_annotate_files.sample_batch_annotate_files(file_path=file_path)
    # The sample prints its results; assert on the captured stdout.
    out, _ = capsys.readouterr()
    assert "Full text" in out
    assert "Block confidence" in out
| {
"content_hash": "cdaaa0b8cf6565aab3852be06021526e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 24.8125,
"alnum_prop": 0.7128463476070529,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "f8dbe7329165b2c84ae103adecd9140ce041a1e1",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "vision/snippets/detect/vision_batch_annotate_files_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
import argparse
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
def read_json(fname):
df = pd.read_json(fname, lines=True, convert_dates=['time'], date_unit='ms')
df['conference_id'] = df['conf_name'] + df['conf_creation_time_ms'].astype(str)
return df
def show(args):
    """Print the distinct series, conference and endpoint ids in the input.

    Args:
        args: parsed argparse namespace; args.infile is the JSON-lines dump.
    """
    df = read_json(args.infile)
    print('series: {}'.format(df['series'].unique()))
    print('conferences: {}'.format(df['conf_name'].unique()))
    print('endpoints: {}'.format(df['endpoint_id'].unique()))
def vp8_inspect(df):
    """Print per-packet deltas (time, RTP seq, VP8 picture id / timestamp).

    NOTE(review): reads the module-global ``args`` set in the __main__ block
    and expects --ssrc/--endpoint/--conference options that no subcommand in
    this file declares; this helper looks orphaned -- confirm whether it is
    still wired up anywhere.
    """
    if args.ssrc:
        df = df[df['rtp.ssrc'] == args.ssrc]
    if args.endpoint:
        df = df[df['endpoint_id'] == args.endpoint]
    if args.conference:
        df = df[df['conf_name'] == args.conference]
    # Deltas between consecutive packets expose gaps and reordering.
    df['time_delta'] = df['time'] - df['time'].shift(1)
    df['rtp.seq_delta'] = df['rtp.seq'] - df['rtp.seq'].shift(1)
    df['vp8.pictureid_delta'] = df['vp8.pictureid'] - df['vp8.pictureid'].shift(1)
    df['vp8.timestamp_delta'] = df['rtp.timestamp'] - df['rtp.timestamp'].shift(1)
    print(df)
def vp8_verify(df):
    """Placeholder for VP8 stream invariant checks; not implemented yet."""
    # tl0picidx monotonically increases
    # if the timestamp changes, then the pictureid changes
    # if the pictureid changes, then the timestamp changes
    # the timestamp delta needs to be proportional to the pictureid delta
    # there can't be big gaps in the sequence numbers
    # the webrtc pacer outputs packets every 10ms
    pass
def plot_endpoint(df, series, endpoint_id, remote_endpoint_id):
    """Plot bitrate-related time series for a single endpoint.

    Args:
        df: DataFrame of all events (as produced by read_json).
        series: collection of series names to include in the plot.
        endpoint_id: endpoint whose rows are plotted.
        remote_endpoint_id: optional remote endpoint to filter on.
    """
    df = df[df['endpoint_id'] == endpoint_id]
    if remote_endpoint_id:
        df = df[df['remote_endpoint_id'] == remote_endpoint_id]
    fig = plt.figure()
    fig.suptitle('Endpoint: {}'.format(endpoint_id))
    ax_bitrate = fig.subplots(1, sharex=True)
    ax_bitrate.set_xlabel('time')
    ax_bitrate.set_ylabel('bitrate (bps)')
    if 'calculated_rate' in series:
        # BUGFIX: previously indexed with the constant expression
        # ('series' == 'calculated_rate'), i.e. df[False]; filter rows with a
        # boolean mask instead.
        df_rates = df[df['series'] == 'calculated_rate']
        # make sure we're plotting a single remote endpoint.
        remote_endpoints = df_rates['remote_endpoint_id'].unique()
        if len(remote_endpoints) > 1:
            raise Exception('specify a --remote-endpoint {}'.format(remote_endpoints))
        # One column per encoding quality ('0'..'8') when present.
        for i in range(9):
            encoding_quality = str(i)
            if encoding_quality in df_rates.columns:
                ax_bitrate.plot(df_rates['time'],
                                df_rates[encoding_quality],
                                label=encoding_quality)
    df_series = df['series'].unique()
    if 'sent_padding' in series and 'sent_padding' in df_series:
        df_padding = df[df['series'] == 'sent_padding']
        ax_bitrate.plot(
            df_padding['time'],
            df_padding['padding_bps'] + df_padding['total_target_bps'],
            label='target + padding',
            marker='^',
            markersize=4,
            linestyle='None')
    if 'new_bwe' in series and 'new_bwe' in df_series:
        df_bwe = df[df['series'] == 'new_bwe']
        ax_bitrate.plot(df_bwe['time'],
                        df_bwe['bitrate_bps'],
                        label='estimate',
                        drawstyle='steps-post')
    if 'did_update' in series and 'did_update' in df_series:
        df_update = df[df['series'] == 'did_update']
        ax_bitrate.plot(df_update['time'],
                        df_update['total_target_bps'],
                        label='target',
                        drawstyle='steps-post')
        ax_bitrate.plot(df_update['time'],
                        df_update['total_ideal_bps'],
                        label='ideal',
                        drawstyle='steps-post')
    if 'in_pkt' in series and 'in_pkt' in df_series:
        df_pkt = df[df['series'] == 'in_pkt']
        # BUGFIX: use value equality, not identity (`is not 1`), to compare
        # the number of remote bitrate estimators.
        if len(df_pkt['rbe_id'].unique()) != 1:
            raise Exception('There cannot be multiple remote bitrate estimators')
        # Aggregate packet sizes into a per-second sent bitrate.
        df_sz = pd.DataFrame(
            {'bits': df_pkt['pkt_sz_bytes'] * 8, 'time': df_pkt['time']})
        df_rate = df_sz.resample('1S', on='time').sum()
        ax_bitrate.plot(df_rate.index, df_rate['bits'], label='sent')
    # todo include rtt and packet loss
    ax_bitrate.legend()
def plot(args):
    """Plot the selected series for each endpoint (or one --endpoint-id)."""
    df = read_json(args.infile)
    endpoint_ids = [args.endpoint_id] if args.endpoint_id else df['endpoint_id'].unique()
    for endpoint_id in endpoint_ids:
        plot_endpoint(df, args.series, endpoint_id, args.remote_endpoint_id)
    plt.show()
def check_endpoint(df, endpoint_id):
    """Assert bitrate-allocation invariants on one endpoint's 'did_update' rows.

    Raises AssertionError (with the endpoint id in the message) when an
    invariant is violated. To inspect a violation in the raw data (example
    for property 3):
    zgrep 7d2b7ecf data/4/series.json.gz| grep did_update | jq '. | select(.bwe_bps >= .total_ideal_bps) | select(.total_target_idx < .total_ideal_idx)' | less -S
    """
    updates = df[df['series'] == 'did_update']
    if updates.empty:
        return
    # Property 1: whenever the estimate covers the ideal bitrate
    # (ideal <= estimate), the target must have reached the ideal.
    estimate_covers_ideal = updates['bwe_bps'] >= updates['total_ideal_bps']
    target_below_ideal = updates['total_target_idx'] < updates['total_ideal_idx']
    has_ideal = updates['total_ideal_bps'] != 0
    has_target = updates['total_target_bps'] != 0
    violations = updates[estimate_covers_ideal & target_below_ideal
                         & has_ideal & has_target]
    assert not len(violations), '{} is not sending as much as it should.'.format(endpoint_id)
    # Property 2: the target bitrate never exceeds the ideal bitrate.
    target_over_ideal = (updates['total_ideal_bps'] - updates['total_target_bps']) < 0
    assert not len(updates[target_over_ideal]), '{} has a target bitrate that exceeds the ideal bitrate.'.format(endpoint_id)
    # Property 3 (disabled): bitrates are eventually computed for encodings.
    # Legitimate exceptions exist, e.g. being video muted.
    #mask_6 = updates['total_ideal_bps'] > 0
    #assert len(updates[mask_6]), '{} never computed an ideal bitrate.'.format(endpoint_id)
    #mask_7 = updates['total_target_bps'] > 0
    #assert len(updates[mask_7]), '{} never computed a target bitrate.'.format(endpoint_id)
    # Property 4 (disabled): the target never exceeds the estimate. This can
    # legitimately happen because the on-stage participant is never suspended.
    # mask_8 = (updates['bwe_bps'] - updates['total_target_bps']) < 0
    # assert not len(updates[mask_8]), '{} has a target bitrate that exceeds the bandwidth estimation.'.format(endpoint_id)
def check_conference(df, conference_id):
    """Run endpoint invariant checks for every endpoint of one conference.

    NOTE(review): reads the module-global ``args`` (set in the __main__
    block) for --endpoint-id; this would raise NameError if the module were
    imported rather than run as a script -- confirm intent.
    """
    endpoint_ids = [args.endpoint_id] if args.endpoint_id else df['endpoint_id'].unique()
    for endpoint_id in endpoint_ids:
        check_endpoint(df[df['endpoint_id'] == endpoint_id], endpoint_id)
def check(args):
    """Check invariants for each conference (or one --conference-id)."""
    df = read_json(args.infile)
    conference_ids = [args.conference_id] if args.conference_id else df['conference_id'].unique()
    for conference_id in conference_ids:
        check_conference(df[df['conference_id'] == conference_id], conference_id)
if "__main__" == __name__:
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=argparse.FileType('r'))
subparsers = parser.add_subparsers()
parser_show = subparsers.add_parser('show')
parser_show.set_defaults(func=show)
parser_plot = subparsers.add_parser('plot')
parser_plot.add_argument(
'--series', nargs='+', choices=[
'did_update', 'new_bwe', 'sent_padding', 'calculated_rate'],
default='did_update new_bwe sent_padding in_pkt')
parser_plot.add_argument('--endpoint-id')
parser_plot.add_argument('--remote-endpoint-id')
parser_plot.set_defaults(func=plot)
parser_check = subparsers.add_parser('check')
parser_check.add_argument('--endpoint-id')
parser_check.add_argument('--conference-id')
parser_check.set_defaults(func=check)
args = parser.parse_args()
args.func(args)
| {
"content_hash": "91ab8097029d69b6049bdcd3c8043f0c",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 168,
"avg_line_length": 38.701492537313435,
"alnum_prop": 0.6171744440159403,
"repo_name": "jitsi/jitsi-videobridge",
"id": "60f602b9bb6b6baa712f7a75e98c89801342abf1",
"size": "7802",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resources/timeseries-cli.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "377"
},
{
"name": "HTML",
"bytes": "445"
},
{
"name": "Java",
"bytes": "684145"
},
{
"name": "JavaScript",
"bytes": "1488"
},
{
"name": "Kotlin",
"bytes": "2028878"
},
{
"name": "Perl",
"bytes": "6696"
},
{
"name": "Python",
"bytes": "7802"
},
{
"name": "Shell",
"bytes": "10514"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the NestedModel table with an automatic
    primary key and a single text column."""
    operations = [
        migrations.CreateModel(
            name='NestedModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField()),
            ],
        ),
    ]
| {
"content_hash": "442f739891a5583b5687bd8e524f0389",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 114,
"avg_line_length": 26.3125,
"alnum_prop": 0.5748218527315915,
"repo_name": "jimlyndon/django-activity-stream",
"id": "45faf01215cd0304a09c9b6af6745edb3f80ffee",
"size": "445",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "actstream/runtests/testapp_nested/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3119"
},
{
"name": "Makefile",
"bytes": "464"
},
{
"name": "Python",
"bytes": "158681"
}
],
"symlink_target": ""
} |
"""Test result related classes."""
from collections import OrderedDict
import shard_util
import time
from result_sink_util import ResultSinkClient
# Keyword arguments accepted by ResultCollection.__init__.
_VALID_RESULT_COLLECTION_INIT_KWARGS = set(['test_results', 'crashed'])
# Keyword arguments accepted by TestResult.__init__.
_VALID_TEST_RESULT_INIT_KWARGS = set(
    ['attachments', 'duration', 'expected_status', 'test_log'])
# Recognized test outcomes; mirrors the TestStatus class below.
_VALID_TEST_STATUSES = set(['PASS', 'FAIL', 'CRASH', 'ABORT', 'SKIP'])
class TestStatus:
  """Possible outcomes (statuses) of a single test run.

  Conforms to the ResultDB TestStatus definitions:
  https://source.chromium.org/chromium/infra/infra/+/main:go/src/go.chromium.org/luci/resultdb/proto/v1/test_result.proto
  """
  PASS = 'PASS'
  FAIL = 'FAIL'
  CRASH = 'CRASH'
  ABORT = 'ABORT'
  SKIP = 'SKIP'
def _validate_kwargs(kwargs, valid_args_set):
"""Validates if keywords in kwargs are accepted."""
diff = set(kwargs.keys()) - valid_args_set
assert len(diff) == 0, 'Invalid keyword argument(s) in %s passed in!' % diff
def _validate_test_status(status):
  """Raises TypeError when |status| is not a recognized test status."""
  if status not in _VALID_TEST_STATUSES:
    raise TypeError('Invalid test status: %s. Should be one of %s.' %
                    (status, _VALID_TEST_STATUSES))
def _to_standard_json_literal(status):
  """Maps a TestStatus literal to its standard JSON format equivalent.

  Standard JSON format defined at:
  https://source.chromium.org/chromium/infra/infra/+/main:go/src/go.chromium.org/luci/resultdb/proto/v1/test_result.proto
  """
  _validate_test_status(status)
  # Standard JSON spells the ABORT outcome "TIMEOUT"; all other statuses map
  # through unchanged.
  if status == TestStatus.ABORT:
    return 'TIMEOUT'
  return status
class TestResult(object):
  """Stores test outcome information of a single test run."""

  def __init__(self, name, status, **kwargs):
    """Initializes an object.

    Args:
      name: (str) Name of a test. Typically includes the suite/case prefix
          -- confirm the exact format with producers of these objects.
      status: (str) Outcome of the test; one of _VALID_TEST_STATUSES.

    (Following are possible arguments in **kwargs):
      attachments: (dict): Dict of unique attachment name to abs path mapping.
      duration: (int) Test duration in milliseconds or None if unknown.
      expected_status: (str) Expected test outcome for the run.
      test_log: (str) Logs of the test.
    """
    _validate_kwargs(kwargs, _VALID_TEST_RESULT_INIT_KWARGS)
    assert isinstance(name, str), (
        'Test name should be an instance of str. We got: %s') % type(name)
    self.name = name
    _validate_test_status(status)
    self.status = status
    self.attachments = kwargs.get('attachments', {})
    self.duration = kwargs.get('duration')
    # If unspecified, a run is expected to pass.
    self.expected_status = kwargs.get('expected_status', TestStatus.PASS)
    self.test_log = kwargs.get('test_log', '')
    # Use the var to avoid duplicate reporting.
    self._reported_to_result_sink = False

  def _compose_result_sink_tags(self):
    """Composes tags received by Result Sink from test result info."""
    tags = [('test_name', self.name)]
    # Only SKIP results have tags other than test name, to distinguish whether
    # the SKIP is expected (disabled test) or not.
    if self.status == TestStatus.SKIP:
      if self.disabled():
        tags.append(('disabled_test', 'true'))
      else:
        tags.append(('disabled_test', 'false'))
    return tags

  def disabled(self):
    """Returns whether the result represents a disabled test."""
    # A disabled test is one that is expected to be skipped and was skipped.
    return self.expected() and self.status == TestStatus.SKIP

  def expected(self):
    """Returns whether the result is expected."""
    return self.expected_status == self.status

  def report_to_result_sink(self, result_sink_client):
    """Reports the single result to result sink if never reported.

    Args:
      result_sink_client: (result_sink_util.ResultSinkClient) Result sink client
        to report test result.
    """
    if not self._reported_to_result_sink:
      result_sink_client.post(
          self.name,
          self.status,
          self.expected(),
          duration=self.duration,
          test_log=self.test_log,
          tags=self._compose_result_sink_tags(),
          file_artifacts=self.attachments)
      # Mark as reported so repeated calls are no-ops.
      self._reported_to_result_sink = True
class ResultCollection(object):
  """Stores a collection of TestResult for one or more test app launches."""
  def __init__(self, **kwargs):
    """Initializes the object.
    Args:
      (Following are possible arguments in **kwargs):
      crashed: (bool) Whether the ResultCollection is of a crashed test launch.
      test_results: (list) A list of test_results to initialize the collection.
    """
    _validate_kwargs(kwargs, _VALID_RESULT_COLLECTION_INIT_KWARGS)
    self._test_results = []
    self._crashed = kwargs.get('crashed', False)
    self._crash_message = ''
    # Route initial results through add_results() so any per-result setup
    # in add_test_result() applies to them too.
    self.add_results(kwargs.get('test_results', []))
  @property
  def crashed(self):
    """Whether the invocation(s) of the collection is regarded as crashed.
    Crash indicates there might be tests unexpectedly not run that's not
    included in |_test_results| in the collection.
    """
    return self._crashed
  @crashed.setter
  def crashed(self, value):
    """Sets crash value."""
    assert (type(value) == bool)
    self._crashed = value
  @property
  def crash_message(self):
    """Logs from crashes in collection which are unrelated to single tests."""
    return self._crash_message
  @crash_message.setter
  def crash_message(self, value):
    """Sets crash_message value."""
    self._crash_message = value
  @property
  def test_results(self):
    # Raw list of TestResult objects, in insertion order.
    return self._test_results
  def add_test_result(self, test_result):
    """Adds a single test result to collection.
    Any new test addition should go through this method for all needed setups.
    """
    self._test_results.append(test_result)
  def add_result_collection(self,
                            another_collection,
                            ignore_crash=False,
                            overwrite_crash=False):
    """Adds results and status from another ResultCollection.
    Args:
      another_collection: (ResultCollection) The other collection to be added.
      ignore_crash: (bool) Ignore any crashes from newly added collection.
      overwrite_crash: (bool) Overwrite crash status of |self| and crash
        message. Only applicable when ignore_crash=False.
    """
    # The two flags are mutually exclusive: overwriting crash state only
    # makes sense when crashes are not being ignored.
    assert (not (ignore_crash and overwrite_crash))
    if not ignore_crash:
      if overwrite_crash:
        # Reset local crash state first so the merge below takes the other
        # collection's state verbatim.
        self._crashed = False
        self._crash_message = ''
      self._crashed = self.crashed or another_collection.crashed
      self.append_crash_message(another_collection.crash_message)
    for test_result in another_collection.test_results:
      self.add_test_result(test_result)
  def add_results(self, test_results):
    """Adds a list of |TestResult|."""
    for test_result in test_results:
      self.add_test_result(test_result)
  def add_name_prefix_to_tests(self, prefix):
    """Adds a prefix to all test names of results."""
    for test_result in self._test_results:
      test_result.name = '%s%s' % (prefix, test_result.name)
  def add_test_names_status(self, test_names, test_status, **kwargs):
    """Adds a list of test names with given test status.
    Args:
      test_names: (list) A list of names of tests to add.
      test_status: (str) The test outcome of the tests to add.
      **kwargs: See possible **kwargs in TestResult.__init__ docstring.
    """
    for test_name in test_names:
      self.add_test_result(TestResult(test_name, test_status, **kwargs))
  def add_and_report_test_names_status(self, test_names, test_status, **kwargs):
    """Adds a list of test names with status and report these to ResultSink.
    Args:
      test_names: (list) A list of names of tests to add.
      test_status: (str) The test outcome of the tests to add.
      **kwargs: See possible **kwargs in TestResult.__init__ docstring.
    """
    # Build the new results in a temporary collection so they can be
    # reported before being merged into |self|.
    another_collection = ResultCollection()
    another_collection.add_test_names_status(test_names, test_status, **kwargs)
    another_collection.report_to_result_sink()
    self.add_result_collection(another_collection)
  def append_crash_message(self, message):
    """Appends crash message str to current."""
    if not message:
      return
    if self._crash_message:
      self._crash_message += '\n'
    self._crash_message += message
  def all_test_names(self):
    """Returns a set of all test names in collection."""
    return self.tests_by_expression(lambda result: True)
  def tests_by_expression(self, expression):
    """A set of test names by filtering test results with given |expression|.
    Args:
      expression: (TestResult -> bool) A function or lambda expression which
        accepts a TestResult object and returns bool.
    """
    return set(
        map(lambda result: result.name, filter(expression, self._test_results)))
  def crashed_tests(self):
    """A set of test names with any crashed status in the collection."""
    return self.tests_by_expression(lambda result: result.status == TestStatus.
                                    CRASH)
  def disabled_tests(self):
    """A set of disabled test names in the collection."""
    return self.tests_by_expression(lambda result: result.disabled())
  def expected_tests(self):
    """A set of test names with any expected status in the collection."""
    return self.tests_by_expression(lambda result: result.expected())
  def unexpected_tests(self):
    """A set of test names with any unexpected status in the collection."""
    return self.tests_by_expression(lambda result: not result.expected())
  def passed_tests(self):
    """A set of test names with any passed status in the collection."""
    return self.tests_by_expression(lambda result: result.status == TestStatus.
                                    PASS)
  def failed_tests(self):
    """A set of test names with any failed status in the collection."""
    return self.tests_by_expression(lambda result: result.status == TestStatus.
                                    FAIL)
  def flaky_tests(self):
    """A set of flaky test names in the collection."""
    # Flaky == has at least one expected AND one unexpected result.
    return self.expected_tests().intersection(self.unexpected_tests())
  def never_expected_tests(self):
    """A set of test names with only unexpected status in the collection."""
    return self.unexpected_tests().difference(self.expected_tests())
  def pure_expected_tests(self):
    """A set of test names with only expected status in the collection."""
    return self.expected_tests().difference(self.unexpected_tests())
  def set_crashed_with_prefix(self, crash_message_prefix_line=''):
    """Updates collection with the crash status and add prefix to crash message.
    Typically called at the end of runner run when runner reports failure due to
    crash but there isn't unexpected tests. The crash status and crash message
    will reflect in LUCI build page step log.
    """
    self._crashed = True
    if crash_message_prefix_line:
      crash_message_prefix_line += '\n'
    self._crash_message = crash_message_prefix_line + self.crash_message
  def report_to_result_sink(self):
    """Reports current results to result sink once.
    Note that each |TestResult| object stores whether it's been reported and
    will only report itself once.
    """
    result_sink_client = ResultSinkClient()
    for test_result in self._test_results:
      test_result.report_to_result_sink(result_sink_client)
    result_sink_client.close()
  def standard_json_output(self, path_delimiter='.'):
    """Returns a dict object confirming to Chromium standard format.
    Format defined at:
    https://chromium.googlesource.com/chromium/src/+/main/docs/testing/json_test_results_format.md
    """
    num_failures_by_type = {}
    tests = OrderedDict()
    # |seen_names| ensures num_failures_by_type counts only the FIRST
    # result of each test (see comment below).
    seen_names = set()
    shard_index = shard_util.shard_index()
    for test_result in self._test_results:
      test_name = test_result.name
      # For "num_failures_by_type" field. The field contains result count map of
      # the first result of each test.
      if test_name not in seen_names:
        seen_names.add(test_name)
        result_type = _to_standard_json_literal(test_result.status)
        num_failures_by_type[result_type] = num_failures_by_type.get(
            result_type, 0) + 1
      # For "tests" field.
      if test_name not in tests:
        tests[test_name] = {
            'expected': _to_standard_json_literal(test_result.expected_status),
            'actual': _to_standard_json_literal(test_result.status),
            'shard': shard_index,
            'is_unexpected': not test_result.expected()
        }
      else:
        # Retries of the same test append to the space-separated "actual"
        # string rather than overwriting it.
        tests[test_name]['actual'] += (
            ' ' + _to_standard_json_literal(test_result.status))
        # This means there are both expected & unexpected results for the test.
        # Thus, the overall status would be expected (is_unexpected = False)
        # and the test is regarded flaky.
        if tests[test_name]['is_unexpected'] != (not test_result.expected()):
          tests[test_name]['is_unexpected'] = False
          tests[test_name]['is_flaky'] = True
    return {
        'version': 3,
        'path_delimiter': path_delimiter,
        'seconds_since_epoch': int(time.time()),
        'interrupted': self.crashed,
        'num_failures_by_type': num_failures_by_type,
        'tests': tests
    }
  def test_runner_logs(self):
    """Returns a dict object with test results as part of test runner logs."""
    # Test name to merged test log in all unexpected results. Logs are
    # only preserved for unexpected results.
    unexpected_logs = {}
    # Per-test attempt counter, used to label each failure log.
    name_count = {}
    for test_result in self._test_results:
      if not test_result.expected():
        test_name = test_result.name
        name_count[test_name] = name_count.get(test_name, 0) + 1
        logs = unexpected_logs.get(test_name, [])
        logs.append('Failure log of attempt %d:' % name_count[test_name])
        logs.extend(test_result.test_log.split('\n'))
        unexpected_logs[test_name] = logs
    passed = list(self.passed_tests() & self.pure_expected_tests())
    disabled = list(self.disabled_tests())
    flaked = {
        test_name: unexpected_logs[test_name]
        for test_name in self.flaky_tests()
    }
    # "failed" in test runner logs are all unexpected failures (including
    # crash, etc).
    failed = {
        test_name: unexpected_logs[test_name]
        for test_name in self.never_expected_tests()
    }
    logs = OrderedDict()
    logs['passed tests'] = passed
    if disabled:
      logs['disabled tests'] = disabled
    if flaked:
      logs['flaked tests'] = flaked
    if failed:
      logs['failed tests'] = failed
    for test, log_lines in failed.items():
      logs[test] = log_lines
    for test, log_lines in flaked.items():
      logs[test] = log_lines
    if self.crashed:
      logs['test suite crash'] = self.crash_message.split('\n')
    return logs
| {
"content_hash": "c916249e458453676fd169cc3ddae2b1",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 125,
"avg_line_length": 36.50735294117647,
"alnum_prop": 0.6598858677408527,
"repo_name": "scheib/chromium",
"id": "08c0a5ce817f3fb6c87ed1a874e74711bd60d9b6",
"size": "15057",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ios/build/bots/scripts/test_result_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Wrappers to make getting and using hosts simpler."""
from aquilon.exceptions_ import NotFoundException, ArgumentError
from aquilon.aqdb.model import Machine
from aquilon.aqdb.model.dns_domain import parse_fqdn
def hostname_to_host(session, hostname):
    """Return the Host object bound to *hostname*.

    Raises NotFoundException when the host (or its machine) does not exist.
    """
    # Validate the DNS domain first: a bad domain should surface as
    # "dns domain not found" rather than a generic "host not found".
    parse_fqdn(session, hostname)
    try:
        machine = Machine.get_unique(session, hostname, compel=True)
    except NotFoundException:
        # The user asked about a host, so phrase the error in those terms
        # instead of "machine not found".
        raise NotFoundException("Host %s not found." % hostname)
    if machine.host:
        return machine.host
    raise NotFoundException("{0} does not have a host "
                            "assigned.".format(machine))
def hostlist_to_hosts(session, hostlist):
    """Resolve a list of hostnames to Host objects.

    Collects all lookup failures and reports them together in a single
    ArgumentError; also rejects an empty result list.
    """
    dbhosts = []
    failed = []
    for host in hostlist:
        try:
            dbhosts.append(hostname_to_host(session, host))
        # The Python-2-only ``except X, e`` syntax was replaced with
        # ``except ... as ...`` (valid on 2.6+ and 3.x); the two handlers
        # were identical, so they are merged into one tuple clause.
        except (NotFoundException, ArgumentError) as err:
            failed.append("%s: %s" % (host, err))
    if failed:
        raise ArgumentError("Invalid hosts in list:\n%s" %
                            "\n".join(failed))
    if not dbhosts:
        raise ArgumentError("Empty list.")
    return dbhosts
def get_host_bound_service(dbhost, dbservice):
    """Return the instance of *dbservice* that *dbhost* uses, or None."""
    return next((si for si in dbhost.services_used
                 if si.service == dbservice), None)
def get_host_dependencies(session, dbhost):
    """Return a list of strings describing how a host is being used.

    If the host has no dependencies, then an empty list is returned.
    """
    # Note: *session* is unused but kept for interface compatibility.
    return ["%s is bound as a server for service %s instance %s" %
            (dbhost.fqdn, si.service.name, si.name)
            for si in dbhost.services_provided]
def check_hostlist_size(command, config, hostlist):
    """Raise ArgumentError when *hostlist* exceeds the configured maximum.

    The limit comes from the per-command "<command>_max_list_size" broker
    option when present (an empty value disables the check), otherwise from
    "default_max_list_size".  A falsy limit disables the check entirely.
    """
    if not hostlist:
        return
    # Read the default unconditionally to match prior behavior, even when a
    # per-command override exists.
    default_max_size = config.getint("broker", "default_max_list_size")
    override_opt = "%s_max_list_size" % command
    if not config.has_option("broker", override_opt):
        limit = default_max_size
    elif config.get("broker", override_opt) != '':
        limit = config.getint("broker", override_opt)
    else:
        # An explicitly empty override disables the check for this command.
        limit = 0
    if not limit:
        return
    if len(hostlist) > limit:
        raise ArgumentError("The number of hosts in list {0:d} can not be "
                            "more than {1:d}".format(len(hostlist), limit))
| {
"content_hash": "eef89da3ac42d24c15715eec16684ccd",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 87,
"avg_line_length": 33.0722891566265,
"alnum_prop": 0.6262295081967213,
"repo_name": "jrha/aquilon",
"id": "0dba4f060e4c64ba4e1166a9bfd16f5947f51ad3",
"size": "3453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.6/aquilon/worker/dbwrappers/host.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pylab as plt
from openmdao.api import Group, Problem, IndepVarComp
from hyperloop.Python import tube_and_pod
# def create_problem(component):
# root = Group()
# prob = Problem(root)
# prob.root.add('comp', component)
# return prob
# class PressureTradeStudy(object):
# def test_case1_vs_npss(self):
# component = tube_and_pod.TubeAndPod()
# prob = create_problem(component)
if __name__ == '__main__':
    # Trade study: sweep passengers per pod and record tube area, yearly
    # energy cost and estimated ticket cost for the tube-and-pod model.
    prob = Problem()
    root = prob.root = Group()
    root.add('TubeAndPod', tube_and_pod.TubeAndPod())

    # Independent design variables.  OpenMDAO variable metadata uses the
    # 'units' key; 'h_lev' previously used the misspelled key 'unit', so its
    # unit annotation was silently dropped — fixed below.
    params = (('tube_pressure', 850.0, {'units' : 'Pa'}),
              ('pressure_initial', 760.2, {'units' : 'torr'}),
              ('num_pods', 18.),
              ('pwr', 18.5, {'units' : 'kW'}),
              ('speed', 163333.3, {'units' : 'L/min'}),
              ('time_down', 1440.0, {'units' : 'min'}),
              ('gamma', .8, {'units' : 'unitless'}),
              ('pump_weight', 715.0, {'units' : 'kg'}),
              ('electricity_price', 0.13, {'units' : 'USD/(kW*h)'}),
              ('tube_thickness', .0415014, {'units' : 'm'}),
              ('tube_length', 480000., {'units' : 'm'}),
              ('vf', 286.85, {'units' : 'm/s'}),
              ('v0', 286.85-15.0, {'units' : 'm/s'}),
              ('time_thrust', 1.5, {'units' : 's'}),
              ('pod_mach', .8, {'units': 'unitless'}),
              ('comp_inlet_area', 2.3884, {'units': 'm**2'}),
              ('comp_PR', 6.0, {'units': 'unitless'}),
              ('PsE', 0.05588, {'units': 'psi'}),
              ('des_time', 1.0),
              ('time_of_flight', 1.0),
              ('motor_max_current', 800.0),
              ('motor_LD_ratio', 0.83),
              ('motor_oversize_factor', 1.0),
              ('inverter_efficiency', 1.0),
              ('battery_cross_section_area', 15000.0, {'units': 'cm**2'}),
              ('n_passengers', 28.),
              ('A_payload', 2.3248, {'units' : 'm**2'}),
              ('r_pylon', 0.232, {'units' : 'm'}),
              ('h', 10.0, {'units' : 'm'}),
              ('vel_b', 23.0, {'units': 'm/s'}),
              ('h_lev', 0.01, {'units': 'm'}),  # was {'unit': 'm'}: typo
              ('vel', 286.86, {'units': 'm/s'}),
              ('pod_period', 120.0, {'units' : 's'}),
              ('ib', .04),
              ('bm', 20.0, {'units' : 'yr'}),
              ('track_length', 600.0, {'units' : 'km'}),
              ('avg_speed', 286.86, {'units' : 'm/s'}),
              ('depth', 10.0, {'units' : 'm'}),
              ('land_length', 600.0e3, {'units' : 'm'}),
              ('water_length', 0.0e3, {'units' : 'm'}),
              ('W', 1.0, {'units' : 'kg/s'}),
              ('operating_time', 16.0*3600.0, {'units' : 's'})
              )

    prob.root.add('des_vars', IndepVarComp(params))

    # Wire every design variable into the TubeAndPod model.
    prob.root.connect('des_vars.tube_pressure', 'TubeAndPod.tube_pressure')
    prob.root.connect('des_vars.pressure_initial', 'TubeAndPod.pressure_initial')
    prob.root.connect('des_vars.num_pods', 'TubeAndPod.num_pods')
    prob.root.connect('des_vars.pwr', 'TubeAndPod.pwr')
    prob.root.connect('des_vars.speed', 'TubeAndPod.speed')
    prob.root.connect('des_vars.time_down', 'TubeAndPod.time_down')
    prob.root.connect('des_vars.gamma', 'TubeAndPod.gamma')
    prob.root.connect('des_vars.pump_weight', 'TubeAndPod.pump_weight')
    prob.root.connect('des_vars.electricity_price', 'TubeAndPod.electricity_price')
    prob.root.connect('des_vars.tube_thickness', 'TubeAndPod.tube_thickness')
    prob.root.connect('des_vars.tube_length', 'TubeAndPod.tube_length')
    prob.root.connect('des_vars.h', 'TubeAndPod.h')
    prob.root.connect('des_vars.r_pylon', 'TubeAndPod.r_pylon')
    prob.root.connect('des_vars.vf', 'TubeAndPod.vf')
    prob.root.connect('des_vars.v0', 'TubeAndPod.v0')
    prob.root.connect('des_vars.time_thrust', 'TubeAndPod.time_thrust')
    prob.root.connect('des_vars.pod_mach', 'TubeAndPod.pod_mach')
    prob.root.connect('des_vars.comp_inlet_area', 'TubeAndPod.comp_inlet_area')
    prob.root.connect('des_vars.comp_PR', 'TubeAndPod.comp.map.PRdes')
    prob.root.connect('des_vars.PsE', 'TubeAndPod.nozzle.Ps_exhaust')
    prob.root.connect('des_vars.des_time', 'TubeAndPod.des_time')
    prob.root.connect('des_vars.time_of_flight', 'TubeAndPod.time_of_flight')
    prob.root.connect('des_vars.motor_max_current', 'TubeAndPod.motor_max_current')
    prob.root.connect('des_vars.motor_LD_ratio', 'TubeAndPod.motor_LD_ratio')
    prob.root.connect('des_vars.motor_oversize_factor', 'TubeAndPod.motor_oversize_factor')
    prob.root.connect('des_vars.inverter_efficiency', 'TubeAndPod.inverter_efficiency')
    prob.root.connect('des_vars.battery_cross_section_area', 'TubeAndPod.battery_cross_section_area')
    prob.root.connect('des_vars.n_passengers', 'TubeAndPod.n_passengers')
    prob.root.connect('des_vars.A_payload', 'TubeAndPod.A_payload')
    prob.root.connect('des_vars.vel_b', 'TubeAndPod.vel_b')
    prob.root.connect('des_vars.h_lev', 'TubeAndPod.h_lev')
    prob.root.connect('des_vars.vel', 'TubeAndPod.vel')
    prob.root.connect('des_vars.pod_period', 'TubeAndPod.cost.pod_period')
    prob.root.connect('des_vars.ib', 'TubeAndPod.cost.ib')
    prob.root.connect('des_vars.bm', 'TubeAndPod.cost.bm')
    prob.root.connect('des_vars.track_length', 'TubeAndPod.track_length')
    prob.root.connect('des_vars.avg_speed', 'TubeAndPod.cost.avg_speed')
    prob.root.connect('des_vars.land_length', 'TubeAndPod.land_length')
    prob.root.connect('des_vars.water_length', 'TubeAndPod.water_length')
    prob.root.connect('des_vars.operating_time', 'TubeAndPod.operating_time')
    prob.root.connect('des_vars.W', 'TubeAndPod.fl_start.W')

    prob.setup()

    # Sweep pod capacity and record the outputs of interest at each point.
    n_passengers = np.linspace(10, 100, num = 50)
    A_tube = np.zeros((1, len(n_passengers)))
    total_energy_cost = np.zeros((1, len(n_passengers)))
    ticket_cost = np.zeros((1, len(n_passengers)))

    for i in range(len(n_passengers)):
        prob['des_vars.n_passengers'] = n_passengers[i]
        prob.run()
        A_tube[0,i] = prob['TubeAndPod.pod.A_tube']
        total_energy_cost[0,i] = prob['TubeAndPod.cost.total_energy_cost']
        ticket_cost[0,i] = prob['TubeAndPod.cost.ticket_cost']

    # Persist the sweep results for the paper's plotting pipeline.
    np.savetxt('../../../paper/images/data_files/capacity_trades/n_passengers.txt', n_passengers, fmt = '%f', delimiter = '\t', newline = '\r\n')
    np.savetxt('../../../paper/images/data_files/capacity_trades/A_tube.txt', A_tube, fmt = '%f', delimiter = '\t', newline = '\r\n')
    np.savetxt('../../../paper/images/data_files/capacity_trades/total_energy_cost.txt', total_energy_cost, fmt = '%f', delimiter = '\t', newline = '\r\n')
    np.savetxt('../../../paper/images/data_files/capacity_trades/ticket_cost.txt', ticket_cost, fmt = '%f', delimiter = '\t', newline = '\r\n')

    plt.plot(n_passengers, A_tube[0,:], linewidth = 2.0)
    plt.xlabel('Passengers per Pod', fontsize = 12, fontweight = 'bold')
    plt.ylabel('Tube Area (m^2)', fontsize = 12, fontweight = 'bold')
    plt.show()
    plt.plot(n_passengers, total_energy_cost[0,:]/(1e6), linewidth = 2.0)
    plt.xlabel('Passengers per Pod', fontsize = 12, fontweight = 'bold')
    plt.ylabel('Yearly Energy Cost (USD)', fontsize = 12, fontweight = 'bold')
    plt.ylim(25.0, 35.0)
    plt.show()
    plt.plot(n_passengers, ticket_cost[0,:], linewidth = 2.0)
    plt.xlabel('Passengers per Pod', fontsize = 12, fontweight = 'bold')
    plt.ylabel('Estimated Ticket Cost (USD)', fontsize = 12, fontweight = 'bold')
plt.show() | {
"content_hash": "fee84be2a5c80e8a2107c1e51f4bfb09",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 152,
"avg_line_length": 47.296551724137935,
"alnum_prop": 0.6415864683581219,
"repo_name": "andipeng/MagnePlane",
"id": "1888072f4b7958e837e78c0113389304e4495c8c",
"size": "6858",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "paper/images/trade_scripts/capacity_trades_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "983"
},
{
"name": "Glyph",
"bytes": "16005"
},
{
"name": "Processing",
"bytes": "8275"
},
{
"name": "Python",
"bytes": "558002"
},
{
"name": "Shell",
"bytes": "1886"
},
{
"name": "TeX",
"bytes": "212439"
}
],
"symlink_target": ""
} |
import re
def to_list(input):
    """Return *input* as a list, wrapping non-sequence values.

    Note: the parameter name shadows the builtin ``input`` but is kept for
    keyword-argument compatibility with existing callers.
    """
    return list(input) if isinstance(input, (list, tuple)) else [input]
def resource_to_bytes(resource_str):
    """Convert a size string such as '50b', '100k', '250m' or '30g' to bytes.

    Falsy input is returned unchanged.  Fractional sizes and unknown
    suffixes raise an Exception.
    """
    if not resource_str:
        return resource_str
    lowered = resource_str.lower()
    if re.compile("([0-9]+\\.[0-9]+)([a-z]+)?").match(lowered):
        raise Exception(
            "Fractional values are not supported. Input was: {}".format(resource_str))
    matched = re.compile("([0-9]+)([a-z]+)?").match(lowered)
    # Decimal (SI) multipliers, keyed by the recognized suffix letters.
    multipliers = {'b': 1, 'k': 1000, 'm': 1000 ** 2, 'g': 1000 ** 3}
    try:
        amount = int(matched.group(1))
        suffix = matched.group(2)
        if suffix not in multipliers:
            raise Exception("Not supported type: {}".format(resource_str))
        return amount * multipliers[suffix]
    except Exception:
        # Any parse failure (no match, bad suffix) surfaces as one message.
        raise Exception("Size must be specified as bytes(b),"
                        "kilobytes(k), megabytes(m), gigabytes(g). "
                        "E.g. 50b, 100k, 250m, 30g")
def is_local(sc):
    """Return True when the SparkContext's master is a local master URL."""
    master_url = sc.getConf().get("spark.master")
    return master_url.startswith("local[") or master_url == "local"
def get_parent_pid(pid):
    """Return the parent process id of *pid* (requires the psutil package)."""
    import psutil
    return psutil.Process(pid).ppid()
| {
"content_hash": "ff6309ac953e00a54f3cd7c70a66df30",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 91,
"avg_line_length": 30.19148936170213,
"alnum_prop": 0.5553206483439042,
"repo_name": "intel-analytics/analytics-zoo",
"id": "ce699dabee440b96a002420470e9efe244079a47",
"size": "2010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/ray/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Recipe
class RecipeAdmin(admin.ModelAdmin):
    # NOTE(review): ModelAdmin itself does not consume a ``model`` attribute
    # (that is an InlineModelAdmin concept), so this assignment appears
    # harmless — confirm it is intentional.
    model = Recipe
# Expose Recipe in the Django admin site.
admin.site.register(Recipe, RecipeAdmin)
| {
"content_hash": "15db2d93e8f1a78f729cdad38b7c7d80",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 40,
"avg_line_length": 17.88888888888889,
"alnum_prop": 0.782608695652174,
"repo_name": "talpor/recipe-search-hackathon",
"id": "942a24315ac446e5f2b1ec9fec41be84beff1807",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipe-search/recipe/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4133"
},
{
"name": "HTML",
"bytes": "31287"
},
{
"name": "JavaScript",
"bytes": "38508"
},
{
"name": "Python",
"bytes": "49842"
},
{
"name": "Ruby",
"bytes": "674"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import spiceutils
# a generic gate
# has a logical effort
# has a parasitic delay
class Gate:
    """Abstract interface for a logic gate.

    A gate has a logical effort (LE), a parasitic delay (gamma) and a
    transistor sizing; subclasses implement all of the methods below.
    """
    def __init__(self, **kwargs):
        # Base class carries no state; subclasses do their own setup.
        pass
    def get_le(self):
        # Return the gate's logical effort.
        pass
    def get_gamma(self):
        # Return the gate's parasitic delay.
        pass
    def get_input_load(self):
        # Return the input capacitance presented by the gate.
        pass
    def set_size(self):
        # Size the transistors for a given input capacitance.
        pass
    def get_type(self):
        # Return a short type name; simulate_gate() uses it as the
        # "<type>.ckt" circuit file name.
        pass
    def write_spice_params(self, pfile):
        # Write the gate's .param sizing lines to *pfile*.
        pass
# an inverter
class Inv(Gate):
    """An inverter.

    One can either specify nothing, in which case the default values of
    ratio, LE and gamma will be used; otherwise one must provide all of
    ratio, LE and gamma for the critical edge.
    """

    def __init__(self, **kwargs):
        # LE = logical effort
        # ratio = ratio of pwidth : nwidth
        # gamma = parasitic delay of gate
        self.LE = 1.0
        self.ratio = 3.0
        self.gamma = 1.0
        if ("ratio" in kwargs) or ("LE" in kwargs) or ("gamma" in kwargs):
            # All three must be given together.  The original code omitted
            # the "ratio" check, so specifying only LE and gamma failed
            # with a KeyError instead of a clear assertion message.
            assert "ratio" in kwargs, ("you must specify ratio, LE and gamma "
                                       "or none of them")
            assert "LE" in kwargs, ("you must specify ratio, LE and gamma "
                                    "or none of them")
            assert "gamma" in kwargs, ("you must specify ratio, LE and gamma "
                                       "or none of them")
            self.ratio = float(kwargs["ratio"])
            self.LE = float(kwargs["LE"])
            self.gamma = float(kwargs["gamma"])

    def get_le(self):
        """Return the logical effort of the inverter."""
        return self.LE

    def get_gamma(self):
        """Return the parasitic delay of the inverter."""
        return self.gamma

    def set_size(self, Cin):
        """Split the input capacitance Cin into NMOS/PMOS widths by ratio."""
        self.Wn = Cin / (self.ratio + 1.0)
        self.Wp = Cin * self.ratio / (self.ratio + 1.0)

    def get_input_load(self):
        """Return the total input capacitance (sum of device widths)."""
        return self.Wn + self.Wp

    def get_type(self):
        """Return the circuit-type name used to locate the SPICE netlist."""
        return "inv"

    def write_spice_params(self, pfile):
        """Write the transistor-width .param lines to *pfile*.

        file.write replaces the Python-2-only ``print >>`` statement so the
        module also parses under Python 3; output is byte-identical.
        """
        pfile.write(".param Wn=%.2f\n" % (self.Wn,))
        pfile.write(".param Wp=%.2f\n" % (self.Wp,))

    def __repr__(self):
        return ("Inv(ratio=%.4f,LE=%.4f,gamma=%.4f,Wn=%.4f,Wp=%.4f)"
                % (self.ratio, self.LE, self.gamma, self.Wn, self.Wp))
# a nand gate with configurable number of inputs
class Nand(Gate):
    """A NAND gate with a configurable number of inputs.

    One can either specify N, in which case the analytical values of ratio,
    LE and gamma will be used; otherwise one must provide all of ratio, LE
    and gamma for the critical edge.
    """

    def __init__(self, **kwargs):
        # N = number of inputs
        # LE = logical effort
        # ratio = ratio of pwidth : nwidth
        # gamma = intrinsic parasitic delay
        self.N = 2
        self.LE = 1.0
        self.ratio = 3.0
        self.gamma = 1.0
        if "N" in kwargs:
            # Analytical values derived from the input count N.
            self.N = kwargs["N"]
            self.ratio = 3.0 / self.N
            self.LE = (3.0 + self.N) / 4.0
            self.gamma = 1.0
        if ("ratio" in kwargs) or ("LE" in kwargs) or ("gamma" in kwargs):
            # All three must be given together.  The original code omitted
            # the "ratio" check, so specifying only LE and gamma failed
            # with a KeyError instead of a clear assertion message.
            assert "ratio" in kwargs, ("you must specify ratio, LE and gamma "
                                       "or none of them")
            assert "LE" in kwargs, ("you must specify ratio, LE and gamma "
                                    "or none of them")
            assert "gamma" in kwargs, ("you must specify ratio, LE and gamma "
                                       "or none of them")
            self.ratio = float(kwargs["ratio"])
            self.LE = float(kwargs["LE"])
            self.gamma = float(kwargs["gamma"])

    def get_le(self):
        """Return the logical effort of the NAND gate."""
        return self.LE

    def get_gamma(self):
        """Return the parasitic delay of the NAND gate."""
        return self.gamma

    def set_size(self, Cin):
        """Split the input capacitance Cin into NMOS/PMOS widths by ratio."""
        self.Wn = Cin / (self.ratio + 1.0)
        self.Wp = Cin * self.ratio / (self.ratio + 1.0)

    def get_input_load(self):
        """Return the total input capacitance (sum of device widths)."""
        return self.Wn + self.Wp

    def get_type(self):
        """Return the circuit-type name, e.g. "nand2" for a 2-input gate."""
        return "nand" + str(self.N)

    def write_spice_params(self, pfile):
        """Write the transistor-width .param lines to *pfile*.

        file.write replaces the Python-2-only ``print >>`` statement so the
        module also parses under Python 3; output is byte-identical.
        """
        pfile.write(".param Wn=%.2f\n" % (self.Wn,))
        pfile.write(".param Wp=%.2f\n" % (self.Wp,))

    def __repr__(self):
        return ("Nand(N=%d,ratio=%.4f,LE=%.4f,gamma=%.4f,Wn=%.4f,Wp=%.4f)"
                % (self.N, self.ratio, self.LE, self.gamma, self.Wn, self.Wp))
def simulate_gate(gate, load, rload):
    """Run an HSPICE simulation of *gate* driving the given load.

    Writes the gate's sizing parameters plus the load/resistance values into
    parameters.sp, invokes HSPICE on the wiredriver deck, and returns the
    first row of measurements from the resulting .mt0 file.
    """
    # ``with`` guarantees the parameter file is flushed and closed before
    # HSPICE runs; file.write replaces the Python-2-only ``print >>``.
    with open('../spice/gates/parameters.sp', 'w') as paramsfile:
        gate.write_spice_params(paramsfile)
        paramsfile.write(".param Wload=%s\n" % (spiceutils.spice_format(load),))
        paramsfile.write(".param Rload=%s\n" % (spiceutils.spice_format(rload),))
        paramsfile.write(".include \"%s.ckt\"\n" % (gate.get_type(),))
    with open('/dev/null', 'w') as devnull:
        subprocess.call("hspice ../spice/gates/wiredriver.sp", stdout=devnull,
                        stderr=subprocess.STDOUT, shell=True)
    return spiceutils.read_mt0('wiredriver.mt0')[0]
| {
"content_hash": "71796facf01f7bc7972ce240807eb04f",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 82,
"avg_line_length": 28.81578947368421,
"alnum_prop": 0.5627853881278538,
"repo_name": "subhasis256/cache_spice",
"id": "48aa75945a0ee93ac3b1aaae195ec89f8bea25b9",
"size": "4380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/gates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37883"
},
{
"name": "Shell",
"bytes": "198"
},
{
"name": "SourcePawn",
"bytes": "10384"
}
],
"symlink_target": ""
} |
from unittest import skipUnless
from django.core.management.color import no_style
from django.db import connection
from django.test import TestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Article, ArticleTranslation, IndexTogetherSingleList
@ignore_warnings(category=RemovedInDjango20Warning)
class CreationIndexesTests(TestCase):
    """
    Test index handling by the to-be-deprecated connection.creation interface.
    """
    def test_index_together(self):
        # index_together on Article should yield exactly one index statement.
        index_sql = connection.creation.sql_indexes_for_model(Article, no_style())
        self.assertEqual(len(index_sql), 1)
    def test_index_together_single_list(self):
        # Test for using index_together with a single list (#22172)
        index_sql = connection.creation.sql_indexes_for_model(IndexTogetherSingleList, no_style())
        self.assertEqual(len(index_sql), 1)
    @skipUnless(connection.vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_text_indexes(self):
        """Test creation of PostgreSQL-specific text indexes (#12234)"""
        # Local import: the model is only needed for this PostgreSQL-only test.
        from .models import IndexedArticle
        index_sql = connection.creation.sql_indexes_for_model(IndexedArticle, no_style())
        self.assertEqual(len(index_sql), 5)
        self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
        self.assertIn('("body" text_pattern_ops)', index_sql[3])
        # unique=True and db_index=True should only create the varchar-specific
        # index (#19441).
        self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
    @skipUnless(connection.vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_virtual_relation_indexes(self):
        """Test indexes are not created for related objects"""
        index_sql = connection.creation.sql_indexes_for_model(Article, no_style())
        self.assertEqual(len(index_sql), 1)
class SchemaIndexesTests(TestCase):
    """
    Test index handling by the db.backends.schema infrastructure.
    """
    def test_index_together(self):
        editor = connection.schema_editor()
        index_sql = editor._model_indexes_sql(Article)
        self.assertEqual(len(index_sql), 1)
        # Ensure the index name is properly quoted
        self.assertIn(
            connection.ops.quote_name(
                editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
            ),
            index_sql[0]
        )

    def test_index_together_single_list(self):
        # Test for using index_together with a single list (#22172)
        index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
        self.assertEqual(len(index_sql), 1)

    @skipUnless(connection.vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_text_indexes(self):
        """Test creation of PostgreSQL-specific text indexes (#12234)"""
        # Local import: the model is only needed for this PostgreSQL-only test.
        from .models import IndexedArticle
        index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
        self.assertEqual(len(index_sql), 5)
        self.assertIn('("headline" varchar_pattern_ops)', index_sql[2])
        self.assertIn('("body" text_pattern_ops)', index_sql[3])
        # unique=True and db_index=True should only create the varchar-specific
        # index (#19441).
        self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])

    @skipUnless(connection.vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_virtual_relation_indexes(self):
        """Test indexes are not created for related objects"""
        index_sql = connection.schema_editor()._model_indexes_sql(Article)
        self.assertEqual(len(index_sql), 1)

    @skipUnless(connection.vendor == 'mysql', "This is a mysql-specific issue")
    def test_no_index_for_foreignkey(self):
        """
        MySQL on InnoDB already creates indexes automatically for foreign keys.
        (#14180).
        """
        # Close the cursor deterministically instead of leaking it.
        with connection.cursor() as cursor:
            storage = connection.introspection.get_storage_engine(
                cursor, ArticleTranslation._meta.db_table
            )
        if storage != "InnoDB":
            # TestCase has no skip() method; the previous self.skip(...) call
            # raised AttributeError instead of skipping. skipTest() is the
            # supported unittest API.
            self.skipTest("This test only applies to the InnoDB storage engine")
        index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
        self.assertEqual(index_sql, [])
| {
"content_hash": "a037e8b0f59514b6a6fe2cd1dfdb41bf",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 98,
"avg_line_length": 44.62626262626262,
"alnum_prop": 0.6727025803531009,
"repo_name": "pquentin/django",
"id": "1d60f215fec6a9f109a0f49eefc2ae66bc380450",
"size": "4418",
"binary": false,
"copies": "12",
"ref": "refs/heads/stable/1.8.x",
"path": "tests/indexes/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5553"
},
{
"name": "Python",
"bytes": "10306794"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
from django.apps import apps as global_apps
from django.contrib.contenttypes.management import create_contenttypes
from django.db import models, migrations
from django.utils.timezone import now
# Prefix prepended to every migrated comment so the reverse migration can
# recognize (and delete) exactly the rows created by migrate_old_content().
MARKER = '.. Migrated from django_comments_xtd.Comment model.\n\n'

# App label of the legacy comments application being migrated away from.
comments_app_name = 'django_comments_xtd'
# ContentType.model value identifying the old comments attached to jobs.
content_type = 'job'
def migrate_old_content(apps, schema_editor):
    """Copy public django_comments_xtd comments on jobs into JobReviewComment.

    Each migrated comment is prefixed with MARKER so the reverse migration
    can identify the rows created here.  If the legacy comments app is not
    installed, or no matching content type exists, this is a no-op.
    """
    try:
        Comment = apps.get_model(comments_app_name, 'XtdComment')
    except LookupError:
        # django_comments_xtd isn't installed.
        return

    create_contenttypes(apps.app_configs['contenttypes'])

    JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
    Job = apps.get_model('jobs', 'Job')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    db_alias = schema_editor.connection.alias

    try:
        # 'ContentType.name' is a property since Django 1.8, so the lookup
        # has to go through the `model` field instead.
        job_contenttype = ContentType.objects.using(db_alias).get(model=content_type)
    except ContentType.DoesNotExist:
        return

    old_comments = Comment.objects.using(db_alias).filter(
        content_type=job_contenttype.pk, is_public=True, is_removed=False,
    )

    job_cache = {}
    new_comments = []
    for old in old_comments:
        if old.object_pk not in job_cache:
            try:
                job_cache[old.object_pk] = Job.objects.using(db_alias).get(
                    pk=old.object_pk)
            except Job.DoesNotExist:
                # Comment points at a job that no longer exists; skip it.
                continue
        new_comments.append(JobReviewComment(
            job=job_cache[old.object_pk],
            comment=MARKER + old.comment,
            creator=old.user,
            created=old.submit_date,
            updated=now(),
        ))
    JobReviewComment.objects.using(db_alias).bulk_create(new_comments)
def delete_migrated_content(apps, schema_editor):
    """Reverse migration: delete every comment copied by migrate_old_content,
    identified by the MARKER prefix."""
    JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
    alias = schema_editor.connection.alias
    migrated = JobReviewComment.objects.using(alias).filter(
        comment__startswith=MARKER)
    migrated.delete()
class Migration(migrations.Migration):
    """Move legacy django_comments_xtd comments into JobReviewComment rows."""

    # Depend on the comments app's initial migration only when that app is
    # actually installed, so this migration works either way.
    dependencies = [
        ('contenttypes', '0001_initial'),
        ('jobs', '0011_jobreviewcomment'),
    ] + (
        [(comments_app_name, '0001_initial')]
        if global_apps.is_installed(comments_app_name)
        else []
    )

    operations = [
        migrations.RunPython(migrate_old_content, delete_migrated_content),
    ]
| {
"content_hash": "87eabd4d3dc96983661faf93fab14336",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 88,
"avg_line_length": 35.732394366197184,
"alnum_prop": 0.6621994481671265,
"repo_name": "python/pythondotorg",
"id": "ba9d78d4fcbddb60aef21ad9817004b5a2afec22",
"size": "2537",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "jobs/migrations/0012_auto_20170809_1849.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "Dockerfile",
"bytes": "229"
},
{
"name": "HTML",
"bytes": "498813"
},
{
"name": "JavaScript",
"bytes": "24050"
},
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1145343"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "198033"
}
],
"symlink_target": ""
} |
from xml.etree import ElementTree as ET
if __name__ == '__main__':
    # Print the attributes of every Tomcat user named "tomcat" from the
    # server's user registry.
    infile = '/etc/tomcat5.5/tomcat-users.xml'
    tree = ET.parse(infile)
    for user in tree.findall('./user'):
        if user.get('name') == 'tomcat':
            print(user.attrib)
| {
"content_hash": "8308cfa6131a1a73c8942c08bd3e21a8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 62,
"avg_line_length": 31,
"alnum_prop": 0.5985663082437276,
"repo_name": "lluxury/P_U_S_A",
"id": "0dd1132f4a72538ea19ca3c4092d51bea5956c08",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3_text/elementtree_tomcat_users.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "6274"
},
{
"name": "PLpgSQL",
"bytes": "1421"
},
{
"name": "Perl",
"bytes": "999"
},
{
"name": "Python",
"bytes": "1322191"
},
{
"name": "Roff",
"bytes": "6"
},
{
"name": "Shell",
"bytes": "1055"
}
],
"symlink_target": ""
} |
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
    """Scraper for the dizigold streaming site (Python 2 codebase).

    Resolves TV-show and episode pages to playable stream URLs.  All scrape
    methods are best-effort: any failure yields None / an empty result.
    """

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['dizigold.net', 'dizigold1.com']
        self.base_link = 'http://www.dizigold2.com'
        # %s placeholders: episode view id, audio language code.
        self.player_link = 'http://player.dizigold2.com/?id=%s&s=1&dil=%s'

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return the site-relative path for a show, or None if not found.

        Matches the cleaned title against the cached site listing.
        """
        try:
            result = cache.get(self.dizigold_tvcache, 120)
            tvshowtitle = cleantitle.get(tvshowtitle)
            result = [i[0] for i in result if tvshowtitle == i[1]][0]
            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        # Bug fix: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception to keep best-effort semantics
        # without hiding interpreter shutdown.
        except Exception:
            return

    def dizigold_tvcache(self):
        """Scrape the site's show index into (path, cleaned-title) pairs."""
        try:
            result = client.request(self.base_link)
            result = client.parseDOM(result, 'div', attrs = {'class': 'dizis'})[0]
            result = re.compile('href="(.+?)">(.+?)<').findall(result)
            # Strip scheme+host from links and HTML entities from titles.
            result = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in result]
            result = [(i[0], cleantitle.get(i[1])) for i in result]
            return result
        except Exception:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Build the site-relative URL for a specific season/episode."""
        if url is None: return
        url = '/%s/%01d-sezon/%01d-bolum' % (url.replace('/', ''), int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url

    def sources(self, url, hostDict, hostprDict):
        """Collect playable stream sources for an episode page.

        Tries each audio language variant of the player page, harvesting
        both embedded iframes (openload / ok.ru / vk) and direct
        file/label JSON entries (gvideo).
        """
        try:
            sources = []

            if url is None: return sources

            base_url = urlparse.urljoin(self.base_link, url)

            result = client.request(base_url)
            id = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(result)[0]

            for dil in ['tr', 'or', 'en']:
                query = self.player_link % (id, dil)
                result = client.request(query, referer=base_url)

                try:
                    url = client.parseDOM(result, 'iframe', ret='src')[-1]

                    if 'openload' in url:
                        host = 'openload.co' ; direct = False ; url = [{'url': url, 'quality': 'HD'}]
                    elif 'ok.ru' in url:
                        host = 'vk' ; direct = True ; url = directstream.odnoklassniki(url)
                    elif 'vk.com' in url:
                        host = 'vk' ; direct = True ; url = directstream.vk(url)
                    else: raise Exception()

                    for i in url: sources.append({'source': host, 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': direct, 'debridonly': False})
                except Exception:
                    pass

                try:
                    url = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)

                    links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
                    links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
                    links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]

                    for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
                except Exception:
                    pass

            return sources
        except Exception:
            return sources

    def resolve(self, url):
        """URLs from this scraper need no further resolution."""
        return url
| {
"content_hash": "5556d3134de3d2848dd1c4a962d9039e",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 165,
"avg_line_length": 34.34108527131783,
"alnum_prop": 0.5374717832957111,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "9719f27558df728675e4e40ccc04dd5e49643191",
"size": "4471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script.module.fantastic/lib/resources/lib/sources/en/dizigold.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
"""engine.SCons.Tool.f03
Tool-specific initialization for the generic Posix f03 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f03.py 2014/03/02 14:18:15 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f03_to_env
compilers = ['f03']
def generate(env):
    """Add Builders and construction variables for the f03 compiler to *env*."""
    add_all_to_env(env)
    add_f03_to_env(env)

    # Prefer a detected compiler; fall back to the generic 'f03' name.
    compiler = env.Detect(compilers) or 'f03'
    for var in ('F03', 'SHF03', 'FORTRAN', 'SHFORTRAN'):
        env[var] = compiler
def exists(env):
    """Report whether one of the known f03 compilers can be found."""
    detected = env.Detect(compilers)
    return detected
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "980f272587240d045ceb7750d3e62e65",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 119,
"avg_line_length": 32.714285714285715,
"alnum_prop": 0.7428432799611839,
"repo_name": "sftd/scons",
"id": "258db7066cb53713c6b414a904a18720d9d7033b",
"size": "2061",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "scons-local/SCons/Tool/f03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1913081"
}
],
"symlink_target": ""
} |
# Re-export the web server's public names at package level.
from .server import *
# NOTE(review): `async` became a reserved keyword in Python 3.7, so this
# relative import only parses on older interpreters — confirm target version.
from .async import asyncweb, webcoroutine

# Convenience aliases so callers can use this package's notify helpers
# without reaching into the asyncweb module.
notify = asyncweb.notify
notify_after = asyncweb.notify_after
"content_hash": "685a3c8983030bb403aa89cff6a0358d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 41,
"avg_line_length": 25.2,
"alnum_prop": 0.8015873015873016,
"repo_name": "jtackaberry/stagehand",
"id": "935afc248a886a58f7ad6911542c22a93170f518",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stagehand/web/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11070"
},
{
"name": "CoffeeScript",
"bytes": "8397"
},
{
"name": "JavaScript",
"bytes": "2294"
},
{
"name": "Python",
"bytes": "650370"
}
],
"symlink_target": ""
} |
from abc import ABCMeta
def _make_delegator_method(name):
def delegator(self, *args, **kwargs):
return getattr(self.__delegate__, name)(*args, **kwargs) # pragma: no cover
# todo: consider using __call__() instead of __delegate__
# in Python delegates are objects with __call__ method..
# so why not to use the following:
# return getattr(self(), name)(*args, **kwargs)
# ?
return delegator
# def _make_delegator_method_to_property(name):
# def delegator(self, *args, **kwargs):
# return getattr(self.__delegate__, name)
# return delegator
def _make_delegator_property(name):
return property(lambda self: getattr(self.__delegate__, name)) # pragma: no cover
def _is_property(name, cls):
return isinstance(getattr(cls, name, None), property)
class DelegatingMeta(ABCMeta):
    """Metaclass that implements a base's abstract methods and properties by
    forwarding to the instance's ``__delegate__`` object.

    NOTE(review): this intentionally *mutates* each base's
    ``__abstractmethods__`` (removing the property names) — callers appear to
    rely on that side effect; confirm before changing.
    """

    def __new__(mcs, name, bases, dct):
        # Abstract names declared as properties on any base.
        abstract_property_names = frozenset.union(
            *(frozenset(m for m in base.__abstractmethods__
                        if _is_property(m, base))
              for base in bases))
        # Strip the property names from each base's abstract set so only
        # plain methods remain below.
        for base in bases:
            base.__abstractmethods__ = frozenset(
                m for m in base.__abstractmethods__
                if not _is_property(m, base))
        abstract_method_names = frozenset.union(*(base.__abstractmethods__
                                                  for base in bases))
        # Bug fix: the original looped `for name in abstract_method_names`,
        # clobbering the `name` parameter — the class was then created with
        # the last abstract method's name as its __name__.
        for method_name in abstract_method_names:
            if method_name not in dct:
                dct[method_name] = _make_delegator_method(method_name)

        cls = super(DelegatingMeta, mcs).__new__(mcs, name, bases, dct)

        # Properties must be set on the class object (after creation) so the
        # descriptor protocol applies.
        for prop_name in abstract_property_names:
            if prop_name not in dct:
                setattr(cls, prop_name, _make_delegator_property(prop_name))
        return cls
# todo: finalize naming: Delegating, Delegate, actual_delegate, delegatee, delegator o_O ?
# We have the following players in this game:
# * MetaClass for Classes of Objects who delegates their implementation to aggregated object
# So who should be named how?
| {
"content_hash": "5ac09587177b8981a272f14a2d3f1057",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 119,
"avg_line_length": 35.47540983606557,
"alnum_prop": 0.6164510166358595,
"repo_name": "SergeyPirogov/selene",
"id": "4790afe07c99445354df0a84235383f75387a69b",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selene/common/delegation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "7229"
},
{
"name": "JavaScript",
"bytes": "11561"
},
{
"name": "PHP",
"bytes": "36"
},
{
"name": "Python",
"bytes": "165348"
},
{
"name": "Shell",
"bytes": "641"
}
],
"symlink_target": ""
} |
import Adafruit_BBIO.ADC as ADC
import time
ADC.setup()

# BeagleBone analog input pins wired to the infrared distance sensors.
sensor1 = "P9_40"  # left infrared sensor
sensor2 = "P9_39"
sensor3 = "P9_38"  # middle infrared sensor
sensor4 = "P9_37"
sensor5 = "P9_36"  # right infrared sensor

# Poll the middle sensor ~20 times per second and print its raw reading.
while True:
    # Bug fix: `reading3` was printed but its ADC.read() call had been
    # commented out, raising NameError on the first iteration.
    reading3 = ADC.read(sensor3)
    print("reading3 " + str(reading3))
    time.sleep(0.05)
| {
"content_hash": "d2cb71ab25db9aa80305cec8595358e6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 42,
"avg_line_length": 24.095238095238095,
"alnum_prop": 0.6620553359683794,
"repo_name": "kohloderso/quickbot_bbb",
"id": "2332ee61b8959fb70e63a033ce9c6851040b1305",
"size": "506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myQuickbotCode/sensorTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44081"
},
{
"name": "Shell",
"bytes": "278"
}
],
"symlink_target": ""
} |
from main import db
from datetime import datetime
import pytz
from sqlalchemy import orm
class Reservation(db.Model):
    """A calendar reservation whose start/end times are stored in UTC.

    Naive datetimes passed to the constructor are assumed to already be in
    UTC; aware datetimes are converted to UTC before storage.
    """

    END_BEFORE_START_ERROR_MESSAGE = "End date cannot be before start date"

    __tablename__ = 'reservations'

    id = db.Column(db.Integer, primary_key=True)
    startTime = db.Column(db.DateTime)   # stored in UTC
    endTime = db.Column(db.DateTime)     # stored in UTC
    allDay = db.Column(db.Boolean)
    title = db.Column(db.String(80))
    description = db.Column(db.String(256))
    userId = db.Column(db.Integer, db.ForeignKey('users.id'))
    reminderMailSent = db.Column(db.Boolean)

    def __init__(self, title, start_date, end_date, all_day, user_id, description=""):
        """Create a reservation.

        Raises ValueError when either date is not a datetime, or when
        end_date precedes start_date (message: END_BEFORE_START_ERROR_MESSAGE).
        """
        self.title = title
        self.description = description
        self.allDay = all_day
        self.userId = user_id
        self.reminderMailSent = False

        # Validation + UTC normalization factored into one helper (the
        # original duplicated this logic for both dates).
        start_date = self._normalize_utc(start_date, "start_date")
        end_date = self._normalize_utc(end_date, "end_date")

        if end_date < start_date:
            raise ValueError(Reservation.END_BEFORE_START_ERROR_MESSAGE)

        self.startTime = start_date
        self.endTime = end_date

    @staticmethod
    def _normalize_utc(value, label):
        """Validate that *value* is a datetime and return it in UTC.

        Naive values are localized to UTC; aware values are converted.
        Accepts datetime subclasses (a backward-compatible generalization of
        the original exact-type check).
        """
        if not isinstance(value, datetime):
            raise ValueError("%s must be a datetime object" % label)
        if value.tzinfo is None or value.tzinfo.utcoffset(value) is None:
            return pytz.utc.localize(value)
        if value.tzinfo != pytz.utc:
            return value.astimezone(pytz.utc)
        return value

    @orm.reconstructor
    def init_on_load(self):
        """Re-attach UTC tzinfo to naive datetimes loaded from the database."""
        if self.startTime.tzinfo is None or self.startTime.tzinfo.utcoffset(self.startTime) is None:
            self.startTime = pytz.utc.localize(self.startTime)
        if self.endTime.tzinfo is None or self.endTime.tzinfo.utcoffset(self.endTime) is None:
            self.endTime = pytz.utc.localize(self.endTime)

    def to_dict(self):
        """Return a plain dict of the instance's attributes, without
        SQLAlchemy bookkeeping state.

        Bug fix: the previous implementation returned the *live*
        ``self.__dict__`` and deleted '_sa_instance_state' from it, which
        corrupted SQLAlchemy's session tracking for the instance (and
        shadowed the builtin ``dict``).  A copy is returned instead.
        """
        data = dict(self.__dict__)
        data.pop('_sa_instance_state', None)
        return data

    @staticmethod
    def get_required_attributes():
        """Attribute names that must be supplied when creating a reservation."""
        return ['title', 'startTime', 'endTime', 'allDay']

    @staticmethod
    def get_all_attributes():
        """All client-settable attribute names."""
        return ['title', 'startTime', 'endTime', 'allDay', 'description']
| {
"content_hash": "abcc39bc48de8a20bbbb2e7f4f4909ae",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 100,
"avg_line_length": 37.529411764705884,
"alnum_prop": 0.646551724137931,
"repo_name": "patklaey/ZermattReservationAPI",
"id": "652dbd42d91f07467313e5213a190a9213a3337c",
"size": "2552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DB/Reservation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51892"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.