# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import errno
import io
import os
import platform
import pwd
import shutil
import socket
import tarfile
import tempfile
import hashlib
import eventlet
import nova.conf
import nova.context
from contextlib import closing
from nova import exception
from nova import i18n
from nova import image
from nova import network
from nova.network import model as network_model
from nova import objects
from nova.virt import driver
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
import pylxd
from pylxd import exceptions as lxd_exceptions
from nova.virt.lxd import vif as lxd_vif
from nova.virt.lxd import common
from nova.virt.lxd import flavor
from nova.virt.lxd import storage
from nova.api.metadata import base as instance_metadata
from nova.objects import fields as obj_fields
from nova.objects import migrate_data
from nova.virt import configdrive
from nova.compute import power_state
from nova.compute import vm_states
from nova.virt import hardware
from oslo_utils import units
from oslo_serialization import jsonutils
from nova import utils
import psutil
from oslo_concurrency import lockutils
from nova.compute import task_states
from oslo_utils import excutils
from nova.virt import firewall
_ = i18n._
lxd_opts = [
cfg.StrOpt('root_dir',
default='/var/lib/lxd/',
help='Default LXD directory'),
cfg.StrOpt('pool',
default=None,
help='LXD Storage pool to use with LXD >= 2.9'),
cfg.IntOpt('timeout',
default=-1,
help='Default LXD timeout'),
cfg.BoolOpt('allow_live_migration',
default=False,
                help='Determine whether to allow live migration'),
]
CONF = cfg.CONF
CONF.register_opts(lxd_opts, 'lxd')
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
MAX_CONSOLE_BYTES = 100 * units.Ki
NOVA_CONF = nova.conf.CONF
ACCEPTABLE_IMAGE_FORMATS = {'raw', 'root-tar', 'squashfs'}
BASE_DIR = os.path.join(
CONF.instances_path, CONF.image_cache_subdirectory_name)
def _last_bytes(file_like_object, num):
"""Return num bytes from the end of the file, and remaning byte count.
:param file_like_object: The file to read
:param num: The number of bytes to return
:returns: (data, remaining)
"""
try:
file_like_object.seek(-num, os.SEEK_END)
except IOError as e:
# seek() fails with EINVAL when trying to go before the start of
# the file. It means that num is larger than the file size, so
# just go to the start.
if e.errno == errno.EINVAL:
file_like_object.seek(0, os.SEEK_SET)
else:
raise
remaining = file_like_object.tell()
return (file_like_object.read(), remaining)
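# Illustrative use of _last_bytes (the path below is an example only): this is
# how get_console_output() further down reads the tail of a console log.
#
#     with open('/var/log/lxd/instance-00000001/console.log', 'rb') as f:
#         data, remaining = _last_bytes(f, MAX_CONSOLE_BYTES)
#     # 'remaining' is how many bytes of the file precede the returned chunk.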
def _neutron_failed_callback(event_name, instance):
LOG.error("Neutron Reported failure on event "
"{event} for instance {uuid}"
.format(event=event_name, uuid=instance.name),
instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_cpu_info():
"""Get cpu information.
This method executes lscpu and then parses the output,
returning a dictionary of information.
"""
cpuinfo = {}
out, err = utils.execute('lscpu')
if err:
msg = _("Unable to parse lscpu output.")
raise exception.NovaException(msg)
cpu = [line.strip('\n') for line in out.splitlines()]
for line in cpu:
if line.strip():
name, value = line.split(':', 1)
name = name.strip().lower()
cpuinfo[name] = value.strip()
    with open('/proc/cpuinfo', 'r') as f:
        features = [line.strip('\n') for line in f.readlines()]
for line in features:
if line.strip():
if line.startswith('flags'):
name, value = line.split(':', 1)
name = name.strip().lower()
cpuinfo[name] = value.strip()
return cpuinfo
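# For reference, the resulting dictionary is keyed by the lower-cased lscpu
# field names; the values below are illustrative only:
#
#     {'architecture': 'x86_64',
#      'socket(s)': '1',
#      'core(s) per socket': '4',
#      'thread(s) per core': '2',
#      'model name': 'Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz',
#      'flags': 'fpu vme de pse tsc msr ...'}
#
# get_available_resource() relies on the 'socket(s)', 'core(s) per socket'
# and 'thread(s) per core' keys to compute the vCPU count.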
def _get_ram_usage():
"""Get memory info."""
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
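# /proc/meminfo reports sizes in kiB (e.g. 'MemTotal:  16330148 kB'), which is
# why the helper multiplies by 1024 to return byte values; 'used' is total
# minus free, buffers and page cache.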
def _get_fs_info(path):
"""Get free/used/total disk space."""
hddinfo = os.statvfs(path)
total = hddinfo.f_blocks * hddinfo.f_bsize
available = hddinfo.f_bavail * hddinfo.f_bsize
used = total - available
return {'total': total,
'available': available,
'used': used}
def _get_zpool_info(pool_or_dataset):
"""Get the free/used/total diskspace in a zfs pool or dataset.
A dataset is distinguished by having a '/' in the string.
:param pool_or_dataset: The string name of the pool or dataset
:type pool_or_dataset: str
:returns: dictionary with keys 'total', 'available', 'used'
:rtype: Dict[str, int]
:raises: :class:`exception.NovaException`
    :raises: :class:`oslo_concurrency.processutils.ProcessExecutionError`
:raises: :class:`OSError`
"""
def _get_zfs_attribute(cmd, attribute):
value, err = utils.execute(cmd, 'list',
'-o', attribute,
'-H',
'-p',
pool_or_dataset,
run_as_root=True)
if err:
msg = _("Unable to parse zfs output.")
raise exception.NovaException(msg)
value = int(value.strip())
return value
if '/' in pool_or_dataset:
# it's a dataset:
# for zfs datasets we only have 'available' and 'used' and so need to
# construct the total from available and used.
used = _get_zfs_attribute('zfs', 'used')
available = _get_zfs_attribute('zfs', 'available')
total = available + used
else:
# otherwise it's a zpool
total = _get_zfs_attribute('zpool', 'size')
used = _get_zfs_attribute('zpool', 'alloc')
available = _get_zfs_attribute('zpool', 'free')
return {'total': total,
'available': available,
'used': used}
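# The helper above shells out to the zfs/zpool CLI; the equivalent commands
# are roughly (run as root; '-H' drops headers, '-p' prints exact byte
# values):
#
#     zfs list -o used -H -p <pool>/<dataset>
#     zfs list -o available -H -p <pool>/<dataset>
#     zpool list -o size -H -p <pool>
#     zpool list -o alloc -H -p <pool>
#     zpool list -o free -H -p <pool>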
def _get_power_state(lxd_state):
"""Take a lxd state code and translate it to nova power state."""
state_map = [
(power_state.RUNNING, {100, 101, 103, 200}),
(power_state.SHUTDOWN, {102, 104, 107}),
(power_state.NOSTATE, {105, 106, 401}),
(power_state.CRASHED, {108, 400}),
(power_state.SUSPENDED, {109, 110, 111}),
]
for nova_state, lxd_states in state_map:
if lxd_state in lxd_states:
return nova_state
raise ValueError('Unknown LXD power state: {}'.format(lxd_state))
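# Example, per the LXD API status codes: 103 ('Running') maps to
# power_state.RUNNING, 102 ('Stopped') to power_state.SHUTDOWN, and an
# unrecognised code raises ValueError.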
def _sync_glance_image_to_lxd(client, context, image_ref):
"""Sync an image from glance to LXD image store.
The image from glance can't go directly into the LXD image store,
as LXD needs some extra metadata connected to it.
The image is stored in the LXD image store with an alias to
the image_ref. This way, it will only copy over once.
"""
lock_path = os.path.join(CONF.instances_path, 'locks')
with lockutils.lock(
lock_path, external=True,
lock_file_prefix='lxd-image-{}'.format(image_ref)):
        # NOTE(jamespage): Re-query by image_ref to ensure that another
        # process did not sneak in front of this one and create the same
        # image already.
try:
client.images.get_by_alias(image_ref)
return
except lxd_exceptions.LXDAPIException as e:
if e.response.status_code != 404:
raise
try:
ifd, image_file = tempfile.mkstemp()
mfd, manifest_file = tempfile.mkstemp()
image = IMAGE_API.get(context, image_ref)
if image.get('disk_format') not in ACCEPTABLE_IMAGE_FORMATS:
raise exception.ImageUnacceptable(
image_id=image_ref, reason=_("Bad image format"))
IMAGE_API.download(context, image_ref, dest_path=image_file)
            # It is possible that LXD already has the same image but NOT
            # aliased, as a result of a previous publish/export operation
            # (snapshot from OpenStack). In that case an attempt to add it
            # again (implicitly via an instance launch from the affected
            # image) will produce the LXD error "Image with same fingerprint
            # already exists". The error does not carry a unique identifier,
            # so we calculate the image fingerprint the same way LXD does and
            # check whether LXD already has an image with that fingerprint.
            # If it does, we add an alias to that image instead of
            # re-importing it.
def add_alias():
def lxdimage_fingerprint():
def sha256_file():
sha256 = hashlib.sha256()
with closing(open(image_file, 'rb')) as f:
for block in iter(lambda: f.read(65536), b''):
sha256.update(block)
return sha256.hexdigest()
return sha256_file()
fingerprint = lxdimage_fingerprint()
if client.images.exists(fingerprint):
LOG.info("Image with fingerprint {fingerprint} already "
"exists but not accessible by alias {alias}, "
"add alias"
.format(fingerprint=fingerprint, alias=image_ref))
lxdimage = client.images.get(fingerprint)
lxdimage.add_alias(image_ref, '')
return True
return False
if add_alias():
return
            # Up-to-date LXD publish/export operations produce images that
            # already contain /rootfs and metadata.yaml in the exported file.
            # We should not pass metadata explicitly in that case, as the
            # imported image would be unusable: LXD would assume it contains
            # a rootfs and would not extract the embedded /rootfs properly.
            # Try to detect whether the image content already has metadata
            # and, if so, do not pass explicit metadata.
def imagefile_has_metadata(image_file):
try:
with closing(tarfile.TarFile.open(
name=image_file, mode='r:*')) as tf:
try:
tf.getmember('metadata.yaml')
return True
except KeyError:
pass
except tarfile.ReadError:
pass
return False
if imagefile_has_metadata(image_file):
LOG.info("Image {alias} already has metadata, "
"skipping metadata injection..."
.format(alias=image_ref))
with open(image_file, 'rb') as image:
image = client.images.create(image, wait=True)
else:
metadata = {
'architecture': image.get(
'hw_architecture',
obj_fields.Architecture.from_host()),
'creation_date': int(os.stat(image_file).st_ctime)}
metadata_yaml = jsonutils.dumps(
metadata, sort_keys=True, indent=4,
separators=(',', ': '),
ensure_ascii=False).encode('utf-8') + b"\n"
tarball = tarfile.open(manifest_file, "w:gz")
tarinfo = tarfile.TarInfo(name='metadata.yaml')
tarinfo.size = len(metadata_yaml)
tarball.addfile(tarinfo, io.BytesIO(metadata_yaml))
tarball.close()
with open(manifest_file, 'rb') as manifest:
with open(image_file, 'rb') as image:
image = client.images.create(
image, metadata=manifest,
wait=True)
image.add_alias(image_ref, '')
finally:
os.close(ifd)
os.close(mfd)
os.unlink(image_file)
os.unlink(manifest_file)
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
"""Wrapper to automatically set root_helper in brick calls.
:param multipath: A boolean indicating whether the connector can
support multipath.
:param enforce_multipath: If True, it raises exception when multipath=True
is specified but multipathd is not running.
If False, it falls back to multipath=False
when multipathd is not running.
"""
root_helper = utils.get_root_helper()
return connector.get_connector_properties(root_helper,
CONF.my_ip,
multipath,
enforce_multipath)
def brick_get_connector(protocol, driver=None,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
root_helper = utils.get_root_helper()
if protocol.upper() == "RBD":
kwargs['do_local_attach'] = True
return connector.InitiatorConnector.factory(
protocol, root_helper,
driver=driver,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
class LXDLiveMigrateData(migrate_data.LiveMigrateData):
"""LiveMigrateData for LXD."""
VERSION = '1.0'
fields = {}
class LXDDriver(driver.ComputeDriver):
"""A LXD driver for nova.
LXD is a system container hypervisor. LXDDriver provides LXD
functionality to nova. For more information about LXD, see
http://www.ubuntu.com/cloud/lxd
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
"supports_migrate_to_same_host": False,
"supports_attach_interface": True,
"supports_multiattach": False,
}
def __init__(self, virtapi):
super(LXDDriver, self).__init__(virtapi)
self.client = None # Initialized by init_host
self.host = NOVA_CONF.host
self.network_api = network.API()
self.vif_driver = lxd_vif.LXDGenericVifDriver()
self.firewall_driver = firewall.load_driver(
default='nova.virt.firewall.NoopFirewallDriver')
def init_host(self, host):
"""Initialize the driver on the host.
The pylxd Client is initialized. This initialization may raise
an exception if the LXD instance cannot be found.
The `host` argument is ignored here, as the LXD instance is
assumed to be on the same system as the compute worker
running this code. This is by (current) design.
See `nova.virt.driver.ComputeDriver.init_host` for more
information.
"""
try:
self.client = pylxd.Client()
except lxd_exceptions.ClientConnectionFailed as e:
msg = _("Unable to connect to LXD daemon: {}").format(e)
raise exception.HostNotFound(msg)
self._after_reboot()
def cleanup_host(self, host):
"""Clean up the host.
`nova.virt.ComputeDriver` defines this method. It is overridden
here to be explicit that there is nothing to be done, as
`init_host` does not create any resources that would need to be
cleaned up.
See `nova.virt.driver.ComputeDriver.cleanup_host` for more
information.
"""
def get_info(self, instance):
"""Return an InstanceInfo object for the instance."""
try:
container = self.client.containers.get(instance.name)
except lxd_exceptions.NotFound:
raise exception.InstanceNotFound(instance_id=instance.uuid)
state = container.state()
return hardware.InstanceInfo(
state=_get_power_state(state.status_code))
def list_instances(self):
"""Return a list of all instance names."""
return [c.name for c in self.client.containers.all()]
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None):
"""Create a new lxd container as a nova instance.
Creating a new container requires a number of steps. First, the
image is fetched from glance, if needed. Next, the network is
connected. A profile is created in LXD, and then the container
is created and started.
See `nova.virt.driver.ComputeDriver.spawn` for more
information.
"""
try:
self.client.containers.get(instance.name)
raise exception.InstanceExists(name=instance.name)
except lxd_exceptions.LXDAPIException as e:
if e.response.status_code != 404:
raise # Re-raise the exception if it wasn't NotFound
instance_dir = common.InstanceAttributes(instance).instance_dir
if not os.path.exists(instance_dir):
fileutils.ensure_tree(instance_dir)
# Check to see if LXD already has a copy of the image. If not,
# fetch it.
try:
self.client.images.get_by_alias(instance.image_ref)
except lxd_exceptions.LXDAPIException as e:
if e.response.status_code != 404:
raise
_sync_glance_image_to_lxd(
self.client, context, instance.image_ref)
# Plug in the network
if network_info:
timeout = CONF.vif_plugging_timeout
if (utils.is_neutron() and timeout):
events = [('network-vif-plugged', vif['id'])
for vif in network_info if not vif.get(
'active', True)]
else:
events = []
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=_neutron_failed_callback):
self.plug_vifs(instance, network_info)
except eventlet.timeout.Timeout:
LOG.warn("Timeout waiting for vif plugging callback for "
"instance {uuid}"
.format(uuid=instance['name']))
if CONF.vif_plugging_is_fatal:
self.destroy(
context, instance, network_info, block_device_info)
raise exception.InstanceDeployFailure(
'Timeout waiting for vif plugging',
instance_id=instance['name'])
# Create the profile
try:
profile = flavor.to_profile(
self.client, instance, network_info, block_device_info)
except lxd_exceptions.LXDAPIException as e:
with excutils.save_and_reraise_exception():
self.cleanup(
context, instance, network_info, block_device_info)
# Create the container
container_config = {
'name': instance.name,
'profiles': [profile.name],
'source': {
'type': 'image',
'alias': instance.image_ref,
},
}
try:
container = self.client.containers.create(
container_config, wait=True)
except lxd_exceptions.LXDAPIException as e:
with excutils.save_and_reraise_exception():
self.cleanup(
context, instance, network_info, block_device_info)
lxd_config = self.client.host_info
storage.attach_ephemeral(
self.client, block_device_info, lxd_config, instance)
if configdrive.required_by(instance):
configdrive_path = self._add_configdrive(
context, instance,
injected_files, admin_password,
network_info)
profile = self.client.profiles.get(instance.name)
config_drive = {
'configdrive': {
'path': '/config-drive',
'source': configdrive_path,
'type': 'disk',
'readonly': 'True',
}
}
profile.devices.update(config_drive)
profile.save()
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
self.firewall_driver.instance_filter(
instance, network_info)
container.start(wait=True)
self.firewall_driver.apply_instance_filter(
instance, network_info)
except lxd_exceptions.LXDAPIException as e:
with excutils.save_and_reraise_exception():
self.cleanup(
context, instance, network_info, block_device_info)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy a running instance.
Since the profile and the instance are created on `spawn`, it is
safe to delete them together.
See `nova.virt.driver.ComputeDriver.destroy` for more
information.
"""
lock_path = os.path.join(CONF.instances_path, 'locks')
with lockutils.lock(
lock_path, external=True,
lock_file_prefix='lxd-container-{}'.format(instance.name)):
# TODO(sahid): Each time we get a container we should
# protect it by using a mutex.
try:
container = self.client.containers.get(instance.name)
if container.status != 'Stopped':
container.stop(wait=True)
container.delete(wait=True)
if (instance.vm_state == vm_states.RESCUED):
rescued_container = self.client.containers.get(
'{}-rescue'.format(instance.name))
if rescued_container.status != 'Stopped':
rescued_container.stop(wait=True)
rescued_container.delete(wait=True)
except lxd_exceptions.LXDAPIException as e:
if e.response.status_code == 404:
LOG.warning("Failed to delete instance. "
"Container does not exist for {instance}."
.format(instance=instance.name))
else:
raise
finally:
self.cleanup(
context, instance, network_info, block_device_info)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Clean up the filesystem around the container.
See `nova.virt.driver.ComputeDriver.cleanup` for more
information.
"""
if destroy_vifs:
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
lxd_config = self.client.host_info
storage.detach_ephemeral(self.client,
block_device_info,
lxd_config,
instance)
name = pwd.getpwuid(os.getuid()).pw_name
container_dir = common.InstanceAttributes(instance).instance_dir
if os.path.exists(container_dir):
utils.execute(
'chown', '-R', '{}:{}'.format(name, name),
container_dir, run_as_root=True)
shutil.rmtree(container_dir)
try:
self.client.profiles.get(instance.name).delete()
except lxd_exceptions.LXDAPIException as e:
if e.response.status_code == 404:
LOG.warning("Failed to delete instance. "
"Profile does not exist for {instance}."
.format(instance=instance.name))
else:
raise
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the container.
Nova *should* not execute this on a stopped container, but
the documentation specifically says that if it is called, the
container should always return to a 'Running' state.
        See `nova.virt.driver.ComputeDriver.reboot` for more
information.
"""
container = self.client.containers.get(instance.name)
container.restart(force=True, wait=True)
def get_console_output(self, context, instance):
"""Get the output of the container console.
See `nova.virt.driver.ComputeDriver.get_console_output` for more
information.
"""
instance_attrs = common.InstanceAttributes(instance)
console_path = instance_attrs.console_path
if not os.path.exists(console_path):
return ''
uid = pwd.getpwuid(os.getuid()).pw_uid
utils.execute(
'chown', '%s:%s' % (uid, uid), console_path, run_as_root=True)
utils.execute(
'chmod', '755', instance_attrs.container_path, run_as_root=True)
with open(console_path, 'rb') as f:
log_data, _ = _last_bytes(f, MAX_CONSOLE_BYTES)
return log_data
def get_host_ip_addr(self):
return CONF.my_ip
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach block device to a nova instance.
Attaching a block device to a container requires a couple of steps.
First os_brick connects the cinder volume to the host. Next,
the block device is added to the containers profile. Next, the
apparmor profile for the container is updated to allow mounting
'ext4' block devices. Finally, the profile is saved.
The block device must be formatted as ext4 in order to mount
the block device inside the container.
        See `nova.virt.driver.ComputeDriver.attach_volume` for
        more information.
"""
profile = self.client.profiles.get(instance.name)
protocol = connection_info['driver_volume_type']
storage_driver = brick_get_connector(protocol)
device_info = storage_driver.connect_volume(
connection_info['data'])
disk = os.stat(os.path.realpath(device_info['path']))
vol_id = connection_info['data']['volume_id']
disk_device = {
vol_id: {
'path': mountpoint,
'major': '%s' % os.major(disk.st_rdev),
'minor': '%s' % os.minor(disk.st_rdev),
'type': 'unix-block'
}
}
profile.devices.update(disk_device)
# XXX zulcss (10 Jul 2016) - fused is currently not supported.
profile.config.update({'raw.apparmor': 'mount fstype=ext4,'})
profile.save()
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Detach block device from a nova instance.
        First the volume id is deleted from the profile, and the
        profile is saved. Then os-brick disconnects the volume
        from the host.
        See `nova.virt.driver.ComputeDriver.detach_volume` for
more information.
"""
profile = self.client.profiles.get(instance.name)
vol_id = connection_info['data']['volume_id']
if vol_id in profile.devices:
del profile.devices[vol_id]
profile.save()
protocol = connection_info['driver_volume_type']
storage_driver = brick_get_connector(protocol)
storage_driver.disconnect_volume(connection_info['data'], None)
def attach_interface(self, context, instance, image_meta, vif):
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, vif)
profile = self.client.profiles.get(instance.name)
net_device = lxd_vif.get_vif_devname(vif)
config_update = {
net_device: {
'nictype': 'physical',
'hwaddr': vif['address'],
'parent': lxd_vif.get_vif_internal_devname(vif),
'type': 'nic',
}
}
profile.devices.update(config_update)
profile.save(wait=True)
def detach_interface(self, context, instance, vif):
try:
profile = self.client.profiles.get(instance.name)
devname = lxd_vif.get_vif_devname(vif)
# NOTE(jamespage): Attempt to remove device using
# new style tap naming
if devname in profile.devices:
del profile.devices[devname]
profile.save(wait=True)
else:
# NOTE(jamespage): For upgrades, scan devices
# and attempt to identify
# using mac address as the
# device will *not* have a
# consistent name
for key, val in profile.devices.items():
if val.get('hwaddr') == vif['address']:
del profile.devices[key]
profile.save(wait=True)
break
except lxd_exceptions.NotFound:
            # This method is called when an instance gets destroyed. Nova
            # may receive a "vif-delete-event" after the instance has been
            # destroyed, in which case the LXD profile no longer exists.
LOG.debug("lxd profile for instance {instance} does not exist. "
"The instance probably got destroyed before this method "
"got called.".format(instance=instance.name))
self.vif_driver.unplug(instance, vif)
def migrate_disk_and_power_off(
self, context, instance, dest, _flavor, network_info,
block_device_info=None, timeout=0, retry_interval=0):
if CONF.my_ip == dest:
# Make sure that the profile for the container is up-to-date to
# the actual state of the container.
flavor.to_profile(
self.client, instance, network_info, block_device_info,
update=True)
container = self.client.containers.get(instance.name)
container.stop(wait=True)
return ''
def snapshot(self, context, instance, image_id, update_task_state):
lock_path = str(os.path.join(CONF.instances_path, 'locks'))
with lockutils.lock(
lock_path, external=True,
lock_file_prefix='lxd-container-{}'.format(instance.name)):
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
container = self.client.containers.get(instance.name)
if container.status != 'Stopped':
container.stop(wait=True)
image = container.publish(wait=True)
container.start(wait=True)
update_task_state(
task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot = IMAGE_API.get(context, image_id)
data = image.export()
image_meta = {'name': snapshot['name'],
'disk_format': 'raw',
'container_format': 'bare'}
IMAGE_API.update(context, image_id, image_meta, data)
def pause(self, instance):
"""Pause container.
See `nova.virt.driver.ComputeDriver.pause` for more
information.
"""
container = self.client.containers.get(instance.name)
container.freeze(wait=True)
def unpause(self, instance):
"""Unpause container.
See `nova.virt.driver.ComputeDriver.unpause` for more
information.
"""
container = self.client.containers.get(instance.name)
container.unfreeze(wait=True)
def suspend(self, context, instance):
"""Suspend container.
See `nova.virt.driver.ComputeDriver.suspend` for more
information.
"""
self.pause(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume container.
See `nova.virt.driver.ComputeDriver.resume` for more
information.
"""
self.unpause(instance)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
try:
state = self.get_info(instance).state
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
self.power_on(context, instance, network_info, block_device_info)
except (exception.InternalError, exception.InstanceNotFound):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue a LXD container.
From the perspective of nova, rescuing a instance requires a number of
steps. First, the failed container is stopped, and then this method is
called.
So the original container is already stopped, and thus, next,
'-rescue', is appended to the failed container's name, this is done so
the container can be unrescued. The container's profile is updated with
the rootfs of the failed container. Finally, a new container is created
and started.
See 'nova.virt.driver.ComputeDriver.rescue` for more
information.
"""
rescue = '%s-rescue' % instance.name
container = self.client.containers.get(instance.name)
container_rootfs = os.path.join(
nova.conf.CONF.lxd.root_dir, 'containers', instance.name, 'rootfs')
container.rename(rescue, wait=True)
profile = self.client.profiles.get(instance.name)
rescue_dir = {
'rescue': {
'source': container_rootfs,
'path': '/mnt',
'type': 'disk',
}
}
profile.devices.update(rescue_dir)
profile.save()
container_config = {
'name': instance.name,
'profiles': [profile.name],
'source': {
'type': 'image',
'alias': instance.image_ref,
}
}
container = self.client.containers.create(
container_config, wait=True)
container.start(wait=True)
def unrescue(self, instance, network_info):
"""Unrescue an instance.
Unrescue a container that has previously been rescued.
        First the rescue container is removed. Next the rootfs
        of the defective container is removed from the profile.
        Finally the container is renamed and started.
        See `nova.virt.driver.ComputeDriver.unrescue` for more
information.
"""
rescue = '%s-rescue' % instance.name
container = self.client.containers.get(instance.name)
if container.status != 'Stopped':
container.stop(wait=True)
container.delete(wait=True)
profile = self.client.profiles.get(instance.name)
del profile.devices['rescue']
profile.save()
container = self.client.containers.get(rescue)
container.rename(instance.name, wait=True)
container.start(wait=True)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off an instance
        See `nova.virt.driver.ComputeDriver.power_off` for more
information.
"""
container = self.client.containers.get(instance.name)
if container.status != 'Stopped':
container.stop(wait=True)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on an instance
        See `nova.virt.driver.ComputeDriver.power_on` for more
information.
"""
container = self.client.containers.get(instance.name)
if container.status != 'Running':
container.start(wait=True)
def get_available_resource(self, nodename):
"""Aggregate all available system resources.
        See `nova.virt.driver.ComputeDriver.get_available_resource`
for more information.
"""
cpuinfo = _get_cpu_info()
cpu_info = {
'arch': platform.uname()[5],
'features': cpuinfo.get('flags', 'unknown'),
'model': cpuinfo.get('model name', 'unknown'),
'topology': {
'sockets': cpuinfo['socket(s)'],
'cores': cpuinfo['core(s) per socket'],
'threads': cpuinfo['thread(s) per core'],
},
'vendor': cpuinfo.get('vendor id', 'unknown'),
}
cpu_topology = cpu_info['topology']
vcpus = (int(cpu_topology['cores']) *
int(cpu_topology['sockets']) *
int(cpu_topology['threads']))
local_memory_info = _get_ram_usage()
lxd_config = self.client.host_info
# NOTE(jamespage): ZFS storage report is very LXD 2.0.x
# centric and will need to be updated
# to support LXD storage pools
storage_driver = lxd_config['environment']['storage']
if storage_driver == 'zfs':
# NOTE(ajkavanagh) - BUG/1782329 - this is temporary until storage
# pools is implemented. LXD 3 removed the storage.zfs_pool_name
# key from the config. So, if it fails, we need to grab the
# configured storage pool and use that as the name instead.
try:
pool_name = lxd_config['config']['storage.zfs_pool_name']
except KeyError:
pool_name = CONF.lxd.pool
local_disk_info = _get_zpool_info(pool_name)
else:
local_disk_info = _get_fs_info(CONF.lxd.root_dir)
data = {
'vcpus': vcpus,
'memory_mb': local_memory_info['total'] // units.Mi,
'memory_mb_used': local_memory_info['used'] // units.Mi,
'local_gb': local_disk_info['total'] // units.Gi,
'local_gb_used': local_disk_info['used'] // units.Gi,
'vcpus_used': 0,
'hypervisor_type': 'lxd',
'hypervisor_version': '011',
'cpu_info': jsonutils.dumps(cpu_info),
'hypervisor_hostname': socket.gethostname(),
'supported_instances': [
(obj_fields.Architecture.I686, obj_fields.HVType.LXD,
obj_fields.VMMode.EXE),
(obj_fields.Architecture.X86_64, obj_fields.HVType.LXD,
obj_fields.VMMode.EXE),
(obj_fields.Architecture.I686, obj_fields.HVType.LXC,
obj_fields.VMMode.EXE),
(obj_fields.Architecture.X86_64, obj_fields.HVType.LXC,
obj_fields.VMMode.EXE),
],
'numa_topology': None,
}
return data
def refresh_instance_security_rules(self, instance):
return self.firewall_driver.refresh_instance_security_rules(
instance)
def ensure_filtering_rules_for_instance(self, instance, network_info):
return self.firewall_driver.ensure_filtering_rules_for_instance(
instance, network_info)
def filter_defer_apply_on(self):
return self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
return self.firewall_driver.filter_defer_apply_off()
def unfilter_instance(self, instance, network_info):
return self.firewall_driver.unfilter_instance(
instance, network_info)
def get_host_uptime(self):
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def plug_vifs(self, instance, network_info):
for vif in network_info:
self.vif_driver.plug(instance, vif)
def unplug_vifs(self, instance, network_info):
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def get_host_cpu_stats(self):
return {
'kernel': int(psutil.cpu_times()[2]),
'idle': int(psutil.cpu_times()[3]),
'user': int(psutil.cpu_times()[0]),
'iowait': int(psutil.cpu_times()[4]),
'frequency': _get_cpu_info().get('cpu mhz', 0)
}
def get_volume_connector(self, instance):
return {'ip': CONF.my_block_storage_ip,
'initiator': 'fake',
'host': 'fakehost'}
def get_available_nodes(self, refresh=False):
hostname = socket.gethostname()
return [hostname]
# XXX: rockstar (5 July 2016) - The methods and code below this line
# have not been through the cleanup process. We know the cleanup process
# is complete when there is no more code below this comment, and the
# comment can be removed.
#
# ComputeDriver implementation methods
#
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
# Ensure that the instance directory exists
instance_dir = common.InstanceAttributes(instance).instance_dir
if not os.path.exists(instance_dir):
fileutils.ensure_tree(instance_dir)
# Step 1 - Setup the profile on the dest host
flavor.to_profile(self.client,
instance, network_info, block_device_info)
        # Step 2 - Open a websocket on the source host and
        # generate the container config
self._migrate(migration['source_compute'], instance)
# Step 3 - Start the network and container
self.plug_vifs(instance, network_info)
        self.client.containers.get(instance.name).start(wait=True)
def confirm_migration(self, migration, instance, network_info):
self.unplug_vifs(instance, network_info)
self.client.profiles.get(instance.name).delete()
self.client.containers.get(instance.name).delete(wait=True)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
self.client.containers.get(instance.name).start(wait=True)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
for vif in network_info:
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(
instance, network_info)
self.firewall_driver.prepare_instance_filter(
instance, network_info)
self.firewall_driver.apply_instance_filter(
instance, network_info)
flavor.to_profile(self.client,
instance, network_info, block_device_info)
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
self._migrate(dest, instance)
post_method(context, instance, dest, block_migration)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
self.client.containers.get(instance.name).delete(wait=True)
def post_live_migration_at_source(self, context, instance, network_info):
self.client.profiles.get(instance.name).delete()
self.cleanup(context, instance, network_info)
def check_can_live_migrate_destination(
self, context, instance, src_compute_info, dst_compute_info,
block_migration=False, disk_over_commit=False):
try:
self.client.containers.get(instance.name)
raise exception.InstanceExists(name=instance.name)
except lxd_exceptions.LXDAPIException as e:
if e.response.status_code != 404:
raise
return LXDLiveMigrateData()
def cleanup_live_migration_destination_check(
self, context, dest_check_data):
return
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
if not CONF.lxd.allow_live_migration:
msg = _("Live migration is not enabled.")
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
return dest_check_data
#
# LXDDriver "private" implementation methods
#
# XXX: rockstar (21 Nov 2016) - The methods and code below this line
# have not been through the cleanup process. We know the cleanup process
# is complete when there is no more code below this comment, and the
# comment can be removed.
def _add_configdrive(self, context, instance,
injected_files, admin_password, network_info):
"""Create configdrive for the instance."""
if CONF.config_drive_format != 'iso9660':
raise exception.ConfigDriveUnsupportedFormat(
format=CONF.config_drive_format)
container = self.client.containers.get(instance.name)
storage_id = 0
"""
Determine UID shift used for container uid mapping
Sample JSON config from LXD
{
"volatile.apply_template": "create",
...
"volatile.last_state.idmap": "[
{
\"Isuid\":true,
\"Isgid\":false,
\"Hostid\":100000,
\"Nsid\":0,
\"Maprange\":65536
},
{
\"Isuid\":false,
\"Isgid\":true,
\"Hostid\":100000,
\"Nsid\":0,
\"Maprange\":65536
}] ",
"volatile.tap5fd6808a-7b.name": "eth0"
}
"""
container_id_map = jsonutils.loads(
container.config['volatile.last_state.idmap'])
uid_map = list(filter(lambda id_map: id_map.get("Isuid"),
container_id_map))
if uid_map:
storage_id = uid_map[0].get("Hostid", 0)
else:
            # privileged containers do not have a uid/gid mapping, so the
            # LXD API returns nothing for them
pass
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(
instance, content=injected_files, extra_md=extra_md,
network_info=network_info, request_context=context)
iso_path = os.path.join(
common.InstanceAttributes(instance).instance_dir,
'configdrive.iso')
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(iso_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error("Creating config drive failed with error: {}"
.format(e), instance=instance)
configdrive_dir = os.path.join(
nova.conf.CONF.instances_path, instance.name, 'configdrive')
if not os.path.exists(configdrive_dir):
fileutils.ensure_tree(configdrive_dir)
with utils.tempdir() as tmpdir:
mounted = False
try:
_, err = utils.execute('mount',
'-o',
'loop,uid=%d,gid=%d' % (os.getuid(),
os.getgid()),
iso_path, tmpdir,
run_as_root=True)
mounted = True
# Copy and adjust the files from the ISO so that we
                # don't have the ISO mounted during the life cycle of the
# instance and the directory can be removed once the instance
# is terminated
for ent in os.listdir(tmpdir):
shutil.copytree(os.path.join(tmpdir, ent),
os.path.join(configdrive_dir, ent))
utils.execute('chmod', '-R', '775', configdrive_dir,
run_as_root=True)
utils.execute('chown', '-R',
'%s:%s' % (storage_id, storage_id),
configdrive_dir, run_as_root=True)
finally:
if mounted:
utils.execute('umount', tmpdir, run_as_root=True)
return configdrive_dir
def _after_reboot(self):
"""Perform sync operation after host reboot."""
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache', 'metadata'])
for instance in instances:
if (instance.vm_state != vm_states.STOPPED):
continue
try:
network_info = self.network_api.get_instance_nw_info(
context, instance)
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(
instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def _migrate(self, source_host, instance):
"""Migrate an instance from source."""
source_client = pylxd.Client(
endpoint='https://{}'.format(source_host), verify=False)
container = source_client.containers.get(instance.name)
data = container.generate_migration_data()
        self.client.containers.create(data, wait=True)
|
import os
def exists_and_isfile(filename):
"""if file does not exist return False
if file exisits check if isfile or raise Exception
"""
if filename is None:
return False
if os.path.exists(filename):
if os.path.isfile(filename):
return True
else:
raise Exception("Object '%s' exisits but is not a file!")
return False
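# Illustrative usage (paths are examples only):
#
#     exists_and_isfile(None)             # -> False
#     exists_and_isfile('/etc/hostname')  # -> True if it is a regular file
#     exists_and_isfile('/etc')           # raises: exists but is a directory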
|
print('\033[1;93m-=-=-=-=-=-=-=-=-=-=-=-=-=-\033[m')
print('\033[1;31m ARITHMETIC PROGRESSION GENERATOR\033[m')
print('\033[1;93m-=-=-=-=-=-=-=-=-=-=-=-=-=-\033[m')
termo = int(input('First term: '))
razão = int(input('Common difference: '))
cont = 0
while cont <= 9:
print(termo, end=' ')
print('->', end=' ')
termo += razão
cont += 1
print('END')
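# Example run (inputs are illustrative): a first term of 2 and a common
# difference of 3 print ten terms followed by the end marker:
# 2 -> 5 -> 8 -> 11 -> 14 -> 17 -> 20 -> 23 -> 26 -> 29 -> END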
|
"""
Stereotype property page.
"""
from gi.repository import Gtk
from gaphor import UML
from gaphor.core import gettext, transactional
from gaphor.diagram.propertypages import PropertyPageBase, PropertyPages
def create_stereotype_tree_view(model, toggle_stereotype, set_slot_value):
"""
Create a tree view for an editable tree model.
:Parameters:
model
Model, for which tree view is created.
"""
tree_view = Gtk.TreeView.new_with_model(model)
tree_view.set_search_column(-1)
# Stereotype/Attributes
col = Gtk.TreeViewColumn.new()
col.set_title("{} / {}".format(gettext("Stereotype"), gettext("Attribute")))
col.set_expand(True)
renderer = Gtk.CellRendererToggle()
renderer.set_property("active", True)
renderer.set_property("activatable", True)
renderer.connect("toggled", toggle_stereotype, model, 2)
col.pack_start(renderer, False)
col.add_attribute(renderer, "active", 2)
def show_checkbox(column, cell, model, iter, data):
# value = model.get_value(iter, 4)
# cell.set_property('active', value is not None)
value = model.get_value(iter, 3)
cell.set_property("visible", isinstance(value, UML.Stereotype))
col.set_cell_data_func(renderer, show_checkbox)
renderer = Gtk.CellRendererText.new()
renderer.set_property("editable", False)
renderer.set_property("is-expanded", True)
col.pack_start(renderer, False)
col.add_attribute(renderer, "text", 0)
tree_view.append_column(col)
# TODO: use col.set_cell_data_func(renderer, func, None) to toggle visibility
# Value
renderer = Gtk.CellRendererText()
renderer.set_property("is-expanded", True)
renderer.connect("edited", set_slot_value, model, 1)
col = Gtk.TreeViewColumn(gettext("Value"), renderer, text=1)
col.set_expand(True)
def set_editable(column, cell, model, iter, data):
value = model.get_value(iter, 4)
cell.set_property("editable", bool(value))
col.set_cell_data_func(renderer, set_editable)
tree_view.append_column(col)
# tree_view.connect('key_press_event', remove_on_keypress)
# tree_view.connect('key_press_event', swap_on_keypress)
return tree_view
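# Illustrative wiring (handler names are placeholders): StereotypePage below
# builds a Gtk.TreeStore with this column layout and passes its toggle/edit
# callbacks, roughly:
#
#     model = Gtk.TreeStore.new([str, str, bool, object, object, object])
#     tree_view = create_stereotype_tree_view(model, on_toggle, on_set_value)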
@PropertyPages.register(UML.Element)
class StereotypePage(PropertyPageBase):
order = 40
name = "Stereotypes"
def __init__(self, item):
self.item = item
self.size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
def construct(self):
page = Gtk.VBox()
subject = self.item.subject
if subject is None:
return None
stereotypes = UML.model.get_stereotypes(subject)
if not stereotypes:
return None
# show stereotypes attributes toggle
if hasattr(self.item, "show_stereotypes"):
hbox = Gtk.HBox()
label = Gtk.Label(label="")
hbox.pack_start(label, False, True, 0)
button = Gtk.CheckButton(label=gettext("Show stereotypes attributes"))
button.set_active(self.item.show_stereotypes)
button.connect("toggled", self._on_show_stereotypes_change)
hbox.pack_start(button, True, True, 0)
page.pack_start(hbox, False, True, 0)
# stereotype attributes
# self.model = StereotypeAttributes(self.item.subject)
self.model = Gtk.TreeStore.new([str, str, bool, object, object, object])
tree_view = create_stereotype_tree_view(
self.model, self._toggle_stereotype, self._set_value
)
page.pack_start(tree_view, True, True, 0)
page.show_all()
self.refresh()
return page
@transactional
def _on_show_stereotypes_change(self, button):
self.item.show_stereotypes = button.get_active()
def refresh(self):
self.model.clear()
subject = self.item.subject
stereotypes = UML.model.get_stereotypes(subject)
instances = subject.appliedStereotype
# shortcut map stereotype -> slot (InstanceSpecification)
slots = {}
for obj in instances:
for slot in obj.slot:
slots[slot.definingFeature] = slot
for st in stereotypes:
for obj in instances:
if st in obj.classifier:
break
else:
obj = None
parent = self.model.append(None, (st.name, "", bool(obj), st, None, None))
if obj:
for attr in st.ownedAttribute:
if not attr.association:
slot = slots.get(attr)
value = slot.value if slot else ""
data = (attr.name, value, True, attr, obj, slot)
self.model.append(parent, data)
else:
for attr in st.ownedAttribute:
if not attr.association:
data = (attr.name, "", False, attr, None, None)
self.model.append(parent, data)
@transactional
def _set_value(self, renderer, path, value, model, col=0):
iter = model.get_iter(path)
self.set_slot_value(iter, value)
@transactional
def _toggle_stereotype(self, renderer, path, model, col):
iter = model.get_iter(path)
self.select_stereotype(iter)
def select_stereotype(self, iter):
"""
Select the stereotype.
"""
path = self.model.get_path(iter)
row = self.model[path]
name, old_value, is_applied, stereotype, _, _ = row
value = not is_applied
subject = self.item.subject
if value:
UML.model.apply_stereotype(subject, stereotype)
else:
UML.model.remove_stereotype(subject, stereotype)
row[2] = value
# TODO: change refresh in a refresh of the data model, rather than a clear-refresh
self.refresh()
def set_slot_value(self, iter, value):
"""
        Set the value of a stereotype property applied to a UML element.
        A slot is created if one does not exist yet; otherwise the value of
        the existing instance specification slot is changed (or the slot is
        removed when the value is empty).
"""
path = self.model.get_path(iter)
row = self.model[path]
name, old_value, is_applied, attr, obj, slot = row
if isinstance(attr, UML.Stereotype):
return # don't edit stereotype rows
if slot is None and not value:
return # nothing to do and don't create slot without value
if slot is None:
slot = UML.model.add_slot(obj, attr)
assert slot
if value:
slot.value = value
else:
# no value, then remove slot
del obj.slot[slot]
slot = None
value = ""
row[1] = value
row[5] = slot
|
from SCons.Script import * # Needed so we can use scons stuff like builders
#UNFINISHED
def Init(env):
#Checking if vital vars are present
checking_vars = ['CRAYON_BASE',
'CRAYON_PROJ_NAME',
'CRAYON_PROJ_LIBS',
'CRAYON_BUILD_DIR'
]
for x in checking_vars:
if x not in env['ENV']:
print "ERROR, variable " + x + " not found. Stopping program ..."
quit()
#Set env vars
PROJECT=env['ENV']['CRAYON_PROJ_NAME']
CFLAGS=""
LDFLAGS=""
LIBS=""
IP_BIN=""
BUILD=env['ENV']['CRAYON_BUILD_DIR']
PREPROCESSOR=""
PREPROCESSOR_FLAGS=""
# BUILD_MODE=""
#Setting CFLAGS
if 'CRAYON_PROJ_CFLAGS' in env['ENV']:
CFLAGS += env['ENV']['CRAYON_PROJ_CFLAGS'] + " "
if 'KOS_CFLAGS' in env['ENV']:
CFLAGS += env['ENV']['KOS_CFLAGS'] + " "
CFLAGS += "-I" + env['ENV']['CRAYON_BASE'] + "/include"
#Setting LDFLAGS
if 'CRAYON_PROJ_LDFLAGS' in env['ENV']:
LDFLAGS += env['ENV']['CRAYON_PROJ_LDFLAGS'] + " "
if 'KOS_LDFLAGS' in env['ENV']:
LDFLAGS += env['ENV']['KOS_LDFLAGS'] + " "
LDFLAGS += "-L" + env['ENV']['CRAYON_BASE'] + "/lib/dreamcast"
#Setting LIBS
if 'CRAYON_PROJ_LIBS' in env['ENV']:
LIBS += env['ENV']['CRAYON_PROJ_LIBS'] + " "
LIBS += "-lcrayon -lm"
if 'KOS_LIBS' in env['ENV']:
LIBS += " " + env['ENV']['KOS_LIBS']
#Setting IP.BIN location
if 'CRAYON_IP_BIN' in env['ENV']:
IP_BIN = env['ENV']['CRAYON_IP_BIN']
else:
IP_BIN = env['ENV']['CRAYON_BASE'] + "/IP.BIN"
#Setting PREPROCESSOR
if 'CRAYON_PP' in env['ENV']:
PREPROCESSOR = env['ENV']['CRAYON_PP']
else:
PREPROCESSOR = env['ENV']['CRAYON_BASE'] + "/preprocess.sh"
#Setting PREPROCESSOR_FLAGS
if 'CRAYON_PP_FLAGS' in env['ENV']:
PREPROCESSOR_FLAGS = env['ENV']['CRAYON_PP_FLAGS']
#Add our vars
env['ENV'].update(PROJECT=PROJECT, CFLAGS=CFLAGS, LDFLAGS=LDFLAGS,
LIBS=LIBS, IP_BIN=IP_BIN, BUILD=BUILD, PREPROCESSOR=PREPROCESSOR,
PREPROCESSOR_FLAGS=PREPROCESSOR_FLAGS)
#Make builders
elf = Builder(action="kos-cc -o $TARGET $SOURCES $LIBS") #SOURCES takes all dependencies and shoves them into one command
kos_bin = Builder(action="sh-elf-objcopy -R .stack -O binary $SOURCE $TARGET")
scramble = Builder(action="$KOS_BASE/utils/scramble/scramble $SOURCE $TARGET")
iso = Builder(action="genisoimage -G $KOS_BASE/../IP.BIN -C 0,11702 -J -l -r -o $TARGET .")
cdi = Builder(action="cdi4dc $SOURCE $TARGET")
#Add the builders
env.Append(BUILDERS= {'Elf': elf, 'KosBin': kos_bin, 'Scramble': scramble, 'Iso': iso, 'Cdi': cdi})
#I want this to depend on a "preprocessing" builder no matter the BOOT_MODE
#And...either the cdi builder for BOOT_MODE=cd else just a kos_bin
#We should set the builder dependencies here based on BOOT_MODE
# dreamcast = Builder()
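    #Illustrative SConstruct usage of the builders registered above (target
    #and source names are assumptions, not part of this file):
    #
    #   env = Environment(ENV=os.environ)
    #   Init(env)
    #   elf = env.Elf('build/project.elf', Glob('build/*.o'))
    #   binary = env.KosBin('build/project.bin', elf)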
|
'''
Bing Speech To Text (STT)
based on https://github.com/Uberi/speech_recognition
'''
import json
import uuid
import wave
import io
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
from bing_base import *
class BingVoiceRecognizer():
def __init__(self, bing_base):
self.bing_base = bing_base
def recognize(self, audio_data, language="en-US", show_all=False):
access_token = self.bing_base.token()
wav_data = self.to_wav(audio_data)
url = "https://speech.platform.bing.com/recognize/query?{0}".format(urlencode({
"version": "3.0",
"requestid": uuid.uuid4(),
"appID": "D4D52672-91D7-4C74-8AD8-42B1D98141A5",
"format": "json",
"locale": language,
"device.os": "wp7",
"scenarios": "ulm",
"instanceid": uuid.uuid4(),
"result.profanitymarkup": "0",
}))
request = Request(url, data=wav_data, headers={
"Authorization": "Bearer {0}".format(access_token),
"Content-Type": "audio/wav; samplerate=16000; sourcerate={0}; trustsourcerate=true".format(16000),
})
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(
getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "header" not in result or "lexical" not in result["header"]: raise UnknownValueError()
return result["header"]["lexical"]
@staticmethod
def to_wav(raw_data):
# generate the WAV file contents
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
wav_writer.setframerate(16000)
wav_writer.setsampwidth(2)
wav_writer.setnchannels(1)
wav_writer.writeframes(raw_data)
wav_data = wav_file.getvalue()
finally: # make sure resources are cleaned up
wav_writer.close()
return wav_data
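    # Note: to_wav() wraps raw 16 kHz, 16-bit, mono PCM frames in a WAV
    # container, matching the 'samplerate=16000' content type that
    # recognize() sends above.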
if __name__ == '__main__':
import sys
try:
        from creds import BING_KEY
except ImportError:
print('Get a key from https://www.microsoft.com/cognitive-services/en-us/speech-api and create creds.py with the key')
sys.exit(-1)
if len(sys.argv) != 2:
print('Usage: %s 16k_mono.wav' % sys.argv[0])
sys.exit(-1)
wf = wave.open(sys.argv[1])
if wf.getframerate() != 16000 or wf.getnchannels() != 1 or wf.getsampwidth() != 2:
print('only support 16000 sample rate, 1 channel and 2 bytes sample width')
sys.exit(-2)
# read less than 10 seconds audio data
n = wf.getnframes()
if (n / 16000.0) > 10.0:
n = 16000 * 10
frames = wf.readframes(n)
recognizer = BingVoiceRecognizer(BING_KEY)
# recognize speech using Microsoft Bing Voice Recognition
try:
text = recognizer.recognize(frames, language='en-US')
print('Bing:' + text.encode('utf-8'))
except UnknownValueError:
print("Microsoft Bing Voice Recognition could not understand audio")
except RequestError as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
|
from django import forms
class StudentForm(forms.Form):
file = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True})) # for creating file input
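    # Illustrative view-side handling (view and helper names are assumptions):
    # with 'multiple': True the uploaded files arrive as a list, e.g.
    #
    #     files = request.FILES.getlist('file')
    #     for f in files:
    #         handle_uploaded_file(f)  # hypothetical helper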
|
import pandas_datareader.data as web
import datetime
# Fetch trading data for the SSE Composite Index (000001.SS) from 2017-01-01 to today
df_stockload = web.DataReader("000001.SS", "yahoo", datetime.datetime(2017,1,1), datetime.date.today())
print(df_stockload.head())  # view the first few rows
"""
High Low Open Close Volume Adj Close
Date
2017-01-03 3136.5 3105.3 3105.3 3135.9 141600 3135.9
2017-01-04 3160.1 3130.1 3133.8 3158.8 167900 3158.8
2017-01-05 3168.5 3154.3 3157.9 3165.4 174700 3165.4
2017-01-06 3172.0 3153.0 3163.8 3154.3 183700 3154.3
2017-01-09 3173.1 3147.7 3148.5 3171.2 171700 3171.2
"""
print(df_stockload.tail())  # view the last few rows
"""
High Low Open Close Volume Adj Close
Date
2019-03-04 3090.8 3006.9 3015.9 3027.6 525600 3027.6
2019-03-05 3055.0 3009.4 3019.9 3054.2 424100 3054.2
2019-03-06 3103.8 3050.1 3060.4 3102.1 555000 3102.1
2019-03-07 3129.9 3075.0 3103.7 3106.4 583800 3106.4
2019-03-08 3075.0 2969.6 3038.3 2969.9 577900 2969.9
"""
print(df_stockload.columns)  # view the column index
"""
Index(['High', 'Low', 'Open', 'Close', 'Volume', 'Adj Close'], dtype='object')
"""
print(df_stockload.index)  # view the row index
"""
DatetimeIndex(['2017-01-03', '2017-01-04', '2017-01-05', '2017-01-06',
'2017-01-09', '2017-01-10', '2017-01-11', '2017-01-12',
'2017-01-13', '2017-01-16',
...
'2019-02-25', '2019-02-26', '2019-02-27', '2019-02-28',
'2019-03-01', '2019-03-04', '2019-03-05', '2019-03-06',
'2019-03-07', '2019-03-08'],
dtype='datetime64[ns]', name='Date', length=530, freq=None)
"""
print(df_stockload.shape)  # view the shape
"""
(530, 6)
"""
print(df_stockload.describe())  # descriptive statistics for each column
print(df_stockload.info())  # check missing values and the dtype of each column
import matplotlib.pyplot as plt
# plot the closing price
df_stockload.Close.plot(c='b')
plt.legend(['Close','30ave','60ave'],loc='best')
plt.show()
|
from pathlib import Path
from urllib.parse import urlparse
import click
import requests
import database_connection # noqa: F401
from matrix_connection import get_download_url
from schema import Message
def download_stem(message, prefer_thumbnails):
image_url = (message.thumbnail_url if prefer_thumbnails else None) \
or message.image_url
return urlparse(image_url).path.lstrip('/')
def run_downloads(messages, download_dir, prefer_thumbnails):
for msg in messages:
image_url = (msg.thumbnail_url if prefer_thumbnails else None) or msg.image_url
res = requests.head(get_download_url(image_url))
assert res.status_code == 200
mtype, subtype = res.headers['content-type'].split('/', 2)
if mtype != 'image':
print(f"Skipping {image_url}: {res.headers['content-type']}")
continue
res = requests.get(get_download_url(image_url))
assert res.status_code == 200
filename = (download_dir / download_stem(msg, prefer_thumbnails)
).with_suffix('.' + subtype)
print('Downloading', image_url, '->', filename)
with open(filename, 'wb') as fp:
fp.write(res.content)
@click.command()
@click.option('--thumbnails/--no-thumbnails', default=True)
@click.argument('output', required=False)
def download_images(thumbnails, output):
"""Download thumbnails."""
noun = 'thumbnails' if thumbnails else 'images'
download_dir = Path(output or noun)
messages = [msg for msg in Message.objects
if msg.content.get('msgtype') == 'm.image']
download_dir.mkdir(exist_ok=True)
current_stems = {p.stem for p in download_dir.glob('*')}
new_messages = [msg for msg in messages
if download_stem(msg, thumbnails)
not in current_stems]
skip_count = len(messages) - len(new_messages)
if skip_count:
print(f"Skipping {skip_count} already-downloaded {noun}")
if new_messages:
print(f"Downloading {len(new_messages)} new {noun}...")
else:
print("Nothing to do")
run_downloads(new_messages, download_dir, prefer_thumbnails=thumbnails)
if __name__ == '__main__':
download_images()
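# Illustrative CLI usage (the script name is an assumption):
#
#     python download_images.py                   # thumbnails into ./thumbnails
#     python download_images.py --no-thumbnails ./full_images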
|
import sys
from ansibleflow import config
from specter import Spec, fixture
from six import StringIO
real_stdout = sys.stdout
real_stderr = sys.stderr
@fixture
class BaseSpec(Spec):
def before_all(self):
config.get_config('./data/test_project.yml')
def after_all(self):
config._config = None
def before_each(self):
sys.stdout = self.stdout = StringIO()
sys.stderr = self.stderr = StringIO()
def after_each(self):
sys.stdout = real_stdout
sys.stderr = real_stderr
|
"""Different types of Tasks."""
from __future__ import annotations
import logging
import abc
from pathlib import Path
from typing import Optional, Dict, List, Tuple
from shutil import copytree, rmtree
from . import tools
from .schema_tags import Tags, LangTags, BuildTags
log = logging.getLogger("GHC")
OUTPUT_MISMATCH_MESSAGE = """Given input: '{input}'
Your output '{actual}'
Expected output: '{expected}'"""
BUILD_SUCCESS_TAG = "Build succeeded"
STYLE_ERROR_TAG = "Style errors"
TOTAL_ERRORS_FOUND_TAG = "Total errors found"
class Task:
"""Define an abstract Task."""
BACKUP_FOLDER = ".backup"
ResultDictType = Dict[str, tools.CmdResult]
class Injection:
"""A small class that encapsulates an injection source and destination."""
def __init__(self: Task.Injection, source: Path, destination: Path):
self._source = source
self._destination = destination
@property
def source(self) -> Path:
"""Get the source folder."""
return self._source
@property
def destination(self) -> Path:
"""Get the destination folder."""
return self._destination
@staticmethod
def from_yaml_node(
task_node: dict, student_hw_folder: Path, job_file: Path
) -> Optional[Task]:
"""Create an Task appropriate for the language."""
student_task_folder = student_hw_folder / task_node[Tags.FOLDER_TAG]
if not student_task_folder.exists():
log.warning("Folder '%s' does not exist. Skipping.", student_task_folder)
return None
language_tag = task_node[Tags.LANGUAGE_TAG]
if language_tag == LangTags.CPP:
return CppTask(task_node, student_task_folder, job_file)
if language_tag == LangTags.BASH:
return BashTask(task_node, student_task_folder, job_file)
log.error("Unknown Task language.")
return None
def __init__(
self: Task, task_node: dict, student_task_folder: Path, job_file: Path
):
"""Initialize a generic Task."""
self.name = task_node[Tags.NAME_TAG]
self._job_root_folder = job_file.parent
self._output_type = task_node[Tags.OUTPUT_TYPE_TAG]
self._student_task_folder = student_task_folder
self._binary_name = task_node[Tags.BINARY_NAME_TAG]
self._pipe_through = task_node[Tags.PIPE_TAG]
self._build_timeout = task_node[Tags.BUILD_TIMEOUT_TAG]
if Tags.TESTS_TAG in task_node:
self._test_nodes = task_node[Tags.TESTS_TAG]
else:
self._test_nodes = [] # Sometimes we don't have tests.
self.__test_counter = 0
def __with_number_prefix(self: Task, test_name: str) -> str:
"""Get the test name with number."""
self.__test_counter += 1
return tools.add_number_to_name(self.__test_counter, test_name)
def check(self: Task) -> Task.ResultDictType:
"""Iterate over the tests and check them."""
# Generate empty results.
results: Task.ResultDictType = {}
def run_all_tests(
test_node: dict, executable_folder: Path
) -> Task.ResultDictType:
"""Run all tests in the task."""
results: Task.ResultDictType = {}
if Tags.INJECT_FOLDER_TAG not in test_node:
# There is no need to rebuild the code. We can just run our tests.
test_result = self._run_test(test_node, executable_folder)
results[
self.__with_number_prefix(test_node[Tags.NAME_TAG])
] = test_result
return results
# There are folders to inject, so we will have to rebuild with the newly
# injected folders. We do it in a new temp folder.
with tools.TempDirCopy(
source_folder=self._student_task_folder, prefix="build_injected"
) as code_folder:
folders_to_inject = self.__get_folders_to_inject(
node=test_node, destination_root=code_folder
)
Task.__inject_folders(folders_to_inject)
build_result, build_folder = self._build_if_needed(code_folder)
if build_result and not build_result.succeeded():
raise Exception("Build with inserted folders must ALWAYS succeed!")
test_result = self._run_test(
test_node=test_node, executable_folder=build_folder
)
results[
self.__with_number_prefix(test_node[Tags.NAME_TAG])
] = test_result
return results
with tools.TempDirCopy(self._student_task_folder) as code_folder:
# Build the source if this is needed.
build_result, build_folder = self._build_if_needed(code_folder)
if build_result:
results[self.__with_number_prefix(BUILD_SUCCESS_TAG)] = build_result
if not build_result.succeeded():
# The build has failed, so no further testing needed.
return results
# The build is either not needed or succeeded. Continue testing.
for test_node in self._test_nodes:
results.update(
run_all_tests(test_node=test_node, executable_folder=build_folder)
)
style_errors = self._code_style_errors()
if style_errors:
results[STYLE_ERROR_TAG] = style_errors
return results
def __get_folders_to_inject(
self: Task, node: dict, destination_root: Path
) -> List[Injection]:
folders_to_inject = []
if Tags.INJECT_FOLDER_TAG in node:
# Inject all needed folders.
for injection in node[Tags.INJECT_FOLDER_TAG]:
source_folder = (
self._job_root_folder / injection[Tags.INJECT_SOURCE_TAG]
)
destination_folder = (
destination_root / injection[Tags.INJECT_DESTINATION_TAG]
)
folders_to_inject.append(
Task.Injection(source=source_folder, destination=destination_folder)
)
return folders_to_inject
@staticmethod
def __inject_folders(folders_to_inject: List[Task.Injection]):
"""Inject all folders overwriting existing folders in case of conflict."""
for injection in folders_to_inject:
if injection.destination.exists():
rmtree(injection.destination)
copytree(injection.source, injection.destination)
@abc.abstractmethod
def _run_test(self: Task, test_node: dict, executable_folder: Path):
return None
@abc.abstractmethod
def _build_if_needed(self: Task, code_folder: Path):
return None, code_folder
@abc.abstractmethod
def _code_style_errors(self: Task):
return None
class CppTask(Task):
"""Define a C++ Task."""
CMAKE_BUILD_CMD = "cmake .. && make -j2"
REMAKE_AND_TEST = "make clean && rm -r * && cmake .. && make -j2 && ctest -VV"
BUILD_CMD_SIMPLE = "clang++ -std=c++14 -o {binary} {compiler_flags} {binary}.cpp"
def __init__(self: CppTask, task_node: dict, root_folder: Path, job_file: Path):
"""Initialize the C++ Task."""
super().__init__(task_node, root_folder, job_file)
self._compiler_flags = task_node[Tags.COMPILER_FLAGS_TAG]
self._build_type = task_node[Tags.BUILD_TYPE_TAG]
def _build_if_needed(
self: CppTask, code_folder: Path
) -> Tuple[tools.CmdResult, Path]:
if self._build_type == BuildTags.CMAKE:
build_folder = code_folder / "build"
build_folder.mkdir(parents=True, exist_ok=True)
return (
tools.run_command(
CppTask.CMAKE_BUILD_CMD,
cwd=build_folder,
timeout=self._build_timeout,
),
build_folder,
)
return (
tools.run_command(
CppTask.BUILD_CMD_SIMPLE.format(
binary=self._binary_name, compiler_flags=self._compiler_flags
),
cwd=code_folder,
timeout=self._build_timeout,
),
code_folder,
)
def _code_style_errors(self: CppTask) -> Optional[tools.CmdResult]:
"""Check if code conforms to Google Style."""
command = (
"cpplint --counting=detailed "
+ "--filter=-legal,-readability/todo,"
+ "-build/include_order,-runtime/threadsafe_fn,"
+ "-runtime/arrays"
+ ' $( find . -name "*.h" -o -name "*.cpp" | grep -vE "^./build/" )'
)
result = tools.run_command(
command,
cwd=self._student_task_folder,
timeout=self._build_timeout,
)
if result.stderr and TOTAL_ERRORS_FOUND_TAG in result.stderr:
return result
if result.stdout and TOTAL_ERRORS_FOUND_TAG in result.stdout:
return result
return None
def _run_test(self: CppTask, test_node: dict, executable_folder: Path):
if test_node[Tags.RUN_GTESTS_TAG]:
return tools.run_command(
CppTask.REMAKE_AND_TEST,
cwd=executable_folder,
timeout=test_node[Tags.TIMEOUT_TAG],
)
input_str = ""
if Tags.INPUT_TAG in test_node:
input_str = test_node[Tags.INPUT_TAG]
run_cmd = "./{binary_name} {args}".format(
binary_name=self._binary_name, args=input_str
)
if self._pipe_through:
run_cmd += " " + self._pipe_through
run_result = tools.run_command(
run_cmd, cwd=executable_folder, timeout=test_node[Tags.TIMEOUT_TAG]
)
if not run_result.succeeded():
return run_result
# TODO(igor): do I need explicit error here?
our_output, error = tools.convert_to(self._output_type, run_result.stdout)
if not our_output:
# Conversion has failed.
run_result.stderr = error
return run_result
expected_output, error = tools.convert_to(
self._output_type, test_node[Tags.EXPECTED_OUTPUT_TAG]
)
if our_output != expected_output:
run_result.stderr = OUTPUT_MISMATCH_MESSAGE.format(
actual=our_output, input=input_str, expected=expected_output
)
return run_result
class BashTask(Task):
"""Define a Bash Task."""
RUN_CMD = "sh {binary_name}.sh {args}"
def __init__(self: BashTask, task_node: dict, root_folder: Path, job_file: Path):
"""Initialize the Task."""
super().__init__(task_node, root_folder, job_file)
def _build_if_needed(self: BashTask, code_folder: Path):
return None, code_folder # There is nothing to build in Bash.
def _code_style_errors(self: BashTask):
return None
def _run_test(
self: BashTask, test_node: dict, executable_folder: Path
) -> tools.CmdResult:
input_str = ""
if Tags.INPUT_TAG in test_node:
input_str = test_node[Tags.INPUT_TAG]
run_cmd = BashTask.RUN_CMD.format(binary_name=self._binary_name, args=input_str)
if self._pipe_through:
run_cmd += " " + self._pipe_through
run_result = tools.run_command(
run_cmd, cwd=executable_folder, timeout=test_node[Tags.TIMEOUT_TAG]
)
if not run_result.succeeded():
return run_result
our_output, error = tools.convert_to(self._output_type, run_result.stdout)
if not our_output:
# Conversion has failed.
run_result.stderr = error
return run_result
expected_output, error = tools.convert_to(
self._output_type, test_node[Tags.EXPECTED_OUTPUT_TAG]
)
if our_output != expected_output:
run_result.stderr = OUTPUT_MISMATCH_MESSAGE.format(
actual=our_output, input=input_str, expected=expected_output
)
return run_result
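# Illustrative sketch only (commented out): the concrete string values behind the Tags
# constants live in .schema_tags and are not shown here, so the literal values below
# ("string", "-Wall", folder names, etc.) are assumptions. It outlines the dict shape
# that Task.from_yaml_node() expects for a CMake-built C++ task with no tests.
#
# example_node = {
#     Tags.NAME_TAG: "Task 1",
#     Tags.FOLDER_TAG: "task_1",
#     Tags.LANGUAGE_TAG: LangTags.CPP,
#     Tags.OUTPUT_TYPE_TAG: "string",
#     Tags.BINARY_NAME_TAG: "solution",
#     Tags.PIPE_TAG: "",
#     Tags.BUILD_TIMEOUT_TAG: 60,
#     Tags.BUILD_TYPE_TAG: BuildTags.CMAKE,
#     Tags.COMPILER_FLAGS_TAG: "-Wall",
#     Tags.TESTS_TAG: [],
# }
# task = Task.from_yaml_node(example_node, Path("student_hw"), Path("job.yml"))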
|
'''
The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
Given an integer n, return all distinct solutions to the n-queens puzzle.
Each solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' indicate a queen and an empty space, respectively.
Example:
Input: 4
Output: [
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
Explanation: There exist two distinct solutions to the 4-queens puzzle as shown above.
'''
from typing import List
class Solution:
def solveNQueens(self, n: int) -> List[List[str]]:
self.output = []
self.dfs(n, [])
return self.visualize_matrix(n)
def visualize_matrix(self, n):
final = []
matrix = []
for i in range(n):
matrix.append('.' * n)
for x in self.output:
temp = matrix.copy()
for idx, y in enumerate(x):
temp[idx] = temp[idx][:y] + 'Q' + temp[idx][y + 1:]
final.append(temp)
return final
def indiagonal(self, x, output):
ban = []
row = len(output)
for idx, num in enumerate(output):
ban.append(num + row - idx)
ban.append(num - row + idx)
if x not in ban:
return True
else:
return False
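    # Worked example: with output=[1] (a queen at row 0, column 1), row 1 bans the
    # diagonal columns 0 and 2, so indiagonal(3, [1]) is True and indiagonal(2, [1]) is False.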
def dfs(self, n, output):
if len(output) == n:
self.output.append(output.copy())
return
for x in range(n):
if x not in output and self.indiagonal(x, output):
self.dfs(n, output + [x])
return
|
a = int(input("enter range: "))
d = dict()
for i in range(1, a):
    d[i] = i * i
print(d)
|
import re
from typing import List, Optional
from xml.sax import saxutils
from .writers.xml import XMLMaker
from ..interpreter import PDFInterpreter, PageInterpreter, logging
from ..interpreter import LTImage, LTTextBlock, LTCharBlock, LTChar, LTCurve, LTXObject
from ..interpreter.commands import LTItem
from ..interpreter.commands import LTRect, LTLine
from ..interpreter.commands.state import CurvePath, PDFColor
from ..parser.PSStackParser import PSLiteral
from ..utils import enc
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')
def convert_capcase_to_snakecase(text: str) -> str:
s1 = FIRST_CAP_RE.sub(r'\1-\2', text)
return ALL_CAP_RE.sub(r'\1-\2', s1).lower()
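# Worked example: convert_capcase_to_snakecase("FontFamily") returns "font-family";
# despite the name, the keys come out hyphen-separated, which suits XML attribute names.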
def normalize_color_values(values: list):
ret = ""
for i,v in enumerate(values):
if i > 0:
ret += ','
if isinstance(v, PSLiteral):
ret += enc(str(v.name)).decode()
else:
ret += enc(str(v)).decode()
return ret
def get_color(col: PDFColor, prefix: str = 'color', codec = 'utf-8'):
if col.color_space is not None:
return {
f"{prefix}-type": enc(col.color_space.name).decode(),
f"{prefix}-values": normalize_color_values(col.values)
}
return {}
def convert_to_xml(
input_file_path: str,
output_file_path: str,
    image_folder_path: Optional[str] = None,
    dont_export_images: bool = False,
    codec: str = 'utf-8',
    maxpages: int = 0,
    password: Optional[str] = None,
    caching: bool = True,
    ignore_bad_chars: bool = False,
    check_extractable: bool = True,
    pagenos: Optional[List[int]] = None,
debug_level: int = logging.WARNING,
):
    interpreter = PDFInterpreter(input_file_path,
maxpages=maxpages,
password=password,
caching=caching,
check_extractable=check_extractable,
ignore_bad_chars=ignore_bad_chars,
pagenos=pagenos,
debug_level=debug_level
)
with XMLMaker(output_file_path, codec=codec) as xml:
with xml.elm("pages"):
            for page in interpreter:
render_page(xml, page)
def render_page(xml: XMLMaker, ltpage: PageInterpreter):
def render(item: LTItem):
if isinstance(item, LTCurve):
place_curve(xml, item)
elif isinstance(item, LTXObject):
with xml.elm('xobject', {
"x0": item.x0,
"x1": item.x1,
"y0": item.y0,
"y1": item.y1,
}):
for child in item:
render(child)
elif isinstance(item, LTImage):
name = item.name
# if xml.imagewriter is not None:
# name = xml.imagewriter.export_image(item)
xml.singleton('img', attrs={
'src': enc(name, None),
"x0": item.x0,
"x1": item.x1,
"y0": item.y0,
"y1": item.y1,
})
elif isinstance(item, LTTextBlock):
place_text_block(xml, item)
with xml.elm('page', {
"id": ltpage.page_num,
"width": ltpage.width,
"height": ltpage.height,
}):
for child in ltpage:
render(child)
def place_char(xml: XMLMaker, char: LTChar):
with xml.elm('char', {
'x0': char.x0,
'x1': char.x1,
'y0': char.y0,
'y1': char.y1,
}, no_additional_char=True):
xml.write(saxutils.escape(char.get_text()), lineend='', deep_space="")
def place_char_block(xml: XMLMaker, char_block: LTCharBlock):
attr = {
'size': char_block.size,
"x0": char_block.x0,
"x1": char_block.x1,
"y0": char_block.y0,
"y1": char_block.y1,
**get_color(char_block.color, codec=xml.codec),
}
for key, value in char_block.font.descriptor.items():
if key != "Type" and "FontFile" not in key:
attr[convert_capcase_to_snakecase(key)] = value
with xml.elm("char-block", attr):
for char in char_block:
place_char(xml, char)
def place_text_block(xml: XMLMaker, text_block: LTCharBlock):
attr = {
"x0": text_block.x0,
"x1": text_block.x1,
"y0": text_block.y0,
"y1": text_block.y1,
}
with xml.elm("text-block", attr):
for char_block in text_block:
place_char_block(xml, char_block)
def place_curve(xml: XMLMaker, item: LTCurve):
attr = {
"x0": item.x0,
"x1": item.x1,
"y0": item.y0,
"y1": item.y1,
**get_color(item.stroke, "stroke", codec=xml.codec),
**get_color(item.fill, "fill", codec=xml.codec),
}
if isinstance(item, LTRect):
return xml.singleton('rect', attr)
elif isinstance(item, LTLine):
return xml.singleton('line', attr)
with xml.elm('curve', attr):
for path in item.paths:
with xml.elm('path', { "type": path.method.name }):
for point in path.points:
xml.singleton('point', { 'x': point.x, 'y': point.y })
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 20:33:44 2021
@author: Yefry Lopez
You need to install SimpleGUI (the code targets CodeSkulptor's simplegui module)
You can see the code in action here:
https://py3.codeskulptor.org/#user306_YTAa8q66OLDKlLV.py
"""
# template for "Stopwatch: The Game"
import simplegui
# define global variables
variable = 0
x = 0
y = 0
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
A = t//600
var3 = t //10
var5 = var3 % 60
B = var5 // 10
C = var5 % 10
D = t % 10
return str(A) + ":" + str(B) + str(C) + "." + str(D)
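# Worked example: format(605) returns "1:00.5" (605 tenths of a second = 1 min 0.5 s).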
# define event handlers for buttons; "Start", "Stop", "Reset"
def start():
    timer.start()
def stop():
    global x, y
    if timer.is_running():
        timer.stop()
        y += 1
        # only stops that land exactly on a whole second count as successful
        if variable % 10 == 0:
            x += 1
def reset():
global variable
global x
global y
variable = 0
x = 0
y = 0
# define event handler for timer with 0.1 sec interval
def timer_handler():
global variable
global x
global y
variable += 1
# define draw handler
def draw_handler(canvas):
global variable
global x
global y
string = str(x) + "/" + str(y)
canvas.draw_text(format(variable), (120, 90), 30, 'White',"sans-serif")
canvas.draw_text(string, (250, 20), 18, 'White',"sans-serif")
# create frame
frame = simplegui.create_frame('Stopwatch', 300, 150)
timer = simplegui.create_timer(100, timer_handler)
# register event handlers
frame.set_draw_handler(draw_handler)
start = frame.add_button('Start', start, 100)
stop = frame.add_button('Stop', stop, 100)
reset = frame.add_button('Reset', reset, 100)
# start frame
frame.start()
# Please remember to review the grading rubric
|
from typing import Optional, List, Dict
import graphene
from graphql_jwt.decorators import login_required
from .types import PerformerType
from ..utils import check_permissions, update_and_save
from ...reviews.models import Performer, Album
class AlbumInputType(graphene.InputObjectType):
name = graphene.String(required=True)
year = graphene.Int(required=True)
mbid = graphene.String(required=True)
class CreatePerformer(graphene.relay.ClientIDMutation):
performer = graphene.Field(PerformerType)
class Input:
mbid = graphene.String(required=True)
name = graphene.String(required=True)
albums = graphene.List(graphene.NonNull(AlbumInputType), default=[])
@classmethod
@login_required
def mutate_and_get_payload(
cls,
_,
info,
mbid: str,
name: str,
albums: Optional[List[AlbumInputType]] = None,
):
albums = albums or []
performer = Performer.objects.create(
mbid=mbid, name=name, user=info.context.user,
)
for album in albums:
Album.objects.create(
performer=performer,
name=album.name,
mbid=album.mbid,
year=album.year,
user=info.context.user,
)
return CreatePerformer(performer=performer)
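# Hypothetical GraphQL usage of the mutation above (the exact field name depends on how
# these mutations are registered on the schema, which is not shown in this module):
#
# mutation {
#   createPerformer(input: {mbid: "mbid-123", name: "Some Performer", albums: []}) {
#     performer { name }
#   }
# }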
class UpdatePerformer(graphene.relay.ClientIDMutation):
performer = graphene.Field(PerformerType)
class Input:
performer = graphene.ID(required=True)
mbid = graphene.String()
name = graphene.String()
@classmethod
@login_required
def mutate_and_get_payload(cls, _, info, performer: str, **kwargs: Dict[str, str]):
performer_instance = graphene.relay.Node.get_node_from_global_id(
info, performer
)
check_permissions(performer_instance.user, info)
update_and_save(performer_instance, kwargs)
return UpdatePerformer(performer=performer_instance)
class DeletePerformer(graphene.relay.ClientIDMutation):
success = graphene.Boolean()
class Input:
performer = graphene.ID(required=True)
@classmethod
@login_required
def mutate_and_get_payload(cls, _, info, performer: str):
performer_instance = graphene.relay.Node.get_node_from_global_id(
info, performer
)
        if performer_instance is None:
            return DeletePerformer(success=False)
        check_permissions(performer_instance.user, info)
        performer_instance.delete()
        return DeletePerformer(success=True)
|
from typing import List
import numpy as np
import tensorflow as tf
class AsyncGGNN(object):
@classmethod
def default_params(cls):
return {
'hidden_size': 128,
'edge_label_size': 16,
'propagation_rounds': 4, # Has to be an even number
'propagation_substeps': 15, # This is the maximal number of considered substeps
'graph_rnn_cell': 'GRU', # GRU or RNN
'graph_rnn_activation': 'tanh', # tanh, ReLU
'use_edge_bias': False,
'num_labeled_edge_types': 1,
'num_unlabeled_edge_types': 4,
}
def __init__(self, hyperparams):
self.hyperparams = hyperparams
self.num_labeled_edge_types = self.hyperparams['num_labeled_edge_types']
self.num_unlabeled_edge_types = self.hyperparams['num_unlabeled_edge_types']
self.num_edge_types = self.num_labeled_edge_types + self.num_unlabeled_edge_types
self.__parameters = {}
self.__make_parameters()
@property
def parameters(self):
return self.__parameters
def __make_parameters(self):
activation_name = self.hyperparams['graph_rnn_activation'].lower()
if activation_name == 'tanh':
activation_fun = tf.nn.tanh
elif activation_name == 'relu':
activation_fun = tf.nn.relu
else:
raise Exception("Unknown activation function type '%s'." % activation_name)
h_dim = self.hyperparams['hidden_size']
e_dim = self.hyperparams['edge_label_size']
self.__parameters['labeled_edge_weights'] = [tf.get_variable(name='labeled_edge_weights_typ%i' % e_typ,
shape=[h_dim + e_dim, h_dim],
initializer=tf.glorot_uniform_initializer())
for e_typ in range(self.num_labeled_edge_types)]
self.__parameters['unlabeled_edge_weights'] = [tf.get_variable(name='unlabeled_edge_weights_typ%i' % e_typ,
shape=[h_dim, h_dim],
initializer=tf.glorot_uniform_initializer())
for e_typ in range(self.num_unlabeled_edge_types)]
if self.hyperparams['use_edge_bias']:
self.__parameters['labeled_edge_biases'] = [tf.Variable(np.zeros([h_dim], dtype=np.float32),
name='labeled_edge_biases_typ%i' % e_typ)
for e_typ in range(self.num_labeled_edge_types)]
self.__parameters['unlabeled_edge_biases'] = [tf.Variable(np.zeros([h_dim], dtype=np.float32),
name='edge_biases_typ%i' % e_typ)
for e_typ in range(self.num_unlabeled_edge_types)]
cell_type = self.hyperparams['graph_rnn_cell'].lower()
if cell_type == 'gru':
cell = tf.nn.rnn_cell.GRUCell(h_dim, activation=activation_fun)
elif cell_type == 'rnn':
cell = tf.nn.rnn_cell.BasicRNNCell(h_dim, activation=activation_fun)
else:
raise Exception("Unknown RNN cell type '%s'." % cell_type)
self.__parameters['rnn_cell'] = cell
def async_ggnn_layer(self,
initial_node_representation: tf.Tensor,
initial_nodes: List[tf.Tensor],
sending_nodes: List[List[List[tf.Tensor]]],
edge_labels: List[List[List[tf.Tensor]]],
msg_targets: List[List[tf.Tensor]],
receiving_nodes: List[List[tf.Tensor]],
receiving_node_num: List[tf.Tensor]) -> tf.Tensor:
"""
Run through an async GGNN and return the representations of all nodes.
:param initial_node_representation: the initial embeddings of the nodes.
Shape: [-1, h_dim]
:param initial_nodes: List of node id tensors I_{r}: Node IDs that will have no incoming edges in round r.
Inner Tensor Shape: [-1]
:param sending_nodes: List of lists of lists of sending nodes S_{r,s,e}: Source node ids of edges of type e
propagating in step s of round r. By convention, 0..self.num_labeled_edges are labeled
edge types, and self.num_labeled_edges.. are unlabeled edge types.
Restrictions: If v in S_{r,s,e}, then v in R_{r,s'} for s' < s or v in I_{r}.
Inner Tensor Shape: [-1]
:param edge_labels: List of lists of lists of (embeddings of) labels of edges L_{r,s,e}: Labels of edges of type
e propagating in step s of round r.
Restrictions: len(L_{r,s,e}) = len(S_{r,s,e})
Inner Tensor Shape: [-1, e_dim]
:param msg_targets: List of lists of normalised edge target nodes T_{r,s}: Targets of edges propagating in step
s of round r, normalised to a continuous range starting from 0.
This is used for aggregating messages from the sending nodes.
Inner Tensor Shape: [-1]
:param receiving_nodes: List of lists of receiving nodes R_{r,s}: Target node ids of aggregated messages in
propagation step s of round r.
Restrictions: If v in R_{r,s}, v not in R_{r,s'} for all s' != s and v not in I_{r}.
Inner Tensor Shape: [-1]
:param receiving_node_num: Number of receiving nodes N_{r,s}
Restrictions: N_{r,s} = len(R_{r,s})
Inner Tensor Shape: [|Substeps|]
:return: representations of all nodes after propagation according to schedule. Shape: [-1, h_dim]
"""
with tf.variable_scope('async_ggnn'):
cur_node_states = initial_node_representation
for prop_round in range(self.hyperparams['propagation_rounds']):
with tf.variable_scope('prop_round%i' % (prop_round,)):
# ---- Declare and fill tensor arrays used in tf.while_loop:
sending_nodes_ta = tf.TensorArray(
tf.int32,
infer_shape=False,
element_shape=[None],
size=self.hyperparams['propagation_substeps'] * self.num_edge_types,
name='sending_nodes'
)
edge_labels_ta = tf.TensorArray(
tf.float32,
infer_shape=False,
element_shape=[None, self.hyperparams['edge_label_size']],
size=self.hyperparams['propagation_substeps'] * self.num_labeled_edge_types,
name='edge_labels'
)
msg_targets_ta = tf.TensorArray(tf.int32,
infer_shape=False,
element_shape=[None],
size=self.hyperparams['propagation_substeps'],
name='msg_targets')
receiving_nodes_ta = tf.TensorArray(tf.int32,
infer_shape=False,
element_shape=[None],
size=self.hyperparams['propagation_substeps'],
clear_after_read=False,
name='receiving_nodes')
receiving_node_num_ta = tf.TensorArray(tf.int32,
infer_shape=False,
element_shape=[],
size=self.hyperparams['propagation_substeps'],
name='receiving_nodes_num')
for step in range(self.hyperparams['propagation_substeps']):
for labeled_edge_typ in range(self.num_labeled_edge_types):
sending_nodes_ta = sending_nodes_ta.write(step * self.num_edge_types + labeled_edge_typ,
sending_nodes[prop_round][step][labeled_edge_typ])
edge_labels_ta = edge_labels_ta.write(step * self.num_labeled_edge_types + labeled_edge_typ,
edge_labels[prop_round][step][labeled_edge_typ])
for unlabeled_edge_typ in range(self.num_unlabeled_edge_types):
shifted_edge_typ = self.num_labeled_edge_types + unlabeled_edge_typ
sending_nodes_ta = sending_nodes_ta.write(step * self.num_edge_types + shifted_edge_typ,
sending_nodes[prop_round][step][shifted_edge_typ])
msg_targets_ta = msg_targets_ta.write(step, msg_targets[prop_round][step])
receiving_nodes_ta = receiving_nodes_ta.write(step, receiving_nodes[prop_round][step])
receiving_node_num_ta = receiving_node_num_ta.unstack(receiving_node_num[prop_round])
new_node_states_ta = tf.TensorArray(tf.float32,
infer_shape=False,
element_shape=[self.hyperparams['hidden_size']],
size=tf.shape(cur_node_states)[0],
clear_after_read=False,
name='new_node_states')
# ---- Actual propagation schedule implementation:
# Initialize the initial nodes with their state from last round:
new_node_states_ta = new_node_states_ta.scatter(initial_nodes[prop_round],
tf.gather(cur_node_states, initial_nodes[prop_round]))
def do_substep(substep_id, new_node_states_ta):
# For each edge active in this substep, pull source state and transform:
sending_states_per_edge_type = []
edge_labels_per_type = []
for labeled_edge_typ in range(self.num_labeled_edge_types):
sending_states_per_edge_type.append(
new_node_states_ta.gather(sending_nodes_ta.read(
substep_id * self.num_edge_types + labeled_edge_typ
))
)
edge_labels_per_type.append(edge_labels_ta.read(
substep_id * self.num_labeled_edge_types + labeled_edge_typ
))
for unlabeled_edge_typ in range(self.num_unlabeled_edge_types):
shifted_edge_typ = self.num_labeled_edge_types + unlabeled_edge_typ
sending_states_per_edge_type.append(new_node_states_ta.gather(
sending_nodes_ta.read(substep_id * self.num_edge_types + shifted_edge_typ)
))
# Collect old states for receiving nodes
substep_receiving_nodes = receiving_nodes_ta.read(substep_id)
old_receiving_node_states = tf.gather(cur_node_states, substep_receiving_nodes)
old_receiving_node_states.set_shape([None, self.hyperparams['hidden_size']])
msg_targets_this_step = msg_targets_ta.read(substep_id)
receiving_node_num_this_step = receiving_node_num_ta.read(substep_id)
substep_new_node_states = self.propagate_one_step(
sending_states_per_edge_type, edge_labels_per_type,
msg_targets_this_step, receiving_node_num_this_step,
old_receiving_node_states
)
# Write updated states back:
new_node_states_ta = new_node_states_ta.scatter(indices=substep_receiving_nodes,
value=substep_new_node_states,
name="state_scatter_round%i" % (prop_round,))
return substep_id + 1, new_node_states_ta
                    def keep_going(substep_id, new_node_states_ta_unused):
                        # Loop condition: continue while substeps remain and the current substep has receiving nodes.
                        return tf.logical_and(substep_id < self.hyperparams['propagation_substeps'],
                                              tf.greater(tf.shape(receiving_nodes_ta.read(substep_id))[0], 0))
                    _, new_node_states_ta = tf.while_loop(cond=keep_going,
                                                          body=do_substep,
                                                          loop_vars=[tf.constant(0), new_node_states_ta]
                                                          )
cur_node_states = new_node_states_ta.stack(name="state_stack_round%i" % (prop_round,))
return cur_node_states
def propagate_one_step(self,
sending_states_per_edge_type: List[tf.Tensor],
edge_labels_per_type: List[tf.Tensor],
msg_targets_this_step: tf.Tensor,
receiving_node_num_this_step: tf.Tensor,
old_receiving_node_states: tf.Tensor) -> tf.Tensor:
sent_messages = []
for (edge_typ, sending_state_representations) in enumerate(sending_states_per_edge_type):
if edge_typ < self.num_labeled_edge_types:
messages = tf.matmul(tf.concat([sending_state_representations, edge_labels_per_type[edge_typ]],
axis=-1),
self.__parameters['labeled_edge_weights'][edge_typ])
if self.hyperparams['use_edge_bias']:
messages += self.__parameters['labeled_edge_biases'][edge_typ]
else:
shifted_edge_typ = edge_typ - self.num_labeled_edge_types
messages = tf.matmul(
sending_state_representations, self.__parameters['unlabeled_edge_weights'][shifted_edge_typ]
)
if self.hyperparams['use_edge_bias']:
messages += self.__parameters['unlabeled_edge_biases'][shifted_edge_typ]
sent_messages.append(messages)
# Stack all edge messages and aggregate as sum for each receiving node:
sent_messages = tf.concat(sent_messages, axis=0)
aggregated_received_messages = tf.unsorted_segment_sum(
sent_messages, msg_targets_this_step, receiving_node_num_this_step
)
# Combine old states in RNN cell with incoming messages
aggregated_received_messages.set_shape([None, self.hyperparams['hidden_size']])
new_node_states = self.__parameters['rnn_cell'](aggregated_received_messages,
old_receiving_node_states)[1]
return new_node_states
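# Minimal construction sketch (an assumption, not part of the original file): the code
# above targets the TensorFlow 1.x API (tf.get_variable, tf.while_loop, tf.nn.rnn_cell),
# so the layer is built inside an explicit graph. Building the full propagation-schedule
# tensors for async_ggnn_layer is task-specific and omitted here.
if __name__ == "__main__":
    hypers = AsyncGGNN.default_params()
    with tf.Graph().as_default():
        ggnn = AsyncGGNN(hypers)  # creates the edge weight matrices and the GRU cell
        print(sorted(ggnn.parameters.keys()))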
|
#
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>
#
"""The data module provides interfaces and implementations to generate tf
tensors for patches of multi resolution data.
We have the following assumptions:
1. The data have n spatial dimensions (e.g. 1 for sound, 2 for images, etc.)
2. The data can be expressed in a number of different discrete scales which we
call **levels**.
3. Each level depicts the same spatial region, which means that we have a
   trivial correspondence between levels by associating each value with (1/s)^n
   values of the higher resolution centered around that value, where n is the
   number of dimensions and s is the scale-down factor between levels.
"""
from .from_tensors import FromTensors
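# For intuition (illustrative numbers, not from the package): with 2-D images (n = 2) and
# a scale-down factor s = 1/2 between consecutive levels, each coarse-level value
# corresponds to (1/s)**n = 4 finer-level values centered around it.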
|
from types import SimpleNamespace
import numpy as np
import toml
import matplotlib.pyplot as plt
from matplotlib.widgets import Cursor
import time
from unicycle import simulate_unicycle
from collision_check import circle_collision_check
from lidar import Lidar
config = toml.load("config.toml")
config_params = config['params']
params = SimpleNamespace(**config_params)
grid_data = None
grid_res = 1
class DWA:
"""Implementation of DWA
After initializing with grid_data, this class can be used as an iterator to get the simulation progress.
"""
def __init__(self, grid_data, ref_path, start_pose, goal_threshold=0.3, grid_res=1, reality=None) -> None:
self.grid_data = grid_data
if reality is None:
self.reality = grid_data.copy()
else:
self.reality = reality # Dynamic data
self.ref_path = ref_path
self.start_pose = start_pose
self.goal_threshold = goal_threshold
self.grid_res = grid_res
self.path_index = 0
self.pose = start_pose
self.v, self.w = 0.0, 0.0
self.failed_attempts = -1
self.logs = []
self.path_index = 0
self.lidar = Lidar(max_dist=params.lidar_dist)
self.lidar.set_env(self.reality, self.grid_res)
def _command_window(self, v, w, dt=0.1):
"""Returns acceptable v,w commands given current v,w"""
# velocity can be (0, V_MAX)
# ACC_MAX = max linear acceleration
v_max = min(params.V_MAX, v + params.ACC_MAX*dt)
v_min = max(0, v - params.ACC_MAX*dt)
# omega can be (-W_MAX, W_MAX)
# W_DOT_MAX = max angular acceleration
epsilon = 1e-6
w_max = min(params.W_MAX, w + params.W_DOT_MAX*dt)
w_min = max(-params.W_MAX, w - params.W_DOT_MAX*dt)
# generate quantized range for v and omega
vs = np.linspace(v_min, v_max, num=11)
ws = np.linspace(w_min, w_max, num=11)
# cartesian product of [vs] and [ws]
# remember there are 0 velocity entries which have to be discarded eventually
commands = np.transpose([np.tile(vs, len(ws)), np.repeat(ws, len(vs))])
# calculate kappa for the set of commands
kappa = commands[:, 1]/(commands[:, 0]+epsilon)
# returning only commands < max curvature
return commands[(kappa < params.K_MAX) & (commands[:, 0] != 0)]
    def _track(self, ref_path, pose, v, w, dt=0.1, grid_data=None,
               detect_collision=True, grid_res=None):
        # The previous defaults were bound to module-level globals at definition time;
        # fall back to this instance's grid and resolution instead.
        if grid_data is None:
            grid_data = self.grid_data
        if grid_res is None:
            grid_res = self.grid_res
        commands = self._command_window(v, w, dt)
# initialize path cost
best_cost, best_command = np.inf, None
best_local_path = None
for i, (v, w) in enumerate(commands):
# Number of steps = prediction horizon
local_path = simulate_unicycle(pose, v, w, params.pred_horizon, dt)
if detect_collision:
# ignore colliding paths
hit, distance = circle_collision_check(
grid_data, local_path, grid_res=grid_res)
if hit:
print("local path has a collision")
continue
else:
distance = np.inf
# calculate cross-track error
# can use a simplistic definition of
# how close is the last pose in local path from the ref path
cte = np.linalg.norm(
ref_path[:, 0:2]-local_path[:len(ref_path), 0:2], axis=-1)
cte = cte * np.linspace(0,1,len(ref_path))
cte = np.sum(cte)
# print(cte)
# other cost functions are possible
# can modify collision checker to give distance to closest obstacle
cost = params.w_cte*cte + params.w_speed*(params.V_MAX - v)**2 + params.w_obs / distance
# check if there is a better candidate
if cost < best_cost:
best_cost, best_command = cost, [v, w]
best_local_path = local_path
if best_command:
return best_command, best_local_path
else:
return [0, 0], best_local_path
def __iter__(self):
self.path_index = 0
self.logs = []
return self
def reset(self):
self.path_index = 0
self.logs = []
return self
def __next__(self):
if self.path_index > len(self.ref_path)-1:
raise StopIteration
local_ref_path = self.ref_path[self.path_index:self.path_index+params.pred_horizon]
if self.goal_threshold > np.min(np.hypot(local_ref_path[:, 0]-self.pose[0],
local_ref_path[:, 1]-self.pose[1])):
self.path_index = self.path_index + 1
self.failed_attempts += 1
if self.failed_attempts > 1600:
self.path_index += 1
self.failed_attempts = -1
# get next command
(self.v, self.w), best_local_path = self._track(local_ref_path, self.pose, self.v, self.w, dt=params.dt,
detect_collision=True, grid_data=self.grid_data)
# simulate vehicle for 1 step
# remember the function now returns a trajectory, not a single pose
self.pose = simulate_unicycle(self.pose, self.v, self.w, N=1, dt=params.dt)[0]
self.lidar.set_env(self.reality, self.grid_res)
distances, collision_points = self.lidar.sense_obstacles(self.pose)
# Add obstacles to grid data
for point in collision_points:
if point[0] != -1:
i, j = point
self.grid_data[i, j] = 1
# update logs
self.logs.append([*self.pose, self.v, self.w, self.path_index])
print(self.path_index)
return np.array(self.logs), distances, best_local_path
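# Hedged usage sketch (commented out because the expected array layouts are assumptions
# not confirmed by this file): grid_data is a 2-D occupancy grid and ref_path a sequence
# of (x, y, theta) poses produced elsewhere in this project.
#
# if __name__ == "__main__":
#     grid = np.zeros((100, 100))                        # all free space (assumed layout)
#     ref = np.array([[0.5 * i, 0.0, 0.0] for i in range(60)])
#     planner = DWA(grid, ref, start_pose=[0.0, 0.0, 0.0], grid_res=1)
#     for logs, distances, local_path in planner:        # each logs row: x, y, theta, v, w, path_index
#         pass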
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from socket import gethostname, getfqdn
from ansible.module_utils.basic import AnsibleModule, os, re
DOCUMENTATION = '''
---
module: oracle_gi_facts
short_description: Returns some facts about Grid Infrastructure environment
description:
- Returns some facts about Grid Infrastructure environment
- Must be run on a remote host
version_added: "0.8.0"
options:
oracle_home:
description:
- Grid Infrastructure home, can be absent if ORACLE_HOME environment variable is set
required: false
notes:
- Oracle Grid Infrastructure 12cR1 or later required
- Must be run as (become) GI owner
author: Ilmar Kerm, ilmar.kerm@gmail.com, @ilmarkerm
'''
EXAMPLES = '''
---
- hosts: localhost
vars:
oracle_env:
ORACLE_HOME: /u01/app/grid/product/12.1.0.2/grid
tasks:
- name: Return GI facts
oracle_gi_facts:
environment: "{{ oracle_env }}"
'''
# The following is to make the module usable in python 2.6 (RHEL6/OEL6)
# Source: http://pydoc.net/pep8radius/0.9.0/pep8radius.shell/
try:
from subprocess import check_output, CalledProcessError
except ImportError: # pragma: no cover
# python 2.6 doesn't include check_output
# monkey patch it in!
import subprocess
STDOUT = subprocess.STDOUT
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs: # pragma: no cover
raise ValueError('stdout argument not allowed, '
'it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs, **kwargs)
output, _ = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd,
output=output)
return output
subprocess.check_output = check_output
# overwrite CalledProcessError due to `output`
# keyword not being available (in 2.6)
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode)
subprocess.CalledProcessError = CalledProcessError
def is_executable(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def exec_program_lines(arguments):
try:
output = check_output(arguments)
return output.splitlines()
except CalledProcessError:
# Just ignore the error
return ['']
def exec_program(arguments):
return exec_program_lines(arguments)[0]
def hostname_to_fqdn(hostname):
if "." not in hostname:
return getfqdn(hostname)
else:
return hostname
def local_listener():
global srvctl, shorthostname, iscrs, vips
args = [srvctl, 'status', 'listener']
if iscrs:
args += ['-n', shorthostname]
listeners_out = exec_program_lines(args)
re_listener_name = re.compile('Listener (.+) is enabled')
listeners = []
out = []
for l in listeners_out:
if "is enabled" in l:
m = re_listener_name.search(l)
listeners.append(m.group(1))
for l in listeners:
config = {}
output = exec_program_lines([srvctl, 'config', 'listener', '-l', l])
for line in output:
if line.startswith('Name:'):
config['name'] = line[6:]
elif line.startswith('Type:'):
config['type'] = line[6:]
elif line.startswith('Network:'):
config['network'] = line[9:line.find(',')]
elif line.startswith('End points:'):
config['endpoints'] = line[12:]
for proto in config['endpoints'].split('/'):
p = proto.split(':')
config[p[0].lower()] = p[1]
if "network" in config.keys():
config['address'] = vips[config['network']]['fqdn']
config['ipv4'] = vips[config['network']]['ipv4']
config['ipv6'] = vips[config['network']]['ipv6']
out.append(config)
return out
def scan_listener():
global srvctl, shorthostname, iscrs, networks, scans
out = {}
for n in networks.keys():
output = exec_program_lines([srvctl, 'config', 'scan_listener', '-k', n])
for line in output:
endpoints = None
# 19c
m = re.search('Endpoints: (.+)', line)
if m is not None:
endpoints = m.group(1)
else:
# 18c, 12c
m = re.search('SCAN Listener (.+) exists. Port: (.+)', line)
if m is not None:
endpoints = m.group(2)
if endpoints:
out[n] = {'network': n, 'scan_address': scans[n]['fqdn'], 'endpoints': endpoints,
'ipv4': scans[n]['ipv4'], 'ipv6': scans[n]['ipv6']}
for proto in endpoints.split('/'):
p = proto.split(':')
out[n][p[0].lower()] = p[1]
break
return out
def get_networks():
global srvctl, shorthostname, iscrs
out = {}
item = {}
output = exec_program_lines([srvctl, 'config', 'network'])
for line in output:
m = re.search('Network ([0-9]+) exists', line)
if m is not None:
if "network" in item.keys():
out[item['network']] = item
item = {'network': m.group(1)}
elif line.startswith('Subnet IPv4:'):
item['ipv4'] = line[13:]
elif line.startswith('Subnet IPv6:'):
item['ipv6'] = line[13:]
if "network" in item.keys():
out[item['network']] = item
return out
def get_vips():
global srvctl, shorthostname, iscrs
output = exec_program_lines([srvctl, 'config', 'vip', '-n', shorthostname])
vip = {}
out = {}
for line in output:
if line.startswith('VIP exists:'):
if "network" in vip.keys():
out[vip['network']] = vip
vip = {}
m = re.search('network number ([0-9]+),', line)
vip['network'] = m.group(1)
elif line.startswith('VIP Name:'):
vip['name'] = line[10:]
vip['fqdn'] = hostname_to_fqdn(vip['name'])
elif line.startswith('VIP IPv4 Address:'):
vip['ipv4'] = line[18:]
elif line.startswith('VIP IPv6 Address:'):
vip['ipv6'] = line[18:]
if "network" in vip.keys():
out[vip['network']] = vip
return out
def get_scans():
global srvctl, shorthostname, iscrs
out = {}
item = {}
output = exec_program_lines([srvctl, 'config', 'scan', '-all'])
for line in output:
if line.startswith('SCAN name:'):
if "network" in item.keys():
out[item['network']] = item
m = re.search('SCAN name: (.+), Network: ([0-9]+)', line)
item = {'network': m.group(2), 'name': m.group(1), 'ipv4': [], 'ipv6': []}
item['fqdn'] = hostname_to_fqdn(item['name'])
else:
m = re.search('SCAN [0-9]+ (IPv[46]) VIP: (.+)', line)
if m is not None:
item[m.group(1).lower()] += [m.group(2)]
if "network" in item.keys():
out[item['network']] = item
return out
# Ansible code
def main():
global module, shorthostname, hostname, srvctl, crsctl, cemutlo, iscrs, vips, networks, scans
msg = ['']
module = AnsibleModule(
argument_spec=dict(
oracle_home=dict(required=False)
),
supports_check_mode=True
)
# Preparation
facts = {}
if module.params["oracle_home"]:
os.environ['ORACLE_HOME'] = module.params["oracle_home"]
srvctl = os.path.join(os.environ['ORACLE_HOME'], 'bin', 'srvctl')
crsctl = os.path.join(os.environ['ORACLE_HOME'], 'bin', 'crsctl')
cemutlo = os.path.join(os.environ['ORACLE_HOME'], 'bin', 'cemutlo')
if not is_executable(srvctl) or not is_executable(crsctl):
module.fail_json(changed=False,
msg="Are you sure ORACLE_HOME=%s points to GI home?"
" I can't find executables srvctl or crsctl under bin/." %
os.environ['ORACLE_HOME'])
iscrs = True # This needs to be dynamically set if it is full clusterware or Oracle restart
hostname = gethostname()
shorthostname = hostname.split('.')[0]
#
if module.check_mode:
module.exit_json(changed=False)
# Cluster name
if iscrs:
facts.update({'clustername': exec_program([cemutlo, '-n'])})
else:
facts.update({'clustername': 'ORACLE_RESTART'})
# Cluster version
if iscrs:
version = exec_program([crsctl, 'query', 'crs', 'activeversion'])
else:
version = exec_program([crsctl, 'query', 'has', 'releaseversion'])
m = re.search(r'\[([0-9.]+)\]$', version)
facts.update({'version': m.group(1)})
# VIPS
    vips = get_vips()
    # Wrap dict views in list() so the facts stay JSON-serialisable under Python 3.
    facts.update({'vip': list(vips.values())})
    # Networks
    networks = get_networks()
    facts.update({'network': list(networks.values())})
    # SCANs
    scans = get_scans()
    facts.update({'scan': list(scans.values())})
    # Listener
    facts.update({'local_listener': local_listener()})
    facts.update({'scan_listener': list(scan_listener().values()) if iscrs else []})
# Databases
facts.update({'database_list': exec_program_lines([srvctl, 'config', 'database'])})
# Output
module.exit_json(msg=", ".join(msg), changed=False, ansible_facts=facts)
if __name__ == '__main__':
main()
|
# Generated by Django 2.1.5 on 2019-04-06 16:19
import analyzer.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analyzer', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='analysis',
name='result',
field=models.CharField(choices=[(analyzer.models.AnalysisResult('Nothing malicious found'), 'Nothing malicious found'), (analyzer.models.AnalysisResult('Malicious indicators found'), 'Malicious indicators found'), (analyzer.models.AnalysisResult('Error during analysis'), 'Error during analysis')], max_length=25),
),
migrations.AlterField(
model_name='analysis',
name='status',
field=models.CharField(choices=[(analyzer.models.AnalysisStatus('Upload in progress'), 'Upload in progress'), (analyzer.models.AnalysisStatus('Extraction of indicator'), 'Extraction of indicator'), (analyzer.models.AnalysisStatus('Research of indicators in the database'), 'Research of indicators in the database'), (analyzer.models.AnalysisStatus('Error'), 'Error'), (analyzer.models.AnalysisStatus('Done'), 'Done')], max_length=25),
),
]
|
"""Oversampling Functions"""
# Authors: Jeffrey Wang
# License: BSD 3 clause
import numpy as np
from sklearn.mixture import GaussianMixture as GMM
from sleepens.utils import separate_by_label, create_random_state
def balance(data, labels, desired=None, balance='auto',
seed=None, verbose=0):
"""
Balance a dataset through generative oversampling.
Model a generative algorithm over each of the
desired labelled data to sample more data from
the resulting distributions.
Parameters
----------
data : array-like, shape=(n_samples, n_features)
Data.
labels : array-like, shape=(n_samples,)
Labels corresponding to data.
desired : {None, array-like}, default=None
List of labels to oversample.
If None, balances all labels.
balance : {'auto', int, dict}, default='auto'
Determines how to balance and oversample.
Must be one of:
- 'auto' : Automatically balance the dataset
by oversampling `desired` labels to
match the number of samples in the majority
label. Recommended to set `desired` to None,
or risk not oversampling a minority label.
- int : Oversample `desired` labels so that total
such samples reach this value.
- dict : Oversample each label by the value given.
seed : None or int or RandomState, default=None
Initial seed for the RandomState. If seed is None,
return the RandomState singleton. If seed is an int,
return a RandomState with the seed set to the int.
If seed is a RandomState, return that RandomState.
verbose : int, default=0
Verbosity; higher values result in
more verbose output.
Returns
-------
data : ndarray
All oversampled data, shuffled.
labels : ndarray
All corresponding labels.
"""
if len(data) == 0:
		if verbose > 0 : print("No data to oversample")
return np.array([]), np.array([])
if verbose > 0 : print("Balancing Dataset")
data = separate_by_label(data, labels)
separated = data
if desired is not None:
data_labels = list(data.keys())
for d in desired:
			if d not in data_labels:
				raise ValueError("Desired label %s is not in the data" % str(d))
separated = {d: separated[d] for d in desired}
if balance == 'auto' or isinstance(balance, int):
if balance == 'auto':
if verbose > 0 : print("Balancing Dataset. Method set to 'auto'")
target = np.max([len(data[k]) for k in data.keys()])
else:
if verbose > 0 : print("Balancing Dataset up to", str(balance))
target = balance
n_samples = {k: target - len(separated[k]) for k in separated.keys()}
n_samples = {k: 0 if n_samples[k] < 0 else n_samples[k] for k in separated.keys()}
elif isinstance(balance, (dict)):
if verbose > 0 : print("Balancing Dataset. Method set to custom")
n_samples = balance
else:
raise ValueError("Balance must be {'auto', int, dict}")
if verbose > 0 : print("Oversampling")
return _generative_oversample(separated, n_samples, seed=seed, verbose=verbose)
def scale(data, labels, factor=1, seed=None, verbose=0):
"""
Oversample the dataset.
Model a generative algorithm over each of the
desired labelled data to sample more data from
the resulting distributions.
Parameters
----------
data : array-like, shape=(n_samples, n_features)
Data.
labels : array-like, shape=(n_samples,)
Labels corresponding to data.
factor : float, default=1
Factor to oversample the dataset by.
seed : None or int or RandomState, default=None
Initial seed for the RandomState. If seed is None,
return the RandomState singleton. If seed is an int,
return a RandomState with the seed set to the int.
If seed is a RandomState, return that RandomState.
verbose : int, default=0
Verbosity; higher values result in
more verbose output.
Returns
-------
data : ndarray
All oversampled data, shuffled.
labels : ndarray
All corresponding labels.
"""
if factor <= 0 : raise ValueError("Factor must be positive")
if len(data) == 0:
if verbose > 0 : print("No data to oversample")
return np.array([]), np.array([])
if verbose > 0 : print("Scaling Dataset")
separated = separate_by_label(data, labels)
n_samples = {k: int(factor * len(separated[k])) for k in separated.keys()}
return _generative_oversample(separated, n_samples, seed=seed, verbose=verbose)
def sample(data, labels, sizes, seed=None, verbose=0):
"""
Sample from the dataset.
Model a generative algorithm over each of the
desired labelled data to sample more data from
the resulting distributions.
Parameters
----------
data : array-like, shape=(n_samples, n_features)
Data.
labels : array-like, shape=(n_samples,)
Labels corresponding to data.
sizes : dict
Number of samples for each class.
seed : None or int or RandomState, default=None
Initial seed for the RandomState. If seed is None,
return the RandomState singleton. If seed is an int,
return a RandomState with the seed set to the int.
If seed is a RandomState, return that RandomState.
verbose : int, default=0
Verbosity; higher values result in
more verbose output.
Returns
-------
data : ndarray
All oversampled data, shuffled.
labels : ndarray
All corresponding labels.
"""
if len(data) == 0:
if verbose > 0 : print("No data to oversample")
return np.array([]), np.array([])
if verbose > 0 : print("Sampling Dataset")
separated = separate_by_label(data, labels)
return _generative_oversample(separated, sizes, seed=seed, verbose=verbose)
def _generative_oversample(data_labels, n_samples, seed=None, verbose=0):
"""
Generatively oversample the data.
Parameters
----------
data_labels : dict
Dictionary of data categorized by label.
n_samples : dict
Dictionary of the number of samples to oversample
each label.
seed : None or int or RandomState, default=None
Initial seed for the RandomState. If seed is None,
return the RandomState singleton. If seed is an int,
return a RandomState with the seed set to the int.
If seed is a RandomState, return that RandomState.
verbose : int, default=0
Verbosity; higher values result in
more verbose output.
Returns
-------
data : ndarray
All oversampled data, shuffled.
labels : ndarray
All corresponding labels.
"""
oversampled = {}
for label in data_labels:
if n_samples[label] == 0 or len(data_labels[label]) < 2 : continue
if verbose > 0 : print("\tModelling distribution for", str(label))
model = _fit_cluster(data_labels[label], seed=seed)
if verbose > 0 : print("\tSampling data for", str(label))
model.weights_ = (model.weights_ / np.sum(model.weights_)).astype(np.float64)
oversampled[label] = model.sample(n_samples[label])[0]
if verbose > 0 : print("Collating and shuffling")
new_set = []
for k in oversampled.keys():
length = len(oversampled[k])
labels = np.array([k]*length).reshape(length, 1)
new_set += list(np.concatenate((oversampled[k], labels), axis=1))
create_random_state(seed=seed).shuffle(new_set)
new_set = np.array(new_set)
if new_set.size == 0 : return np.array([]), np.array([]).astype(int)
return new_set[:,:-1], new_set[:,-1].astype(int)
def _fit_cluster(data, seed=None):
"""
Fit a Gaussian Mixture Model to the given data.
Parameters
----------
data : array-like, shape=(n_samples, n_features)
Data.
seed : None or int or RandomState, default=None
Initial seed for the RandomState. If seed is None,
return the RandomState singleton. If seed is an int,
return a RandomState with the seed set to the int.
If seed is a RandomState, return that RandomState.
Returns
-------
model : GaussianMixture
		The best fitted Gaussian Mixture Model as determined
by the mean of the BIC and AIC for the respective model.
"""
data = np.array(data)
models = []
abic = []
n_components = min([len(data), 10])
for i in range(n_components):
if len(data) < 2 * (i+1) : continue
m = GMM(n_components=i+1, n_init=5, random_state=seed)
m.fit(data)
models.append(m)
abic.append(np.mean([m.bic(data), m.aic(data)]))
return models[np.argmin(abic)]
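# Hedged usage sketch (assumes the sleepens.utils helpers imported above behave as their
# names suggest; the synthetic data below is illustrative only). Note that balance()
# returns only the newly generated samples, not the original dataset.
if __name__ == "__main__":
	rng = np.random.RandomState(0)
	X = np.vstack([rng.normal(0, 1, (40, 3)), rng.normal(5, 1, (10, 3))])
	y = np.array([0] * 40 + [1] * 10)
	# 'auto' generates enough label-1 samples (here 30) so the minority class
	# would match the 40-sample majority once combined with the originals.
	X_new, y_new = balance(X, y, balance='auto', seed=0)
	print(X_new.shape, np.bincount(y_new))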
|
from unittest import TestCase
from pylinac import tg51
class TestFunctions(TestCase):
def test_p_tp(self):
temps = (22, 25, 19)
presss = (760, 770, 740)
expected_ptp = (1.0, 0.997, 1.0165)
for temp, press, exp in zip(temps, presss, expected_ptp):
self.assertAlmostEqual(tg51.p_tp(temp, press), exp, delta=0.001)
def test_p_pol(self):
m_ref = (20, -20.2, 19.8)
m_opp = (-20, 19.8, -20.1)
expected_ppol = (1.0, 0.99, 1.0075)
for ref, opp, exp in zip(m_ref, m_opp, expected_ppol):
self.assertAlmostEqual(tg51.p_pol(ref, opp), exp, delta=0.001)
def test_p_ion(self):
low_vals = (20, 20.05)
high_vals = (20, 20.1)
expected_pion = (1.0, 1.0025)
for low, high, exp in zip(low_vals, high_vals, expected_pion):
self.assertAlmostEqual(tg51.p_ion(300, 150, high, low), exp, delta=0.001)
def test_dref(self):
i50s = (3, 5, 7)
drefs = (1.72, 2.96, 4.19)
for i50, dref in zip(i50s, drefs):
self.assertAlmostEqual(tg51.d_ref(i50), dref, delta=0.01)
def test_r50(self):
i50s = (3.5, 5.5, 12)
r50s = (3.54, 5.60, 18.71)
for i50, r50 in zip(i50s, r50s):
self.assertAlmostEqual(tg51.r_50(i50), r50, delta=0.01)
def test_m_corr(self):
exp = 20.225
res = tg51.m_corrected(1.01, 0.995, 1, 1.005, (20, 20.05))
self.assertAlmostEqual(exp, res, delta=0.002)
def test_pddx(self):
pdds = (66.4, 70.5, 72.8, 73.3, 76.7, 77.1, 77.1, 79.3)
energies = (6, 10, 10, 10, 15, 15, 15, 18)
pddxs = (66.4, 70.5, 72.8, 72.87, 77.18, 77.57, 78.27, 80.47)
foils = (None, '30cm', '50cm', None, None, '50cm', '30cm', None)
for pdd, energy, pddx, foil in zip(pdds, energies, pddxs, foils):
self.assertAlmostEqual(tg51.pddx(pdd, energy, foil), pddx, delta=0.01)
def test_kq(self):
# Test via PDDs
models = ('30010', 'A12')
pddxs = (66.4, 76.7)
kqs = (0.9927, 0.976)
for model, pddx, kq in zip(models, pddxs, kqs):
self.assertAlmostEqual(tg51.kq(model, pddx), kq, delta=0.001)
# test via TPRs
tprs = (0.65, 0.76)
kqs = (0.994, 0.975)
for model, tpr, kq in zip(models, tprs, kqs):
self.assertAlmostEqual(tg51.kq(model, tpr=tpr), kq, delta=0.001)
        # neither TPR nor PDD passed
with self.assertRaises(ValueError):
tg51.kq()
# both defined
with self.assertRaises(ValueError):
tg51.kq(pddx=0.66, tpr=0.72)
# PDD too low
with self.assertRaises(ValueError):
tg51.kq(pddx=61)
# TPR too high
with self.assertRaises(ValueError):
tg51.kq(tpr=81)
class TestTG51Base:
temperature = 22
pressure = 760
model = '30013'
nd_w = 5.555
p_elec = 1.000
volt_high = -300
volt_low = -150
m_raw = (20, 20, 20)
m_opp = (20, 20, 20)
m_low = (20, 20, 20)
clinical_pdd = 66
dose_mu_dmax = 1.000
tissue_correction = 1.000
mu = 200
def test_dose_dmax(self):
self.assertAlmostEqual(self.dose_mu_dmax, self.tg51.dose_mu_dmax, delta=0.0005)
class TestTG51Photon(TestTG51Base):
energy = 6
measured_pdd = 66
lead_foil = None
dose_mu_10 = 1.000
def setUp(self):
self.tg51 = tg51.TG51Photon(temp=self.temperature, press=self.pressure,
model=self.model, n_dw=self.nd_w, p_elec=self.p_elec,
measured_pdd=self.measured_pdd, lead_foil=self.lead_foil,
clinical_pdd=self.clinical_pdd, energy=self.energy,
volt_high=self.volt_high, volt_low=self.volt_low,
m_raw=self.m_raw, m_opp=self.m_opp, m_low=self.m_low,
mu=self.mu, tissue_correction=self.tissue_correction)
def test_dose_10(self):
self.assertAlmostEqual(self.dose_mu_10, self.tg51.dose_mu_10, delta=0.0005)
class TestTG51Electron(TestTG51Base):
k_ecal = 1.000
i_50 = 7.5
dose_mu_dref = 1.000
def setUp(self):
self.tg51 = tg51.TG51Electron(temp=self.temperature, press=self.pressure,
model=self.model, n_dw=self.nd_w, p_elec=self.p_elec,
clinical_pdd=self.clinical_pdd,
volt_high=self.volt_high, volt_low=self.volt_low,
m_raw=self.m_raw, m_opp=self.m_opp, m_low=self.m_low,
mu=self.mu, tissue_correction=self.tissue_correction,
i_50=self.i_50)
def test_dose_dref(self):
self.assertAlmostEqual(self.dose_mu_dref, self.tg51.dose_mu_dref, delta=0.0005)
class MDATB2_2015(TestTG51Photon, TestCase):
temperature = 20.5
pressure = 760
energy = 15
nd_w = 5.444
p_elec = 1.002
m_raw = 29.28
m_opp = -29.33
m_low = 29.10
measured_pdd = 76.9
clinical_pdd = 77.4
dose_mu_10 = 0.779
dose_mu_dmax = 1.007
class MDATB1_2015(TestTG51Photon, TestCase):
temperature = 21
pressure = 763
nd_w = 5.393
energy = 10
p_elec = 1.003
m_raw = 27.727
m_opp = 27.784
m_low = 27.635
measured_pdd = 73.42
clinical_pdd = 73.5
dose_mu_10 = 0.734
dose_mu_dmax = 0.999
class ACB5_2011(TestTG51Photon, TestCase):
temperature = 22
pressure = 751.2
nd_w = 5.450
m_raw = 24.82
m_opp = -24.83
m_low = 24.79
measured_pdd = 66.8
clinical_pdd = 66.8
tissue_correction = 0.99
dose_mu_10 = 0.672
dose_mu_dmax = 1.0066
class ACB5_2012_6X(TestTG51Photon, TestCase):
temperature = 21.7
pressure = 757.2
nd_w = 5.446
m_raw = 25.27
m_opp = -25.19
m_low = 25.17
measured_pdd = 66.8
clinical_pdd = 66.8
tissue_correction = 0.99
dose_mu_10 = 0.679
dose_mu_dmax = 1.0159
class ACB5_2012_18X(TestTG51Photon, TestCase):
temperature = 21.7
pressure = 757.2
nd_w = 5.446
m_raw = 30.67
m_opp = -30.65
m_low = 30.50
energy = 18
measured_pdd = 79.5
clinical_pdd = 79.7
tissue_correction = 0.99
lead_foil = None
dose_mu_10 = 0.8059
dose_mu_dmax = 1.011
class IMMCTB_15X(TestTG51Photon, TestCase):
temperature = 22.4
pressure = 748.1
nd_w = 5.394
m_raw = 14.307
m_opp = -14.323
m_low = 14.22
energy = 15
measured_pdd = 76.79
clinical_pdd = 76.7
lead_foil = '30cm'
mu = 100
dose_mu_10 = 0.769
dose_mu_dmax = 1.002
class ACB5_2011_9E(TestTG51Electron, TestCase):
temperature = 21.6
pressure = 751.9
nd_w = 5.45
m_raw = 39.79
m_opp = -39.71
m_low = 39.33
i_50 = 3.87
clinical_pdd = 100
tissue_correction = 0.99
dose_mu_dref = 0.997
dose_mu_dmax = 0.997
class ACB5_2012_16E(TestTG51Electron, TestCase):
temperature = 21.5
pressure = 758
nd_w = 5.446
m_raw = 40.71
m_opp = -40.71
m_low = 40.22
i_50 = 6.42
clinical_pdd = 99.5
k_ecal = 0.897
m_plus = 40.71
tissue_correction = 0.99
dose_mu_dref = 1.000
dose_mu_dmax = 1.005
class IMMC_TB_9E(TestTG51Electron, TestCase):
mu = 100
temperature = 22
pressure = 748.2
p_elec = 0.999
nd_w = 5.394
m_raw = 19.877
m_opp = 19.933
m_low = 19.643
i_50 = 3.55
clinical_pdd = 100
tissue_correction = 1.0
dose_mu_dref = 1.006
dose_mu_dmax = 1.006
|
"""
Name: create_semantic_images.py
Desc: Creates RGB images using texture UV maps.
"""
# Import these two first so that we can import other packages
from __future__ import division
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import io_utils
# Import remaining packages
import bpy
from bpy import context, data, ops
import bpy_extras.mesh_utils
import bmesh
import trimesh
from collections import defaultdict, Counter
import glob
import json
import math
from mathutils import Vector, Euler, Color
import numpy as np
import random
import settings
import shutil # Temporary dir
import time
import tempfile # Temporary dir
import utils
import uuid as uu
from utils import Profiler
from plyfile import *
from scipy.signal import find_peaks
from create_images_utils import *
SCRIPT_DIR_ABS_PATH = os.path.dirname(os.path.realpath(__file__))
TASK_NAME = 'rgb'
utils.set_random_seed()
basepath = settings.MODEL_PATH
def main():
global basepath
global TASK_NAME
utils.delete_all_objects_in_context()
model = io_utils.import_mesh(basepath)
if settings.CREATE_PANOS:
        engine = 'CYCLES'
else:
engine = 'BI'
# add_face_materials(engine, model)
point_infos = io_utils.load_saved_points_of_interest(basepath)
    ########################
bpy.context.scene.objects.active = model
model.select = True
mat = bpy.data.materials.new('material_1')
model.active_material = mat
mat.use_vertex_color_paint = True
bpy.ops.paint.vertex_paint_toggle()
scn = bpy.context.scene
if len(bpy.context.active_object.data.materials) == 0:
bpy.context.active_object.data.materials.append(bpy.data.materials['Material'])
print("!!!! if")
else:
bpy.context.active_object.data.materials[0] = bpy.data.materials['Material']
print("!!!! else")
# scn.render.alpha_mode = 'TRANSPARENT'
# bpy.data.worlds["World"].light_settings.use_ambient_occlusion = True
    #####################
# print("!!!!!!!!!!!!1 ", model.name)
# # model.select_set(True)
# # bpy.data.objects[model.name].select_set(True)
# bpy.ops.paint.vertex_paint_toggle()
# #bpy.context.area.ui_type = 'ShaderNodeTree'
# #bpy.ops.material.new()
# mat = bpy.data.materials.get("Material")
# print("!!!!!!!!!!!!! mar: ", mat)
# if len(bpy.context.active_object.data.materials) == 0:
# bpy.context.active_object.data.materials.append(bpy.data.materials['Material'])
# print("!!!! if")
# else:
# bpy.context.active_object.data.materials[0] = bpy.data.materials['Material']
# print("!!!! else")
# if mat:
# bpy.context.scene.use_nodes = True
# mat.node_tree.nodes.new("ShaderNodeVertexColor")
# mat.node_tree.links.new(mat.node_tree.nodes[2].outputs['Color'], mat.node_tree.nodes[1].inputs['Base Color'])
# # bpy.context.scene.render.filepath = '~/Desktop/photos/img.jpg'
# # bpy.context.scene.render.engine = 'CYCLES'
# # bpy.ops.render.render('INVOKE_DEFAULT', write_still=True)
############################
# render + save
for point_info in point_infos:
for view_number, view_dict in enumerate(point_info):
view_id = view_number if settings.CREATE_PANOS else view_dict['view_id']
setup_and_render_image(TASK_NAME, basepath,
clean_up=True,
execute_render_fn=render_rgb_img,
logger=None,
view_dict=view_dict,
view_number=view_id)
if settings.CREATE_PANOS:
break # we only want to create 1 pano per camera
def add_face_materials(engine, mesh):
"""
Read the texture from a png file, and apply it to the mesh.
Args:
model: The model in context after loading the .ply
engine: The render engine
"""
texture_image = bpy.data.images.load(os.path.join(basepath, settings.TEXTURE_FILE))
image_texture = bpy.data.textures.new('export_texture', type = 'IMAGE')
image_texture.image = texture_image
image_material = bpy.data.materials.new('TextureMaterials')
image_material.use_shadeless = True
material_texture = image_material.texture_slots.add()
material_texture.texture = image_texture
material_texture.texture_coords = 'UV'
bpy.ops.object.mode_set(mode='OBJECT')
context_obj = bpy.context.object
context_obj_data = context_obj.data
context_obj_data.materials.append(image_material)
bpy.types.SpaceView3D.show_textured_solid = True
'''
RENDER
'''
def render_rgb_img(scene, save_path):
"""
Renders an image from the POV of the camera and save it out
Args:
scene: A Blender scene that the camera will render
save_path: Where to save the image
"""
save_path_dir, img_filename = os.path.split(save_path)
with Profiler("Render") as prf:
utils.set_preset_render_settings(scene, presets=['BASE', 'NON-COLOR'])
# render_save_path = setup_scene_for_rgb_render(scene, save_path_dir)
ident = str(uu.uuid4())
ext = utils.img_format_to_ext[settings.PREFERRED_IMG_EXT.lower()]
temp_filename = "{0}0001.{1}".format(ident, ext)
render_save_path = os.path.join(save_path_dir, temp_filename)
prf.step("Setup")
print("******************* ", render_save_path, save_path)
        scene.render.filepath = render_save_path  # render straight to the temp path that is moved below
bpy.ops.render.render(write_still=True)
prf.step("Render")
with Profiler("Saving") as prf:
shutil.move(render_save_path, save_path)
def setup_scene_for_rgb_render(scene, outdir):
"""
Creates the scene so that a rgb image will be saved.
Args:
scene: The scene that will be rendered
outdir: The directory to save raw renders to
Returns:
save_path: The path to which the image will be saved
"""
# Use node rendering for python control
scene.use_nodes = True
tree = scene.node_tree
links = tree.links
# Make sure there are no existing nodes
for node in tree.nodes:
tree.nodes.remove(node)
# Set up a renderlayer and plug it into our remapping layer
inp = tree.nodes.new('CompositorNodeRLayers')
if (bpy.app.version[1] >= 70): # Don't apply color transformation -- changed in Blender 2.70
scene.view_settings.view_transform = 'Raw'
scene.sequencer_colorspace_settings.name = 'Non-Color'
# Save it out
if outdir:
out = tree.nodes.new('CompositorNodeOutputFile')
ident = str(uu.uuid4())
out.file_slots[0].path = ident
out.base_path = outdir
# out.format.color_mode = 'BW'
# out.format.color_depth = settings.DEPTH_BITS_PER_CHANNEL
out.format.color_mode = 'RGB'
out.format.color_depth = settings.COLOR_BITS_PER_CHANNEL
out.format.file_format = settings.PREFERRED_IMG_EXT.upper()
links.new(inp.outputs[0], out.inputs[0])
ext = utils.img_format_to_ext[settings.PREFERRED_IMG_EXT.lower()]
temp_filename = "{0}0001.{1}".format(ident, ext)
return os.path.join(outdir, temp_filename)
else:
out = tree.nodes.new('CompositorNodeComposite')
links.new(inp.outputs[0], out.inputs[0])
return None
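# Illustrative usage sketch (not from the original script): if the node-based
# compositor path were enabled instead of the direct filepath render used in
# render_rgb_img, the flow would look roughly like:
#   render_save_path = setup_scene_for_rgb_render(scene, save_path_dir)
#   bpy.ops.render.render(write_still=True)
#   shutil.move(render_save_path, save_path)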
if __name__ == '__main__':
with Profiler("create_rgb_images.py"):
main()
|
from django.db.models import base
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib import messages
from typing import Optional
import time
from typing import Mapping
import requests
from pprint import pprint
import calendar
import datetime
import brownie
from requests.api import get
def get_latest_block():
URL = "https://api.etherscan.io/api"
HEADERS = {
'accept': 'application/json'
}
date = datetime.datetime.utcnow()
utc_time = calendar.timegm(date.utctimetuple())
QUERYSET = {
'module': 'block',
'action': 'getblocknobytime',
'timestamp': str(utc_time),
'closest': 'before',
'apikey': '4A824VS77HUTKMPJ6PUQPHIDUZPHMP2AHQ',
}
response = requests.get(URL, headers=HEADERS, params=QUERYSET)
if response.status_code == 200:
latest_block_num = response.json().get('result')
return latest_block_num
else:
pass
def get_block_info(block_num):
URL = "https://api.etherscan.io/api"
HEADERS = {
'accept': 'application/json'
}
QUERYSET = {
'module': 'proxy',
'action': 'eth_getBlockByNumber',
'tag': hex(int(block_num)),
'boolean': 'true',
'apikey': '4A824VS77HUTKMPJ6PUQPHIDUZPHMP2AHQ',
}
response = requests.get(URL, headers=HEADERS, params=QUERYSET)
if response.status_code == 200:
full_block_info = response.json()
return full_block_info
else:
pass
def get_address_tx_hist(address):
URL = "https://api.etherscan.io/api"
HEADERS = {
'accept': 'application/json'
}
latest_block = get_latest_block()
QUERYSET = {
'module': 'account',
'action': 'txlist',
'address': address,
'startblock': '0',
'endblock': latest_block,
'page': '1',
'offset': '10',
'sort': 'asc',
'apikey': '4A824VS77HUTKMPJ6PUQPHIDUZPHMP2AHQ',
}
response = requests.get(URL, headers=HEADERS, params=QUERYSET)
if response.status_code == 200:
address_tx_list_result = response.json()
if address_tx_list_result.get('message') == 'OK':
return address_tx_list_result.get('result')
else:
return None
def get_eth_price():
URL = "https://api.etherscan.io/api"
HEADERS = {
'accept': 'application/json'
}
QUERYSET = {
'module': 'stats',
'action': 'ethprice',
'apikey': '4A824VS77HUTKMPJ6PUQPHIDUZPHMP2AHQ',
}
response = requests.get(URL, headers=HEADERS, params=QUERYSET)
if response.status_code == 200:
prices = response.json()
if prices.get('message') == 'OK':
eth_usd = prices.get('result').get('ethusd')
eth_btc = prices.get('result').get('ethbtc')
return eth_usd, eth_btc
else:
return None, None
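# Illustrative usage (values hypothetical): the Etherscan "ethprice" endpoint
# returns prices as strings, so a successful call looks roughly like
#   eth_usd, eth_btc = get_eth_price()   # e.g. ('1800.25', '0.0651')
# When the response message is not 'OK' the function returns (None, None).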
def index(request):
latest_mined_block = int(get_latest_block())
latest_mined_block_str = '{0:,d}'.format(latest_mined_block)
block_info = get_block_info(latest_mined_block).get('result')
block_tx_list = block_info.get('transactions')
for tx in block_tx_list:
tx['value'] = int(tx.get('value'), base=16)
miner = block_info.get('miner')
difficulty = '{0:,d}'.format(int(block_info.get('difficulty'), base=16))
tx_count = len(block_tx_list)
tx_list = []
eth_usd, eth_btc = get_eth_price()
address = ''
address_tx_count = 0
try:
if request.method == 'POST':
address = request.POST.get('address')
tx_list = get_address_tx_hist(address)
address_tx_count = len(tx_list)
else:
raise ValueError()
except Exception as e:
print(e)
return render(request, 'index.html', {
'title': 'ETH Blockchain Explorer',
'cover_header': 'ETH Blockchain Explorer',
'cover_body': 'Explore the ETHEREUM Blockchain with this tool, using enhanced capabilities provided by ',
'main_cta': "Let's Go!",
'home': 'Home',
'stats': 'Latest Block Statistics',
        'stats_body': 'Latest mined block information and transactions list',
'block_tx_list': block_tx_list,
'searched_address': address,
'address_tx_list': tx_list,
'address_tx_count': address_tx_count,
'explorer': 'Address Explorer',
'explorer_body': 'Search by Address to see more details about its transactions. ',
'latest_mined_block': latest_mined_block_str,
'miner': miner,
'difficulty': difficulty,
'latst_block_tx_count': tx_count,
'search': 'Explore',
'address_error': 'Invalid Address. Please check it and try again.',
'eth_usd': eth_usd,
'eth_btc': eth_btc
})
def address_details(request):
pass
|
from .analytical import retarget_from_src_to_target, Retargeting, generate_joint_map
from .constrained_retargeting import retarget_from_src_to_target as retarget_from_src_to_target_constrained
from .point_cloud_retargeting import retarget_from_point_cloud_to_target
from .constants import ROCKETBOX_TO_GAME_ENGINE_MAP, ADDITIONAL_ROTATION_MAP,GAME_ENGINE_TO_ROCKETBOX_MAP, ROCKETBOX_ROOT_OFFSET
from .point_cloud_retargeting import PointCloudRetargeting
|
"""
navigating.py constants and basic functions for navigation
"""
from __future__ import absolute_import, division, print_function
import sys
import math
import re
# Import ioflo libs
from .sixing import *
from .odicting import odict
from ..base import excepting
from .consoling import getConsole
console = getConsole()
TWOPI = 2.0 * math.pi # two times pi
DEGTORAD = math.pi / 180.0 # r = DEGTORAD * d
RADTODEG = 180.0 / math.pi # d = RADTODEG * r
# NOTE: assumed definitions added so HumanLLToFracDeg below is runnable; they
# match the documented DegXMin format (e.g. "40N35.67") and may differ from
# the patterns used in the original module.
REO_LatLonNE = re.compile(r'([0-9]{1,3})[NnEe]([0-9]{1,2}(?:\.[0-9]+)?)')
REO_LatLonSW = re.compile(r'([0-9]{1,3})[SsWw]([0-9]{1,2}(?:\.[0-9]+)?)')
def sign(x):
"""Calculates the sign of a number and returns
1 if positive
-1 if negative
0 if zero
should make it so type int or float of x is preserved in return type
"""
if x > 0.0:
return 1.0
elif x < 0.0:
return -1.0
else:
return 0.0
Sign = sign
def wrap1(angle, wrap=360):
"""
1 sided wrap of angle to interval [0, wrap]
angle and wrap may be positive or negative
If wrap == 0 just returns angle i.e. don't wrap
"""
if wrap != 0:
angle %= wrap # wrap to full circle first
return angle
def wrap2(angle, wrap = 180.0):
"""
2 sided wrap of angle to [-wrap, +wrap]
angle and wrap may be positive or negative
If wrap == 0 just returns angle ie don't wrap
Wrap2 = (2 sided one positive one negative) wrap of angle to
signed interval [-wrap, + wrap] wrap is half circle
if wrap = 0 then don't wrap
angle may be positive or negative
result is invariant to sign of wrap
Wrap preserves convention so angle can be in compass or Cartesian coordinates
    Uses property of python modulo operator that implements true
    clock or circular arithmetic as location on circle
    distance % circumference = location
    if circumference positive then locations positive sign,
    magnitudes increase CW (CW 0 1 2 3 ... 0)
if circumference negative then locations negative sign,
magnitudes increase CCW (CCW 0 -1 -2 -3 ... 0)
if distance positive then wrap distance CW around circle
if distance negative then wrap distance CCW around circle
No strong need for a full wrap in Python since modulo operator does that
even for negative angles
angle %= 360.0
"""
if wrap != 0.0:
angle %= wrap * 2.0 # wrap to full circle first
if abs(angle) > abs(wrap): #more than half way round
angle = (angle - wrap) % (- wrap) #wrap extra on reversed half circle
return angle
Wrap2 = wrap2
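# Illustrative values (computed from the implementation above, not taken from
# the original module):
#   wrap2(190.0)  -> -170.0
#   wrap2(-190.0) ->  170.0
#   wrap2(370.0)  ->   10.0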
def delta(desired, actual, wrap = 180.0):
"""Calculate the short rotation for delta = desired - actual
and delta wraps around at wrap
"""
#delta = desired - actual so
#desired = actual + delta
return wrap2(angle = (desired - actual), wrap = wrap)
Delta = delta
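# Illustrative values: delta gives the signed shortest rotation, e.g.
#   delta(desired=10.0, actual=350.0) ->  20.0   (turn 20 deg clockwise)
#   delta(desired=350.0, actual=10.0) -> -20.0   (turn 20 deg counter-clockwise)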
def moveByHSD(heading = 0.0, speed = 1.0, duration = 0.0):
"""
Returns change in position after moving on heading at speed for duration
heading in compass coordinates, 0 deg is north, up, cw rotation increases
"""
deltaNorth = duration * (speed * math.cos(DEGTORAD * heading))
deltaEast = duration * (speed * math.sin(DEGTORAD * heading))
return (deltaNorth, deltaEast)
MoveByHSD = moveByHSD
def MoveToHSD(north = 0.0, east = 0.0,
heading = 0.0, speed = 1.0, duration = 0.0):
"""
Returns new position after moving on heading at speed for duration
heading in compass coordinates, 0 deg is north, up, cw rotation increases
north east order since lat lon
"""
north += duration * (speed * math.cos(DEGTORAD * heading))
east += duration * (speed * math.sin(DEGTORAD * heading))
return (north,east)
def RotateFSToNE(heading = 0.0, forward = 0.0, starboard = 0.0):
"""
rotates Forward Starboard vector to North East vector
heading in compass coordinates, 0 deg is north, up, cw rotation increases
north east order since lat lon
"""
ch = math.cos(DEGTORAD * heading)
sh = math.sin(DEGTORAD * heading)
north = ch * forward - sh * starboard
east = sh * forward + ch * starboard
return (north,east)
def RotateNEToFS(heading = 0.0, north = 0.0, east = 0.0):
"""
Rotate north east vector to Forward Starboard
heading in compass coordinates, 0 deg is north, up, cw rotation increases
north east order since lat lon
"""
ch = math.cos(DEGTORAD * heading)
sh = math.sin(DEGTORAD * heading)
forward = ch * north + sh * east
starboard = - sh * north + ch * east
return (forward,starboard)
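# Illustrative check (values computed from the formulas above): with heading 90
# (facing east), the body "forward" axis maps onto east:
#   RotateFSToNE(heading=90.0, forward=1.0, starboard=0.0) -> approx (0.0, 1.0)  # (north, east)
#   RotateNEToFS(heading=90.0, north=0.0, east=1.0)        -> approx (1.0, 0.0)  # (forward, starboard)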
def AlongCrossTrack(track = 0.0, north = 0.0, east = 0.0,
mag = None, heading = None):
"""
Returns as a tuple, the along and cross track components of the vector
given by (north, east) where the track is from origin to (n, e)
or by mag (magnitude) heading (degrees) if provided
track is the track course ( nav angle degrees)
    a positive along track is in the forward direction of the track
a negative along track is in the backward direction of the track
a positive cross track is to the east of the track
a negative cross track is to the west of the track
"""
if mag is not None and heading is not None:
heading = wrap2(heading)
north = mag * math.cos(DEGTORAD * heading)
east = mag * math.sin(DEGTORAD * heading)
track = wrap2(track)
#along track component
trackNorth = math.cos(DEGTORAD * track)
trackEast = math.sin(DEGTORAD * track)
A = north * trackNorth + east * trackEast
#cross track vector
crossNorth = north - A * trackNorth
crossEast = east - A * trackEast
#cross track magnitude
C = (crossNorth ** 2.0 + crossEast ** 2.0) ** 0.5
#fix sign by testing for shortest rotation of cross vector to track direction
#if z component of cross X track is positive then shortest rotation is CCW
# and cross is to the right of track
#if z component of cross x track is negative then shortest rotation is CW
# and cross is to the left of track
(x,y,z) = CrossProduct3D((crossEast, crossNorth, 0.0),
(trackEast, trackNorth,0.0))
if z < 0.0: #make C negative if to left of track
C *= -1
return (A,C)
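# Illustrative values: for a due-north track (track=0) and an offset of
# 3 m north, 4 m east, the along track component is 3 and the cross track
# component is +4 (east of the track):
#   AlongCrossTrack(track=0.0, north=3.0, east=4.0) -> (3.0, 4.0)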
def CrabSpeed(track = 0.0, speed = 2.0, north = 0.0, east = 0.0,
mag = None, heading = None):
"""
Returns a tuple of the compensating (crabbed) course angle (in degrees)
and the delta crab angle
and the resulting along track speed (including current and cluster).
The crabbed course is that needed to compensate for the current
given by (east, north) or mag (magnitude) heading (degrees) if provided
Where the resulting along track speed is the projection of
the compensating course at speed onto the desired course
track is the desired track course ( nav angle degrees)
speed is the cluster speed (must be non zero)
compensating course = desired course - delta crab angle
a positive crab angle means the compensating course is to the left
of the desired course.
a negative crab angle means the compensating course is to the right
of the desired course
"""
if mag is not None and heading is not None:
heading = wrap2(heading)
north = mag * math.cos(DEGTORAD * heading)
east = mag * math.sin(DEGTORAD * heading)
track = wrap2(track)
(A,C) = AlongCrossTrack(track = track, north = north, east = east)
#current compensated course crab = track + delta crab angle
delta = - RADTODEG * math.asin(C / speed)
crab = track + delta
#B = along track component of compensated course
B = speed * (math.sin(DEGTORAD * crab) * math.sin(DEGTORAD * track) +
math.cos(DEGTORAD * crab) * math.cos(DEGTORAD * track) )
return (crab, delta, B + A)
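# Illustrative values (computed from the code above): to hold a due-north
# track (track=0) at 2 m/s against a 1 m/s eastward current,
#   CrabSpeed(track=0.0, speed=2.0, north=0.0, east=1.0)
# returns approximately (-30.0, -30.0, 1.732): steer a course of 330 degrees
# (30 deg left of the track), making good about 1.73 m/s along track.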
def CrossProduct3D(a,b):
"""Forms the 3 dimentional vector cross product of sequences a and b
a is crossed onto b
cartesian coordinates
returns a 3 tuple
"""
cx = a[1] * b[2] - b[1] * a[2]
cy = a[2] * b[0] - b[2] * a[0]
cz = a[0] * b[1] - b[0] * a[1]
return (cx,cy,cz)
def DotProduct(a,b):
"""Returns the N dimensional vector dot product of sequences a and b
"""
dot = 0.0
for i in range(len(a)):
dot += a[i] * b[i]
return dot
def PerpProduct2D(a,b):
"""Computes the the 2D perpendicular product of sequences a and b.
The convention is a perp b.
The product is:
positive if b is to the left of a
negative if b is to the right of a
zero if b is colinear with a
left right defined as shortest angle (< 180)
"""
return (a[0] * b[1] - a[1] * b[0])
def DistancePointToTrack2D(a,track, b):
"""Computes the signed distance between point b and the track ray defined by
point a and track azimuth track
a and b are sequences x (east) coord is index 0, y (north) coord is index 1
track in degrees from north
x = east
y = north
The distance is
positive if b is to the left of the track line
negative if b is to the right of the track line
zero if b is colinear with the track line
left right defined as shortest angle (< 180)
"""
dx = math.sin(DEGTORAD * track) #normalized vector
dy = math.cos(DEGTORAD * track) #normalized vector
return (dx * (b[1] - a[1]) - dy * (b[0] - a[0]))
def SpheroidLLLLToDNDE(lat0, lon0, lat1, lon1):
    """Computes the flat earth approx of change in north east position meters
       for a change in lat lon location on spheroid.
       from location lat0 lon0 to location lat1 lon1
       point lat0 lon0 in total fractional degrees north east positive
       point lat1, lon1 in total fractional degrees north east positive
       returns tuple (dn,de) where dn is delta north and de is delta east meters
       Uses WGS84 spheroid
    """
    re = 6378137.0 #equatorial radius in meters
    f = 1/298.257223563 #flattening
    e2 = f*(2.0 - f) #eccentricity squared
    # The body below completes the original stub (which ended here) using the
    # standard flat-earth approximation with local radii of curvature; the
    # signature is changed from (a, b) to match the docstring. Sketch only.
    avlat = DEGTORAD * (lat1 + lat0) / 2.0
    sinlat2 = math.sin(avlat) ** 2.0
    rm = re * (1.0 - e2) / ((1.0 - e2 * sinlat2) ** 1.5) #meridional radius
    rn = re / ((1.0 - e2 * sinlat2) ** 0.5) #prime vertical radius
    dn = rm * DEGTORAD * (lat1 - lat0)
    de = rn * math.cos(avlat) * DEGTORAD * (lon1 - lon0)
    return (dn, de)
def SphereLLLLToDNDE(lat0,lon0,lat1,lon1):
"""Computes the flat earth approx of change in north east position meters
for a change in lat lon location on sphere.
from location lat0 lon0 to location lat1 lon1
point lat0 lon0 in total fractional degrees north east positive
point lat1, lon1 in total fractional degrees north east positive
returns tuple (dn,de) where dn is delta north and de is delta east meters
Uses sphere 1 nm = 1 minute 1852 meters per nautical mile
"""
r = 6366710.0 #radius of earth in meters = 1852 * 60 * 180/pi
dlat = (lat1 - lat0)
dlon = (lon1 - lon0)
avlat = (lat1 + lat0)/2.0
#avlat = lat0
dn = r * dlat * DEGTORAD
de = r * dlon * DEGTORAD * math.cos( DEGTORAD * avlat)
return (dn, de)
def sphereLLByDNDEToLL(lat0, lon0, dn, de):
"""
Returns new lat lon location on sphere in total fractional
degrees north east positive
Using the flat earth approx of sphere
given relative position dn (north) meters and de (east) meters
from the given location lat0 lon0
returns tuple (lat1,lon1)
Uses sphere 1 nm = 1 minute 1852 meters per nautical mile
"""
r = 6366710.0 #radius of earth in meters = 1852 * 60 * 180/pi
dlat = dn/(r * DEGTORAD)
lat1 = lat0 + dlat
avlat = (lat1 + lat0)/2.0
try:
dlon = de / (r * DEGTORAD * math.cos(DEGTORAD * avlat))
except ZeroDivisionError:
dlon = 0.0
lon1 = lon0 + dlon
avlat = (lat1 + lat0)/2.0
return (lat1, lon1)
SphereLLByDNDEToLL = sphereLLByDNDEToLL
def SphereLLbyRBtoLL(lat0,lon0,range,bearing):
"""Computes new lat lon location on sphere
from the flat earth approx of change in range meters at bearing degrees from
from the given location lat0 lon0
point lat0 lon0 in total fractional degrees north east positive
returns tuple (lat1,lon1)
Uses sphere 1 nm = 1 minute 1852 meters per nautical mile
"""
r = 6366710.0 #radius of earth in meters = 1852 * 60 * 180/pi
dn = range * math.cos(DEGTORAD * bearing)
de = range * math.sin(DEGTORAD * bearing)
dlat = dn/(r * DEGTORAD)
lat1 = lat0 + dlat
avlat = (lat1 + lat0)/2.0
try:
dlon = de / (r * DEGTORAD * math.cos(DEGTORAD * avlat))
except ZeroDivisionError:
dlon = 0.0
lon1 = lon0 + dlon
avlat = (lat1 + lat0)/2.0
return (lat1, lon1)
def SphereLLLLToRB(lat0,lon0,lat1,lon1):
"""Computes the flat earth approx of change in range meters bearing degrees
for a change in lat lon location.
from location lat0 lon0 to location lat1 lon1
point lat0 lon0 in total fractional degrees north east positive
point lat1, lon1 in total fractional degrees north east positive
returns tuple (dn,de) where dn is delta north and de is delta east meters
Uses sphere 1 nm = 1 minute 1852 meters per nautical mile
"""
r = 6366710.0 #radius of earth in meters = 1852 * 60 * 180/pi
dlat = (lat1 - lat0)
dlon = (lon1 - lon0)
avlat = (lat1 + lat0)/2.0
#avlat = lat0
dn = r * dlat * DEGTORAD
de = r * dlon * DEGTORAD * math.cos( DEGTORAD * avlat)
range = (dn * dn + de * de) ** 0.5
bearing = RADTODEG * ((math.pi / 2.0) - math.atan2(dn,de))
return (range, bearing)
def RBToDNDE(range, bearing):
"""Computes change in north east position for an offset
of range (meters) at bearing (degrees)
returns tuple(delta north, delta East)
"""
dn = range * math.cos(DEGTORAD * bearing)
de = range * math.sin(DEGTORAD * bearing)
return (dn, de)
def DNDEToRB(dn ,de):
"""Computes relative range (meters) and bearing (degrees)for change
in position of north (meters) east (meters)
returns tuple(Range, Bearing)
"""
range = (dn * dn + de * de) ** 0.5
bearing = RADTODEG * ((math.pi / 2.0) - math.atan2(dn,de))
return (range, bearing)
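# Illustrative round trip (values computed from the formulas above):
#   RBToDNDE(range=10.0, bearing=90.0) -> approx (0.0, 10.0)
#   DNDEToRB(dn=3.0, de=4.0)           -> (5.0, approx 53.13)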
def DegMinToFracDeg(latDeg, latMin, lonDeg, lonMin):
"""Converts location in separated format of Deg and Min
to combined format of total fractional degrees
lat is in signed fractional degrees positive = North negative = South
    lon is in signed fractional degrees positive = East negative = West
latDeg are in signed degrees North positive South Negative
latMin are in signed minutes North positive South Negative
lonDeg are in signed degrees East positive West Negative
lonMin are in signed minutes East positive West Negative
"""
if sign(latDeg) != sign(latMin):
latMin = - latMin
if sign(lonDeg) != sign(lonMin):
lonMin = - lonMin
lat = latDeg + (latMin / 60.0)
lon = lonDeg + (lonMin / 60.0)
return (lat, lon)
def FracDegToDegMin(lat, lon):
"""Converts location in format of total fractional degrees to
separated format of deg and minutes
lat is in signed fractional degrees positive = North negative = South
    lon is in signed fractional degrees positive = East negative = West
latDeg are in signed degrees North positive South Negative
latMin are in signed minutes North positive South Negative
lonDeg are in signed degrees East positive West Negative
lonMin are in signed minutes East positive West Negative
"""
latDeg = int(lat)
latMin = (lat - latDeg) * 60.0
lonDeg = int(lon)
lonMin = (lon - lonDeg) * 60.0
return (latDeg, latMin, lonDeg, lonMin)
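# Illustrative round trip for 40 deg 35.67 min N, 70 deg 56.45 min W:
#   DegMinToFracDeg(40, 35.67, -70, 56.45) -> (approx 40.5945, approx -70.9408)
#   FracDegToDegMin(40.5945, -70.9408)     -> (40, approx 35.67, -70, approx -56.45)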
def FracDegToHuman(lat, lon):
"""Converts location in format of total fractional degrees to
tuple (latDM, lonDM) of human friendly string of form
latDegXlatMin where X is N if lat positive and S if lat negative
latDeg in units of integer degrees [0 ,90]
    lat Min in units of fractional minutes [0.0, 60.0)
and
lonDegXlonMin where X is E if lon positive and W if lon negative
lonDeg in units of integer degrees [0 ,180]
    lon Min in units of fractional minutes [0.0, 60.0)
lat is in signed fractional degrees positive = North, negative = South
[-90, 90]
    lon is in signed fractional degrees positive = East, negative = West
[-180, 180]
Does not handle wrapping lat over poles or lon past halfway round
"""
latDeg, latMin, lonDeg, lonMin = FracDegToDegMin(lat, lon)
if latDeg >= 0:
latDM = "%dN%0.3f" % (latDeg, latMin)
else:
latDM = "%dS%0.3f" % (-latDeg, -latMin)
if lonDeg >= 0:
lonDM = "%dE%0.3f" % (lonDeg, lonMin)
else:
lonDM = "%dW%0.3f" % (-lonDeg, -lonMin)
return (latDM, lonDM)
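# Illustrative output (format follows the "%0.3f" pattern above):
#   FracDegToHuman(40.5945, -70.94083) -> ('40N35.670', '70W56.450')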
def HumanLatToFracDeg(latDM):
"""Converts latDM in human friendly string of form
latDegXlatMin where X is N if lat positive and S if lat negative
latDeg in units of integer degrees [0 ,90]
    lat Min in units of fractional minutes [0.0, 60.0)
to lat in total fractional degrees
lat is in signed fractional degrees positive = North, negative = South
[-90, 90]
Does not handle wrapping lat over poles or lon past halfway round
"""
latDM = latDM.upper()
if ('N' in latDM):
(degrees,minutes) = latDM.split('N')
lat = int(degrees) + (float(minutes) / 60.0)
elif ('S' in latDM):
(degrees,minutes) = latDM.split('S')
lat = - (int(degrees) + (float(minutes) / 60.0))
else:
raise ValueError("Bad format for latitude '{0}'".format(latDM))
return (lat)
def HumanLonToFracDeg(lonDM):
"""Converts lonDM in human friendly string of form
lonDegXlonMin where X is E if lon positive and W if lon negative
lonDeg in units of integer degrees [0 ,180]
    lon Min in units of fractional minutes [0.0, 60.0)
to lon in total fractional degrees
    lon is in signed fractional degrees positive = East, negative = West
[-180, 180]
Does not handle wrapping lat over poles or lon past halfway round
"""
lonDM = lonDM.upper()
if ('E' in lonDM):
(degrees,minutes) = lonDM.split('E')
lon = int(degrees) + (float(minutes) / 60.0)
elif ('W' in lonDM):
(degrees,minutes) = lonDM.split('W')
lon = - (int(degrees) + (float(minutes) / 60.0))
else:
raise ValueError("Bad format for longitude '{0}'".format(lonDM))
return (lon)
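# Illustrative parses (the inverse of FracDegToHuman):
#   HumanLatToFracDeg('40N35.67') ->  40.5945
#   HumanLonToFracDeg('70W56.45') -> approx -70.94083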
def HumanToFracDeg(latDM, lonDM):
"""Converts pair of coordinates in human friendly strings of form DegXMin to
total fractional degrees where
the result is positive if X is N or E and
the result is negative if X is S or W
Does not handle wrapping over poles or past halfway round
"""
lat = HumanLatToFracDeg(latDM)
lon = HumanLonToFracDeg(lonDM)
return (lat,lon)
def HumanLLToFracDeg(hdm):
"""Converts a coordinate in human friendly string of form DegXMin to
total fractional degrees where
the result is positive if X is N or E and
the result is negative if X is S or W
Does not handle wrapping over poles or past halfway round
"""
dm = REO_LatLonNE.findall(hdm) #returns list of tuples of groups [(deg,min)]
if dm:
deg = float(dm[0][0])
min_ = float(dm[0][1])
return (deg + min_/60.0)
dm = REO_LatLonSW.findall(hdm) #returns list of tuples of groups [(deg,min)]
if dm:
deg = float(dm[0][0])
min_ = float(dm[0][1])
return (-(deg + min_/60.0))
raise ValueError("Bad format for lat or lon '{0}'".format(hdm))
def Midpoint(latDM0, lonDM0, latDM1, lonDM1):
"""Computes the midpoint of a trackline between
(latDM0,lonDM0) and (latDM1,lonDM1)
arguments are in human friendly degrees fractional minutes format
40N35.67 70W56.45
"""
lat0 = HumanLLToFracDeg(latDM0)
lon0 = HumanLLToFracDeg(lonDM0)
lat1 = HumanLLToFracDeg(latDM1)
lon1 = HumanLLToFracDeg(lonDM1)
dn, de = SphereLLLLToDNDE(lat0,lon0,lat1,lon1)
dn = dn/2.0 #get half the distance
de = de/2.0
lat1, lon1 = SphereLLByDNDEToLL(lat0,lon0,dn,de) #midpoint
latDM, lonDM = FracDegToHuman(lat1, lon1)
return (latDM, lonDM)
def Endpoint(latDM0, lonDM0, range, bearing):
"""Computes the endpoint track from latDM, lonDm of range at bearing
arguments are in human friendly degrees fractional minutes format
40N35.67 70W56.45
"""
lat0 = HumanLLToFracDeg(latDM0)
lon0 = HumanLLToFracDeg(lonDM0)
lat1, lon1 = SphereLLbyRBtoLL(lat0,lon0,range,bearing)
latDM1, lonDM1 = FracDegToHuman(lat1, lon1)
return (latDM1, lonDM1)
|
#!/usr/bin/env python3
"""Pre-commit hook to verify that all extras are documented in README.rst"""
import configparser
import re
from pathlib import Path
repo_dir = Path(__file__).parent.parent.parent
config = configparser.ConfigParser(strict=False)
config.read(repo_dir / "setup.cfg")
all_extra = []
extra_to_exclude = {"tests", "mypy", "docs"}
all_extras = set(config["options.extras_require"].keys()) - extra_to_exclude
readme_path = repo_dir / "README.rst"
extra_doc = """
.. list-table::
:header-rows: 1
* - Extra Name
- Installation Command
- Dependencies
"""
for extra in sorted(all_extras):
extra_doc += f"""
* - ``{extra}``
- ``pip install 'astronomer-providers[{extra}]'``
- {extra.replace(".", " ").title()}
"""
with open(readme_path, "r") as readme_file:
readme_contents = readme_file.read()
new_readme_text = re.sub(
r".. EXTRA_DOC_START([\s\S]*).. EXTRA_DOC_END",
f".. EXTRA_DOC_START{extra_doc}\n.. EXTRA_DOC_END",
readme_contents,
flags=re.MULTILINE,
)
if new_readme_text != readme_contents:
with open(readme_path, "w") as readme_file:
readme_file.write(new_readme_text)
|
import collections
from typing import List
class Solution:
def numPairsDivisibleBy60(self, time: List[int]) -> int:
"""Array.
Running time: O(n) where n == len(time).
"""
d = collections.defaultdict(int)
for t in time:
d[t % 60] += 1
res = 0
for k, v in d.items():
if k == 0 or k == 30:
res += (v - 1) * v // 2
else:
o = d.get(60 - k, 0)
res += o * v / 2.0
return int(res)
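# Illustrative usage (the classic example for this problem):
#   Solution().numPairsDivisibleBy60([30, 20, 150, 100, 40]) -> 3
# Complementary remainders (k, 60 - k) are counted once from each side of the
# pair, which is why the o * v contribution is halved.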
|
from ..utils import Object
class GetBackgroundUrl(Object):
"""
Constructs a persistent HTTP URL for a background
Attributes:
ID (:obj:`str`): ``GetBackgroundUrl``
Args:
name (:obj:`str`):
Background name
type (:class:`telegram.api.types.BackgroundType`):
Background type
Returns:
HttpUrl
Raises:
:class:`telegram.Error`
"""
ID = "getBackgroundUrl"
def __init__(self, name, type, extra=None, **kwargs):
self.extra = extra
self.name = name # str
self.type = type # BackgroundType
@staticmethod
def read(q: dict, *args) -> "GetBackgroundUrl":
name = q.get('name')
type = Object.read(q.get('type'))
return GetBackgroundUrl(name, type)
|
import pandas as pd
import numpy as np
import os
import plotly.plotly as py
import plotly.graph_objs as go
'''Trend CO2'''
def trend_co2(co2_anno):
# Create a trace
trace = go.Scatter(
x = co2_anno['Anno'],
y = co2_anno['Media CO2'],
mode = 'lines+markers',
name = 'CO2 Trend'
)
data = [trace]
# Edit the layout
layout = dict(title = 'Trend storico della concentrazione di CO2 in ppm (parti per milione)',
yaxis = dict(title = 'CO2 (ppm)')
)
annotations = []
annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.1,
xanchor='center', yanchor='top',
text='Fonte: Osservatorio Mauna Loa, Hawaii (NOAA-ESRL)',
font=dict(family='Arial',
size=12,
color='rgb(150,150,150)'),
showarrow=False))
layout['annotations'] = annotations
fig = dict(data=data, layout=layout)
return fig
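# Illustrative usage sketch (assumes a DataFrame with 'Anno' and 'Media CO2'
# columns and an offline plotly workflow, which this module does not set up):
#   import plotly.offline
#   plotly.offline.plot(trend_co2(co2_anno), filename='trend_co2.html')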
'''Trend Temperatura Terrestre'''
def trend_temp(global_temp):
trace = go.Scatter(
x = global_temp['Year'],
y = global_temp['Glob_C'],
mode = 'lines'
)
data = [trace]
# Edit the layout
layout = dict(title = 'Trend storico della temperatura terrestre in gradi C°',
yaxis = dict(title = 'gradi (C°)'),
shapes=[
{
'type':'line','x0': global_temp[:1].Year.values[0],'y0': global_temp[:1].Glob_C.values[0],
'x1': global_temp[-1:].Year.values[0],'y1': global_temp[:1].Glob_C.values[0],
'line': {'color': 'rgb(178,34,34)','width': 2}
},
{
'type':'line','x0': global_temp[:1].Year.values[0],'y0': global_temp[-1:].Glob_C.values[0],
'x1': global_temp[-1:].Year.values[0],'y1': global_temp[-1:].Glob_C.values[0],
'line': {'color': 'rgb(178,34,34)','width': 2}
}
])
annotations = []
annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.1,
xanchor='center', yanchor='top',
text='Fonte: National Aeronautics and Space Administration (NASA)',
font=dict(family='Arial',
size=12,
color='rgb(150,150,150)'),
showarrow=False)
)
annotations.append(dict(x=global_temp[-1:].Year.values[0],y=global_temp[:1].Glob_C.values[0],xref='x',yref='y',
text=str(global_temp[:1].Year.values[0])+': '+str(global_temp[:1].Glob_C.values[0])+' C°',
ax=0,ay=-10)
)
annotations.append(dict(x=global_temp[-1:].Year.values[0],y=global_temp[-1:].Glob_C.values[0],xref='x',yref='y',
text=str(global_temp[-1:].Year.values[0])+': '+str(global_temp[-1:].Glob_C.values[0])+' C°',
ax=0,ay=-10)
)
layout['annotations'] = annotations
fig = dict(data=data, layout=layout)
return fig
'''% Riduzione Gas Serra'''
def perc_rid(df_all):
hover_text = []
for index, row in df_all.iterrows():
paese = 'Paese: ' + str(row['Name'])
perc_rid = '% riduzione: '+ '{:.1%}'.format(row['perc_rid']/100)
gdp = 'PIL: '+ '{:,}'.format(row['GDP'])+'M€'
pop = 'Popolazione: '+ '{:,}'.format(row['Habitant'])+'k'
hover_text.append(paese + '<br>' + perc_rid + '<br>' + gdp + '<br>' + pop)
trace0 = go.Scatter(
x=df_all['GDP'],
y=df_all['Habitant'],
mode='markers',
text = hover_text,
hoverinfo='text',
marker=dict(
color = df_all['perc_rid'],
colorbar=dict(title='% riduzione'),
colorscale='Blues',
size=df_all['perc_rid']
)
)
layout = go.Layout(
title='% di Riduzione dei Gas Serra rispetto al 1990',
xaxis=dict(
title='Prodotto Interno Lordo (PIL) in milioni di €'+'<br>'+'<i>Fonte: Eurostat</i>',
type='log',
autorange=True
),
yaxis=dict(
title='Numero di abitanti in migliaia',
type='log',
autorange=True
)
)
data = [trace0]
fig = go.Figure(data=data, layout=layout)
return fig
'''% Riduzione Gas Serra - Mappa'''
def perc_rid_map(df_all):
hover_text = []
for index, row in df_all.iterrows():
paese = 'Paese: ' + str(row['Name'])
perc_rid = '% riduzione: '+ '{:.1%}'.format(row['perc_rid']/100)
gdp = 'PIL: '+ '{:,}'.format(row['GDP'])+'M€'
pop = 'Popolazione: '+ '{:,}'.format(row['Habitant'])+'k'
hover_text.append(paese + '<br>' + perc_rid + '<br>' + gdp + '<br>' + pop)
data = [go.Choropleth(
colorscale = 'Blues',
autocolorscale = False,
locations = df_all['Name'],
z = df_all['perc_rid'],
locationmode = 'country names',
text = hover_text,
marker = go.choropleth.Marker(
line = go.choropleth.marker.Line(
color = 'rgb(255,255,255)',
width = 2
)),
colorbar = go.choropleth.ColorBar(
title = "% Riduzione")
)]
layout = go.Layout(
title = 'Mappa % di Riduzione dei Gas Serra rispetto al 1990',
geo = go.layout.Geo(
scope = 'europe',
projection = go.layout.geo.Projection(type = 'natural earth'),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)'),
)
fig = go.Figure(data = data, layout = layout)
return fig
'''% Spesa Clima'''
def perc_spesa(df_all):
hover_text = []
for index, row in df_all.iterrows():
paese = 'Paese: ' + str(row['Name'])
perc_spesa = '% spesa clima: '+ '{:.1%}'.format(row['perc_spesa']/100)
gdp = 'PIL: '+ '{:,}'.format(row['GDP'])+'M€'
pop = 'Popolazione: '+ '{:,}'.format(row['Habitant'])+'k'
hover_text.append(paese + '<br>' + perc_spesa + '<br>' + gdp + '<br>' + pop)
trace0 = go.Scatter(
x=df_all['GDP'],
y=df_all['Habitant'],
mode='markers',
text = hover_text,
hoverinfo='text',
marker=dict(
color = df_all['perc_spesa'],
colorbar=dict(title='% spesa'),
colorscale='Blues',
size=df_all['perc_spesa']*100
)
)
layout = go.Layout(
title='% di Spesa per il Clima rispetto il Totale della Spesa (2016)',
xaxis=dict(
title='Prodotto Interno Lordo (PIL) in milioni di €'+'<br>'+'<i>Fonte: Eurostat</i>',
type='log',
autorange=True
),
yaxis=dict(
title='Numero di abitanti in migliaia',
type='log',
autorange=True
)
)
data = [trace0]
fig_spesa = go.Figure(data=data, layout=layout)
return fig_spesa
|
#!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Xiaohui Zhang
# Apache 2.0.
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
from collections import defaultdict
import argparse
import sys
class StrToBoolAction(argparse.Action):
""" A custom action to convert bools from shell format i.e., true/false
to python format i.e., True/False """
def __call__(self, parser, namespace, values, option_string=None):
if values == "true":
setattr(namespace, self.dest, True)
elif values == "false":
setattr(namespace, self.dest, False)
else:
raise Exception("Unknown value {0} for --{1}".format(values, self.dest))
def GetArgs():
    parser = argparse.ArgumentParser(description = "Converts pronunciation statistics (from phonetic decoding or g2p) "
                                     "into a lexicon. We prune the pronunciations "
                                     "based on a provided stats file, and optionally filter out entries which are present "
"in a filter lexicon.",
epilog = "e.g. steps/dict/prons_to_lexicon.py --min-prob=0.4 \\"
"--filter-lexicon=exp/tri3_lex_0.4_work/phone_decode/filter_lexicon.txt \\"
"exp/tri3_lex_0.4_work/phone_decode/prons.txt \\"
"exp/tri3_lex_0.4_work/lexicon_phone_decoding.txt"
"See steps/dict/learn_lexicon_greedy.sh for examples in detail.")
parser.add_argument("--set-sum-to-one", type = str, default = False,
action = StrToBoolAction, choices = ["true", "false"],
help = "If normalize lexicon such that the sum of "
"probabilities is 1.")
parser.add_argument("--set-max-to-one", type = str, default = True,
action = StrToBoolAction, choices = ["true", "false"],
help = "If normalize lexicon such that the max "
"probability is 1.")
parser.add_argument("--top-N", type = int, default = 0,
help = "If non-zero, we just take the top N pronunciations (according to stats/pron-probs) for each word.")
parser.add_argument("--min-prob", type = float, default = 0.1,
help = "Remove pronunciation with probabilities less "
"than this value after normalization.")
parser.add_argument("--filter-lexicon", metavar='<filter-lexicon>', type = str, default = '',
help = "Exclude entries in this filter lexicon from the output lexicon."
"each line must be <word> <phones>")
parser.add_argument("stats_file", metavar='<stats-file>', type = str,
help = "Input lexicon file containing pronunciation statistics/probs in the first column."
"each line must be <counts> <word> <phones>")
parser.add_argument("out_lexicon", metavar='<out-lexicon>', type = str,
help = "Output lexicon.")
print (' '.join(sys.argv), file = sys.stderr)
args = parser.parse_args()
args = CheckArgs(args)
return args
def CheckArgs(args):
if args.stats_file == "-":
args.stats_file_handle = sys.stdin
else:
args.stats_file_handle = open(args.stats_file)
    if args.filter_lexicon != '':
        if args.filter_lexicon == "-":
            # Read the filter lexicon from stdin (the original assigned
            # sys.stdout here, which cannot be read from).
            args.filter_lexicon_handle = sys.stdin
        else:
            args.filter_lexicon_handle = open(args.filter_lexicon)
if args.out_lexicon == "-":
args.out_lexicon_handle = sys.stdout
else:
args.out_lexicon_handle = open(args.out_lexicon, "w")
if args.set_max_to_one == args.set_sum_to_one:
raise Exception("Cannot have both "
"set-max-to-one and set-sum-to-one as true or false.")
return args
def ReadStats(args):
lexicon = {}
word_count = {}
for line in args.stats_file_handle:
splits = line.strip().split()
if len(splits) < 3:
continue
word = splits[1]
count = float(splits[0])
phones = ' '.join(splits[2:])
lexicon[(word, phones)] = lexicon.get((word, phones), 0) + count
word_count[word] = word_count.get(word, 0) + count
return [lexicon, word_count]
def ReadLexicon(lexicon_file_handle):
lexicon = set()
if lexicon_file_handle:
for line in lexicon_file_handle.readlines():
splits = line.strip().split()
if len(splits) == 0:
continue
if len(splits) < 2:
raise Exception('Invalid format of line ' + line
+ ' in lexicon file.')
word = splits[0]
phones = ' '.join(splits[1:])
lexicon.add((word, phones))
return lexicon
def ConvertWordCountsToProbs(args, lexicon, word_count):
word_probs = {}
for entry, count in lexicon.iteritems():
word = entry[0]
phones = entry[1]
prob = float(count) / float(word_count[word])
if word in word_probs:
word_probs[word].append((phones, prob))
else:
word_probs[word] = [(phones, prob)]
return word_probs
def ConvertWordProbsToLexicon(word_probs):
lexicon = {}
for word, entry in word_probs.iteritems():
for x in entry:
lexicon[(word, x[0])] = lexicon.get((word,x[0]), 0) + x[1]
return lexicon
def NormalizeLexicon(lexicon, set_max_to_one = True,
set_sum_to_one = False, min_prob = 0):
word_probs = {}
for entry, prob in lexicon.iteritems():
t = word_probs.get(entry[0], (0,0))
word_probs[entry[0]] = (t[0] + prob, max(t[1], prob))
for entry, prob in lexicon.iteritems():
if set_max_to_one:
prob = prob / word_probs[entry[0]][1]
elif set_sum_to_one:
prob = prob / word_probs[entry[0]][0]
if prob < min_prob:
prob = 0
lexicon[entry] = prob
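# Illustrative effect (worked by hand from the code above): with
#   lexicon = {('w', 'a b'): 2.0, ('w', 'a c'): 1.0}
# set_max_to_one=True rescales each word by its max count, giving
#   {('w', 'a b'): 1.0, ('w', 'a c'): 0.5}
# while set_sum_to_one=True would give 2/3 and 1/3 instead.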
def TakeTopN(lexicon, top_N):
lexicon_reshaped = defaultdict(list)
lexicon_pruned = {}
for entry, prob in lexicon.iteritems():
lexicon_reshaped[entry[0]].append([entry[1], prob])
for word in lexicon_reshaped:
prons = lexicon_reshaped[word]
sorted_prons = sorted(prons, reverse=True, key=lambda prons: prons[1])
for i in range(len(sorted_prons)):
if i >= top_N:
lexicon[(word, sorted_prons[i][0])] = 0
def WriteLexicon(args, lexicon, filter_lexicon):
words = set()
num_removed = 0
num_filtered = 0
for entry, prob in lexicon.iteritems():
if prob == 0:
num_removed += 1
continue
if entry in filter_lexicon:
num_filtered += 1
continue
words.add(entry[0])
print("{0} {1}".format(entry[0], entry[1]),
file = args.out_lexicon_handle)
print ("Before pruning, the total num. pronunciations is: {}".format(len(lexicon)), file=sys.stderr)
print ("Removed {0} pronunciations by setting min_prob {1}".format(num_removed, args.min_prob), file=sys.stderr)
print ("Filtered out {} pronunciations in the filter lexicon.".format(num_filtered), file=sys.stderr)
num_prons_from_phone_decoding = len(lexicon) - num_removed - num_filtered
print ("Num. pronunciations in the output lexicon, which solely come from phone decoding"
"is {0}. num. words is {1}".format(num_prons_from_phone_decoding, len(words)), file=sys.stderr)
def Main():
args = GetArgs()
[lexicon, word_count] = ReadStats(args)
word_probs = ConvertWordCountsToProbs(args, lexicon, word_count)
lexicon = ConvertWordProbsToLexicon(word_probs)
filter_lexicon = set()
    if args.filter_lexicon != '':
filter_lexicon = ReadLexicon(args.filter_lexicon_handle)
if args.top_N > 0:
TakeTopN(lexicon, args.top_N)
else:
NormalizeLexicon(lexicon, set_max_to_one = args.set_max_to_one,
set_sum_to_one = args.set_sum_to_one,
min_prob = args.min_prob)
WriteLexicon(args, lexicon, filter_lexicon)
args.out_lexicon_handle.close()
if __name__ == "__main__":
Main()
|
import re
from app.utils import email
def init(func):
"""Decorator, that calls function on module import"""
func()
return func
def jsonifyBlunder(data):
return {
'id': str(data['id']),
'game_id': str(data['game_id']),
'move_index': data['move_index'],
'forcedLine': data['forced_line'],
'fenBefore': data['fen_before'],
'blunderMove': data['blunder_move'],
'elo': data['elo']
}
def validateUsername(username):
if username is None:
return "Username is empty"
if len(username) < 3:
return "Username must contains at least 3 letter"
# TODO: Use precompiled regexes
usernameRegex = "^[-a-zA-Z0-9!#$%&'*+/=?^_`{|}~]+(\\.[-a-zA-Z0-9!#$%&'*+/=?^_`{|}~]+)*$"
if not re.match(usernameRegex, username):
return "Your name is too strange"
return None
def validatePassword(password):
if password is None:
return "Password is empty"
if len(password) < 5:
return "Your password must be at least 5 characters long"
return None
def validateEmail(email):
if email is None:
return "Email is empty"
if email.strip() != email or email.lower() != email:
return "Invalid email"
from app.utils.email import MXDomainValidation
if not MXDomainValidation(email):
return "Invalid email"
return None
def validateUser(username, password, email):
usernameValidation = validateUsername(username)
if usernameValidation is not None:
return {
'status': 'error',
'field': 'username',
'message': usernameValidation
}
passwordValidation = validatePassword(password)
if passwordValidation is not None:
return {
'status': 'error',
'field': 'password',
'message': passwordValidation
}
emailValidation = validateEmail(email)
if emailValidation is not None:
return {
'status': 'error',
'field': 'email',
'message': emailValidation
}
return None
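# Illustrative return value (using the checks above; the argument values are
# made up):
#   validateUser('ab', 'secret123', 'user@example.com')
# returns {'status': 'error', 'field': 'username',
#          'message': 'Username must contain at least 3 letters'}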
def validateCode(email, validation_code):
if validation_code is None:
return {
'status': 'error',
'field': 'validation_code',
'message': 'Validation Code is empty'
}
from app.db import postgre
validation = postgre.user.validateUserGet(email=email)
if validation is None:
return {
'status': 'error',
'field': 'validation_code',
'message': 'Incorrect validation code'
}
(count_tries, stored_code, date_create, date_update) = validation
if stored_code != validation_code:
return {
'status': 'error',
'field': 'validation_code',
'message': 'Incorrect validation code'
}
return None
|
from .avm import AVM, NoAVMPresent # noqa
__version__ = "0.9.6.dev0"
|
"""
Support for foobar2000 Music Player as media player
via pyfoobar2k https://gitlab.com/ed0zer-projects/pyfoobar2k
And foobar2000 component foo_httpcontrol by oblikoamorale https://bitbucket.org/oblikoamorale/foo_httpcontrol
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.helpers import script, config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, MediaPlayerEntity)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC, SUPPORT_VOLUME_STEP, SUPPORT_TURN_ON, SUPPORT_TURN_OFF,
SUPPORT_STOP, SUPPORT_SELECT_SOURCE, SUPPORT_SEEK, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_MUTE,
SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_SHUFFLE_SET)
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, CONF_USERNAME,
CONF_PASSWORD, STATE_OFF, STATE_PAUSED, STATE_PLAYING,
CONF_TIMEOUT, STATE_UNKNOWN, STATE_IDLE)
REQUIREMENTS = ['pyfoobar2k==0.2.8']
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Foobar2000'
DEFAULT_PORT = '8888'
DEFAULT_TIMEOUT = 3
DEFAULT_VOLUME_STEP = 5
CONF_VOLUME_STEP = 'volume_step'
CONF_TURN_ON_ACTION = 'turn_on_action'
CONF_TURN_OFF_ACTION = 'turn_off_action'
SUPPORT_FOOBAR_PLAYER = \
SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PLAY | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_STOP | SUPPORT_SEEK | SUPPORT_SHUFFLE_SET | \
SUPPORT_VOLUME_STEP | SUPPORT_SELECT_SOURCE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_VOLUME_STEP, default=DEFAULT_VOLUME_STEP): cv.positive_int,
vol.Optional(CONF_TURN_ON_ACTION, default=None): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_TURN_OFF_ACTION, default=None): cv.SCRIPT_SCHEMA})
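# Illustrative configuration.yaml entry (hedged: the platform key depends on
# the folder this custom component is installed under, assumed "foobar" here):
# media_player:
#   - platform: foobar
#     host: 192.168.1.50
#     port: 8888
#     username: user
#     password: pass
#     volume_step: 5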
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Foobar Player platform."""
from pyfoobar2k import FoobarRemote
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
timeout = config.get(CONF_TIMEOUT)
volume_step = config.get(CONF_VOLUME_STEP)
turn_on_action = config.get(CONF_TURN_ON_ACTION)
turn_off_action = config.get(CONF_TURN_OFF_ACTION)
remote = FoobarRemote(host, port, username, password, timeout)
add_devices([FoobarDevice(hass, remote, name, volume_step, turn_on_action, turn_off_action)])
class FoobarDevice(MediaPlayerEntity):
def __init__(self, hass, remote, name, volume_step, turn_on_action=None, turn_off_action=None):
self._name = name
self._remote = remote
self.hass = hass
self._volume = 0.0
self._track_name = ''
self._track_artist = ''
self._track_album_name = ''
self._track_duration = 0
self._track_position = 0
self._track_position_updated_at = None
self._albumart = ''
self._current_playlist = ''
self._playlists = []
self._shuffle = 0
self._volume_step = volume_step
self._selected_source = None
self._state = STATE_UNKNOWN
self._base_url = self._remote.url
self._albumart_path = ''
# Script creation for the turn on/off config options
if turn_on_action is not None:
turn_on_action = script.Script(
self.hass, turn_on_action,
"{} turn ON script".format(self.name),
self.async_update_ha_state(True))
if turn_off_action is not None:
turn_off_action = script.Script(
self.hass, turn_off_action,
"{} turn OFF script".format(self.name),
self.async_update_ha_state(True))
self._turn_on_action = turn_on_action
self._turn_off_action = turn_off_action
def update(self):
try:
info = self._remote.state()
if info:
if info['isPlaying'] == '1':
self._state = STATE_PLAYING
elif info['isPaused'] == '1':
self._state = STATE_PAUSED
else:
self._state = STATE_IDLE
else:
self._state = STATE_OFF
self.schedule_update_ha_state()
if self._state in [STATE_PLAYING, STATE_PAUSED]:
self._track_name = info['title']
self._track_artist = info['artist']
self._track_album_name = info['album']
self._volume = int(info['volume']) / 100
self._shuffle = info['playbackorder']
self._track_duration = int(info['itemPlayingLen'])
self._albumart_path = info['albumArt']
self._track_position = int(info['itemPlayingPos'])
self._track_position_updated_at = dt_util.utcnow()
if self._state in [STATE_PLAYING, STATE_PAUSED, STATE_IDLE]:
sources_info = self._remote.playlist()
if sources_info:
current_playlist_position = int(sources_info['playlistActive'])
playlists_raw = sources_info['playlists']
self._current_playlist = playlists_raw[current_playlist_position]['name']
self._playlists = [item["name"] for item in playlists_raw]
else:
_LOGGER.warning("Updating %s sources failed:", self._name)
except Exception as e:
_LOGGER.error("Updating %s state failed: %s", self._name, e)
self._state = STATE_UNKNOWN
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return True
@property
def volume_level(self):
"""Volume level of the media player (0 to 1)."""
return float(self._volume)
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing track."""
return self._track_name
@property
def media_artist(self):
"""Artist of current playing track."""
return self._track_artist
@property
def media_album_name(self):
"""Album name of current playing track."""
return self._track_album_name
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = SUPPORT_FOOBAR_PLAYER
if self._turn_on_action is not None:
supported_features |= SUPPORT_TURN_ON
if self._turn_off_action is not None:
supported_features |= SUPPORT_TURN_OFF
return supported_features
def turn_on(self):
"""Execute turn_on_action to turn on media player."""
if self._turn_on_action is not None:
self._turn_on_action.run(variables={"entity_id": self.entity_id})
else:
_LOGGER.warning("turn_on requested but turn_on_action is none")
def turn_off(self):
"""Execute turn_off_action to turn on media player."""
if self._turn_off_action is not None:
self._turn_off_action.run(variables={"entity_id": self.entity_id})
else:
_LOGGER.warning("turn_off requested but turn_off_action is none")
def media_play_pause(self):
"""Send the media player the command for play/pause."""
self._remote.cmd('PlayOrPause')
def media_pause(self):
"""Send the media player the command for play/pause if playing."""
if self._state == STATE_PLAYING:
self._remote.cmd('PlayOrPause')
def media_stop(self):
"""Send the media player the stop command."""
self._remote.cmd('Stop')
def media_play(self):
"""Send the media player the command to play at the current playlist."""
self._remote.cmd('Start')
def media_previous_track(self):
"""Send the media player the command for prev track."""
self._remote.cmd('StartPrevious')
def media_next_track(self):
"""Send the media player the command for next track."""
self._remote.cmd('StartNext')
def set_volume_level(self, volume):
"""Send the media player the command for setting the volume."""
self._remote.cmd('Volume', int(volume * 100))
def volume_up(self):
"""Send the media player the command for volume down."""
self._remote.cmd('VolumeDelta', self._volume_step)
def volume_down(self):
"""Send the media player the command for volume down."""
self._remote.cmd('VolumeDelta', -self._volume_step)
def mute_volume(self, mute):
"""Mute the volume."""
self._remote.cmd('VolumeMuteToggle')
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self._track_position_updated_at
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._track_duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._state in [STATE_PLAYING, STATE_PAUSED]:
return self._track_position
def media_seek(self, position):
"""Send the media player the command to seek in current playing media."""
self._remote.cmd('SeekSecond', position)
@property
def media_image_url(self):
"""Image url of current playing media."""
if 'cover_not_available' not in self._albumart_path:
self._albumart = '{}/{}'.format(self._base_url, self._albumart_path)
return self._albumart
@property
def source(self):
"""Return current source name."""
return self._current_playlist
@property
def source_list(self):
"""List of available input sources."""
return self._playlists
def select_source(self, source):
"""Select input source."""
playlists = self._remote.playlist()['playlists']
source_position = [index for index, item in enumerate(playlists) if item['name'] == source][0]
"""ignoring first playlist in playlists index"""
if source_position == 0:
return None
if source_position is not None:
self._remote.cmd('SwitchPlaylist', source_position)
self._remote.cmd('Start', 0)
self._current_playlist = source
@property
def media_playlist(self):
"""Title of Playlist currently playing."""
return self._current_playlist
def set_shuffle(self, shuffle):
"""Send the media player the command to enable/disable shuffle mode."""
playback_order = 4 if shuffle else 0
self._remote.cmd('PlaybackOrder', playback_order)
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return True if int(self._shuffle) == 4 else False
|
import pytest
from wemake_python_styleguide.violations.consistency import (
WrongMethodOrderViolation,
)
from wemake_python_styleguide.visitors.ast.classes import (
ClassMethodOrderVisitor,
)
correct_method_order = """
class Test(object):
def __new__(self):
...
def __init__(self):
...
def __call__(self):
...
def __await__(self):
...
def public(self):
...
def __bool__(self):
...
def public1(self):
...
def _protected(self):
...
def _mixed(self):
...
def __private(self):
...
def __private2(self):
...
"""
nested_functions = """
class Test(object):
def _protected(self):
def factory():
...
...
"""
class_template = """
class Template(object):
def {0}(self):
...
def {1}(self):
...
"""
@pytest.mark.parametrize('code', [
correct_method_order,
nested_functions,
])
def test_correct_method_order(
assert_errors,
parse_ast_tree,
default_options,
code,
mode,
):
"""Testing that correct method order is allowed."""
tree = parse_ast_tree(mode(code))
visitor = ClassMethodOrderVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize(('first', 'second'), [
('__init__', '__new__'),
('__call__', '__init__'),
('__call__', '__new__'),
('__await__', '__call__'),
('public', '__new__'),
('public', '__init__'),
('public', '__call__'),
('public', '__await__'),
('__magic__', '__new__'),
('__magic__', '__init__'),
('__magic__', '__call__'),
('__magic__', '__await__'),
('_protected', '__new__'),
('_protected', '__init__'),
('_protected', '__call__'),
('_protected', '__await__'),
('_protected', 'public'),
('_protected', '__magic__'),
('__private', '__new__'),
('__private', '__init__'),
('__private', '__call__'),
('__private', '__await__'),
('__private', 'public'),
('__private', '__magic__'),
('__private', '_protected'),
])
def test_incorrect_method_order(
assert_errors,
parse_ast_tree,
default_options,
first,
second,
mode,
):
"""Testing that incorrect method order is prohibited."""
tree = parse_ast_tree(mode(class_template.format(first, second)))
visitor = ClassMethodOrderVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [WrongMethodOrderViolation])
|
directory.functions.log_message(u'EntityEmail for Entity ' +
str(model) + u') added by: ' + request.user.username + u', value: ' +
value + u'\n')
return HttpResponse(
u'<a class="edit_rightclick" id="EntityEmail_email_' + str(email.id) + u'" href="mailto:' + value + u'">' + value + u'</a>' +
u'''<span class="edit" id="EntityEmail_new_%s">Click to add
email.</span>
<script language="JavaScript" type="text/javascript">
<!--
register_editables();
// -->
</script>''' % str(email.id))
|
import sys
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import utils
def build_network():
network = tflearn.input_data(shape=[None, 2])
network = tflearn.fully_connected(network, 64, activation='relu')
network = dropout(network, 0.9)
network = tflearn.fully_connected(network, 128, activation='relu')
network = dropout(network, 0.9)
network = tflearn.fully_connected(network, 128, activation='relu')
network = dropout(network, 0.9)
network = tflearn.fully_connected(network, 2, activation='softmax')
network = tflearn.regression(network, optimizer='sgd', learning_rate=0.1,
loss='categorical_crossentropy')
return network
def train(in_file):
xvals, yvals = utils.load_hot(in_file)
xvals, yvals = utils.randomize(xvals, yvals)
network = build_network()
model = tflearn.DNN(network)
model.fit(xvals, yvals, n_epoch=400, validation_set=0.2, show_metric=True)
model.save('complicated.tflearn')
def predict(in_file, out_file):
xvals, yvals = utils.load_hot(in_file)
network = build_network()
predictions = utils.predict_hot(xvals, network, 'complicated.tflearn')
print('Accuracy: {}%'.format(utils.get_accuracy_hot(yvals, predictions)))
utils.write_predictions(xvals, predictions, out_file)
def main(argv):
if argv[1] == 'train':
train(argv[2])
elif argv[1] == 'predict':
predict(argv[2], argv[3])
else:
raise ValueError('Unknown operation {}'.format(argv[1]))
if __name__ == "__main__":
main(sys.argv)
|
from discord.ext import commands
from ..events.utils import Utils
class Reload(commands.Cog):
"""Reload Command Class"""
def __init__(self, bot):
self.bot = bot
self.desc = "A command that reloads commands 🧠"
self.usage = "reload [cogs.category.command]"
@commands.command(name='reload', aliases=['rl'])
@commands.is_owner()
async def reload(self, ctx, command=None):
if not command:
embed = await Utils(self.bot).embed(ctx, title="Please specify a command to reload.",
description="", color=0xDE6246)
return await ctx.send(embed=embed)
try:
self.bot.unload_extension(command)
self.bot.load_extension(command)
except Exception as error:
error_handler = await Utils(self.bot).error(ctx, str(error))
return await ctx.send(embed=error_handler)
else:
embed = await Utils(self.bot).embed(ctx, title=f"The ``{command.split('.')[2]}`` command has been reloaded.",
description="", color=ctx.author.color)
return await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Reload(bot))
|
# coding=utf8
# Copyright 1999-2017 Alibaba Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import env
from component import constants
def get_task_file(task_file_name, data_dir=None):
if not data_dir:
data_dir = env.data_dir
if task_file_name:
return data_dir + task_file_name
def get_service_file(task_file_name, data_dir=None):
if task_file_name:
return get_task_file(task_file_name, data_dir) + ".service"
def get_http_path(task_file_name):
if task_file_name:
return constants.PEER_HTTP_PATH_PREFIX + task_file_name
def get_task_name(file_path):
index = file_path.rfind(".service")
if index != -1:
return file_path[:index]
return file_path
def create_item(task_id, node, dst_cid='', piece_range='',
                result=constants.RESULT_INVALID, status=constants.TASK_STATUS_RUNNING,
                piece_cont=None):
    # Avoid a mutable default argument; build a fresh list per call.
    if piece_cont is None:
        piece_cont = []
    return {'dstCid': dst_cid, 'range': piece_range, 'result': result, 'taskId': task_id,
            'superNode': node, 'status': status, "pieceCont": piece_cont}
def get_local_rate(piece_task):
if env.local_limit:
return env.local_limit
return int(piece_task["downLink"]) * 1024
|
#!/usr/bin/env python3
import statistics
# read numbers one line each, then calculate mean
nums = []
while True:
try:
n = float(input())
nums.append(n)
except EOFError:
break
print(statistics.mean(nums))
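# Note (not part of the original script): statistics.mean() raises StatisticsError on
# empty input, so a guarded variant of the final line could be:
#     print(statistics.mean(nums) if nums else 0.0)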
|
import os
from collections import defaultdict
from unittest.mock import Mock
import pytest
from qtpy.QtWidgets import QAction, QShortcut
from napari import Viewer
from napari._qt.qt_event_loop import _ipython_has_eventloop, run, set_app_id
@pytest.mark.skipif(os.name != "nt", reason="Windows specific")
def test_windows_grouping_overwrite(qapp):
import ctypes
def get_app_id():
mem = ctypes.POINTER(ctypes.c_wchar)()
ctypes.windll.shell32.GetCurrentProcessExplicitAppUserModelID(
ctypes.byref(mem)
)
res = ctypes.wstring_at(mem)
ctypes.windll.Ole32.CoTaskMemFree(mem)
return res
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("test_text")
assert get_app_id() == "test_text"
set_app_id("custom_string")
assert get_app_id() == "custom_string"
set_app_id("")
assert get_app_id() == ""
def test_run_outside_ipython(qapp, monkeypatch):
"""Test that we don't incorrectly give ipython the event loop."""
assert not _ipython_has_eventloop()
v1 = Viewer(show=False)
assert not _ipython_has_eventloop()
v2 = Viewer(show=False)
assert not _ipython_has_eventloop()
with monkeypatch.context() as m:
mock_exec = Mock()
m.setattr(qapp, 'exec_', mock_exec)
run()
mock_exec.assert_called_once()
v1.close()
v2.close()
def test_shortcut_collision(make_napari_viewer):
viewer = make_napari_viewer()
defined_shortcuts = defaultdict(list)
problematic_shortcuts = []
shortcuts = viewer.window._qt_window.findChildren(QShortcut)
for shortcut in shortcuts:
key = shortcut.key().toString()
if key == "Ctrl+M":
# menubar toggle support
# https://github.com/napari/napari/pull/3204
continue
if key and key in defined_shortcuts:
problematic_shortcuts.append(key)
defined_shortcuts[key].append(key)
actions = viewer.window._qt_window.findChildren(QAction)
for action in actions:
key = action.shortcut().toString()
if key and key in defined_shortcuts:
problematic_shortcuts.append(key)
defined_shortcuts[key].append(key)
assert not problematic_shortcuts
|
def aumentar(num, porcentagem):
"""
-> Calcula o valor acrescido de uma determinada porcentagem
:param num: numero que será acrescido da porcentagem
:param porcentagem: valor da porcentagem a ser calculada
:return: o resultado do cálculo
"""
resultado = num + (num * (porcentagem / 100))
return resultado
def diminuir(num, porcentagem):
"""
-> Calcula o valor reduzido de uma determinada porcentagem
:param num: numero que será acrescido da porcentagem
:param porcentagem: valor da porcentagem a ser calculada
:return: o resultado do cálculo
"""
resultado = num - (num * (porcentagem / 100))
return resultado
def dobro(num):
"""
-> Calcula o dobro de um determinado número
:param num: número alvo da multiplicação por 2
:return: o resultado
"""
resultado = num * 2
return resultado
def metade(num):
"""
-> Calcula a metade de um determinado número
:param num: número alvo da divisão por 2
:return: o resultado
"""
resultado = num / 2
return resultado
def formatacao(num: float, moeda: str = 'R$'):
string_num = f'{moeda}{num:.2f}'.replace('.', ',')
return string_num
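# --- Usage sketch (illustrative, not part of the original module) ---
if __name__ == '__main__':
    preco = 100.0
    print(formatacao(aumentar(preco, 10)))  # R$110,00
    print(formatacao(diminuir(preco, 10)))  # R$90,00
    print(formatacao(dobro(preco)))         # R$200,00
    print(formatacao(metade(preco)))        # R$50,00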
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sheet
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .x_sheet_filter_descriptor import XSheetFilterDescriptor as XSheetFilterDescriptor_47cc0ff7
class XSheetFilterable(XInterface_8f010a43):
"""
represents something that can be filtered using an XSheetFilterDescriptor.
See Also:
`API XSheetFilterable <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1sheet_1_1XSheetFilterable.html>`_
"""
__ooo_ns__: str = 'com.sun.star.sheet'
__ooo_full_ns__: str = 'com.sun.star.sheet.XSheetFilterable'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.sheet.XSheetFilterable'
@abstractmethod
def createFilterDescriptor(self, bEmpty: bool) -> 'XSheetFilterDescriptor_47cc0ff7':
"""
creates a filter descriptor.
"""
@abstractmethod
def filter(self, xDescriptor: 'XSheetFilterDescriptor_47cc0ff7') -> None:
"""
performs a filter operation, using the settings of the passed filter descriptor.
"""
__all__ = ['XSheetFilterable']
|
# import sys
# import os
# sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from aiohttp import web
from motor import motor_asyncio as ma
from parser.parser import iteration
from parser import settings
from parser.views import routes
class PostsApiTest(AioHTTPTestCase):
async def get_application(self):
app = web.Application()
settings.DB_NAME = 'tests_parser'
app.add_routes(routes)
app.client = ma.AsyncIOMotorClient(settings.DB_CLIENT)
app.client.drop_database(settings.DB_NAME)
app.db = app.client[settings.DB_NAME]
await app.db.posts.insert_one({'title': '1 test', 'url': 'http://test.com/1', 'created': 1580151926})
await app.db.posts.insert_one({'title': '2 test', 'url': 'http://test.com/2', 'created': 1580171926})
return app
@unittest_run_loop
async def test_posts_get(self):
resp = await self.client.request("GET", "/posts")
assert resp.status == 200
resp = await resp.json()
        assert len(resp) == 2
@unittest_run_loop
async def test_posts_limit(self):
resp = await self.client.request("GET", "/posts?limit=1")
assert resp.status == 200
resp = await resp.json()
        assert len(resp) == 1
@unittest_run_loop
async def test_posts_ordering(self):
resp = await self.client.request("GET", "/posts?limit=1&order=title")
assert resp.status == 200
resp = await resp.json()
assert '1 test' in resp[0].get('title')
resp = await self.client.request("GET", "/posts?limit=1&order=-title")
assert resp.status == 200
resp = await resp.json()
assert '2 test' in resp[0].get('title')
@unittest_run_loop
async def test_parser(self):
collection = self.app.db[settings.DB_NAME].test_posts
count = await collection.count_documents({})
        assert count == 0
        await iteration(collection)
        count = await collection.count_documents({})
        assert count == 30
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
import torch
TrainingMode = torch.onnx.TrainingMode
from packaging.version import Version
def torch_onnx_export(
model,
args,
f,
export_params=True,
verbose=False,
training=TrainingMode.EVAL,
input_names=None,
output_names=None,
operator_export_type=None,
opset_version=None,
_retain_param_name=None,
do_constant_folding=True,
example_outputs=None,
strip_doc_string=None,
dynamic_axes=None,
keep_initializers_as_inputs=None,
custom_opsets=None,
enable_onnx_checker=None,
use_external_data_format=None,
export_modules_as_functions=False):
if Version(torch.__version__) >= Version("1.11.0"):
torch.onnx.export(
model=model,
args=args,
f=f,
export_params=export_params,
verbose=verbose,
training=training,
input_names=input_names,
output_names=output_names,
operator_export_type=operator_export_type,
opset_version=opset_version,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes,
keep_initializers_as_inputs=keep_initializers_as_inputs,
custom_opsets=custom_opsets,
export_modules_as_functions=export_modules_as_functions)
else:
torch.onnx.export(
model=model,
args=args,
f=f,
export_params=export_params,
verbose=verbose,
training=training,
input_names=input_names,
output_names=output_names,
operator_export_type=operator_export_type,
opset_version=opset_version,
_retain_param_name=_retain_param_name,
do_constant_folding=do_constant_folding,
example_outputs=example_outputs,
strip_doc_string=strip_doc_string,
dynamic_axes=dynamic_axes,
keep_initializers_as_inputs=keep_initializers_as_inputs,
custom_opsets=custom_opsets,
enable_onnx_checker=enable_onnx_checker,
use_external_data_format=use_external_data_format)
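# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal call to the wrapper above; the model, tensor shape, file name and axis
# names below are assumptions made purely for demonstration.
if __name__ == "__main__":
    from torch import nn
    example_model = nn.Linear(4, 2)
    example_input = torch.randn(1, 4)
    torch_onnx_export(
        example_model,
        (example_input,),
        "linear_example.onnx",
        input_names=["x"],
        output_names=["y"],
        opset_version=13,
        dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
    )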
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the Audit log library for Pony Mail codename Foal
It manages viewing and editing the audit log.
"""
import plugins.session
import typing
import time
class AuditLogEntry:
_keys: tuple = (
"id",
"date",
"action",
"remote",
"author",
"target",
"lid",
"log",
)
def __init__(self, doc):
for key in self._keys:
if key in doc:
setattr(self, key, doc[key])
async def view(
session: plugins.session.SessionObject, page: int = 0, num_entries: int = 50, raw: bool = False, filter: typing.Tuple = ()
) -> typing.AsyncGenerator:
""" Returns N entries from the audit log, paginated """
assert session.database, "No database connection could be found!"
if not filter:
res = await session.database.search(
index=session.database.dbs.db_auditlog, size=num_entries, from_=page * num_entries, sort="date:desc",
)
else:
res = await session.database.search(
index=session.database.dbs.db_auditlog, size=num_entries, from_=page * num_entries, sort="date:desc",
body={
"query": {"bool": {"must": [{"terms": {"action": filter}}]}}
},
)
for doc in res["hits"]["hits"]:
doc["_source"]["id"] = doc["_id"]
if raw:
yield doc["_source"]
else:
yield AuditLogEntry(doc["_source"])
async def add_entry(session: plugins.session.SessionObject, action: str, target: str, lid: str, log: str) -> None:
""" Adds an entry to the audit log"""
# Default log entries based on type
if not log and action == "delete":
log = f"Removed email {target} from {lid} archives"
if not log and action == "edit":
log = f"Modified email {target} from {lid} archives"
assert session.credentials, "No session credentials could be found!"
assert session.database, "Session not connected to database!"
await session.database.index(
index=session.database.dbs.db_auditlog,
body={
"date": time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(time.time())),
"action": action,
"remote": session.remote,
"author": f"{session.credentials.uid}@{session.credentials.oauth_provider}",
"target": target,
"lid": lid,
"log": log,
},
)
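# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes an authenticated SessionObject with a live database connection, e.g. inside
# an async endpoint handler; the target message id and list id below are made up.
async def example_audit_usage(session: plugins.session.SessionObject):
    # Record an edit action; add_entry fills in the default log text for "edit".
    await add_entry(session, action="edit", target="<message-id>", lid="dev@lists.example.org", log="")
    # Fetch the 25 most recent audit entries (first page).
    return [entry async for entry in view(session, page=0, num_entries=25)]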
|
import random
template = """[Fact]
public void Test_Func{n}()
{{
Func<{types}, object> f = new EmptyFunc<{types}, object>();
Assert.Equal(default(object), f({args}));
}}
"""
def gen(n: int):
types = ", ".join("int" for _ in range(n))
args = ", ".join(hex(random.randint(0, 1 << 16)) for _ in range(n))
return template.format(n=n, types=types, args=args)
for i in range(1, 16):
print(gen(i))
|
import os
from setuptools import setup
import sys
# pip workaround
os.chdir(os.path.abspath(os.path.dirname(__file__)))
packages = []
for rootdir, dirs, files in os.walk('mldebugger'):
if '__init__.py' in files:
packages.append(rootdir.replace('\\', '.').replace('/', '.'))
req = ['numpy',
'zmq',
'certifi>=2017.4.17',
'Pillow',
'image',
'scikit-learn==0.19.0',
'sklearn==0.0',
'vistrails==2.2.4',
'nose==1.3.7',
'pandas==0.24.0',
'scipy == 0.19.1',
'Django == 1.11.29']
if sys.version_info < (2, 7):
req.append('argparse')
setup(name='MLDebugger',
version='0.1',
      packages=packages,
entry_points={
'console_scripts': [
'mldebugger = mldebugger.run:main',
'worker = mldebugger.workers.vistrails_worker',
'python_worker = mldebugger.workers.python_worker']},
install_requires=req,
description="MlDebugger library",
author="Raoni Lourenco",
author_email='raoni@nyu.edu',
maintainer='Raoni Lourenco',
maintainer_email='raoni@nyu.edu',
keywords=['Machine Learning Pipelines',
'Provenance',
'Heuristic Algorithms',
'Debugging',
'Combinatorial Design',
'Parameter Exploration'])
|
from magic_repr import make_repr
from .side import Side
from ermaket.utils.xml import XMLObject
__all__ = ['Relation']
class Relation(XMLObject):
def __init__(self, name, sides):
self.name = name
self.sides = sides
@property
def _tag_name(self):
return 'relation'
@classmethod
def make(cls, name, *sides):
return cls(name, [Side(*args) for args in sides])
@classmethod
def _from_xml(cls, tag):
return cls._make_args(
tag.find('name').text,
[Side.from_xml(t) for t in tag.find_all('side')]
)
def __len__(self):
return len(self.sides)
def invert(self):
assert len(self.sides) == 2
return Relation(self.name, self.sides[::-1])
def to_xml(self):
tag = self.soup.new_tag(self._tag_name)
tag.append(self.new_tag('name', self.name))
        for side in self.sides:
            tag.append(side.to_xml())
return tag
Relation.__repr__ = make_repr('name', 'sides')
|
valores = list()
while True:
num = int(input('Informe um numero: '))
if num not in valores:
valores.append(num)
print('O numero Foi adicionado!!!')
else:
print('Este valor já tem!')
resp = ' '
while resp not in 'SN':
resp = str(input('Quer acrescentar mais numeros? [S/N] ')).upper().strip()[0]
if resp == 'N':
break
valores.sort()
print(f'Os valores digitados foram {valores}.')
|
from flask import Flask, request, redirect
import requests
import json
CLIENT_ID = 'f1e42d14f45491f9ca34'
CLIENT_SECRET = ''
OAUTH_STATES = []
app = Flask(__name__)
@app.route('/auth_state')
def auth_state():
    # Save the anti-CSRF state secret for later verification.
    secret = request.args.get('state')
    OAUTH_STATES.append(secret)
    # A Flask view must return a response; acknowledge with an empty body.
    return ''
@app.route('/callback')
def callback():
code = request.args.get('code')
state = request.args.get('state')
data = {'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'code': code,
'state': state}
resp = requests.post('https://github.com/login/oauth/access_token',
data=data,
headers={'Accept': 'application/json'})
oauth_resp = resp.json()
route = json.loads(state)['route']
return redirect('http://127.0.0.1:3000/?route={}&gh_access_token={}'.format(route, oauth_resp['access_token']))
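# --- Sketch (illustrative addition): verifying the anti-CSRF state ---
# The callback above never checks the received state against OAUTH_STATES. Assuming the
# same state string is first registered via /auth_state, a minimal check could be:
def state_is_valid(state):
    # Accept each registered state only once so it cannot be replayed.
    if state in OAUTH_STATES:
        OAUTH_STATES.remove(state)
        return True
    return False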
if __name__ == '__main__':
app.run(port=1235)
|
"""
CAT12 segmentation interface utilities.
"""
from django_mri.analysis.interfaces.matlab.spm.cat12.utils.batch_templates import \
CAT12_TEMPLATES # noqa: E501
|
# @Author : Wang Xiaoqiang
# @GitHub : https://github.com/rzjing
# @File : scheduler.py
# @Time : 2020/1/2 23:01
from datetime import datetime
from apscheduler.jobstores.base import JobLookupError
from flask_restful import Resource
from app import v1, scheduler, make_response, parser
from app.common.utils import say
_parser = parser.copy()
_parser.add_argument('id', type=str, location='args', help='job.id')
_parser.add_argument('trigger', type=str, default='once', choices=['once', 'interval', 'cron'], location='form')
_parser.add_argument('jobstore', type=str, default='default', choices=['default', 'redis'], location=['args', 'form'])
class Scheduler(Resource):
def __init__(self):
self.error = None
self.args = _parser.parse_args()
def get(self):
if self.args['id']:
job = scheduler.get_job(self.args['id'])
return make_response(job={'id': job.id, 'next_run_time': job.next_run_time, 'args': job.args})
else:
jobs = scheduler.get_jobs()
return make_response(
jobs=[{'id': job.id, 'next_run_time': job.next_run_time, 'args': job.args} for job in jobs]
)
def post(self):
trigger, jobstore = self.args['trigger'], self.args['jobstore']
        # Assumes standard APScheduler semantics: triggers are 'date', 'interval' and
        # 'cron', and job arguments are passed via 'args' (the original 'once' trigger
        # and 'params' keyword are not standard options).
        if trigger == 'once':
            job = scheduler.add_job(say, 'date', run_date=datetime.now(), args=[f'{trigger} -> add_job'])
        elif trigger == 'interval':
            job = scheduler.add_job(say, 'interval', seconds=3, args=[f'{trigger} -> add_job'], jobstore=jobstore)
        else:
            job = scheduler.add_job(say, 'cron', minute='*/2', args=[f'{trigger} -> add_job'], jobstore=jobstore)
return make_response(job={'id': job.id, 'next_run_time': job.next_run_time, 'args': job.args})
def delete(self):
try:
if self.args['id']:
scheduler.del_job(self.args['id'])
else:
scheduler.del_jobs()
except JobLookupError as e:
return make_response(code=400, error=e.args[0])
return make_response(info='deleted')
v1.add_resource(Scheduler, '/scheduler')
if __name__ == '__main__':
pass
|
# Given a string s, find the longest palindromic substring of s. You may assume the maximum length of s is 1000.
# Approach: dynamic programming (DP)
# Transition:
#   substring longer than 2:  dp[i][j] = dp[i+1][j-1] and (s[i] == s[j])
#   substring of length 1:    dp[i][j] = True
#   substring of length 2:    dp[i][j] = (s[i] == s[j])
def longestPalindromeDp(s):
n = len(s)
dp = [[False] * n for _ in range(n)]
ans = ''
for l in range(n):
for i in range(n):
j = i + l
if j >= n:
break
if l == 0:
dp[i][j] = True
elif l == 1:
dp[i][j] = (s[i] == s[j])
else:
dp[i][j] = dp[i + 1][j - 1] and (s[i] == s[j])
if dp[i][j] and l + 1 > len(ans):
ans = s[i:j + 1]
return ans
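# --- Usage sketch (illustrative, not part of the original solution) ---
if __name__ == '__main__':
    print(longestPalindromeDp('babad'))  # -> 'bab' ('aba' would be equally valid)
    print(longestPalindromeDp('cbbd'))   # -> 'bb'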
|
import psycopg2 as psy
import dash
import dash_core_components as dcc
import dash_html_components as html
import os
import glob
import pandas as pd
import numpy as np
from dash.dependencies import Input, Output, State
from faker import Factory
import plotly.offline as py
import plotly.graph_objs as go
from IPython.display import Image
import sys
import logging
import dash_bootstrap_components as dbc
import base64
from datetime import datetime
now = datetime.now()
# Up to this point we have only declared the libraries; some were imported but are no
# longer used, since they were dropped over time.
# Now we create the dictionaries used to encode and decode the keys stored in the database.
dict_mudar = {
'evd' : 'evadidos',
'ing' : 'ingressantes'
}
dict_ano = {
0: 2013,
1: 2014,
2: 2015,
3: 2016,
4: 2017
}
dict_raca= { 0:'Não quis declarar',
1: 'Branca',
2: 'Preta',
3: 'Parda',
4: 'Amarela',
5: 'Indígena',
6: 'Não dispõe',
9: 'Não dispõe'
}
dict_ies= { 0:'nd',
1: 'FPB',
2: 'Nassau JP',
3: 'Nassau CG',
4: 'IFPB',
5: 'UFCG',
6: 'UFPB'
}
dict_order_axis_x= { 'Quantidade integralizada':['(0 - 500)','(500 - 1000)', '(1000 - 2500)', '(3000>)'],  # order of the categories on the X axis for bar charts
'Por idade': ['(15-21)', '(21-25)', '(25-35)', '(35>)'],
'Por raca' : [''],
'Por sexo': [''],
'Por deficiencia': [''],
'Por quantidade': [2013,2014,2015,2016,2017],
'Atividade Extracurricular': [''],
'Vagas Reservadas' : [''],
'Para outra Ies': ['']
}
dict_radio = { 'evd' : 'fato_evasao',
'ing' : 'fato_ingresso'
}
dict_no = { 'FPB':1,
'NASSAU JP':2,
'NASSAU CG':3,
'IFPB':4 ,
'UFCG':5 ,
'UFPB':6
}
dict_sexo={0: 'Masculino',
1: 'Feminino',
2: 'Masculino'
}
dict_deficiencia= { 0:'Não',
1: 'Sim',
2: 'Não dispõe',
9: 'Não dispõe'
}
dict_qt_integ= {None:None,
'Não possui informação' : 'Não possui informação',
'(0 - 500)' : '(0 - 500)',
'(500 - 1000)' : '(500 - 1000)',
'(1000 - 2500)' : '(1000 - 2500)',
'(2500 - 3000)': '(2500 - 3000)',
'(3000>)' : '(3000>)'
}
dict_gamb={a:a for a in range(0,4000)}
dict_idade= {
'(15-21)':'(15-21)',
'(21-25)':'(21-25)',
'(25-35)':'(25-35)',
'(35>)':'(35>)'
}
dict_atividade= { 0: 'Não Possui',
1: 'Possui'
}
dict_reserva = {0:'Não reversado',
1: 'Reservado'
}
dict_dicionario={ 'Quantidade integralizada' : dict_qt_integ,  # which dictionary decodes the stored values for each question
'Por raca' : dict_raca,
'Por sexo': dict_sexo,
'Por deficiencia': dict_deficiencia,
'Por idade' : dict_idade,
'Por quantidade': dict_gamb,
'Atividade Extracurricular' : dict_atividade,
'Vagas Reservadas': dict_reserva,
'Para outra Ies': dict_ies
}
dict_coluna= {'Quantidade integralizada' : 'qt_carga_horaria_integ',  # database column that corresponds to each question
'Por raca' : 'tp_cor_raca',
'Por sexo': 'tp_sexo',
'Por deficiencia': 'tp_deficiencia',
'Por idade' : 'idade',
'Por quantidade': 'censo',
'Atividade Extracurricular': 'in_atividade_extracurricular',
'Vagas Reservadas': 'in_reserva_vagas',
'Para outra Ies': 'sk_ies'
}
dict_eixo_x= {'Quantidade integralizada' : 'Quantidade integralizada',  # label shown on the X axis of bar charts
'Por raca' : 'Raça',
'Por sexo': 'Sexo',
'Por deficiencia': 'Deficiencia',
'Por idade' : 'Idade',
'Por quantidade': 'Anos',
'Atividade Extracurricular' : 'Atividade Extracurricular',
'Vagas Reservadas': 'Vagas Reservadas',
'Para outra Ies': 'IES - Elétrica'
}
dict_eixo_y= {'Quantidade integralizada' : 'Quantidade de alunos ',  # label shown on the Y axis of bar charts
'Por raca' : 'Quantidade de alunos ',
'Por sexo': 'Quantidade de alunos ',
'Por deficiencia': 'Quantidade de alunos ',
'Por idade' : 'Quantidade de alunos ',
'Por quantidade': 'Quantidade de alunos ',
'Atividade Extracurricular' : 'Quantidade de alunos',
'Vagas Reservadas': 'Quantidade de alunos',
'Para outra Ies': 'Quantidade de alunos'
}
# The dictionaries end here
mydb=psy.connect (
host='ec2-54-235-100-99.compute-1.amazonaws.com',
user = 'mhxcrjdnckxtbr',
password='041d51b54231eb4e36b2a8d58f5ae16bc5cfaab2303d426676580f62e52ebcc1',
database='d9k1k422mp16r5')
#mydb=psy.connect (
#host='localhost',
#user = 'ODG_adapt',
#password='observatorio',
#database='ODG_adapt')
mycursor=mydb.cursor()
# Here we declare what goes inside the buttons and controls
Ies=['FPB','NASSAU JP', 'NASSAU CG', 'IFPB', 'UFCG', 'UFPB']
alo=dbc.themes.BOOTSTRAP
anos = [2013,2014,2015,2016,2017]
external_stylesheets = ['https://codepen.io/g4b1b13l/pen/VwwrYdL.css']
evadidos=['Quantidade integralizada',
'Por raca',
'Por sexo',
'Por deficiencia',
'Por idade',
'Por quantidade',
'Atividade Extracurricular',
'Para outra Ies'
]
ingressante=[
'Por raca',
'Por sexo',
'Por deficiencia',
'Por idade',
'Por quantidade',
'Vagas Reservadas']
dict_mudar_list = {  # maps each fact table to the questions available for it
'evd' : evadidos,
'ing' : ingressante
}
formato=['Pizza', 'Barra', 'Barra - Stacked']
# The button parameters end here
# The application code starts here
app = dash.Dash(__name__, external_stylesheets=external_stylesheets,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
])
server = app.server
app.title = 'Plataforma_ODG'
app.layout = html.Div([
html.Div(
[html.H1(children= 'ODG - Observatório de Dados da Graduação'),
]
,
style={
'font-size': '5pt',
#'height': '75px',
'margin': '-10px -10px -10px',
'background-color': '#ADD8E6',
'text-align': 'center',
#'border-radius': '2px',
#'display': 'flex',
#'margin-left': '0',
}
),
dbc.Button('Menu', outline=True, id='menu_lateral'),
dcc.Checklist(
id='check',
options=[
{'label': '', 'value': 'ativado'},
],
value=['d'],
labelStyle={'display': 'none'}
),
html.Div([
html.Div([
html.Div([
html.Div([
html.Div([
html.Label('Comparar com dois graficos?',style={'padding-top': '30px'}),
dcc.RadioItems(
options=[
{'label': 'Sim', 'value': 'sim'},
{'label': 'Não', 'value': 'nao'},
],
id='dois_graphs',
value='nao',
),
],),
html.Div([
html.Label('Qual grafico deseja configurar?'),
dcc.Dropdown(
options=[
{'label': 'Gráfico 1, da esquerda', 'value': 'g1'},
{'label': 'Gráfico 2, da direita', 'value': 'g2'},
],
id='config_graph',
value='g1',
multi=False,
searchable=False,
#className='two columns',
#style={'display': 'block',
#'z-index': '1',
#}
),
],
id='configa_graph'
#className='two columns',
),
],
#className='row',
),
dcc.RadioItems(
options=[
#{'label': 'None', 'value': 'none'},
{'label': 'Evasão', 'value': 'evd'},
{'label': 'Ingresso', 'value': 'ing'}
],
id='escolher',
value='evd',
labelStyle={'display': 'inline-block',
'margin-top':'30px'}
),
html.Div([
html.Label('Censo'),
html.Div([
dcc.RangeSlider(
min=0,
max=4.2,
marks={
0: '2013',
1: '2014',
2: '2015',
3: '2016',
4: '2017'
},
value=[0,4],
id='variavel',
step=1,
#count= 1,
),
],
#id='variavel',
style = {'margin-top': '5px',
'margin-bottom': '25px',
'margin-left':'10px'},),
#html.Label('Censo'),
#dcc.Dropdown(
# id = 'variavel',
# options=[
# {'label': j, 'value': j} for j in anos
# ],
# multi=True
#),
html.Div([
html.Label('Tipo do Grafico'),
dcc.Dropdown(
id = 'pi_ou_bar',
options=[
{'label': fo, 'value': fo} for fo in formato
],
value='',
multi=False,
searchable=False,
placeholder='Selecione o tipo do gráfico'
)],
),
html.Div([
html.Label('Ies'),
dcc.Dropdown(
id = 'ies',
options=[
{'label': a, 'value': a} for a in Ies
],
multi=True,
searchable=False,
value= '',
placeholder='Selecione a Ies desejada',
)],
id='tudo_ies'
),
html.Div([
html.Label(children='Alunos evadidos por',
id='trocar_nome_da_pergunta'),
dcc.Dropdown(
id = 'tipo',
value='',
options=[
{'label': a, 'value': a} for a in evadidos
],
multi=False,
searchable=False,
placeholder='Selecione a pergunta',
)],
style = {'display': 'block'},
id='evasao'
),
],
id='dropdowns',
style = {'display': 'block'}
)
],
style={
#'margin-top': '60px',
#'display': 'inline-block'
#'background-color': '#add8e6',
} ,
id='tudin',
#className='two columns',
),
],
id='testando',
style={
#'left': '-300px',
'margin-left': '15px'},
className='barra'
),
html.Div(
[dcc.Graph(id = 'feature-graphic'),
]
,
id='class',
style={'display': 'inline-block',
'position': 'absolute',
'z-index': '2'
},
className='twelve columns',
),
#dcc.Graph(id = 'feature-graphic'),
html.Div(
[dcc.Graph(id='grafico-dois',),
],
style={'margin-left': '100px',
},
className='five columns'
),
html.Div(
[dcc.Graph(id='grafico-tres',
),
],
className='five columns'
),
],
className='row'
),
html.Div([
html.Div([
html.H5('Deixe sua sugestão de pergunta abaixo: '),
dbc.Form(
[
dbc.FormGroup(
[
dbc.Label("Email *", className="mr-2"),
dbc.Input(id='email',type="email", placeholder="Ex.: odg@gmail.com"),
],
className="mr-3",
),
dbc.FormGroup(
[
dbc.Label("Sugestão *", className="mr-2"),
dbc.Input(id='sugestao',type="text", placeholder="Ex.: Quantos alunos..."),
],
className="mr-3",
),
dbc.FormGroup(
[
dbc.Label("Nome", className="mr-2"),
dbc.Input(id='nome',type="text", placeholder="Ex.: João da Silva"),
],
className="mr-3",
),
html.Div([
dbc.Button("Enviar",id='submit-button', color="primary")],
style = {
'margin-top': '5px',
#'height' : '60%',
#'margin-left': '150px',
#'margin-bottom': '60px'}
}),
],
inline=False,
),
],
className='two columns',
),
html.Div([
dbc.Alert(
[
#html.H2("Quadro de mensagens"
# ),
dbc.Alert(
#html.H2(
children="Campo obrigatório não informado!",
# style= {'color': '#ffffff'},
# ),
id='alerta_vermelho',
is_open=False,
#duration=6000,
dismissable = True,
style = {
'background-color': '#ff0000',
'font-size': '18px',
'color': '#ffffff'
#'height' : '60%',
#'margin-left': '150px',
#'margin-bottom': '60px'}
}),
dbc.Alert(
#html.H2(
children="Mensagem enviada com sucesso, tentaremos um retorno o mais breve possivel!",
# style= {'color': '#ffffff'},
# ),
#'Testando este espaço',
id='alerta_verde',
is_open=False,
#duration=6000,
dismissable = True,
style = {
'background-color': '#77DD77',
'font-size': '18px',
'color': '#ffffff'
#'height' : '60%',
#'margin-left': '150px',
#'margin-bottom': '60px'}
}),
dbc.Alert(
#html.H2("Dado indisponível para este censo!",
# style= {'color': '#ffffff'},
# ),
children="Dado indisponível para este censo, campo valido apenas para censo de 2017 ou superior!",
#'Testando este espaço',
id='alerta_censo',
is_open=False,
#duration=6000,
dismissable = True,
style = {
'font-size': '18px',
'background-color': '#ff0000',
'color': '#ffffff'
#'height' : '60%',
#'margin-left': '150px',
#'margin-bottom': '60px'}
}),
html.Hr(),
]
)
,],
className='eight columns',
style = {
#'background-color': '#ff0000',
#'height' : '60%',
'margin-left': '150px',
'margin-bottom': '60px'}
)
],
className='row'
),
html.Div(id='output'),
],className='row',
style={'width': '100%',
'background-color': '#ffffff',
#'height' : '60%',
'display': 'inline-block'}
)
app.config.suppress_callback_exceptions = True
# The function below is incomplete and still needs more work
@app.callback(
dash.dependencies.Output('alerta_censo', 'is_open'),
[dash.dependencies.Input('ies', 'value'),
dash.dependencies.Input('variavel', 'value')])
def mostra_alerta_quando_dado_errado(ies, censo):
    # 'ies' is a multi-select list and 'censo' is a pair of slider indexes (see dict_ano),
    # so the lower bound of the selected range is converted to a year before comparing.
    if ies and 'FPB' in ies and dict_ano[censo[0]] < 2017:
        return True
    return False
###
@app.callback(
dash.dependencies.Output('tipo', 'options'),
[dash.dependencies.Input('escolher', 'value')])
def Muda_os_parametros_da_caixinha_da_pergunta(radio):
#if radio=='evd':
if radio=='ing':
return [{'label': i, 'value': i} for i in ingressante]
else:
return [{'label': i, 'value': i} for i in evadidos]
@app.callback(
dash.dependencies.Output('trocar_nome_da_pergunta', 'children'),
[dash.dependencies.Input('escolher', 'value')])
def muda_nome_da_pergunta_label(radio):
if radio == 'ing':
return 'Alunos ingressantes por'
if radio == 'evd':
return 'Alunos evadidos por'
@app.callback(
dash.dependencies.Output('class', 'style'),
[dash.dependencies.Input('dois_graphs', 'value')])
def some_o_grafico_para_aparecer_os_outros(valor):
if valor == 'sim':
return {'display': 'none'}
if valor == 'nao':
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output('grafico-tres', 'style'),
[dash.dependencies.Input('dois_graphs', 'value')])
def aparece_o_grafico_da_direita(valor):
if valor == 'sim':
return {'display': 'block'}
if valor == 'nao':
return {'display': 'none'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output('grafico-dois', 'style'),
[dash.dependencies.Input('dois_graphs', 'value')])
def aparece_o_grafico_da_esquerda(valor):
if valor == 'sim':
return {'display': 'block'}
if valor == 'nao':
return {'display': 'none'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output('configa_graph', 'style'),
[dash.dependencies.Input('dois_graphs', 'value')])
def aparece_caixinha_do_grafico_que_deseja_configurar_quando_clica_em_sim(toggle_value):
if toggle_value == 'sim':
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output('dropdowns', 'style'),
[dash.dependencies.Input('escolher', 'value')])
def aparece_ou_some_as_caixinhas(toggle_value):
if toggle_value == 'none':
return {'display': 'none'}
else:
return {'display': 'block'}
# From here on we are only configuring the suggestion form fields
@app.callback(
Output(component_id='nome', component_property='value'),
[Input('submit-button', 'n_clicks'),
],
state=[State(component_id='sugestao', component_property='value'),
State(component_id='email', component_property='value')])
def update(am, sugestao, email):
if(am):
if (sugestao and email):
return ''
    # 'valor' was undefined; leave the field unchanged when nothing should be cleared.
    return dash.no_update
@app.callback(
Output(component_id='email', component_property='value'),
[Input('submit-button', 'n_clicks'),
],
state=[State(component_id='sugestao', component_property='value'),
State(component_id='email', component_property='value')])
def update(am, sugestao, email):
if(am):
if (sugestao and email):
return ''
    # 'valor' was undefined; leave the field unchanged when nothing should be cleared.
    return dash.no_update
@app.callback(
Output(component_id='sugestao', component_property='value'),
[Input('submit-button', 'n_clicks')],
state=[State(component_id='sugestao', component_property='value'),
State(component_id='email', component_property='value')])
def update(ama,sugestao, email):
if(ama):
if (sugestao and email):
return ''
    # 'valor' was undefined; leave the field unchanged when nothing should be cleared.
    return dash.no_update
# The suggestion callbacks end here
# Below is the configuration for showing the alerts; it is still a work in progress
@app.callback(Output(component_id='alerta_vermelho', component_property='is_open'),
[Input('submit-button', 'n_clicks'),
],
state=[State(component_id='sugestao', component_property='value'),
State(component_id='email', component_property='value')]
)
def retornando_falso(n_clicks,sugestao,email):
if n_clicks:
if (sugestao and email):
return False
if n_clicks:
return True
# Ends here
# Here the suggestion is committed to the database
@app.callback(
Output(component_id='alerta_verde', component_property='is_open'),
[Input('submit-button', 'n_clicks'),],
state=[State(component_id='sugestao', component_property='value'),
State(component_id='email', component_property='value'),
State(component_id='nome', component_property='value')])
def update_output_div(n_clicks, input_value, emailzin,nome):
if(input_value and emailzin):
mycursor.execute('''
INSERT INTO sugestoes (nome, email, sugestao,dia,mes,ano)
VALUES
(%s,%s,%s,%s,%s,%s)
''',(nome,emailzin, input_value,now.day,now.month,now.year))
mydb.commit()
return True
def forma_classes_qt_integ(x):
    if x is None:
return None
else:
if x>=0.0 and x <500.0:
return '(0 - 500)'
if x>=500.0 and x <1000.0:
return '(500 - 1000)'
if x>=1000.0 and x <2500.0:
return '(1000 - 2500)'
if x>=2500.0 and x <3000.0:
return '(2500 - 3000)'
if x>=3000.0:
return '(3000>)'
def forma_classes_idade(x):
    if x is None:
return None
else:
if x>=15 and x <21:
return '(15-21)'
if x>=21 and x <25:
return '(21-25)'
if x>=25 and x <35:
return '(25-35)'
if x>=35:
return '(35>)'
def cria_sql(pergunta,variavel,mycursor,mudar):
if pergunta == 'Por sexo':
return '''SELECT C.TP_SEXO, P.sk_ies
FROM dim_aluno AS C
JOIN ''' + dict_radio[mudar] + ''' AS P ON C.sk_aluno = P.sk_aluno
WHERE P.CENSO in %s '''
if pergunta == 'Por raca':
return '''SELECT C.TP_COR_RACA, P.sk_ies
FROM dim_aluno AS C
JOIN ''' + dict_radio[mudar] + ''' AS P ON C.sk_aluno = P.sk_aluno
WHERE P.CENSO in %s
'''
if pergunta == 'Quantidade integralizada':
return '''SELECT P.qt_carga_horaria_integ, P.sk_ies
FROM ''' + dict_radio[mudar] + ''' as P
WHERE P.Censo in %s'''
if pergunta == 'Por deficiencia':
return '''SELECT C.TP_DEFICIENCIA, P.sk_ies
FROM dim_aluno AS C
JOIN ''' + dict_radio[mudar] + ''' AS P ON C.sk_aluno = P.sk_aluno
WHERE P.CENSO in %s
'''
if pergunta == 'Por idade':
return '''SELECT P.sk_ies, (2017 - C.nu_ano_nascimento) as idade
FROM dim_aluno AS C
JOIN ''' + dict_radio[mudar] + ''' AS P ON C.sk_aluno = P.sk_aluno
WHERE P.CENSO in %s'''
if pergunta == 'Por quantidade':
return '''SELECT sk_ies,censo
FROM ''' + dict_radio[mudar] + '''
WHERE CENSO in %s'''
if pergunta == 'Atividade Extracurricular':
return '''SELECT sk_ies,in_atividade_extracurricular
FROM ''' + dict_radio[mudar] + '''
WHERE CENSO in %s'''
if pergunta == 'Vagas Reservadas':
return '''SELECT sk_ies,in_reserva_vagas
FROM ''' + dict_radio[mudar] + '''
WHERE CENSO in %s'''
if pergunta == 'Para outra Ies':
return '''SELECT sk_ies,censo,sk_aluno,dt_ingresso_curso
FROM ''' + dict_radio[mudar] + '''
WHERE CENSO in %s'''
def cria_sql_para_ingressantes(pergunta,variavel,mycursor,mudar):
return '''SELECT sk_ies,censo,sk_aluno,dt_ingresso_curso
FROM fato_ingresso
WHERE sk_aluno in %s'''
def tipo_graph(variavel,pergunta,no_ies,tipo,buttons,mudar):
#if variavel == 'Todos':
# variavel=(2013,2014,2015,2016,2017)
#else:
# variavel=(variavel,0)
variavel=[dict_ano[x] for x in variavel]
variavel=tuple(variavel)
variavel= [a+1 for a in range(variavel[0] - 1,variavel[1])]
variavel=tuple(variavel)
fig=go.Figure()
if variavel == () or pergunta not in dict_mudar_list[mudar]:
return fig
mycursor=mydb.cursor()
sql=cria_sql(pergunta, variavel,mycursor,mudar)
mycursor.execute(sql,(variavel,))
myresult= mycursor.fetchall()
colnames = [desc[0] for desc in mycursor.description]
df = pd.DataFrame(data=myresult, columns=colnames )
#print(df["qt_carga_horaria_integ"],flush=True)
if(pergunta=='Quantidade integralizada'):
df['qt_carga_horaria_integ']=df["qt_carga_horaria_integ"].apply(lambda x: forma_classes_qt_integ(x))
if(pergunta=='Por idade'):
df['idade']=df["idade"].apply(lambda x: forma_classes_idade(x))
#print(df['qt_carga_horaria_integ'],flush=True)
dicionario=dict_dicionario[pergunta]
# print('oi',flush=True)
if(tipo=='Pizza'):
xa =[dict_no[ies] for ies in no_ies]
#if(no_ies == 'Todos'):
# x=[dict_no[x] for x in ['FPB','NASSAU JP', 'NASSAU CG', 'IFPB', 'UFCG', 'UFPB']]
# flag=df.isin({'sk_ies' : x})
# b=df[(flag.sk_ies)]
#else:
#x = dict_no[no_ies]
#a=df['sk_ies']==x
#b=df[a]
flag=df.isin({'sk_ies' : xa})
b=df[(flag.sk_ies)]
if(pergunta == 'Para outra Ies'):
dict_aluno = {sk_aluno:data_ingresso for sk_aluno, data_ingresso in zip(list(b['sk_aluno']),list(b['dt_ingresso_curso']))}
dict_ies_flag = {sk_aluno:no_ies for sk_aluno, no_ies in zip(list(b['sk_aluno']),list(b['sk_ies']))}
alunos=tuple(b.sk_aluno)
dt_ingresso=tuple(b.dt_ingresso_curso)
sql=cria_sql_para_ingressantes(pergunta, variavel,mycursor,mudar)
mycursor.execute(sql,(alunos,))
myresult= mycursor.fetchall()
colnames = [desc[0] for desc in mycursor.description]
df = pd.DataFrame(data=myresult, columns=colnames )
for index, row in df.iterrows():
flag=row['sk_aluno']
data_aluno = row['dt_ingresso_curso']
data_aluno_2 = dict_aluno[flag]
if row['sk_ies'] == dict_ies_flag[flag]:
if data_aluno.year <= data_aluno_2.year:
df.drop(index,inplace=True)
else:
if data_aluno <= data_aluno_2:
df.drop(index,inplace=True)
#for index, row in df.iterrows():
# a=row['sk_aluno']
# if row['dt_ingresso_curso'] <= dict_aluno[a] :
# df.drop(index,inplace=True)
b=df
#xa =[dict_no[ies] for ies in no_ies]
#flag=temp.isin({'CO_UF_CURSO' : CO_UF, 'NO_CURSO' : NO_CURSO })
#temp=temp[(flag.CO_UF_CURSO) & (flag.NO_CURSO)]
#a=df['sk_ies']==x
#b=df[a]
classes_mais_votadas = b[dict_coluna[pergunta]].value_counts()
fig.add_trace((go.Pie(labels = [dicionario[x] for x in classes_mais_votadas.index],
values = classes_mais_votadas.values,
marker = {
'line' : {'color':'#000000','width':2}
},
hoverinfo='label+percent+value',
direction='clockwise'
)))
fig.update_layout(title={'text': 'Gráfico de ' + '<b>' + dict_mudar[mudar] + '</b>' + ' para análise por ' + dict_eixo_x[pergunta].lower(),
'xanchor': 'center',
'x': 0.5}
)
fig.layout.update(
updatemenus=[
go.layout.Updatemenu(
active=1,
buttons=list(buttons),)])
if (tipo.count("Barra")):
xa =[dict_no[ies] for ies in no_ies]
for x in xa:
a=df['sk_ies']==x
b=df[a]
if(pergunta == 'Para outra Ies'):
dict_aluno = {sk_aluno:data_ingresso for sk_aluno, data_ingresso in zip(list(b['sk_aluno']),list(b['dt_ingresso_curso']))}
dict_ies_flag = {sk_aluno:no_ies for sk_aluno, no_ies in zip(list(b['sk_aluno']),list(b['sk_ies']))}
alunos=tuple(b.sk_aluno)
dt_ingresso=tuple(b.dt_ingresso_curso)
sql=cria_sql_para_ingressantes(pergunta, variavel,mycursor,mudar)
mycursor.execute(sql,(alunos,))
myresult= mycursor.fetchall()
colnames = [desc[0] for desc in mycursor.description]
df = pd.DataFrame(data=myresult, columns=colnames )
for index, row in df.iterrows():
flag=row['sk_aluno']
data_aluno = row['dt_ingresso_curso']
data_aluno_2 = dict_aluno[flag]
if row['sk_ies'] == dict_ies_flag[flag]:
if data_aluno.year <= data_aluno_2.year:
df.drop(index,inplace=True)
else:
if data_aluno <= data_aluno_2:
df.drop(index,inplace=True)
#for index, row in df.iterrows():
# a=row['sk_aluno']
# if row['dt_ingresso_curso'] <= dict_aluno[a] :
# df.drop(index,inplace=True)
b=df
#xa =[dict_no[ies] for ies in no_ies]
flagzao=b[dict_coluna[pergunta]]
#print([dicionario[x] for x in flagzao],flush=True)
fig.add_trace((go.Histogram(
hoverinfo='y',
x=[dicionario[x] for x in flagzao],
name = dict_ies[x],
visible=True,
opacity = 0.8)))
#fig.layout.update(
#updatemenus=[
#go.layout.Updatemenu(
#active=1,
#buttons=list(buttons),)])
fig.update_layout(title={'text': 'Gráfico de ' + '<b>' + dict_mudar[mudar] + '</b>' + ' para análise por ' + dict_eixo_x[pergunta].lower(),
'xanchor': 'center',
'x': 0.5} ,
xaxis={'title': dict_eixo_x[pergunta],
'categoryarray':dict_order_axis_x[pergunta],
'type' : "category"},
yaxis={'title': dict_eixo_y[pergunta] + dict_mudar[mudar] },)
if tipo == 'Barra - Stacked':
fig.update_layout(barmode='stack')
fig.update_layout(
bargap=0.2,
bargroupgap=0.1
)
return go.Figure(fig)
@app.callback(Output('feature-graphic', 'figure'),
[Input('variavel', 'value'),
Input('tipo','value'),
Input('ies', 'value'),
Input('pi_ou_bar', 'value'),
Input(component_id='escolher', component_property='value'),
Input('dois_graphs', 'value')
],
#state=[State(component_id='escolher', component_property='value'),
#],
)
def update_graph(variavel,tipo,ies,forma, mudar,dois_graf):
fake = Factory.create()
fig=go.Figure()
trace=[]
buttons=[]
if(dois_graf=='nao'):
if(mudar):
if(tipo):
return tipo_graph(variavel,tipo,ies,forma,buttons,mudar)
else:
return fig
else:
return fig
#return testando(variavel,tipo,ies,forma)
#return go.Figure(fig)
@app.callback(Output('grafico-dois', 'figure'),
[Input('variavel', 'value'),
Input('tipo','value'),
Input('ies', 'value'),
Input('pi_ou_bar', 'value'),
Input(component_id='escolher', component_property='value'),
Input('dois_graphs', 'value'),
Input('config_graph', 'value')
],
#state=[State(component_id='escolher', component_property='value'),
#],
)
def grafico_um(variavel,tipo,ies,forma, mudar,dois_graf,config_graf):
fake = Factory.create()
fig=go.Figure()
trace=[]
buttons=[]
if(dois_graf == 'sim'):
if(config_graf=='g1'):
if(mudar):
if(tipo):
return tipo_graph(variavel,tipo,ies,forma,buttons,mudar)
else:
return fig
else:
return fig
else:
            return fig
else:
return fig
@app.callback(Output('grafico-tres', 'figure'),
[Input('variavel', 'value'),
Input('tipo','value'),
Input('ies', 'value'),
Input('pi_ou_bar', 'value'),
Input(component_id='escolher', component_property='value'),
Input('dois_graphs', 'value'),
Input('config_graph', 'value')
],
#state=[State(component_id='escolher', component_property='value'),
#],
)
def grafico_dois(variavel,tipo,ies,forma, mudar,dois_graf,config_graf):
fake = Factory.create()
fig=go.Figure()
trace=[]
buttons=[]
if(dois_graf == 'sim'):
if(config_graf=='g2'):
if(mudar):
if(tipo):
return tipo_graph(variavel,tipo,ies,forma,buttons,mudar)
else:
return fig
else:
return fig
else:
            return fig
else:
return fig
@app.callback(
dash.dependencies.Output('testando', 'style'),
[dash.dependencies.Input('check', 'value')])
def Muda_os_parametros_da_caixinha_da_pergunta(check):
#print(check,flush=True)
#if radio=='evd':
#if 'ativado' in check:
if 'ativado' in check:
return {'left':'0'}
else:
return {'left':'-350px'}
@app.callback(
dash.dependencies.Output('check', 'value'),
[dash.dependencies.Input('menu_lateral', 'n_clicks')],
[dash.dependencies.State('check', 'value')] )
def clica_ou_nao_check(click,a):
#print(check,flush=True)
#if radio=='evd':
#if 'ativado' in check:
if click:
if 'ativado' in a:
return ['']
else:
return ['ativado']
return ['']
if(__name__ == '__main__'):
app.run_server(debug=True,port=8093)
|
from fabric.api import task, env
env.is_python3 = True
env.project_name = '{{ project_name }}'
env.repository = 'git@bitbucket.org:bnzk/{project_name}.git'.format(**env)
env.sites = ('{{ project_name }}', )
env.is_postgresql = True # False for mysql! only used for put/get_db
env.needs_main_nginx_files = True
env.is_supervisord = True
env.is_nginx_gunicorn = True
env.is_uwsgi = False
env.is_apache = False
env.remote_ref = 'origin/main'
# these will be checked for changes
env.requirements_files = [
'requirements/deploy.txt',
'requirements/deploy.in',
'requirements/basic.in',
]
# this is used with pip install -r
env.requirements_file = env.requirements_files[0]
# ==============================================================================
# Tasks which set up deployment environments
# ==============================================================================
@task
def live():
"""
Use the live deployment environment.
"""
env.env_prefix = 'live'
env.deploy_crontab = True
env.main_user = '{project_name}'.format(**env)
server = '{main_user}@s20.wservices.ch'.format(**env)
env.roledefs = {
'web': [server],
'db': [server],
}
generic_env_settings()
@task
def stage():
"""
Use the sandbox deployment environment on xy.bnzk.ch.
"""
env.env_prefix = 'stage'
env.deploy_crontab = False
env.main_user = 'bnzk-stage'.format(**env)
server = '{main_user}@s20.wservices.ch'.format(**env)
env.roledefs = {
'web': [server],
'db': [server],
}
generic_env_settings()
def generic_env_settings():
if not getattr(env, 'deploy_crontab', None):
env.deploy_crontab = False
env.project_dir = '/home/{main_user}/sites/{project_name}-{env_prefix}'.format(**env)
env.virtualenv_dir = '{project_dir}/virtualenv'.format(**env)
env.gunicorn_restart_command = '~/init/{site}-{env_prefix}.sh restart'
env.gunicorn_stop_command = '~/init/{site}-{env_prefix}.sh stop'
env.nginx_restart_command = '~/init/nginx.sh restart'
# not needed with uwsgi emporer mode, cp is enough
# env.uwsgi_restart_command = 'touch $HOME/uwsgi.d/{site}-{env_prefix}.ini'
env.project_conf = 'project.settings._{project_name}_{env_prefix}'.format(**env)
stage()
|
from scrapy.cmdline import execute
if __name__ == "__main__":
execute(['scrapy','crawl', 'old_house'])
|
"""
Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
Licensed under the NVIDIA Source Code License. See LICENSE at the main github page.
Authors: Seung Wook Kim, Jonah Philion, Antonio Torralba, Sanja Fidler
"""
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import numpy as np
import utils
import sys
sys.path.append('..')
import torch.nn.utils.spectral_norm as SN
from simulator_model.model_utils import View
from simulator_model import model_utils
from simulator_model import layers
import functools
from torch.nn import init
import random
class DiscriminatorSingleLatent(nn.Module):
def __init__(self, opts):
super(DiscriminatorSingleLatent, self).__init__()
self.opts = opts
dim = opts.nfilterD * 16
self.which_linear = functools.partial(layers.SNLinear,
num_svs=1, num_itrs=1,
eps=1e-12)
sinput_dim = opts.latent_z_size
l = [self.which_linear(sinput_dim, dim)]
        l.append(nn.BatchNorm1d(dim))
l.append(nn.LeakyReLU(0.2))
num_layers = 3
if utils.check_arg(opts, 'D_num_base_layer'):
num_layers = opts.D_num_base_layer
for _ in range(num_layers):
l.append(self.which_linear(dim, dim))
l.append(nn.BatchNorm1d(dim))
l.append(nn.LeakyReLU(0.2))
self.base = nn.Sequential(*l)
self.d_final = nn.Sequential(self.which_linear(dim, dim),
nn.BatchNorm1d(dim),
nn.LeakyReLU(0.2),
self.which_linear(dim, 1))
def forward(self, x):
h = self.base(x)
return self.d_final(h), h
class Discriminator(nn.Module):
def __init__(self, opts, nfilter=32, nfilter_max=1024):
super(Discriminator, self).__init__()
self.opts = opts
self.disentangle_style = utils.check_arg(self.opts, 'disentangle_style')
self.separate_holistic_style_dim = self.opts.separate_holistic_style_dim
f_size = 4
self.ds = DiscriminatorSingleLatent(opts)
conv3d_dim = opts.nfilterD_temp * 16
self.temporal_window = self.opts.config_temporal
self.conv3d, self.conv3d_final = \
model_utils.choose_netD_temporal(
self.opts, conv3d_dim, window=self.temporal_window
)
self.conv3d = nn.ModuleList(self.conv3d)
self.conv3d_final = nn.ModuleList(self.conv3d_final)
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=f_size, padding=0,
num_svs=1, num_itrs=1,
eps=1e-12)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=1, num_itrs=1,
eps=1e-12)
# For action discriminator
self.trans_conv = self.which_linear(opts.nfilterD*16*2, opts.nfilterD*16)
self.to_transition_feature = nn.Sequential(self.trans_conv,
nn.LeakyReLU(0.2),
View((-1, opts.nfilterD*16)))
action_space = 10 if not utils.check_arg(self.opts, 'action_space') else self.opts.action_space
self.action_to_feat = nn.Linear(action_space, opts.nfilterD*16)
        self.reconstruct_action_z = self.which_linear(opts.nfilterD*16, action_space)
def forward(self, images, actions, states, warm_up, neg_actions=None, epoch=0):
dout = {}
neg_action_predictions, rev_predictions, content_predictions = None, None, []
neg_content_predictions, action_predictions = None, None
batch_size = actions[0].size(0)
if warm_up == 0:
warm_up = 1 # even if warm_up is 0, the first screen is from GT
gt_states = torch.cat(states[:warm_up], dim=0)
single_frame_predictions_all, tmp_features = self.ds(torch.cat([gt_states, images], dim=0))
single_frame_predictions_all = single_frame_predictions_all[warm_up * batch_size:]
frame_features = tmp_features[warm_up*batch_size:]
next_features = frame_features
# action discriminator
prev_frames = torch.cat([tmp_features[:warm_up*batch_size],
tmp_features[(warm_up+warm_up-1)*batch_size:-batch_size]], dim=0)
if self.opts.input_detach:
prev_frames = prev_frames.detach()
transition_features = self.to_transition_feature(torch.cat([prev_frames, next_features], dim=1))
action_features = self.action_to_feat(torch.cat(actions[:-1], dim=0))
if neg_actions is not None:
neg_action_features = self.action_to_feat(torch.cat(neg_actions[:-1], dim=0))
action_recon = self.reconstruct_action_z(transition_features)
new_l = []
temporal_predictions = []
stacked = torch.cat([action_features, transition_features], dim=1)
stacked = stacked.view(len(actions)-1, batch_size, -1).permute(1,0,2)
stacked = stacked.permute(0, 2, 1)
if neg_actions is not None:
neg_stacked = torch.cat([neg_action_features, transition_features], dim=1)
neg_stacked = neg_stacked.view(len(actions) - 1, batch_size, -1).permute(1, 0, 2)
neg_stacked = neg_stacked.permute(0, 2, 1)
if self.opts.do_latent:
neg_stacked = neg_stacked.unsqueeze(-1)
neg_content_predictions = []
aa = self.conv3d[0](neg_stacked)
a_out = self.conv3d_final[0](aa)
neg_content_predictions.append(a_out.view(batch_size, -1))
if self.temporal_window >= 12:
bb = self.conv3d[1](aa)
b_out = self.conv3d_final[1](bb)
neg_content_predictions.append(b_out.view(batch_size, -1))
if self.temporal_window >= 18:
cc = self.conv3d[2](bb)
c_out = self.conv3d_final[2](cc)
neg_content_predictions.append(c_out.view(batch_size, -1))
if self.temporal_window >= 30:
dd = self.conv3d[3](cc)
d_out = self.conv3d_final[3](dd)
neg_content_predictions.append(d_out.view(batch_size, -1))
stacked = stacked.unsqueeze(-1)
aa = self.conv3d[0](stacked)
a_out = self.conv3d_final[0](aa)
temporal_predictions.append(a_out.view(batch_size, -1))
if self.temporal_window >= 12:
bb = self.conv3d[1](aa)
b_out = self.conv3d_final[1](bb)
temporal_predictions.append(b_out.view(batch_size, -1))
if self.temporal_window >= 18:
cc = self.conv3d[2](bb)
c_out = self.conv3d_final[2](cc)
temporal_predictions.append(c_out.view(batch_size, -1))
if self.temporal_window >= 36:
dd = self.conv3d[3](cc)
d_out = self.conv3d_final[3](dd)
temporal_predictions.append(d_out.view(batch_size, -1))
dout['disc_features'] = frame_features[:(len(states)-1)*batch_size]
dout['action_predictions'] = action_predictions
dout['single_frame_predictions_all'] = single_frame_predictions_all
dout['content_predictions'] = temporal_predictions
dout['neg_action_predictions'] = neg_action_predictions
dout['neg_content_predictions'] = neg_content_predictions
dout['action_recon'] = action_recon
return dout
def update_opts(self, opts):
self.opts = opts
return
|
from django.conf.urls import url
from django.urls import include, path
from apps.base.views import index, informacion
app_name = "base"
urlpatterns = [
url(r'^$', index,name='index'),
url(r'^info$',informacion,name='informacion')
]
|
__author__ = "Frédéric BISSON"
__copyright__ = "Copyright 2022, Frédéric BISSON"
__credits__ = ["Frédéric BISSON"]
__license__ = "mit"
__maintainer__ = "Frédéric BISSON"
__email__ = "zigazou@protonmail.com"
from .PDFToken import PDFToken
class PDFName(PDFToken):
"""A PDF name (starting with /)"""
def __init__(self, name):
"""Create a new PDFName.
It sets the name of the PDFName to the name parameter.
The string must not be empty.
It can get its name from:
- another PDFName
- a byte string
- a unicode string which must use only ASCII characters
:param name: name to set.
:type name: PDFName or str or bytes
"""
assert type(name) in [PDFName, str, bytes]
if type(name) == PDFName:
self.name = name.name
elif type(name) == str:
assert len(name) > 0
self.name = name.encode('ascii')
else:
assert len(name) > 0
self.name = name
def __hash__(self) -> int:
"""Hash of a PDFName
The hash of a PDFName is simply the hash of its byte string name.
:return: The hash
:rtype: int
"""
return hash(self.name)
def __eq__(self, other):
"""Equality operator for PDFName.
A PDFName is:
- equal to any other PDFName with the same byte string
- equal to any byte string with the same byte string
- different from any other PDFToken subclass
Comparing a PDFName with anything else is not implemented.
:param other: The object to compare to our current object
:type other: any
:return: True or False or NotImplemented
:type: bool
"""
if type(other) == PDFName:
return self.name == other.name
elif type(other) == bytes:
return self.name == other
elif isinstance(other, PDFToken):
return False
else:
return NotImplemented
def __str__(self) -> str:
return self.name.decode('ascii')
def __bool__(self):
"""A PDFName is True if its name contains bytes (this should always
happen)."""
return self.name is not None and len(self.name) > 0
def pretty(self) -> str:
return self._pretty("Name(%s)" % (self.name.decode('ascii'),))
def encode(self) -> bytes:
return b"/" + self.name
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tempfile
from collections import defaultdict
from google.cloud import datastore
from group_defender.constants import CHAT, FILE_TYPES
from group_defender.store import datastore_client as client
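# Increment this chat's per-file-type counters inside a Datastore transaction
# so concurrent updates do not clobber each other.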
def update_stats(chat_id, counts):
key = client.key(CHAT, chat_id)
with client.transaction():
chat = client.get(key)
if chat is None:
chat = datastore.Entity(key)
for file_type in counts:
if file_type in chat:
chat[file_type] += counts[file_type]
else:
chat[file_type] = counts[file_type]
client.put(chat)
def get_stats(update, _):
query = client.query(kind=CHAT)
counts = defaultdict(int)
total = 0
for chat in query.fetch():
if chat.key.id > 0:
counts["num_users"] += 1
else:
counts["num_groups"] += 1
for file_type in FILE_TYPES:
if file_type in chat:
counts[file_type] += chat[file_type]
total += chat[file_type]
update.effective_message.reply_text(
f'Number of users: {counts["num_users"]}\nNumber of groups: {counts["num_groups"]}\n'
f"Total processed: {total}"
)
send_plot(update, counts)
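# Render a bar chart of the per-file-type counts and reply with it as a photo.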
def send_plot(update, counts):
nums = [counts[x] for x in FILE_TYPES]
x_pos = list(range(len(FILE_TYPES)))
plt.rcdefaults()
_, ax = plt.subplots()
ax.bar(x_pos, nums, align="center")
ax.set_xticks(x_pos)
ax.set_xticklabels(FILE_TYPES, rotation=45)
ax.set_xlabel("File Types")
ax.set_ylabel("Counts")
plt.tight_layout()
with tempfile.NamedTemporaryFile(suffix=".png") as tf:
plt.savefig(tf.name)
update.effective_message.reply_photo(open(tf.name, "rb"))
|
# -*- coding: utf-8 -*-
from nose.tools import * # noqa: F403
import jwe
import jwt
import mock
import furl
import time
from future.moves.urllib.parse import urlparse, urljoin
import datetime
from django.utils import timezone
import pytest
import pytz
import itsdangerous
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from addons.osfstorage.models import OsfStorageFile
from api_tests import utils as api_test_utils
from framework.auth import Auth, cas, signing
from framework.celery_tasks import handlers
from framework.postcommit_tasks.handlers import enqueue_postcommit_task, get_task_from_postcommit_queue, postcommit_celery_queue
from framework.exceptions import PermissionsError, HTTPError
from website import settings, mails
from website.preprints.tasks import format_preprint, update_preprint_share, on_preprint_updated, update_or_create_preprint_identifiers, update_or_enqueue_on_preprint_updated
from website.project.views.contributor import find_preprint_provider
from website.identifiers.clients import CrossRefClient, ECSArXivCrossRefClient, crossref
from website.identifiers.utils import request_identifiers
from website.util.share import format_user
from addons.base import views
from osf.models import Tag, Preprint, PreprintLog, PreprintContributor, Subject, Session
from osf.exceptions import PreprintStateError, ValidationError, ValidationValueError, PreprintProviderError
from osf.utils.permissions import READ, WRITE, ADMIN
from osf.utils.workflows import DefaultStates, RequestTypes
from osf_tests.utils import MockShareResponse
from tests.base import assert_datetime_equal, OsfTestCase
from tests.utils import assert_preprint_logs
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
UserFactory,
PreprintFactory,
NodeFactory,
TagFactory,
SubjectFactory,
UnregUserFactory,
PreprintProviderFactory,
PreprintRequestFactory,
)
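# Every test in this module requires database access.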
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
return UserFactory()
@pytest.fixture()
def node(user):
return NodeFactory(creator=user)
@pytest.fixture()
def project(user):
return ProjectFactory(creator=user)
@pytest.fixture()
def preprint(user):
return PreprintFactory(creator=user)
@pytest.fixture()
def auth(user):
return Auth(user)
@pytest.fixture()
def subject():
return SubjectFactory()
class TestPreprint:
def test_preprint_factory(self, preprint):
assert preprint.title is not None
assert preprint.description is not None
assert preprint.provider is not None
assert preprint.is_published is True
assert preprint.is_public is True
assert preprint.creator is not None
assert preprint.files.first() == preprint.primary_file
assert preprint.deleted is None
assert preprint.root_folder is not None
class TestPreprintProperties:
def test_contributors(self, preprint):
assert len(preprint.contributors) == 1
assert preprint.contributors[0] == preprint.creator
def test_verified_publishable(self, preprint):
preprint.is_published = False
assert preprint.verified_publishable is False
preprint.is_published = True
preprint.deleted = datetime.datetime.now()
assert preprint.verified_publishable is False
preprint.deleted = None
assert preprint.verified_publishable is True
def test_is_deleted(self, preprint):
assert preprint.deleted is None
assert preprint.is_deleted is False
preprint.deleted = timezone.now()
preprint.save()
assert preprint.deleted is not None
assert preprint.is_deleted is True
def test_is_preprint_orphan(self, preprint):
assert preprint.is_preprint_orphan is False
preprint.primary_file = None
preprint.save()
assert preprint.is_preprint_orphan is True
def test_has_submitted_preprint(self, preprint):
preprint.machine_state = 'initial'
preprint.save()
assert preprint.has_submitted_preprint is False
preprint.machine_state = 'pending'
preprint.save()
assert preprint.has_submitted_preprint is True
def test_deep_url(self, preprint):
assert preprint.deep_url == '/preprints/{}/'.format(preprint._id)
def test_url_(self, preprint):
assert preprint.url == '/preprints/{}/{}/'.format(preprint.provider._id, preprint._id)
def test_absolute_url(self, preprint):
assert preprint.absolute_url == urljoin(
preprint.provider.domain if preprint.provider.domain_redirect_enabled else settings.DOMAIN,
preprint.url
)
def test_absolute_api_v2_url(self, preprint):
assert '/preprints/{}/'.format(preprint._id) in preprint.absolute_api_v2_url
def test_admin_contributor_or_group_member_ids(self, preprint, user):
user2 = UserFactory()
assert len(preprint.admin_contributor_or_group_member_ids) == 1
assert user._id in preprint.admin_contributor_or_group_member_ids
preprint.add_permission(user2, ADMIN, save=True)
assert len(preprint.admin_contributor_or_group_member_ids) == 2
assert user2._id in preprint.admin_contributor_or_group_member_ids
def test_visible_contributor_ids(self, preprint):
assert preprint.visible_contributor_ids[0] == preprint.creator._id
def test_all_tags(self, preprint, auth):
preprint.add_tags(['test_tag_1'], auth)
preprint.save()
assert len(preprint.all_tags) == 1
assert preprint.all_tags[0].name == 'test_tag_1'
def test_system_tags(self, preprint):
assert preprint.system_tags.exists() is False
class TestPreprintSubjects:
@pytest.fixture()
def write_contrib(self, preprint):
write_contrib = AuthUserFactory()
preprint.add_contributor(write_contrib, auth=Auth(preprint.creator), permissions=WRITE)
preprint.save()
return write_contrib
def test_set_subjects(self, preprint, auth):
subject = SubjectFactory()
subjects = [[subject._id]]
preprint.set_subjects(subjects, auth)
assert preprint.subjects.count() == 1
assert subject in preprint.subjects.all()
def test_admin_can_set_subjects(self, preprint, subject):
initial_subjects = list(preprint.subjects.all())
preprint.set_subjects([[subject._id]], auth=Auth(preprint.creator))
preprint.reload()
assert initial_subjects != list(preprint.subjects.all())
def test_write_can_set_subjects(self, preprint, subject, write_contrib):
initial_subjects = list(preprint.subjects.all())
preprint.set_subjects([[subject._id]], auth=Auth(write_contrib))
preprint.reload()
assert initial_subjects != list(preprint.subjects.all())
class TestLogging:
def test_add_log(self, preprint, auth):
preprint.add_log(PreprintLog.FILE_UPDATED, params={'preprint': preprint._id}, auth=auth)
preprint.save()
last_log = preprint.logs.latest()
assert last_log.action == PreprintLog.FILE_UPDATED
# date is tzaware
assert last_log.created.tzinfo == pytz.utc
# updates preprint.modified
assert_datetime_equal(preprint.modified, last_log.created)
class TestTagging:
def test_add_tag(self, preprint, auth):
preprint.add_tag('FoO', auth=auth)
preprint.save()
tag = Tag.objects.get(name='FoO')
assert preprint.tags.count() == 1
assert tag in preprint.tags.all()
last_log = preprint.logs.all().order_by('-created')[0]
assert last_log.action == PreprintLog.TAG_ADDED
assert last_log.params['tag'] == 'FoO'
assert last_log.params['preprint'] == preprint._id
def test_add_system_tag(self, preprint):
original_log_count = preprint.logs.count()
preprint.add_system_tag('FoO')
preprint.save()
tag = Tag.all_tags.get(name='FoO', system=True)
assert preprint.all_tags.count() == 1
assert tag in preprint.all_tags.all()
assert tag.system is True
# No log added
new_log_count = preprint.logs.count()
assert original_log_count == new_log_count
def test_add_system_tag_instance(self, preprint):
tag = TagFactory(system=True)
preprint.add_system_tag(tag)
assert tag in preprint.all_tags.all()
def test_add_system_tag_non_system_instance(self, preprint):
tag = TagFactory(system=False)
with pytest.raises(ValueError):
preprint.add_system_tag(tag)
assert tag not in preprint.all_tags.all()
def test_system_tags_property(self, preprint, auth):
other_preprint = ProjectFactory()
other_preprint.add_system_tag('bAr')
preprint.add_system_tag('FoO')
preprint.add_tag('bAr', auth=auth)
assert 'FoO' in preprint.system_tags
assert 'bAr' not in preprint.system_tags
class TestSearch:
@mock.patch('website.search.search.update_preprint')
def test_update_search(self, mock_update_preprint, preprint):
preprint.update_search()
assert mock_update_preprint.called
class TestPreprintCreation:
def test_creator_is_added_as_contributor(self, fake):
user = UserFactory()
preprint = Preprint(
title=fake.bs(),
creator=user,
provider=PreprintProviderFactory()
)
preprint.save()
assert preprint.is_contributor(user) is True
contributor = PreprintContributor.objects.get(user=user, preprint=preprint)
assert contributor.visible is True
assert preprint.has_permission(user, ADMIN) is True
assert preprint.has_permission(user, WRITE) is True
assert preprint.has_permission(user, READ) is True
def test_default_region_set_to_user_settings_osfstorage_default(self, fake):
user = UserFactory()
preprint = Preprint(
title=fake.bs(),
creator=user,
provider=PreprintProviderFactory()
)
preprint.save()
assert preprint.region.id == user.get_addon('osfstorage').default_region_id
def test_root_folder_created_automatically(self, fake):
user = UserFactory()
preprint = Preprint(
title=fake.bs(),
creator=user,
provider=PreprintProviderFactory()
)
preprint.save()
assert preprint.root_folder is not None
assert preprint.root_folder.is_root is True
# Copied from osf_tests/test_node.py
class TestContributorMethods:
def test_add_contributor(self, preprint, user, auth):
# A user is added as a contributor
user = UserFactory()
preprint.add_contributor(contributor=user, auth=auth)
preprint.save()
assert preprint.is_contributor(user) is True
assert preprint.has_permission(user, ADMIN) is False
assert preprint.has_permission(user, WRITE) is True
assert preprint.has_permission(user, READ) is True
last_log = preprint.logs.all().order_by('-created')[0]
assert last_log.action == 'contributor_added'
assert last_log.params['contributors'] == [user._id]
def test_add_contributors(self, preprint, auth):
user1 = UserFactory()
user2 = UserFactory()
preprint.add_contributors(
[
{'user': user1, 'permissions': ADMIN, 'visible': True},
{'user': user2, 'permissions': WRITE, 'visible': False}
],
auth=auth
)
last_log = preprint.logs.all().order_by('-created')[0]
assert (
last_log.params['contributors'] ==
[user1._id, user2._id]
)
assert preprint.is_contributor(user1)
assert preprint.is_contributor(user2)
assert user1._id in preprint.visible_contributor_ids
assert user2._id not in preprint.visible_contributor_ids
assert preprint.get_permissions(user1) == [READ, WRITE, ADMIN]
assert preprint.get_permissions(user2) == [READ, WRITE]
def test_cant_add_creator_as_contributor_twice(self, preprint, user):
preprint.add_contributor(contributor=user)
preprint.save()
assert len(preprint.contributors) == 1
def test_cant_add_same_contributor_twice(self, preprint):
contrib = UserFactory()
preprint.add_contributor(contributor=contrib)
preprint.save()
preprint.add_contributor(contributor=contrib)
preprint.save()
assert len(preprint.contributors) == 2
def test_remove_unregistered_contributor_removes_unclaimed_record(self, preprint, auth):
new_user = preprint.add_unregistered_contributor(fullname='David Davidson',
email='david@davidson.com', auth=auth)
preprint.save()
assert preprint.is_contributor(new_user) # sanity check
assert preprint._primary_key in new_user.unclaimed_records
preprint.remove_contributor(
auth=auth,
contributor=new_user
)
preprint.save()
new_user.refresh_from_db()
assert preprint.is_contributor(new_user) is False
assert preprint._primary_key not in new_user.unclaimed_records
def test_is_contributor(self, preprint):
contrib, noncontrib = UserFactory(), UserFactory()
PreprintContributor.objects.create(user=contrib, preprint=preprint)
assert preprint.is_contributor(contrib) is True
assert preprint.is_contributor(noncontrib) is False
assert preprint.is_contributor(None) is False
def test_visible_contributor_ids(self, preprint, user):
visible_contrib = UserFactory()
invisible_contrib = UserFactory()
PreprintContributor.objects.create(user=visible_contrib, preprint=preprint, visible=True)
PreprintContributor.objects.create(user=invisible_contrib, preprint=preprint, visible=False)
assert visible_contrib._id in preprint.visible_contributor_ids
assert invisible_contrib._id not in preprint.visible_contributor_ids
def test_visible_contributors(self, preprint, user):
visible_contrib = UserFactory()
invisible_contrib = UserFactory()
PreprintContributor.objects.create(user=visible_contrib, preprint=preprint, visible=True)
PreprintContributor.objects.create(user=invisible_contrib, preprint=preprint, visible=False)
assert visible_contrib in preprint.visible_contributors
assert invisible_contrib not in preprint.visible_contributors
def test_set_visible_false(self, preprint, auth):
contrib = UserFactory()
PreprintContributor.objects.create(user=contrib, preprint=preprint, visible=True)
preprint.set_visible(contrib, visible=False, auth=auth)
preprint.save()
assert PreprintContributor.objects.filter(user=contrib, preprint=preprint, visible=False).exists() is True
last_log = preprint.logs.all().order_by('-created')[0]
assert last_log.user == auth.user
assert last_log.action == PreprintLog.MADE_CONTRIBUTOR_INVISIBLE
def test_set_visible_true(self, preprint, auth):
contrib = UserFactory()
PreprintContributor.objects.create(user=contrib, preprint=preprint, visible=False)
preprint.set_visible(contrib, visible=True, auth=auth)
preprint.save()
assert PreprintContributor.objects.filter(user=contrib, preprint=preprint, visible=True).exists() is True
last_log = preprint.logs.all().order_by('-created')[0]
assert last_log.user == auth.user
assert last_log.action == PreprintLog.MADE_CONTRIBUTOR_VISIBLE
def test_set_visible_is_noop_if_visibility_is_unchanged(self, preprint, auth):
visible, invisible = UserFactory(), UserFactory()
PreprintContributor.objects.create(user=visible, preprint=preprint, visible=True)
PreprintContributor.objects.create(user=invisible, preprint=preprint, visible=False)
original_log_count = preprint.logs.count()
preprint.set_visible(invisible, visible=False, auth=auth)
preprint.set_visible(visible, visible=True, auth=auth)
preprint.save()
assert preprint.logs.count() == original_log_count
def test_set_visible_contributor_with_only_one_contributor(self, preprint, user):
with pytest.raises(ValueError) as excinfo:
preprint.set_visible(user=user, visible=False, auth=None)
assert str(excinfo.value) == 'Must have at least one visible contributor'
def test_set_visible_missing(self, preprint):
with pytest.raises(ValueError):
preprint.set_visible(UserFactory(), True)
def test_remove_contributor(self, preprint, auth):
# A user is added as a contributor
user2 = UserFactory()
preprint.add_contributor(contributor=user2, auth=auth, save=True)
assert user2 in preprint.contributors
assert preprint.has_permission(user2, WRITE)
# The user is removed
preprint.remove_contributor(auth=auth, contributor=user2)
preprint.reload()
assert user2 not in preprint.contributors
assert preprint.get_permissions(user2) == []
assert preprint.logs.latest().action == 'contributor_removed'
assert preprint.logs.latest().params['contributors'] == [user2._id]
def test_remove_contributors(self, preprint, auth):
user1 = UserFactory()
user2 = UserFactory()
preprint.add_contributors(
[
{'user': user1, 'permissions': WRITE, 'visible': True},
{'user': user2, 'permissions': WRITE, 'visible': True}
],
auth=auth
)
assert user1 in preprint.contributors
assert user2 in preprint.contributors
assert preprint.has_permission(user1, WRITE)
assert preprint.has_permission(user2, WRITE)
preprint.remove_contributors(auth=auth, contributors=[user1, user2], save=True)
preprint.reload()
assert user1 not in preprint.contributors
assert user2 not in preprint.contributors
assert preprint.get_permissions(user1) == []
assert preprint.get_permissions(user2) == []
assert preprint.logs.latest().action == 'contributor_removed'
def test_replace_contributor(self, preprint):
contrib = UserFactory()
preprint.add_contributor(contrib, auth=Auth(preprint.creator))
preprint.save()
assert contrib in preprint.contributors.all() # sanity check
replacer = UserFactory()
old_length = preprint.contributors.count()
preprint.replace_contributor(contrib, replacer)
preprint.save()
new_length = preprint.contributors.count()
assert contrib not in preprint.contributors.all()
assert replacer in preprint.contributors.all()
assert old_length == new_length
# test unclaimed_records is removed
assert (
preprint._id not in
contrib.unclaimed_records.keys()
)
def test_permission_override_fails_if_no_admins(self, preprint, user):
# User has admin permissions because they are the creator
# Cannot lower permissions
with pytest.raises(PreprintStateError):
preprint.add_contributor(user, permissions=WRITE)
def test_update_contributor(self, preprint, auth):
new_contrib = AuthUserFactory()
preprint.add_contributor(new_contrib, permissions=WRITE, auth=auth)
assert preprint.get_permissions(new_contrib) == [READ, WRITE]
assert preprint.get_visible(new_contrib) is True
preprint.update_contributor(
new_contrib,
READ,
False,
auth=auth
)
assert preprint.get_permissions(new_contrib) == [READ]
assert preprint.get_visible(new_contrib) is False
def test_update_contributor_non_admin_raises_error(self, preprint, auth):
non_admin = AuthUserFactory()
preprint.add_contributor(
non_admin,
permissions=WRITE,
auth=auth
)
with pytest.raises(PermissionsError):
preprint.update_contributor(
non_admin,
None,
False,
auth=Auth(non_admin)
)
def test_update_contributor_only_admin_raises_error(self, preprint, auth):
with pytest.raises(PreprintStateError):
preprint.update_contributor(
auth.user,
WRITE,
True,
auth=auth
)
def test_update_contributor_non_contrib_raises_error(self, preprint, auth):
non_contrib = AuthUserFactory()
with pytest.raises(ValueError):
preprint.update_contributor(
non_contrib,
ADMIN,
True,
auth=auth
)
# Copied from tests/test_models.py
class TestPreprintAddContributorRegisteredOrNot:
def test_add_contributor_user_id(self, user, preprint):
registered_user = UserFactory()
contributor_obj = preprint.add_contributor_registered_or_not(auth=Auth(user), user_id=registered_user._id, save=True)
contributor = contributor_obj.user
assert contributor in preprint.contributors
assert contributor.is_registered is True
def test_add_contributor_user_id_already_contributor(self, user, preprint):
with pytest.raises(ValidationError) as excinfo:
preprint.add_contributor_registered_or_not(auth=Auth(user), user_id=user._id, save=True)
assert 'is already a contributor' in excinfo.value.message
def test_add_contributor_invalid_user_id(self, user, preprint):
with pytest.raises(ValueError) as excinfo:
preprint.add_contributor_registered_or_not(auth=Auth(user), user_id='abcde', save=True)
assert 'was not found' in str(excinfo.value)
def test_add_contributor_fullname_email(self, user, preprint):
contributor_obj = preprint.add_contributor_registered_or_not(auth=Auth(user), full_name='Jane Doe', email='jane@doe.com')
contributor = contributor_obj.user
assert contributor in preprint.contributors
assert contributor.is_registered is False
def test_add_contributor_fullname(self, user, preprint):
contributor_obj = preprint.add_contributor_registered_or_not(auth=Auth(user), full_name='Jane Doe')
contributor = contributor_obj.user
assert contributor in preprint.contributors
assert contributor.is_registered is False
def test_add_contributor_fullname_email_already_exists(self, user, preprint):
registered_user = UserFactory()
contributor_obj = preprint.add_contributor_registered_or_not(auth=Auth(user), full_name='F Mercury', email=registered_user.username)
contributor = contributor_obj.user
assert contributor in preprint.contributors
assert contributor.is_registered is True
class TestContributorVisibility:
@pytest.fixture()
def user2(self):
return UserFactory()
@pytest.fixture()
def preprint2(self, user2, user, auth):
preprint = PreprintFactory(creator=user)
preprint.add_contributor(contributor=user2, auth=auth)
return preprint
def test_get_visible_true(self, preprint2):
assert preprint2.get_visible(preprint2.creator) is True
def test_get_visible_false(self, preprint2, user2, auth):
preprint2.set_visible(preprint2.creator, False)
assert preprint2.get_visible(preprint2.creator) is False
def test_make_invisible(self, preprint2):
preprint2.set_visible(preprint2.creator, False, save=True)
preprint2.reload()
assert preprint2.creator._id not in preprint2.visible_contributor_ids
assert preprint2.creator not in preprint2.visible_contributors
assert preprint2.logs.latest().action == PreprintLog.MADE_CONTRIBUTOR_INVISIBLE
def test_make_visible(self, preprint2, user2):
preprint2.set_visible(preprint2.creator, False, save=True)
preprint2.set_visible(preprint2.creator, True, save=True)
preprint2.reload()
assert preprint2.creator._id in preprint2.visible_contributor_ids
assert preprint2.creator in preprint2.visible_contributors
assert preprint2.logs.latest().action == PreprintLog.MADE_CONTRIBUTOR_VISIBLE
# Regression project test: Ensure that hiding and showing the first contributor
# does not change the visible contributor order
assert list(preprint2.visible_contributors) == [preprint2.creator, user2]
def test_set_visible_missing(self, preprint2):
with pytest.raises(ValueError):
preprint2.set_visible(UserFactory(), True)
class TestPermissionMethods:
@pytest.fixture()
def project(self, user):
return ProjectFactory(creator=user)
def test_has_permission(self, preprint):
user = UserFactory()
contributor = PreprintContributor.objects.create(
preprint=preprint, user=user,
)
preprint.add_permission(user, READ)
assert preprint.has_permission(user, READ) is True
assert preprint.has_permission(user, WRITE) is False
assert preprint.has_permission(user, ADMIN) is False
preprint.add_permission(user, WRITE)
assert contributor.user in preprint.contributors
assert preprint.has_permission(user, WRITE) is True
user.is_superuser = True
user.save()
assert preprint.has_permission(user, ADMIN) is False
def test_has_permission_passed_non_contributor_returns_false(self, preprint):
noncontrib = UserFactory()
assert preprint.has_permission(noncontrib, READ) is False
def test_get_permissions(self, preprint):
user = UserFactory()
contributor = PreprintContributor.objects.create(
preprint=preprint, user=user,
)
preprint.add_permission(user, READ)
assert preprint.get_permissions(user) == [READ]
preprint.add_permission(user, WRITE)
assert preprint.get_permissions(user) == [READ, WRITE]
assert contributor.user in preprint.contributors
def test_add_permission(self, preprint):
user = UserFactory()
PreprintContributor.objects.create(
preprint=preprint, user=user,
)
preprint.add_permission(user, WRITE)
preprint.save()
assert preprint.has_permission(user, WRITE) is True
def test_remove_permission(self, preprint):
assert preprint.has_permission(preprint.creator, ADMIN) is True
assert preprint.has_permission(preprint.creator, WRITE) is True
assert preprint.has_permission(preprint.creator, READ) is True
preprint.remove_permission(preprint.creator, ADMIN)
assert preprint.has_permission(preprint.creator, ADMIN) is False
assert preprint.has_permission(preprint.creator, WRITE) is False
assert preprint.has_permission(preprint.creator, READ) is False
def test_remove_permission_not_granted(self, preprint, auth):
contrib = UserFactory()
preprint.add_contributor(contrib, permissions=WRITE, auth=auth)
with pytest.raises(ValueError):
preprint.remove_permission(contrib, ADMIN)
def test_set_permissions(self, preprint):
user = UserFactory()
preprint.set_permissions(user, WRITE, save=True)
assert preprint.has_permission(user, ADMIN) is False
assert preprint.has_permission(user, WRITE) is True
assert preprint.has_permission(user, READ) is True
preprint.set_permissions(user, READ, save=True)
assert preprint.has_permission(user, ADMIN) is False
assert preprint.has_permission(user, WRITE) is False
assert preprint.has_permission(user, READ) is True
preprint.set_permissions(user, ADMIN, save=True)
assert preprint.has_permission(user, ADMIN) is True
assert preprint.has_permission(user, WRITE) is True
assert preprint.has_permission(user, READ) is True
def test_set_permissions_raises_error_if_only_admins_permissions_are_reduced(self, preprint):
# creator is the only admin
with pytest.raises(PreprintStateError) as excinfo:
preprint.set_permissions(preprint.creator, permissions=WRITE)
assert excinfo.value.args[0] == 'Must have at least one registered admin contributor'
def test_add_permission_with_admin_also_grants_read_and_write(self, preprint):
user = UserFactory()
PreprintContributor.objects.create(
preprint=preprint, user=user,
)
preprint.add_permission(user, ADMIN)
preprint.save()
assert preprint.has_permission(user, ADMIN)
assert preprint.has_permission(user, WRITE)
def test_add_permission_already_granted(self, preprint):
user = UserFactory()
PreprintContributor.objects.create(
preprint=preprint, user=user
)
preprint.add_permission(user, ADMIN)
with pytest.raises(ValueError):
preprint.add_permission(user, ADMIN)
def test_contributor_can_edit(self, preprint, auth):
contributor = UserFactory()
contributor_auth = Auth(user=contributor)
other_guy = UserFactory()
other_guy_auth = Auth(user=other_guy)
preprint.add_contributor(
contributor=contributor, auth=auth)
preprint.save()
# write contribs can now edit preprints
assert bool(preprint.can_edit(contributor_auth)) is True
assert bool(preprint.can_edit(other_guy_auth)) is False
def test_can_edit_can_be_passed_a_user(self, user, preprint):
assert bool(preprint.can_edit(user=user)) is True
def test_creator_can_edit(self, auth, preprint):
assert bool(preprint.can_edit(auth)) is True
def test_noncontributor_cant_edit_public(self):
user1 = UserFactory()
user1_auth = Auth(user=user1)
preprint = PreprintFactory(is_public=True)
# Noncontributor can't edit
assert bool(preprint.can_edit(user1_auth)) is False
def test_can_view_private(self, preprint, auth):
preprint.is_public = False
preprint.save()
# Create contributor and noncontributor
contributor = UserFactory()
contributor_auth = Auth(user=contributor)
other_guy = UserFactory()
other_guy_auth = Auth(user=other_guy)
preprint.add_contributor(
contributor=contributor, auth=auth)
preprint.save()
# Only creator and contributor can view
assert preprint.can_view(auth)
assert preprint.can_view(contributor_auth)
assert preprint.can_view(other_guy_auth) is False
def test_creator_cannot_edit_project_if_they_are_removed(self):
creator = UserFactory()
preprint = PreprintFactory(creator=creator)
contrib = UserFactory()
preprint.add_contributor(contrib, permissions=ADMIN, auth=Auth(user=creator))
preprint.save()
assert creator in preprint.contributors.all()
# Creator is removed from project
preprint.remove_contributor(creator, auth=Auth(user=contrib))
assert preprint.can_view(Auth(user=creator)) is True
assert preprint.can_edit(Auth(user=creator)) is False
assert preprint.is_contributor(creator) is False
def test_can_view_public(self, preprint, auth):
# Create contributor and noncontributor
contributor = UserFactory()
contributor_auth = Auth(user=contributor)
other_guy = UserFactory()
other_guy_auth = Auth(user=other_guy)
preprint.add_contributor(
contributor=contributor, auth=auth)
# Change preprint to public
preprint.is_public = True
preprint.save()
# Creator, contributor, and noncontributor can view
assert preprint.can_view(auth)
assert preprint.can_view(contributor_auth)
assert preprint.can_view(other_guy_auth)
def test_can_view_unpublished(self, preprint, auth):
# Create contributor and noncontributor
contributor = UserFactory()
contributor_auth = Auth(user=contributor)
other_guy = UserFactory()
other_guy_auth = Auth(user=other_guy)
preprint.add_contributor(
contributor=contributor, auth=auth)
# Change preprint to unpublished
preprint.is_published = False
preprint.save()
# Creator, contributor, and noncontributor can view
assert preprint.can_view(auth)
assert preprint.can_view(contributor_auth)
assert preprint.can_view(other_guy_auth) is False
# Copied from tests/test_models.py
@pytest.mark.enable_implicit_clean
class TestAddUnregisteredContributor:
def test_add_unregistered_contributor(self, preprint, user, auth):
preprint.add_unregistered_contributor(
email='foo@bar.com',
fullname='Weezy F. Baby',
auth=auth
)
preprint.save()
latest_contributor = PreprintContributor.objects.get(preprint=preprint, user__username='foo@bar.com').user
assert latest_contributor.username == 'foo@bar.com'
assert latest_contributor.fullname == 'Weezy F. Baby'
assert bool(latest_contributor.is_registered) is False
# A log event was added
assert preprint.logs.first().action == 'contributor_added'
assert preprint._id in latest_contributor.unclaimed_records, 'unclaimed record was added'
unclaimed_data = latest_contributor.get_unclaimed_record(preprint._primary_key)
assert unclaimed_data['referrer_id'] == user._primary_key
assert bool(preprint.is_contributor(latest_contributor)) is True
assert unclaimed_data['email'] == 'foo@bar.com'
def test_add_unregistered_adds_new_unclaimed_record_if_user_already_in_db(self, fake, preprint, auth):
user = UnregUserFactory()
given_name = fake.name()
new_user = preprint.add_unregistered_contributor(
email=user.username,
fullname=given_name,
auth=auth
)
preprint.save()
# new unclaimed record was added
assert preprint._primary_key in new_user.unclaimed_records
unclaimed_data = new_user.get_unclaimed_record(preprint._primary_key)
assert unclaimed_data['name'] == given_name
def test_add_unregistered_raises_error_if_user_is_registered(self, preprint, auth):
user = UserFactory(is_registered=True) # A registered user
with pytest.raises(ValidationError):
preprint.add_unregistered_contributor(
email=user.username,
fullname=user.fullname,
auth=auth
)
class TestPreprintSpam:
@mock.patch.object(settings, 'SPAM_FLAGGED_MAKE_NODE_PRIVATE', True)
def test_set_privacy_on_spammy_preprint(self, preprint):
preprint.is_public = False
preprint.save()
with mock.patch.object(Preprint, 'is_spammy', mock.PropertyMock(return_value=True)):
with pytest.raises(PreprintStateError):
preprint.set_privacy('public')
def test_check_spam_disabled_by_default(self, preprint, user):
# SPAM_CHECK_ENABLED is False by default
with mock.patch('osf.models.preprint.Preprint._get_spam_content', mock.Mock(return_value='some content!')):
with mock.patch('osf.models.preprint.Preprint.do_check_spam', mock.Mock(side_effect=Exception('should not get here'))):
preprint.set_privacy('public')
assert preprint.check_spam(user, None, None) is False
@mock.patch.object(settings, 'SPAM_CHECK_ENABLED', True)
def test_check_spam_only_public_preprint_by_default(self, preprint, user):
# SPAM_CHECK_PUBLIC_ONLY is True by default
with mock.patch('osf.models.preprint.Preprint._get_spam_content', mock.Mock(return_value='some content!')):
with mock.patch('osf.models.preprint.Preprint.do_check_spam', mock.Mock(side_effect=Exception('should not get here'))):
preprint.set_privacy('private')
assert preprint.check_spam(user, None, None) is False
@mock.patch.object(settings, 'SPAM_CHECK_ENABLED', True)
def test_check_spam_skips_ham_user(self, preprint, user):
with mock.patch('osf.models.Preprint._get_spam_content', mock.Mock(return_value='some content!')):
with mock.patch('osf.models.Preprint.do_check_spam', mock.Mock(side_effect=Exception('should not get here'))):
user.confirm_ham()
preprint.set_privacy('public')
assert preprint.check_spam(user, None, None) is False
@mock.patch.object(settings, 'SPAM_CHECK_ENABLED', True)
@mock.patch.object(settings, 'SPAM_CHECK_PUBLIC_ONLY', False)
def test_check_spam_on_private_preprint(self, preprint, user):
preprint.is_public = False
preprint.save()
with mock.patch('osf.models.preprint.Preprint._get_spam_content', mock.Mock(return_value='some content!')):
with mock.patch('osf.models.preprint.Preprint.do_check_spam', mock.Mock(return_value=True)):
preprint.set_privacy('private')
assert preprint.check_spam(user, None, None) is True
@mock.patch('website.mailchimp_utils.unsubscribe_mailchimp')
@mock.patch.object(settings, 'SPAM_CHECK_ENABLED', True)
@mock.patch.object(settings, 'SPAM_ACCOUNT_SUSPENSION_ENABLED', True)
def test_check_spam_on_private_preprint_bans_new_spam_user(self, mock_send_mail, preprint, user):
preprint.is_public = False
preprint.save()
with mock.patch('osf.models.Preprint._get_spam_content', mock.Mock(return_value='some content!')):
with mock.patch('osf.models.Preprint.do_check_spam', mock.Mock(return_value=True)):
user.date_confirmed = timezone.now()
preprint.set_privacy('public')
user2 = UserFactory()
# preprint w/ one contributor
preprint2 = PreprintFactory(creator=user, description='foobar2', is_public=True)
preprint2.save()
# preprint with more than one contributor
preprint3 = PreprintFactory(creator=user, description='foobar3', is_public=True)
preprint3.add_contributor(user2)
preprint3.save()
assert preprint.check_spam(user, None, None) is True
assert user.is_disabled is True
assert preprint.is_public is False
preprint2.reload()
assert preprint2.is_public is False
preprint3.reload()
assert preprint3.is_public is True
@mock.patch('website.mailchimp_utils.unsubscribe_mailchimp')
@mock.patch.object(settings, 'SPAM_CHECK_ENABLED', True)
@mock.patch.object(settings, 'SPAM_ACCOUNT_SUSPENSION_ENABLED', True)
def test_check_spam_on_private_preprint_does_not_ban_existing_user(self, mock_send_mail, preprint, user):
preprint.is_public = False
preprint.save()
with mock.patch('osf.models.Preprint._get_spam_content', mock.Mock(return_value='some content!')):
with mock.patch('osf.models.Preprint.do_check_spam', mock.Mock(return_value=True)):
preprint.creator.date_confirmed = timezone.now() - datetime.timedelta(days=9001)
preprint.set_privacy('public')
assert preprint.check_spam(user, None, None) is True
assert preprint.is_public is True
def test_flag_spam_make_preprint_private(self, preprint):
assert preprint.is_public
with mock.patch.object(settings, 'SPAM_FLAGGED_MAKE_NODE_PRIVATE', True):
preprint.flag_spam()
preprint.save()
assert preprint.is_spammy
assert preprint.is_public is False
def test_flag_spam_do_not_make_preprint_private(self, preprint):
assert preprint.is_public
with mock.patch.object(settings, 'SPAM_FLAGGED_MAKE_NODE_PRIVATE', False):
preprint.flag_spam()
preprint.save()
assert preprint.is_spammy
assert preprint.is_public
def test_confirm_spam_makes_preprint_private(self, preprint):
assert preprint.is_public
preprint.confirm_spam()
preprint.save()
assert preprint.is_spammy
assert preprint.is_public is False
# copied from tests/test_models.py
class TestManageContributors:
def test_contributor_manage_visibility(self, preprint, user, auth):
reg_user1 = UserFactory()
# This makes sure manage_contributors uses set_visible, so visibility for new contributors
# is added before visibility for other contributors is removed, ensuring there is always
# at least one visible contributor.
preprint.add_contributor(contributor=user, permissions=ADMIN, auth=auth)
preprint.add_contributor(contributor=reg_user1, permissions=ADMIN, auth=auth)
preprint.manage_contributors(
user_dicts=[
{'id': user._id, 'permissions': ADMIN, 'visible': True},
{'id': reg_user1._id, 'permissions': ADMIN, 'visible': False},
],
auth=auth,
save=True
)
preprint.manage_contributors(
user_dicts=[
{'id': user._id, 'permissions': ADMIN, 'visible': False},
{'id': reg_user1._id, 'permissions': ADMIN, 'visible': True},
],
auth=auth,
save=True
)
assert len(preprint.visible_contributor_ids) == 1
def test_contributor_set_visibility_validation(self, preprint, user, auth):
reg_user1, reg_user2 = UserFactory(), UserFactory()
preprint.add_contributors(
[
{'user': reg_user1, 'permissions': ADMIN, 'visible': True},
{'user': reg_user2, 'permissions': ADMIN, 'visible': False},
]
)
with pytest.raises(ValueError) as e:
preprint.set_visible(user=reg_user1, visible=False, auth=None)
preprint.set_visible(user=user, visible=False, auth=None)
assert e.value.message == 'Must have at least one visible contributor'
def test_manage_contributors_cannot_remove_last_admin_contributor(self, auth, preprint):
user2 = UserFactory()
preprint.add_contributor(contributor=user2, permissions=WRITE, auth=auth)
preprint.save()
with pytest.raises(PreprintStateError) as excinfo:
preprint.manage_contributors(
user_dicts=[{'id': user2._id,
'permissions': WRITE,
'visible': True}],
auth=auth,
save=True
)
assert excinfo.value.args[0] == 'Must have at least one registered admin contributor'
def test_manage_contributors_reordering(self, preprint, user, auth):
user2, user3 = UserFactory(), UserFactory()
preprint.add_contributor(contributor=user2, auth=auth)
preprint.add_contributor(contributor=user3, auth=auth)
preprint.save()
assert list(preprint.contributors.all()) == [user, user2, user3]
preprint.manage_contributors(
user_dicts=[
{
'id': user2._id,
'permissions': WRITE,
'visible': True,
},
{
'id': user3._id,
'permissions': WRITE,
'visible': True,
},
{
'id': user._id,
'permissions': ADMIN,
'visible': True,
},
],
auth=auth,
save=True
)
assert list(preprint.contributors.all()) == [user2, user3, user]
def test_manage_contributors_logs_when_users_reorder(self, preprint, user, auth):
user2 = UserFactory()
preprint.add_contributor(contributor=user2, permissions=WRITE, auth=auth)
preprint.save()
preprint.manage_contributors(
user_dicts=[
{
'id': user2._id,
'permissions': WRITE,
'visible': True,
},
{
'id': user._id,
'permissions': ADMIN,
'visible': True,
},
],
auth=auth,
save=True
)
latest_log = preprint.logs.latest()
assert latest_log.action == PreprintLog.CONTRIB_REORDERED
assert latest_log.user == user
assert user._id in latest_log.params['contributors']
assert user2._id in latest_log.params['contributors']
def test_manage_contributors_logs_when_permissions_change(self, preprint, user, auth):
user2 = UserFactory()
preprint.add_contributor(contributor=user2, permissions=WRITE, auth=auth)
preprint.save()
preprint.manage_contributors(
user_dicts=[
{
'id': user._id,
'permissions': ADMIN,
'visible': True,
},
{
'id': user2._id,
'permissions': READ,
'visible': True,
},
],
auth=auth,
save=True
)
latest_log = preprint.logs.latest()
assert latest_log.action == PreprintLog.PERMISSIONS_UPDATED
assert latest_log.user == user
assert user2._id in latest_log.params['contributors']
assert user._id not in latest_log.params['contributors']
def test_manage_contributors_new_contributor(self, preprint, user, auth):
user = UserFactory()
users = [
{'id': user._id, 'permissions': READ, 'visible': True},
{'id': preprint.creator._id, 'permissions': [READ, WRITE, ADMIN], 'visible': True},
]
with pytest.raises(ValueError) as excinfo:
preprint.manage_contributors(
users, auth=auth, save=True
)
assert excinfo.value.args[0] == 'User {0} not in contributors'.format(user.fullname)
def test_manage_contributors_no_contributors(self, preprint, auth):
with pytest.raises(PreprintStateError):
preprint.manage_contributors(
[], auth=auth, save=True,
)
def test_manage_contributors_no_admins(self, preprint, auth):
user = UserFactory()
preprint.add_contributor(
user,
permissions=ADMIN,
save=True
)
users = [
{'id': preprint.creator._id, 'permissions': READ, 'visible': True},
{'id': user._id, 'permissions': READ, 'visible': True},
]
with pytest.raises(PreprintStateError):
preprint.manage_contributors(
users, auth=auth, save=True,
)
def test_manage_contributors_no_registered_admins(self, preprint, auth):
unregistered = UnregUserFactory()
preprint.add_unregistered_contributor(
unregistered.fullname,
unregistered.email,
auth=Auth(preprint.creator),
permissions=ADMIN,
existing_user=unregistered
)
users = [
{'id': preprint.creator._id, 'permissions': READ, 'visible': True},
{'id': unregistered._id, 'permissions': ADMIN, 'visible': True},
]
with pytest.raises(PreprintStateError):
preprint.manage_contributors(
users, auth=auth, save=True,
)
def test_get_admin_contributors(self, user, auth, preprint):
read, write, admin = UserFactory(), UserFactory(), UserFactory()
nonactive_admin = UserFactory()
noncontrib = UserFactory()
preprint = PreprintFactory(creator=user)
preprint.add_contributor(read, auth=auth, permissions=READ)
preprint.add_contributor(write, auth=auth, permissions=WRITE)
preprint.add_contributor(admin, auth=auth, permissions=ADMIN)
preprint.add_contributor(nonactive_admin, auth=auth, permissions=ADMIN)
preprint.save()
nonactive_admin.is_disabled = True
nonactive_admin.save()
result = list(preprint.get_admin_contributors([
read, write, admin, noncontrib, nonactive_admin
]))
assert admin in result
assert read not in result
assert write not in result
assert noncontrib not in result
assert nonactive_admin not in result
class TestContributorOrdering:
def test_can_get_contributor_order(self, preprint):
user1, user2 = UserFactory(), UserFactory()
contrib1 = PreprintContributor.objects.create(user=user1, preprint=preprint)
contrib2 = PreprintContributor.objects.create(user=user2, preprint=preprint)
creator_contrib = PreprintContributor.objects.get(user=preprint.creator, preprint=preprint)
assert list(preprint.get_preprintcontributor_order()) == [creator_contrib.id, contrib1.id, contrib2.id]
assert list(preprint.contributors.all()) == [preprint.creator, user1, user2]
def test_can_set_contributor_order(self, preprint):
user1, user2 = UserFactory(), UserFactory()
contrib1 = PreprintContributor.objects.create(user=user1, preprint=preprint)
contrib2 = PreprintContributor.objects.create(user=user2, preprint=preprint)
creator_contrib = PreprintContributor.objects.get(user=preprint.creator, preprint=preprint)
preprint.set_preprintcontributor_order([contrib1.id, contrib2.id, creator_contrib.id])
assert list(preprint.get_preprintcontributor_order()) == [contrib1.id, contrib2.id, creator_contrib.id]
assert list(preprint.contributors.all()) == [user1, user2, preprint.creator]
def test_move_contributor(self, user, preprint, auth):
user1 = UserFactory()
user2 = UserFactory()
preprint.add_contributors(
[
{'user': user1, 'permissions': WRITE, 'visible': True},
{'user': user2, 'permissions': WRITE, 'visible': True}
],
auth=auth
)
user_contrib_id = preprint.preprintcontributor_set.get(user=user).id
user1_contrib_id = preprint.preprintcontributor_set.get(user=user1).id
user2_contrib_id = preprint.preprintcontributor_set.get(user=user2).id
old_order = [user_contrib_id, user1_contrib_id, user2_contrib_id]
assert list(preprint.get_preprintcontributor_order()) == old_order
preprint.move_contributor(user2, auth=auth, index=0, save=True)
new_order = [user2_contrib_id, user_contrib_id, user1_contrib_id]
assert list(preprint.get_preprintcontributor_order()) == new_order
@pytest.mark.enable_implicit_clean
class TestDOIValidation:
def test_validate_bad_doi(self, preprint):
with pytest.raises(ValidationError):
preprint.article_doi = 'nope'
preprint.save()
with pytest.raises(ValidationError):
preprint.article_doi = 'https://dx.doi.org/10.123.456'
preprint.save()  # URLs are rejected; only a bare DOI may be saved
with pytest.raises(ValidationError):
preprint.article_doi = 'doi:10.10.1038/nwooo1170'
preprint.save()  # the doi: prefix is rejected; only a bare DOI may be saved
def test_validate_good_doi(self, preprint):
doi = '10.11038/nwooo1170'
preprint.article_doi = doi
preprint.save()
assert preprint.article_doi == doi
# copied from tests/test_models.py
class TestPreprintUpdate:
def test_set_title_works_with_valid_title(self, user, auth):
proj = ProjectFactory(title='That Was Then', creator=user)
proj.set_title('This is now', auth=auth)
proj.save()
# Title was changed
assert proj.title == 'This is now'
# A log event was saved
latest_log = proj.logs.latest()
assert latest_log.action == 'edit_title'
assert latest_log.params['title_original'] == 'That Was Then'
def test_set_title_fails_if_empty_or_whitespace(self, user, auth):
proj = ProjectFactory(title='That Was Then', creator=user)
with pytest.raises(ValidationValueError):
proj.set_title(' ', auth=auth)
with pytest.raises(ValidationValueError):
proj.set_title('', auth=auth)
assert proj.title == 'That Was Then'
def test_set_title_fails_if_too_long(self, user, auth):
proj = ProjectFactory(title='That Was Then', creator=user)
long_title = ''.join('a' for _ in range(513))
with pytest.raises(ValidationValueError):
proj.set_title(long_title, auth=auth)
def test_set_description(self, preprint, auth):
old_desc = preprint.description
preprint.set_description(
'new description', auth=auth)
preprint.save()
assert preprint.description == 'new description'
latest_log = preprint.logs.latest()
assert latest_log.action == PreprintLog.EDITED_DESCRIPTION
assert latest_log.params['description_original'] == old_desc
assert latest_log.params['description_new'] == 'new description'
def test_updating_title_twice_with_same_title(self, fake, auth, preprint):
original_n_logs = preprint.logs.count()
new_title = fake.bs()
preprint.set_title(new_title, auth=auth, save=True)
assert preprint.logs.count() == original_n_logs + 1 # sanity check
# Call update with same title
preprint.set_title(new_title, auth=auth, save=True)
# A new log is not created
assert preprint.logs.count() == original_n_logs + 1
def test_updating_description_twice_with_same_content(self, fake, auth, preprint):
original_n_logs = preprint.logs.count()
new_desc = fake.bs()
preprint.set_description(new_desc, auth=auth, save=True)
assert preprint.logs.count() == original_n_logs + 1 # sanity check
# Call update with same description
preprint.set_description(new_desc, auth=auth, save=True)
# A new log is not created
assert preprint.logs.count() == original_n_logs + 1
class TestSetPreprintFile(OsfTestCase):
def setUp(self):
super(TestSetPreprintFile, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(user=self.user)
self.read_write_user = AuthUserFactory()
self.read_write_user_auth = Auth(user=self.read_write_user)
self.project = ProjectFactory(creator=self.user)
self.preprint = PreprintFactory(project=self.project, creator=self.user, finish=False)
self.file = OsfStorageFile.create(
target=self.preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
self.file_two = OsfStorageFile.create(
target=self.preprint,
path='/pandapanda.txt',
name='pandapanda.txt',
materialized_path='/pandapanda.txt')
self.file_two.save()
self.preprint.add_contributor(self.read_write_user, permissions=WRITE)
self.project.save()
@assert_preprint_logs(PreprintLog.PUBLISHED, 'preprint')
def test_is_preprint_property_new_file_to_published(self):
assert_false(self.preprint.is_published)
self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
self.preprint.reload()
assert_false(self.preprint.is_published)
with assert_raises(ValueError):
self.preprint.set_published(True, auth=self.auth, save=True)
self.preprint.reload()
self.preprint.provider = PreprintProviderFactory()
self.preprint.set_subjects([[SubjectFactory()._id]], auth=self.auth)
self.preprint.reload()
assert_false(self.preprint.is_published)
self.preprint.set_published(True, auth=self.auth, save=True)
self.preprint.reload()
assert_true(self.preprint.is_published)
@assert_preprint_logs(PreprintLog.SUPPLEMENTAL_NODE_ADDED, 'preprint')
def test_set_supplemental_node(self):
assert_false(self.preprint.is_published)
project = ProjectFactory(creator=self.preprint.creator)
self.preprint.set_supplemental_node(project, auth=self.auth, save=True)
self.preprint.reload()
assert self.preprint.node == project
def test_set_supplemental_node_deleted(self):
project = ProjectFactory(creator=self.preprint.creator)
with assert_raises(ValueError):
project.is_deleted = True
project.save()
self.preprint.set_supplemental_node(project, auth=self.auth, save=True)
def test_set_supplemental_node_already_has_a_preprint(self):
project_two = ProjectFactory(creator=self.preprint.creator)
preprint = PreprintFactory(project=project_two, provider=self.preprint.provider)
self.preprint.set_supplemental_node(project_two, auth=self.auth, save=True)
assert project_two.preprints.count() == 2
def test_preprint_made_public(self):
# Testing for migrated preprints, that may have had is_public = False
self.preprint.is_public = False
self.preprint.save()
assert_false(self.preprint.is_public)
self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
assert_false(self.preprint.is_public)
with assert_raises(ValueError):
self.preprint.set_published(True, auth=self.auth, save=True)
self.preprint.reload()
self.preprint.provider = PreprintProviderFactory()
self.preprint.set_subjects([[SubjectFactory()._id]], auth=self.auth)
self.preprint.reload()
assert_false(self.preprint.is_public)
self.preprint.set_published(True, auth=self.auth, save=True)
self.project.reload()
assert_true(self.preprint.is_public)
def test_add_primary_file(self):
self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
assert_equal(self.preprint.primary_file, self.file)
assert_equal(type(self.preprint.primary_file), type(self.file))
@assert_preprint_logs(PreprintLog.FILE_UPDATED, 'preprint')
def test_change_primary_file(self):
self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
assert_equal(self.preprint.primary_file, self.file)
self.preprint.set_primary_file(self.file_two, auth=self.auth, save=True)
assert_equal(self.preprint.primary_file._id, self.file_two._id)
def test_add_invalid_file(self):
with assert_raises(AttributeError):
self.preprint.set_primary_file('inatlanta', auth=self.auth, save=True)
def test_removing_primary_file_creates_orphan(self):
self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
assert_false(self.preprint.is_preprint_orphan)
self.preprint.primary_file = None
self.preprint.save()
assert_true(self.preprint.is_preprint_orphan)
def test_preprint_created_date(self):
self.preprint.set_primary_file(self.file, auth=self.auth, save=True)
assert_equal(self.preprint.primary_file._id, self.file._id)
assert self.preprint.created
assert_not_equal(self.project.created, self.preprint.created)
class TestPreprintPermissions(OsfTestCase):
def setUp(self):
super(TestPreprintPermissions, self).setUp()
self.user = AuthUserFactory()
self.noncontrib = AuthUserFactory()
self.write_contrib = AuthUserFactory()
self.read_contrib = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.preprint = PreprintFactory(project=self.project, is_published=False, creator=self.user)
self.preprint.add_contributor(self.write_contrib, permissions=WRITE)
self.preprint.add_contributor(self.read_contrib, permissions=READ)
self.file = OsfStorageFile.create(
target=self.preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
def test_noncontrib_cannot_set_subjects(self):
initial_subjects = list(self.preprint.subjects.all())
with assert_raises(PermissionsError):
self.preprint.set_subjects([[SubjectFactory()._id]], auth=Auth(self.noncontrib))
self.preprint.reload()
assert_equal(initial_subjects, list(self.preprint.subjects.all()))
def test_read_cannot_set_subjects(self):
initial_subjects = list(self.preprint.subjects.all())
with assert_raises(PermissionsError):
self.preprint.set_subjects([[SubjectFactory()._id]], auth=Auth(self.read_contrib))
self.preprint.reload()
assert_equal(initial_subjects, list(self.preprint.subjects.all()))
def test_write_can_set_subjects(self):
initial_subjects = list(self.preprint.subjects.all())
self.preprint.set_subjects([[SubjectFactory()._id]], auth=Auth(self.write_contrib))
self.preprint.reload()
assert_not_equal(initial_subjects, list(self.preprint.subjects.all()))
def test_admin_can_set_subjects(self):
initial_subjects = list(self.preprint.subjects.all())
self.preprint.set_subjects([[SubjectFactory()._id]], auth=Auth(self.user))
self.preprint.reload()
assert_not_equal(initial_subjects, list(self.preprint.subjects.all()))
def test_noncontrib_cannot_set_file(self):
initial_file = self.preprint.primary_file
with assert_raises(PermissionsError):
self.preprint.set_primary_file(self.file, auth=Auth(self.noncontrib), save=True)
self.preprint.reload()
assert_equal(initial_file._id, self.preprint.primary_file._id)
def test_read_contrib_cannot_set_file(self):
initial_file = self.preprint.primary_file
with assert_raises(PermissionsError):
self.preprint.set_primary_file(self.file, auth=Auth(self.read_contrib), save=True)
self.preprint.reload()
assert_equal(initial_file._id, self.preprint.primary_file._id)
def test_write_contrib_can_set_file(self):
initial_file = self.preprint.primary_file
self.preprint.set_primary_file(self.file, auth=Auth(self.write_contrib), save=True)
self.preprint.reload()
assert_equal(self.file._id, self.preprint.primary_file._id)
def test_admin_can_set_file(self):
initial_file = self.preprint.primary_file
self.preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
self.preprint.reload()
assert_equal(self.file._id, self.preprint.primary_file._id)
def test_primary_file_must_target_preprint(self):
file = OsfStorageFile.create(
target=self.project,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
file.save()
with assert_raises(ValueError):
self.preprint.set_primary_file(file, auth=Auth(self.user), save=True)
def test_non_admin_cannot_publish(self):
assert_false(self.preprint.is_published)
with assert_raises(PermissionsError):
self.preprint.set_published(True, auth=Auth(self.noncontrib), save=True)
assert_false(self.preprint.is_published)
def test_read_cannot_publish(self):
assert_false(self.preprint.is_published)
with assert_raises(PermissionsError):
self.preprint.set_published(True, auth=Auth(self.read_contrib), save=True)
assert_false(self.preprint.is_published)
def test_write_cannot_publish(self):
assert_false(self.preprint.is_published)
with assert_raises(PermissionsError):
self.preprint.set_published(True, auth=Auth(self.write_contrib), save=True)
assert_false(self.preprint.is_published)
def test_admin_can_publish(self):
assert_false(self.preprint.is_published)
self.preprint.set_published(True, auth=Auth(self.user), save=True)
assert_true(self.preprint.is_published)
def test_admin_cannot_unpublish(self):
assert_false(self.preprint.is_published)
self.preprint.set_published(True, auth=Auth(self.user), save=True)
assert_true(self.preprint.is_published)
with assert_raises(ValueError) as e:
self.preprint.set_published(False, auth=Auth(self.user), save=True)
assert_in('Cannot unpublish', str(e.exception))
def test_set_title_permissions(self):
original_title = self.preprint.title
new_title = 'My new preprint title'
# noncontrib
with assert_raises(PermissionsError):
self.preprint.set_title(new_title, auth=Auth(self.noncontrib), save=True)
assert_equal(self.preprint.title, original_title)
# read
with assert_raises(PermissionsError):
self.preprint.set_title(new_title, auth=Auth(self.read_contrib), save=True)
assert_equal(self.preprint.title, original_title)
# write
self.preprint.set_title(new_title, auth=Auth(self.write_contrib), save=True)
assert_equal(self.preprint.title, new_title)
# admin
self.preprint.title = original_title
self.preprint.save()
self.preprint.set_title(new_title, auth=Auth(self.user), save=True)
assert_equal(self.preprint.title, new_title)
def test_set_abstract_permissions(self):
original_abstract = self.preprint.description
new_abstract = 'This is my preprint abstract'
# noncontrib
with assert_raises(PermissionsError):
self.preprint.set_description(new_abstract, auth=Auth(self.noncontrib), save=True)
assert_equal(self.preprint.description, original_abstract)
# read
with assert_raises(PermissionsError):
self.preprint.set_description(new_abstract, auth=Auth(self.read_contrib), save=True)
assert_equal(self.preprint.description, original_abstract)
# write
self.preprint.set_description(new_abstract, auth=Auth(self.write_contrib), save=True)
assert_equal(self.preprint.description, new_abstract)
# admin
self.preprint.description = original_abstract
self.preprint.save()
self.preprint.set_description(new_abstract, auth=Auth(self.user), save=True)
assert_equal(self.preprint.description, new_abstract)
def test_set_privacy(self):
# Not currently exposed, but adding is_public field for legacy preprints and spam
self.preprint.is_public = False
self.preprint.save()
# noncontrib
with assert_raises(PermissionsError):
self.preprint.set_privacy('public', auth=Auth(self.noncontrib), save=True)
assert_false(self.preprint.is_public)
# read
with assert_raises(PermissionsError):
self.preprint.set_privacy('public', auth=Auth(self.read_contrib), save=True)
assert_false(self.preprint.is_public)
# write
self.preprint.set_privacy('public', auth=Auth(self.write_contrib), save=True)
assert_true(self.preprint.is_public)
# admin
self.preprint.is_public = False
self.preprint.save()
self.preprint.set_privacy('public', auth=Auth(self.user), save=True)
assert_true(self.preprint.is_public)
def test_set_supplemental_node_project_permissions(self):
# contributors have proper permissions on the preprint, but not on the supplementary node
self.preprint.node = None
self.preprint.save()
project = ProjectFactory(creator=self.preprint.creator)
project.add_contributor(self.read_contrib, READ, save=True)
project.add_contributor(self.write_contrib, WRITE, save=True)
self.preprint.add_contributor(self.read_contrib, ADMIN, save=True)
self.preprint.add_contributor(self.write_contrib, ADMIN, save=True)
self.preprint.add_contributor(self.noncontrib, ADMIN, save=True)
# noncontrib
with assert_raises(PermissionsError):
self.preprint.set_supplemental_node(project, auth=Auth(self.noncontrib), save=True)
assert self.preprint.node is None
# read
with assert_raises(PermissionsError):
self.preprint.set_supplemental_node(project, auth=Auth(self.read_contrib), save=True)
assert self.preprint.node is None
# write
self.preprint.set_supplemental_node(project, auth=Auth(self.write_contrib), save=True)
assert self.preprint.node == project
# admin
self.preprint.node = None
self.preprint.save()
self.preprint.set_supplemental_node(project, auth=Auth(self.user), save=True)
assert self.preprint.node == project
def test_set_supplemental_node_preprint_permissions(self):
# contributors have proper permissions on the supplementary node, but not the preprint
self.preprint.node = None
self.preprint.save()
project = ProjectFactory(creator=self.preprint.creator)
project.add_contributor(self.read_contrib, ADMIN, save=True)
project.add_contributor(self.write_contrib, ADMIN, save=True)
project.add_contributor(self.noncontrib, ADMIN, save=True)
# noncontrib
with assert_raises(PermissionsError):
self.preprint.set_supplemental_node(project, auth=Auth(self.noncontrib), save=True)
assert self.preprint.node is None
# read
with assert_raises(PermissionsError):
self.preprint.set_supplemental_node(project, auth=Auth(self.read_contrib), save=True)
assert self.preprint.node is None
# write
self.preprint.set_supplemental_node(project, auth=Auth(self.write_contrib), save=True)
assert self.preprint.node == project
# admin
self.preprint.node = None
self.preprint.save()
self.preprint.set_supplemental_node(project, auth=Auth(self.user), save=True)
assert self.preprint.node == project
class TestPreprintProvider(OsfTestCase):
def setUp(self):
super(TestPreprintProvider, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(user=self.user)
self.provider_osf = PreprintProviderFactory(_id='osf')
self.preprint = PreprintFactory(provider=None, is_published=False)
self.provider = PreprintProviderFactory(name='WWEArxiv')
self.provider_one = PreprintProviderFactory(name='DoughnutArxiv')
self.provider_two = PreprintProviderFactory(name='IceCreamArxiv')
self.subject_one = SubjectFactory(provider=self.provider_one)
self.subject_osf = SubjectFactory(provider=self.provider_osf)
def test_add_provider(self):
assert_not_equal(self.preprint.provider, self.provider)
self.preprint.provider = self.provider
self.preprint.save()
self.preprint.reload()
assert_equal(self.preprint.provider, self.provider)
def test_remove_provider(self):
self.preprint.provider = None
self.preprint.save()
self.preprint.reload()
assert_equal(self.preprint.provider, None)
def test_find_provider(self):
self.preprint.provider = self.provider
self.preprint.save()
self.preprint.reload()
assert ('branded', self.provider) == find_preprint_provider(self.preprint)
def test_top_level_subjects(self):
subj_a = SubjectFactory(provider=self.provider, text='A')
subj_b = SubjectFactory(provider=self.provider, text='B')
subj_aa = SubjectFactory(provider=self.provider, text='AA', parent=subj_a)
subj_ab = SubjectFactory(provider=self.provider, text='AB', parent=subj_a)
subj_ba = SubjectFactory(provider=self.provider, text='BA', parent=subj_b)
subj_bb = SubjectFactory(provider=self.provider, text='BB', parent=subj_b)
subj_aaa = SubjectFactory(provider=self.provider, text='AAA', parent=subj_aa)
some_other_provider = PreprintProviderFactory(name='asdfArxiv')
subj_asdf = SubjectFactory(provider=some_other_provider)
assert set(self.provider.top_level_subjects) == set([subj_a, subj_b])
def test_all_subjects(self):
subj_a = SubjectFactory(provider=self.provider, text='A')
subj_b = SubjectFactory(provider=self.provider, text='B')
subj_aa = SubjectFactory(provider=self.provider, text='AA', parent=subj_a)
subj_ab = SubjectFactory(provider=self.provider, text='AB', parent=subj_a)
subj_ba = SubjectFactory(provider=self.provider, text='BA', parent=subj_b)
subj_bb = SubjectFactory(provider=self.provider, text='BB', parent=subj_b)
subj_aaa = SubjectFactory(provider=self.provider, text='AAA', parent=subj_aa)
some_other_provider = PreprintProviderFactory(name='asdfArxiv')
subj_asdf = SubjectFactory(provider=some_other_provider)
assert set(self.provider.all_subjects) == set([subj_a, subj_b, subj_aa, subj_ab, subj_ba, subj_bb, subj_aaa])
def test_highlighted_subjects(self):
subj_a = SubjectFactory(provider=self.provider, text='A')
subj_b = SubjectFactory(provider=self.provider, text='B')
subj_aa = SubjectFactory(provider=self.provider, text='AA', parent=subj_a)
subj_ab = SubjectFactory(provider=self.provider, text='AB', parent=subj_a)
subj_ba = SubjectFactory(provider=self.provider, text='BA', parent=subj_b)
subj_bb = SubjectFactory(provider=self.provider, text='BB', parent=subj_b)
subj_aaa = SubjectFactory(provider=self.provider, text='AAA', parent=subj_aa)
assert self.provider.has_highlighted_subjects is False
assert set(self.provider.highlighted_subjects) == set([subj_a, subj_b])
subj_aaa.highlighted = True
subj_aaa.save()
assert self.provider.has_highlighted_subjects is True
assert set(self.provider.highlighted_subjects) == set([subj_aaa])
def test_change_preprint_provider_custom_taxonomies(self):
subject_two = SubjectFactory(provider=self.provider_two,
bepress_subject=self.subject_one.bepress_subject)
preprint = PreprintFactory(subjects=[[self.subject_one._id]], provider=self.provider_one, creator=self.user)
subject_problems = preprint.map_subjects_between_providers(self.provider_one, self.provider_two, self.auth)
preprint.refresh_from_db()
assert subject_problems == []
assert subject_two in preprint.subjects.all()
def test_change_preprint_provider_from_osf(self):
subject_two = SubjectFactory(provider=self.provider_one,
bepress_subject=self.subject_osf)
preprint = PreprintFactory(subjects=[[self.subject_osf._id]], provider=self.provider_osf, creator=self.user)
subject_problems = preprint.map_subjects_between_providers(self.provider_osf, self.provider_one, self.auth)
preprint.refresh_from_db()
assert subject_problems == []
assert subject_two in preprint.subjects.all()
def test_change_preprint_provider_to_osf(self):
subject_two = SubjectFactory(provider=self.provider_one,
bepress_subject=self.subject_osf)
preprint = PreprintFactory(subjects=[[subject_two._id]], provider=self.provider_one, creator=self.user)
subject_problems = preprint.map_subjects_between_providers(self.provider_one, self.provider_osf, self.auth)
preprint.refresh_from_db()
assert subject_problems == []
assert self.subject_osf in preprint.subjects.all()
def test_change_preprint_provider_problem_subject(self):
subject_two = SubjectFactory(provider=self.provider_one,
bepress_subject=self.subject_osf)
preprint = PreprintFactory(subjects=[[subject_two._id]], provider=self.provider_one, creator=self.user)
subject_problems = preprint.map_subjects_between_providers(self.provider_one, self.provider_two, self.auth)
preprint.refresh_from_db()
assert subject_problems == [subject_two.text]
assert subject_two in preprint.subjects.all()
class TestPreprintIdentifiers(OsfTestCase):
def setUp(self):
super(TestPreprintIdentifiers, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(user=self.user)
def test_update_or_create_preprint_identifiers_called(self):
published_preprint = PreprintFactory(is_published=True, creator=self.user)
with mock.patch.object(published_preprint, 'request_identifier_update') as mock_update_doi:
update_or_create_preprint_identifiers(published_preprint)
assert mock_update_doi.called
assert mock_update_doi.call_count == 1
@mock.patch('website.settings.CROSSREF_URL', 'http://test.osf.crossref.test')
def test_correct_doi_client_called(self):
osf_preprint = PreprintFactory(is_published=True, creator=self.user, provider=PreprintProviderFactory())
assert isinstance(osf_preprint.get_doi_client(), CrossRefClient)
ecsarxiv_preprint = PreprintFactory(is_published=True, creator=self.user, provider=PreprintProviderFactory(_id='ecsarxiv'))
assert isinstance(ecsarxiv_preprint.get_doi_client(), ECSArXivCrossRefClient)
def test_qatest_doesnt_make_dois(self):
preprint = PreprintFactory(is_published=True, creator=self.user, provider=PreprintProviderFactory())
preprint.add_tag('qatest', self.auth)
assert not request_identifiers(preprint)
@pytest.mark.enable_implicit_clean
class TestOnPreprintUpdatedTask(OsfTestCase):
def setUp(self):
super(TestOnPreprintUpdatedTask, self).setUp()
self.user = AuthUserFactory()
if len(self.user.fullname.split(' ')) > 2:
# Prevent unexpected keys ('suffix', 'additional_name')
self.user.fullname = 'David Davidson'
self.user.middle_names = ''
self.user.suffix = ''
self.user.save()
self.auth = Auth(user=self.user)
self.preprint = PreprintFactory()
thesis_provider = PreprintProviderFactory(share_publish_type='Thesis')
self.thesis = PreprintFactory(provider=thesis_provider)
for pp in [self.preprint, self.thesis]:
pp.add_tag('preprint', self.auth, save=False)
pp.add_tag('spoderman', self.auth, save=False)
pp.add_unregistered_contributor('BoJack Horseman', 'horse@man.org', Auth(pp.creator))
pp.add_contributor(self.user, visible=False)
pp.save()
pp.creator.given_name = u'ZZYZ'
if len(pp.creator.fullname.split(' ')) > 2:
# Prevent unexpected keys ('suffix', 'additional_name')
pp.creator.fullname = 'David Davidson'
pp.creator.middle_names = ''
pp.creator.suffix = ''
pp.creator.save()
pp.set_subjects([[SubjectFactory()._id]], auth=Auth(pp.creator))
def tearDown(self):
handlers.celery_before_request()
super(TestOnPreprintUpdatedTask, self).tearDown()
def test_update_or_enqueue_on_preprint_updated(self):
# enqueue_postcommit_task automatically calls the task in testing now.
# This test was modified to put something in the postcommit_queue manually so
# we can check that the queue is updated properly.
first_subjects = [15]
args = ()
kwargs = {'preprint_id': self.preprint._id, 'old_subjects': first_subjects, 'update_share': False, 'share_type': None, 'saved_fields': ['contributors']}
postcommit_celery_queue().update({'asdfasd': on_preprint_updated.si(*args, **kwargs)})
second_subjects = [16, 17]
update_or_enqueue_on_preprint_updated(
self.preprint._id,
old_subjects=second_subjects,
saved_fields={'title': 'Hello'}
)
updated_task = get_task_from_postcommit_queue(
'website.preprints.tasks.on_preprint_updated',
predicate=lambda task: task.kwargs['preprint_id'] == self.preprint._id
)
assert 'title' in updated_task.kwargs['saved_fields']
assert 'contributors' in updated_task.kwargs['saved_fields']
assert set(first_subjects + second_subjects).issubset(updated_task.kwargs['old_subjects'])
def test_format_preprint(self):
res = format_preprint(self.preprint, self.preprint.provider.share_publish_type)
assert set(gn['@type'] for gn in res) == {'creator', 'contributor', 'throughsubjects', 'subject', 'throughtags', 'tag', 'workidentifier', 'agentidentifier', 'person', 'preprint', 'workrelation', 'creativework'}
nodes = dict(enumerate(res))
preprint = nodes.pop(next(k for k, v in iter(nodes.items()) if v['@type'] == 'preprint'))
assert preprint['title'] == self.preprint.title
assert preprint['description'] == self.preprint.description
assert preprint['is_deleted'] == (not self.preprint.is_published or not self.preprint.is_public or self.preprint.is_preprint_orphan)
assert preprint['date_updated'] == self.preprint.modified.isoformat()
assert preprint['date_published'] == self.preprint.date_published.isoformat()
tags = [nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'tag']
through_tags = [nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'throughtags']
assert sorted(tag['@id'] for tag in tags) == sorted(tt['tag']['@id'] for tt in through_tags)
assert sorted(tag['name'] for tag in tags) == ['preprint', 'spoderman']
subjects = [nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'subject']
through_subjects = [nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'throughsubjects']
s_ids = [s['@id'] for s in subjects]
ts_ids = [ts['subject']['@id'] for ts in through_subjects]
cs_ids = [i for i in set(s.get('central_synonym', {}).get('@id') for s in subjects) if i]
for ts in ts_ids:
assert ts in s_ids
assert ts not in cs_ids # Only aliased subjects are connected to self.preprint
for s in subjects:
subject = Subject.objects.get(text=s['name'])
assert s['uri'].endswith('v2/taxonomies/{}/'.format(subject._id)) # This cannot change
assert set(subject['name'] for subject in subjects) == set([s.text for s in self.preprint.subjects.all()] + [s.bepress_subject.text for s in self.preprint.subjects.filter(bepress_subject__isnull=False)])
people = sorted([nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'person'], key=lambda x: x['given_name'])
expected_people = sorted([{
'@type': 'person',
'given_name': u'BoJack',
'family_name': u'Horseman',
}, {
'@type': 'person',
'given_name': self.user.given_name,
'family_name': self.user.family_name,
}, {
'@type': 'person',
'given_name': self.preprint.creator.given_name,
'family_name': self.preprint.creator.family_name,
}], key=lambda x: x['given_name'])
for i, p in enumerate(expected_people):
expected_people[i]['@id'] = people[i]['@id']
assert people == expected_people
creators = sorted([nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'creator'], key=lambda x: x['order_cited'])
assert creators == [{
'@id': creators[0]['@id'],
'@type': 'creator',
'order_cited': 0,
'cited_as': u'{}'.format(self.preprint.creator.fullname),
'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.preprint.creator.given_name][0], '@type': 'person'},
'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
}, {
'@id': creators[1]['@id'],
'@type': 'creator',
'order_cited': 1,
'cited_as': u'BoJack Horseman',
'agent': {'@id': [p['@id'] for p in people if p['given_name'] == u'BoJack'][0], '@type': 'person'},
'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
}]
contributors = [nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'contributor']
assert contributors == [{
'@id': contributors[0]['@id'],
'@type': 'contributor',
'cited_as': u'{}'.format(self.user.fullname),
'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.user.given_name][0], '@type': 'person'},
'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
}]
agentidentifiers = {nodes.pop(k)['uri'] for k, v in list(nodes.items()) if v['@type'] == 'agentidentifier'}
assert agentidentifiers == set([
'mailto:' + self.user.username,
'mailto:' + self.preprint.creator.username,
self.user.profile_image_url(),
self.preprint.creator.profile_image_url(),
]) | set(user.absolute_url for user in self.preprint.contributors)
related_work = next(nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'creativework')
assert set(related_work.keys()) == {'@id', '@type'} # Empty except @id and @type
osf_doi = next(nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'workidentifier' and 'doi' in v['uri'] and 'osf.io' in v['uri'])
assert osf_doi['creative_work'] == {'@id': preprint['@id'], '@type': preprint['@type']}
related_doi = next(nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'workidentifier' and 'doi' in v['uri'])
assert related_doi['creative_work'] == related_work
workidentifiers = next(nodes.pop(k) for k, v in nodes.items() if v['@type'] == 'workidentifier')
assert workidentifiers['uri'] == urljoin(settings.DOMAIN, self.preprint._id + '/')
relation = nodes.pop(list(nodes.keys())[0])
assert relation == {'@id': relation['@id'], '@type': 'workrelation', 'related': {'@id': related_work['@id'], '@type': related_work['@type']}, 'subject': {'@id': preprint['@id'], '@type': preprint['@type']}}
assert nodes == {}
def test_format_thesis(self):
res = format_preprint(self.thesis, self.thesis.provider.share_publish_type)
assert set(gn['@type'] for gn in res) == {'creator', 'contributor', 'throughsubjects', 'subject', 'throughtags', 'tag', 'workidentifier', 'agentidentifier', 'person', 'thesis', 'workrelation', 'creativework'}
nodes = dict(enumerate(res))
thesis = nodes.pop(next(k for k, v in nodes.items() if v['@type'] == 'thesis'))
assert thesis['title'] == self.thesis.title
assert thesis['description'] == self.thesis.description
def test_format_preprint_date_modified_node_updated(self):
self.preprint.save()
res = format_preprint(self.preprint, self.preprint.provider.share_publish_type)
nodes = dict(enumerate(res))
preprint = nodes.pop(next(k for k, v in nodes.items() if v['@type'] == 'preprint'))
assert preprint['date_updated'] == self.preprint.modified.isoformat()
def test_format_preprint_nones(self):
self.preprint.tags = []
self.preprint.date_published = None
self.preprint.article_doi = None
self.preprint.set_subjects([], auth=Auth(self.preprint.creator))
res = format_preprint(self.preprint, self.preprint.provider.share_publish_type)
assert self.preprint.provider._id != 'osf'
assert set(gn['@type'] for gn in res) == {'creator', 'contributor', 'workidentifier', 'agentidentifier', 'person', 'preprint'}
nodes = dict(enumerate(res))
preprint = nodes.pop(next(k for k, v in nodes.items() if v['@type'] == 'preprint'))
assert preprint['title'] == self.preprint.title
assert preprint['description'] == self.preprint.description
assert preprint['is_deleted'] == (not self.preprint.is_published or not self.preprint.is_public or self.preprint.is_preprint_orphan or (self.preprint.deleted or False))
assert preprint['date_updated'] == self.preprint.modified.isoformat()
assert preprint.get('date_published') is None
people = sorted([nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'person'], key=lambda x: x['given_name'])
expected_people = sorted([{
'@type': 'person',
'given_name': u'BoJack',
'family_name': u'Horseman',
}, {
'@type': 'person',
'given_name': self.user.given_name,
'family_name': self.user.family_name,
}, {
'@type': 'person',
'given_name': self.preprint.creator.given_name,
'family_name': self.preprint.creator.family_name,
}], key=lambda x: x['given_name'])
for i, p in enumerate(expected_people):
expected_people[i]['@id'] = people[i]['@id']
assert people == expected_people
creators = sorted([nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'creator'], key=lambda x: x['order_cited'])
assert creators == [{
'@id': creators[0]['@id'],
'@type': 'creator',
'order_cited': 0,
'cited_as': self.preprint.creator.fullname,
'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.preprint.creator.given_name][0], '@type': 'person'},
'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
}, {
'@id': creators[1]['@id'],
'@type': 'creator',
'order_cited': 1,
'cited_as': u'BoJack Horseman',
'agent': {'@id': [p['@id'] for p in people if p['given_name'] == u'BoJack'][0], '@type': 'person'},
'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
}]
contributors = [nodes.pop(k) for k, v in list(nodes.items()) if v['@type'] == 'contributor']
assert contributors == [{
'@id': contributors[0]['@id'],
'@type': 'contributor',
'cited_as': self.user.fullname,
'agent': {'@id': [p['@id'] for p in people if p['given_name'] == self.user.given_name][0], '@type': 'person'},
'creative_work': {'@id': preprint['@id'], '@type': preprint['@type']},
}]
agentidentifiers = {nodes.pop(k)['uri'] for k, v in list(nodes.items()) if v['@type'] == 'agentidentifier'}
assert agentidentifiers == set([
'mailto:' + self.user.username,
'mailto:' + self.preprint.creator.username,
self.user.profile_image_url(),
self.preprint.creator.profile_image_url(),
]) | set(user.absolute_url for user in self.preprint.contributors)
workidentifiers = {nodes.pop(k)['uri'] for k, v in list(nodes.items()) if v['@type'] == 'workidentifier'}
# URLs should *always* be osf.io/guid/
assert workidentifiers == set([urljoin(settings.DOMAIN, self.preprint._id) + '/', 'https://doi.org/{}'.format(self.preprint.get_identifier('doi').value)])
assert nodes == {}
def test_format_preprint_is_deleted(self):
self.file = OsfStorageFile.create(
target=self.preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
# Use a list of (field, (value, expected_is_deleted)) pairs instead of a
# dict so that both values of each field are exercised; duplicate dict keys
# would silently overwrite one another.
CASES = [
('is_published', (True, False)),
('is_published', (False, True)),
('is_public', (True, False)),
('is_public', (False, True)),
('primary_file', (self.file, False)),
('primary_file', (None, True)),
('deleted', (True, True)),
('deleted', (False, False)),
]
for key, (value, is_deleted) in CASES:
target = self.preprint
for k in key.split('.')[:-1]:
if k:
target = getattr(target, k)
orig_val = getattr(target, key.split('.')[-1])
setattr(target, key.split('.')[-1], value)
res = format_preprint(self.preprint, self.preprint.provider.share_publish_type)
preprint = next(v for v in res if v['@type'] == 'preprint')
assert preprint['is_deleted'] is is_deleted
setattr(target, key.split('.')[-1], orig_val)
def test_format_preprint_is_deleted_true_if_qatest_tag_is_added(self):
res = format_preprint(self.preprint, self.preprint.provider.share_publish_type)
preprint = next(v for v in res if v['@type'] == 'preprint')
assert preprint['is_deleted'] is False
self.preprint.add_tag('qatest', auth=self.auth, save=True)
res = format_preprint(self.preprint, self.preprint.provider.share_publish_type)
preprint = next(v for v in res if v['@type'] == 'preprint')
assert preprint['is_deleted'] is True
def test_unregistered_users_guids(self):
user = UserFactory.build(is_registered=False)
user.save()
node = format_user(user)
assert {x.attrs['uri'] for x in node.get_related()} == {user.absolute_url}
def test_verified_orcid(self):
user = UserFactory.build(is_registered=True)
user.external_identity = {'ORCID': {'fake-orcid': 'VERIFIED'}}
user.save()
node = format_user(user)
assert {x.attrs['uri'] for x in node.get_related()} == {'fake-orcid', user.absolute_url, user.profile_image_url()}
def test_unverified_orcid(self):
user = UserFactory.build(is_registered=True)
user.external_identity = {'ORCID': {'fake-orcid': 'SOMETHINGELSE'}}
user.save()
node = format_user(user)
assert {x.attrs['uri'] for x in node.get_related()} == {user.absolute_url, user.profile_image_url()}
class TestPreprintSaveShareHook(OsfTestCase):
def setUp(self):
super(TestPreprintSaveShareHook, self).setUp()
self.admin = AuthUserFactory()
self.auth = Auth(user=self.admin)
self.provider = PreprintProviderFactory(name='Lars Larson Snowmobiling Experience')
self.project = ProjectFactory(creator=self.admin, is_public=True)
self.subject = SubjectFactory()
self.subject_two = SubjectFactory()
self.file = api_test_utils.create_test_file(self.project, self.admin, 'second_place.pdf')
self.preprint = PreprintFactory(creator=self.admin, filename='second_place.pdf', provider=self.provider, subjects=[[self.subject._id]], project=self.project, is_published=False)
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_save_unpublished_not_called(self, mock_on_preprint_updated):
self.preprint.save()
assert not mock_on_preprint_updated.called
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_save_published_called(self, mock_on_preprint_updated):
self.preprint.set_published(True, auth=self.auth, save=True)
assert mock_on_preprint_updated.called
# This covers an edge case where a preprint is forced back to unpublished:
# the information should still be sent back to SHARE.
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_save_unpublished_called_forced(self, mock_on_preprint_updated):
self.preprint.set_published(True, auth=self.auth, save=True)
self.preprint.is_published = False
self.preprint.save(**{'force_update': True})
assert_equal(mock_on_preprint_updated.call_count, 2)
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_save_published_subject_change_called(self, mock_on_preprint_updated):
self.preprint.is_published = True
self.preprint.set_subjects([[self.subject_two._id]], auth=self.auth)
assert mock_on_preprint_updated.called
call_args, call_kwargs = mock_on_preprint_updated.call_args
assert 'old_subjects' in mock_on_preprint_updated.call_args[1]
assert call_kwargs.get('old_subjects') == [self.subject.id]
assert [self.subject.id] in mock_on_preprint_updated.call_args[1].values()
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_save_unpublished_subject_change_not_called(self, mock_on_preprint_updated):
self.preprint.set_subjects([[self.subject_two._id]], auth=self.auth)
assert not mock_on_preprint_updated.called
@mock.patch('website.preprints.tasks.requests')
@mock.patch('website.preprints.tasks.settings.SHARE_URL', 'ima_real_website')
def test_send_to_share_is_true(self, mock_requests):
self.preprint.provider.access_token = 'Snowmobiling'
self.preprint.provider.save()
on_preprint_updated(self.preprint._id)
assert mock_requests.post.called
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_preprint_contributor_changes_updates_preprints_share(self, mock_on_preprint_updated):
preprint = PreprintFactory(is_published=True, creator=self.admin)
assert mock_on_preprint_updated.call_count == 2
user = AuthUserFactory()
preprint.primary_file = self.file
preprint.add_contributor(contributor=user, auth=self.auth, save=True)
assert mock_on_preprint_updated.call_count == 5
preprint.move_contributor(contributor=user, index=0, auth=self.auth, save=True)
assert mock_on_preprint_updated.call_count == 7
data = [{'id': self.admin._id, 'permissions': ADMIN, 'visible': True},
{'id': user._id, 'permissions': WRITE, 'visible': False}]
preprint.manage_contributors(data, auth=self.auth, save=True)
assert mock_on_preprint_updated.call_count == 9
preprint.update_contributor(user, READ, True, auth=self.auth, save=True)
assert mock_on_preprint_updated.call_count == 11
preprint.remove_contributor(contributor=user, auth=self.auth)
assert mock_on_preprint_updated.call_count == 13
@mock.patch('website.preprints.tasks.settings.SHARE_URL', 'a_real_url')
@mock.patch('website.preprints.tasks._async_update_preprint_share.delay')
@mock.patch('website.preprints.tasks.requests')
def test_call_async_update_on_500_failure(self, requests, mock_async):
self.preprint.provider.access_token = 'Snowmobiling'
requests.post.return_value = MockShareResponse(501)
update_preprint_share(self.preprint)
assert mock_async.called
@mock.patch('website.preprints.tasks.settings.SHARE_URL', 'a_real_url')
@mock.patch('website.preprints.tasks.send_desk_share_preprint_error')
@mock.patch('website.preprints.tasks._async_update_preprint_share.delay')
@mock.patch('website.preprints.tasks.requests')
def test_no_call_async_update_on_400_failure(self, requests, mock_async, mock_mail):
self.preprint.provider.access_token = 'Snowmobiling'
requests.post.return_value = MockShareResponse(400)
update_preprint_share(self.preprint)
assert not mock_async.called
assert mock_mail.called
class TestPreprintConfirmationEmails(OsfTestCase):
def setUp(self):
super(TestPreprintConfirmationEmails, self).setUp()
self.user = AuthUserFactory()
self.write_contrib = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.preprint = PreprintFactory(creator=self.user, project=self.project, provider=PreprintProviderFactory(_id='osf'), is_published=False)
self.preprint.add_contributor(self.write_contrib, permissions=WRITE)
self.preprint_branded = PreprintFactory(creator=self.user, is_published=False)
@mock.patch('website.mails.send_mail')
def test_creator_gets_email(self, send_mail):
self.preprint.set_published(True, auth=Auth(self.user), save=True)
domain = self.preprint.provider.domain or settings.DOMAIN
send_mail.assert_called_with(
self.user.email,
mails.REVIEWS_SUBMISSION_CONFIRMATION,
user=self.user,
mimetype='html',
provider_url='{}preprints/{}'.format(domain, self.preprint.provider._id),
domain=domain,
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
workflow=None,
reviewable=self.preprint,
is_creator=True,
provider_name=self.preprint.provider.name,
no_future_emails=[],
logo=settings.OSF_PREPRINTS_LOGO,
)
assert_equals(send_mail.call_count, 1)
self.preprint_branded.set_published(True, auth=Auth(self.user), save=True)
assert_equals(send_mail.call_count, 2)
class TestPreprintOsfStorage(OsfTestCase):
def setUp(self):
super(TestPreprintOsfStorage, self).setUp()
self.user = UserFactory()
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id).decode()
self.preprint = PreprintFactory(creator=self.user)
self.JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
def test_create_log(self):
action = 'file_added'
path = 'pizza.nii'
nlog = self.preprint.logs.count()
self.preprint.create_waterbutler_log(
auth=Auth(user=self.user),
action=action,
payload={'metadata': {'path': path, 'materialized': path, 'kind': 'file'}},
)
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlog + 1)
assert_equal(
self.preprint.logs.latest().action,
'{0}_{1}'.format('osf_storage', action),
)
assert_equal(
self.preprint.logs.latest().params['path'],
path
)
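# Helper: builds a WaterButler-style 'get_auth' URL. The payload mirrors
# what WaterButler sends: a JWT signed with WATERBUTLER_JWT_SECRET, wrapped
# in a JWE envelope using the derived JWE_KEY, with a short expiry.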
def build_url(self, **kwargs):
options = {'payload': jwe.encrypt(jwt.encode({'data': dict(dict(
action='download',
nid=self.preprint._id,
provider='osfstorage'), **kwargs),
'exp': timezone.now() + datetime.timedelta(seconds=500),
}, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), self.JWE_KEY)}
return self.preprint.api_url_for('get_auth', **options)
def test_auth_download(self):
url = self.build_url(cookie=self.cookie)
res = self.app.get(url, auth=Auth(user=self.user))
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['credentials'], self.preprint.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.preprint.serialize_waterbutler_settings())
expected_url = furl.furl(self.preprint.api_url_for('create_waterbutler_log', _absolute=True, _internal=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
class TestCheckPreprintAuth(OsfTestCase):
def setUp(self):
super(TestCheckPreprintAuth, self).setUp()
self.user = AuthUserFactory()
self.preprint = PreprintFactory(creator=self.user)
def test_has_permission(self):
res = views.check_access(self.preprint, Auth(user=self.user), 'upload', None)
assert_true(res)
def test_not_has_permission_read_published(self):
res = views.check_access(self.preprint, Auth(), 'download', None)
assert_true(res)
def test_not_has_permission_logged_in(self):
user2 = AuthUserFactory()
self.preprint.is_published = False
self.preprint.save()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.preprint, Auth(user=user2), 'download', None)
assert_equal(exc_info.exception.code, 403)
def test_not_has_permission_not_logged_in(self):
self.preprint.is_published = False
self.preprint.save()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.preprint, Auth(), 'download', None)
assert_equal(exc_info.exception.code, 401)
def test_check_access_withdrawn_preprint_file(self):
self.preprint.date_withdrawn = timezone.now()
self.preprint.save()
# Unauthenticated
with assert_raises(HTTPError) as exc_info:
views.check_access(self.preprint, Auth(), 'download', None)
assert_equal(exc_info.exception.code, 401)
# Noncontributor
user2 = AuthUserFactory()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.preprint, Auth(user2), 'download', None)
assert_equal(exc_info.exception.code, 403)
# Read contributor
self.preprint.add_contributor(user2, READ, save=True)
with assert_raises(HTTPError) as exc_info:
views.check_access(self.preprint, Auth(user2), 'download', None)
assert_equal(exc_info.exception.code, 403)
# Admin contributor
with assert_raises(HTTPError) as exc_info:
views.check_access(self.preprint, Auth(self.user), 'download', None)
assert_equal(exc_info.exception.code, 403)
class TestPreprintOsfStorageLogs(OsfTestCase):
def setUp(self):
super(TestPreprintOsfStorageLogs, self).setUp()
self.user = AuthUserFactory()
self.user_non_contrib = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.preprint = PreprintFactory(creator=self.user)
self.file = OsfStorageFile.create(
target=self.preprint,
path='/testfile',
_id='testfile',
name='testfile',
materialized_path='/testfile'
)
self.file.save()
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
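# Helper: builds a payload the way WaterButler would, signing the options
# with the default HMAC signer so that create_waterbutler_log accepts it.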
def build_payload(self, metadata, **kwargs):
options = dict(
auth={'id': self.user._id},
action='create',
provider='osfstorage',
metadata=metadata,
time=time.time() + 1000,
)
options.update(kwargs)
options = {
key: value
for key, value in options.items()
if value is not None
}
message, signature = signing.default_signer.sign_payload(options)
return {
'payload': message,
'signature': signature,
}
def test_add_log(self):
path = 'pizza'
url = self.preprint.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'file', 'path': path})
nlogs = self.preprint.logs.count()
self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlogs + 1)
def test_add_log_missing_args(self):
path = 'pizza'
url = self.preprint.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'file', 'path': path}, auth=None)
nlogs = self.preprint.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlogs)
def test_add_log_no_user(self):
path = 'pizza'
url = self.preprint.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'file', 'path': path}, auth={'id': None})
nlogs = self.preprint.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlogs)
def test_add_log_bad_action(self):
path = 'pizza'
url = self.preprint.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'file', 'path': path}, action='dance')
nlogs = self.preprint.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlogs)
def test_action_file_rename(self):
url = self.preprint.api_url_for('create_waterbutler_log')
payload = self.build_payload(
action='rename',
metadata={
'path': 'foo',
'nid': self.preprint._id,
'materialized': 'foo',
'kind': 'file'
},
source={
'materialized': 'foo',
'provider': 'osfstorage',
'node': {'_id': self.preprint._id},
'name': 'new.txt',
'kind': 'file',
},
destination={
'path': 'foo',
'materialized': 'foo',
'provider': 'osfstorage',
'node': {'_id': self.preprint._id},
'name': 'old.txt',
'kind': 'file',
},
)
self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'}
)
self.preprint.reload()
assert_equal(
self.preprint.logs.latest().action,
'osf_storage_addon_file_renamed',
)
def test_action_downloads_contrib(self):
url = self.preprint.api_url_for('create_waterbutler_log')
download_actions = ('download_file', 'download_zip')
wb_url = settings.WATERBUTLER_URL + '?version=1'
for action in download_actions:
payload = self.build_payload(metadata={'path': '/testfile',
'nid': self.preprint._id},
action_meta={'is_mfr_render': False},
request_meta={'url': wb_url},
action=action)
nlogs = self.preprint.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=False,
)
assert_equal(res.status_code, 200)
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlogs)
def test_add_file_osfstorage_log(self):
path = 'pizza'
url = self.preprint.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'file', 'path': path})
nlogs = self.preprint.logs.count()
self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlogs + 1)
assert('urls' in self.preprint.logs.filter(action='osf_storage_file_added')[0].params)
def test_add_folder_osfstorage_log(self):
path = 'pizza'
url = self.preprint.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'folder', 'path': path})
nlogs = self.preprint.logs.count()
self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.preprint.reload()
assert_equal(self.preprint.logs.count(), nlogs + 1)
assert('urls' not in self.preprint.logs.filter(action='osf_storage_file_added')[0].params)
@pytest.mark.django_db
class TestWithdrawnPreprint:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def unpublished_preprint_pre_mod(self):
return PreprintFactory(provider__reviews_workflow='pre-moderation', is_published=False)
@pytest.fixture()
def preprint_pre_mod(self):
return PreprintFactory(provider__reviews_workflow='pre-moderation')
@pytest.fixture()
def unpublished_preprint_post_mod(self):
return PreprintFactory(provider__reviews_workflow='post-moderation', is_published=False)
@pytest.fixture()
def preprint_post_mod(self):
return PreprintFactory(provider__reviews_workflow='post-moderation')
@pytest.fixture()
def preprint(self):
return PreprintFactory()
@pytest.fixture()
def admin(self):
admin = AuthUserFactory()
osf_admin = Group.objects.get(name='osf_admin')
admin.groups.add(osf_admin)
return admin
@pytest.fixture()
def moderator(self, preprint_pre_mod, preprint_post_mod):
moderator = AuthUserFactory()
preprint_pre_mod.provider.add_to_group(moderator, 'moderator')
preprint_pre_mod.provider.save()
preprint_post_mod.provider.add_to_group(moderator, 'moderator')
preprint_post_mod.provider.save()
return moderator
@pytest.fixture()
def make_withdrawal_request(self, user):
def withdrawal_request(target):
request = PreprintRequestFactory(
creator=user,
target=target,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value)
request.run_submit(user)
return request
return withdrawal_request
@pytest.fixture()
def crossref_client(self):
return crossref.CrossRefClient(base_url='http://test.osf.crossref.test')
def test_withdrawn_preprint(self, user, preprint, unpublished_preprint_pre_mod, unpublished_preprint_post_mod):
# test_ever_public
# non-moderated
assert preprint.ever_public
# pre-mod
unpublished_preprint_pre_mod.run_submit(user)
assert not unpublished_preprint_pre_mod.ever_public
unpublished_preprint_pre_mod.run_reject(user, 'it')
unpublished_preprint_pre_mod.reload()
assert not unpublished_preprint_pre_mod.ever_public
unpublished_preprint_pre_mod.run_accept(user, 'it')
unpublished_preprint_pre_mod.reload()
assert unpublished_preprint_pre_mod.ever_public
# post-mod
unpublished_preprint_post_mod.run_submit(user)
assert unpublished_preprint_post_mod.ever_public
# test_cannot_set_ever_public_to_False
unpublished_preprint_pre_mod.ever_public = False
unpublished_preprint_post_mod.ever_public = False
preprint.ever_public = False
with pytest.raises(ValidationError):
preprint.save()
with pytest.raises(ValidationError):
unpublished_preprint_pre_mod.save()
with pytest.raises(ValidationError):
unpublished_preprint_post_mod.save()
def test_crossref_status_is_updated(self, make_withdrawal_request, preprint, preprint_post_mod, preprint_pre_mod, moderator, admin, crossref_client):
# test_non_moderated_preprint
assert preprint.verified_publishable
assert crossref_client.get_status(preprint) == 'public'
withdrawal_request = make_withdrawal_request(preprint)
withdrawal_request.run_accept(admin, withdrawal_request.comment)
assert preprint.is_retracted
assert preprint.verified_publishable
assert crossref_client.get_status(preprint) == 'unavailable'
# test_post_moderated_preprint
assert preprint_post_mod.verified_publishable
assert crossref_client.get_status(preprint_post_mod) == 'public'
withdrawal_request = make_withdrawal_request(preprint_post_mod)
withdrawal_request.run_accept(moderator, withdrawal_request.comment)
assert preprint_post_mod.is_retracted
assert preprint_post_mod.verified_publishable
assert crossref_client.get_status(preprint_post_mod) == 'unavailable'
# test_pre_moderated_preprint
assert preprint_pre_mod.verified_publishable
assert crossref_client.get_status(preprint_pre_mod) == 'public'
withdrawal_request = make_withdrawal_request(preprint_pre_mod)
withdrawal_request.run_accept(moderator, withdrawal_request.comment)
assert preprint_pre_mod.is_retracted
assert preprint_pre_mod.verified_publishable
assert crossref_client.get_status(preprint_pre_mod) == 'unavailable'
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
short_description: Add or remove Jenkins plugin
description:
- Ansible module which helps to manage Jenkins plugins.
options:
group:
type: str
description:
- Name of the Jenkins group on the OS.
default: jenkins
jenkins_home:
type: path
description:
- Home directory of the Jenkins user.
default: /var/lib/jenkins
mode:
type: raw
description:
- File mode applied on versioned plugins.
default: '0644'
name:
type: str
description:
- Plugin name.
required: yes
owner:
type: str
description:
- Name of the Jenkins user on the OS.
default: jenkins
state:
type: str
description:
- Desired plugin state.
- If C(latest) is set, a check for a newer version is performed on every
run. This is suitable for keeping the plugin up to date.
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
timeout:
type: int
description:
- Server connection timeout in seconds.
default: 30
updates_expiration:
type: int
description:
- Number of seconds after which a new copy of the I(update-center.json)
file is downloaded. This is used to avoid the need to download the
plugin to calculate its checksum when C(latest) is specified.
- Set it to C(0) if no cache file should be used. In that case, the
plugin file will always be downloaded to calculate its checksum when
C(latest) is specified.
default: 86400
updates_url:
type: list
elements: str
description:
- A list of base URL(s) from which to retrieve I(update-center.json) and the plugin files directly.
- This can be a list since community.general 3.3.0.
default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io']
update_json_url_segment:
type: list
elements: str
description:
- A list of URL segment(s) to retrieve the update center json file from.
default: ['update-center.json', 'updates/update-center.json']
version_added: 3.3.0
latest_plugins_url_segments:
type: list
elements: str
description:
- Path inside the I(updates_url) from which to get the latest plugins.
default: ['latest']
version_added: 3.3.0
versioned_plugins_url_segments:
type: list
elements: str
description:
- Path inside the I(updates_url) from which to get a specific version of a plugin.
default: ['download/plugins', 'plugins']
version_added: 3.3.0
url:
type: str
description:
- URL of the Jenkins server.
default: http://localhost:8080
version:
type: str
description:
- Plugin version number.
- If this option is specified, all plugin dependencies must be installed
manually.
- It might take longer to verify that the correct version is installed.
This is especially true if a specific version number is specified.
- Quote the version to prevent the value from being interpreted as a float.
For example, if C(1.20) were unquoted, it would become C(1.2).
with_dependencies:
description:
- Defines whether to install plugin dependencies.
- This option takes effect only if the I(version) is not defined.
type: bool
default: yes
notes:
- Plugin installation should be run as root or as the user that owns the
plugin files on disk. The API-based installation, which requires only the
Web UI credentials, is performed only if the plugin is not installed yet
and no version is specified.
- It's necessary to notify the handler or call the I(service) module to
restart the Jenkins service after a new plugin is installed.
- Pinning works only if the plugin is installed and the Jenkins service was
successfully restarted after the plugin installation.
- It is not possible to run the module remotely by changing the I(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs, as it needs direct access to the plugin files.
extends_documentation_fragment:
- url
- files
'''
EXAMPLES = '''
- name: Install plugin
community.general.jenkins_plugin:
name: build-pipeline-plugin
- name: Install plugin without its dependencies
community.general.jenkins_plugin:
name: build-pipeline-plugin
with_dependencies: no
- name: Make sure the plugin is always up-to-date
community.general.jenkins_plugin:
name: token-macro
state: latest
- name: Install specific version of the plugin
community.general.jenkins_plugin:
name: token-macro
version: "1.15"
- name: Pin the plugin
community.general.jenkins_plugin:
name: token-macro
state: pinned
- name: Unpin the plugin
community.general.jenkins_plugin:
name: token-macro
state: unpinned
- name: Enable the plugin
community.general.jenkins_plugin:
name: token-macro
state: enabled
- name: Disable the plugin
community.general.jenkins_plugin:
name: token-macro
state: disabled
- name: Uninstall plugin
community.general.jenkins_plugin:
name: build-pipeline-plugin
state: absent
#
# Example of how to authenticate
#
- name: Install plugin
community.general.jenkins_plugin:
name: build-pipeline-plugin
url_username: admin
url_password: p4ssw0rd
url: http://localhost:8888
#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
hosts: jenkins-master
vars:
my_jenkins_plugins:
token-macro:
enabled: yes
build-pipeline-plugin:
version: "1.4.9"
pinned: no
enabled: yes
tasks:
- name: Install plugins without a specific version
community.general.jenkins_plugin:
name: "{{ item.key }}"
register: my_jenkins_plugin_unversioned
when: >
'version' not in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Install plugins with a specific version
community.general.jenkins_plugin:
name: "{{ item.key }}"
version: "{{ item.value['version'] }}"
register: my_jenkins_plugin_versioned
when: >
'version' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Initiate the fact
ansible.builtin.set_fact:
jenkins_restart_required: no
- name: Check if restart is required by any of the versioned plugins
ansible.builtin.set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_versioned.results }}"
- name: Check if restart is required by any of the unversioned plugins
ansible.builtin.set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_unversioned.results }}"
- name: Restart Jenkins if required
ansible.builtin.service:
name: jenkins
state: restarted
when: jenkins_restart_required
- name: Wait for Jenkins to start up
ansible.builtin.uri:
url: http://localhost:8080
status_code: 200
timeout: 5
register: jenkins_service_status
# Keep trying for 5 mins in 5 sec intervals
retries: 60
delay: 5
until: >
'status' in jenkins_service_status and
jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
ansible.builtin.set_fact:
jenkins_restart_required: no
when: jenkins_restart_required
- name: Plugin pinning
community.general.jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
when: >
'pinned' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Plugin enabling
community.general.jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
when: >
'enabled' in item.value
with_dict: "{{ my_jenkins_plugins }}"
'''
RETURN = '''
plugin:
description: plugin name
returned: success
type: str
sample: build-pipeline-plugin
state:
description: state of the target, after execution
returned: success
type: str
sample: "present"
'''
from ansible.module_utils.basic import AnsibleModule, to_bytes
from ansible.module_utils.six.moves import http_cookiejar as cookiejar
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common.text.converters import to_native
import base64
import hashlib
import io
import json
import os
import tempfile
import time
class FailedInstallingWithPluginManager(Exception):
pass
class JenkinsPlugin(object):
def __init__(self, module):
# To be able to call fail_json
self.module = module
# Shortcuts for the params
self.params = self.module.params
self.url = self.params['url']
self.timeout = self.params['timeout']
# Crumb
self.crumb = {}
# Cookie jar for crumb session
self.cookies = None
if self._csrf_enabled():
self.cookies = cookiejar.LWPCookieJar()
self.crumb = self._get_crumb()
# Get list of installed plugins
self._get_installed_plugins()
def _csrf_enabled(self):
csrf_data = self._get_json_data(
"%s/%s" % (self.url, "api/json"), 'CSRF')
if 'useCrumbs' not in csrf_data:
self.module.fail_json(
msg="Required fields not found in the Crumbs response.",
details=csrf_data)
return csrf_data['useCrumbs']
def _get_json_data(self, url, what, **kwargs):
# Get the JSON data
r = self._get_url_data(url, what, **kwargs)
# Parse the JSON data
try:
json_data = json.loads(to_native(r.read()))
except Exception as e:
self.module.fail_json(
msg="Cannot parse %s JSON data." % what,
details=to_native(e))
return json_data
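# Try each candidate URL in order and return the first successful (HTTP 200)
# response; the module fails only if every URL fails.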
def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, **kwargs):
# Compose default messages
if msg_status is None:
msg_status = "Cannot get %s" % what
if msg_exception is None:
msg_exception = "Retrieval of %s failed." % what
errors = {}
for url in urls:
err_msg = None
try:
self.module.debug("fetching url: %s" % url)
response, info = fetch_url(
self.module, url, timeout=self.timeout, cookies=self.cookies,
headers=self.crumb, **kwargs)
if info['status'] == 200:
return response
else:
err_msg = ("%s. fetching url %s failed. response code: %s" % (msg_status, url, info['status']))
if info['status'] > 400: # extend error message
err_msg = "%s. response body: %s" % (err_msg, info['body'])
except Exception as e:
err_msg = "%s. fetching url %s failed. error msg: %s" % (msg_status, url, to_native(e))
finally:
if err_msg is not None:
self.module.debug(err_msg)
errors[url] = err_msg
# failed on all urls
self.module.fail_json(msg=msg_exception, details=errors)
def _get_url_data(
self, url, what=None, msg_status=None, msg_exception=None,
dont_fail=False, **kwargs):
# Compose default messages
if msg_status is None:
msg_status = "Cannot get %s" % what
if msg_exception is None:
msg_exception = "Retrieval of %s failed." % what
# Get the URL data
try:
response, info = fetch_url(
self.module, url, timeout=self.timeout, cookies=self.cookies,
headers=self.crumb, **kwargs)
if info['status'] != 200:
if dont_fail:
raise FailedInstallingWithPluginManager(info['msg'])
else:
self.module.fail_json(msg=msg_status, details=info['msg'])
except Exception as e:
if dont_fail:
raise FailedInstallingWithPluginManager(e)
else:
self.module.fail_json(msg=msg_exception, details=to_native(e))
return response
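# Fetch the CSRF crumb from the crumb issuer. The crumb is sent back as an
# extra HTTP header on every subsequent request, e.g. {'Jenkins-Crumb':
# '<token>'}; the header name comes from 'crumbRequestField'.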
def _get_crumb(self):
crumb_data = self._get_json_data(
"%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
ret = {
crumb_data['crumbRequestField']: crumb_data['crumb']
}
else:
self.module.fail_json(
msg="Required fields not found in the Crum response.",
details=crumb_data)
return ret
def _get_installed_plugins(self):
plugins_data = self._get_json_data(
"%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
'list of plugins')
# Check if we got valid data
if 'plugins' not in plugins_data:
self.module.fail_json(msg="No valid plugin data found.")
# Create final list of installed/pinned plugins
self.is_installed = False
self.is_pinned = False
self.is_enabled = False
for p in plugins_data['plugins']:
if p['shortName'] == self.params['name']:
self.is_installed = True
if p['pinned']:
self.is_pinned = True
if p['enabled']:
self.is_enabled = True
break
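# Install via the Jenkins script console: a small Groovy snippet asks the
# update center to deploy the plugin (and, optionally, its needed
# dependencies). This path needs only web UI credentials, not filesystem
# access to the Jenkins home directory.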
def _install_with_plugin_manager(self):
if not self.module.check_mode:
# Install the plugin (with dependencies)
install_script = (
'd = Jenkins.instance.updateCenter.getPlugin("%s")'
'.deploy(); d.get();' % self.params['name'])
if self.params['with_dependencies']:
install_script = (
'Jenkins.instance.updateCenter.getPlugin("%s")'
'.getNeededDependencies().each{it.deploy()}; %s' % (
self.params['name'], install_script))
script_data = {
'script': install_script
}
data = urlencode(script_data)
# Send the installation request
r = self._get_url_data(
"%s/scriptText" % self.url,
msg_status="Cannot install plugin.",
msg_exception="Plugin installation has failed.",
data=data,
dont_fail=True)
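# The update center deploys the plugin as a .jpi file; drop any leftover
# .hpi of the same name so the plugin is not picked up twice.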
hpi_file = '%s/plugins/%s.hpi' % (
self.params['jenkins_home'],
self.params['name'])
if os.path.isfile(hpi_file):
os.remove(hpi_file)
def install(self):
changed = False
plugin_file = (
'%s/plugins/%s.jpi' % (
self.params['jenkins_home'],
self.params['name']))
if not self.is_installed and self.params['version'] in [None, 'latest']:
try:
self._install_with_plugin_manager()
changed = True
except FailedInstallingWithPluginManager: # Fallback to manually downloading the plugin
pass
if not changed:
# Check if the plugin directory exists
if not os.path.isdir(self.params['jenkins_home']):
self.module.fail_json(
msg="Jenkins home directory doesn't exist.")
checksum_old = None
if os.path.isfile(plugin_file):
# Make the checksum of the currently installed plugin
with open(plugin_file, 'rb') as plugin_fh:
plugin_content = plugin_fh.read()
checksum_old = hashlib.sha1(plugin_content).hexdigest()
if self.params['version'] in [None, 'latest']:
# Take latest version
plugin_urls = self._get_latest_plugin_urls()
else:
# Take specific version
plugin_urls = self._get_versioned_plugin_urls()
if (
self.params['updates_expiration'] == 0 or
self.params['version'] not in [None, 'latest'] or
checksum_old is None):
# Download the plugin file directly
r = self._download_plugin(plugin_urls)
# Write downloaded plugin into file if checksums don't match
if checksum_old is None:
# No previously installed plugin
if not self.module.check_mode:
self._write_file(plugin_file, r)
changed = True
else:
# Read the downloaded data for the SHA-1 checksum
data = r.read()
# Make new checksum
checksum_new = hashlib.sha1(data).hexdigest()
# If the checksum is different from the currently installed
# plugin, store the new plugin
if checksum_old != checksum_new:
if not self.module.check_mode:
self._write_file(plugin_file, data)
changed = True
elif self.params['version'] == 'latest':
# Check for update from the updates JSON file
plugin_data = self._download_updates()
# If the latest version changed, download it
if checksum_old != to_bytes(plugin_data['sha1']):
if not self.module.check_mode:
r = self._download_plugin(plugin_urls)
self._write_file(plugin_file, r)
changed = True
# Change file attributes if needed
if os.path.isfile(plugin_file):
params = {
'dest': plugin_file
}
params.update(self.params)
file_args = self.module.load_file_common_arguments(params)
if not self.module.check_mode:
# Not sure how to run this in the check mode
changed = self.module.set_fs_attributes_if_different(
file_args, changed)
else:
# See the comment above
changed = True
return changed
def _get_latest_plugin_urls(self):
urls = []
for base_url in self.params['updates_url']:
for update_segment in self.params['latest_plugins_url_segments']:
urls.append("{0}/{1}/{2}.hpi".format(base_url, update_segment, self.params['name']))
return urls
def _get_versioned_plugin_urls(self):
urls = []
for base_url in self.params['updates_url']:
for versioned_segment in self.params['versioned_plugins_url_segments']:
urls.append("{0}/{1}/{2}/{3}/{2}.hpi".format(base_url, versioned_segment, self.params['name'], self.params['version']))
return urls
def _get_update_center_urls(self):
urls = []
for base_url in self.params['updates_url']:
for update_json in self.params['update_json_url_segment']:
urls.append("{0}/{1}".format(base_url, update_json))
return urls
def _download_updates(self):
updates_filename = 'jenkins-plugin-cache.json'
updates_dir = os.path.expanduser('~/.ansible/tmp')
updates_file = "%s/%s" % (updates_dir, updates_filename)
download_updates = True
# Check if we need to download new updates file
if os.path.isfile(updates_file):
# Get timestamp when the file was changed last time
ts_file = os.stat(updates_file).st_mtime
ts_now = time.time()
if ts_now - ts_file < self.params['updates_expiration']:
download_updates = False
updates_file_orig = updates_file
# Download the updates file if needed
if download_updates:
urls = self._get_update_center_urls()
# Get the data
r = self._get_urls_data(
urls,
msg_status="Remote updates not found.",
msg_exception="Updates download failed.")
# Write the updates file
update_fd, updates_file = tempfile.mkstemp()
os.write(update_fd, r.read())
try:
os.close(update_fd)
except IOError as e:
self.module.fail_json(
msg="Cannot close the tmp updates file %s." % updates_file,
details=to_native(e))
# Open the updates file
try:
f = io.open(updates_file, encoding='utf-8')
except IOError as e:
self.module.fail_json(
msg="Cannot open temporal updates file.",
details=to_native(e))
i = 0
for line in f:
# Read only the second line
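# (the update-center JSON is wrapped in a JavaScript callback, so the
# usable JSON payload sits on the second line of the file)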
if i == 1:
try:
data = json.loads(line)
except Exception as e:
self.module.fail_json(
msg="Cannot load JSON data from the tmp updates file.",
details=to_native(e))
break
i += 1
# Move the updates file to the right place if we could read it
if download_updates:
# Make sure the destination directory exists
if not os.path.isdir(updates_dir):
try:
os.makedirs(updates_dir, int('0700', 8))
except OSError as e:
self.module.fail_json(
msg="Cannot create temporal directory.",
details=to_native(e))
self.module.atomic_move(updates_file, updates_file_orig)
# Check if we have the plugin data available
if 'plugins' not in data or self.params['name'] not in data['plugins']:
self.module.fail_json(
msg="Cannot find plugin data in the updates file.")
return data['plugins'][self.params['name']]
def _download_plugin(self, plugin_urls):
# Download the plugin
return self._get_urls_data(
plugin_urls,
msg_status="Plugin not found.",
msg_exception="Plugin download failed.")
def _write_file(self, f, data):
# Store the plugin into a temp file and then move it
tmp_f_fd, tmp_f = tempfile.mkstemp()
if isinstance(data, (text_type, binary_type)):
os.write(tmp_f_fd, data)
else:
os.write(tmp_f_fd, data.read())
try:
os.close(tmp_f_fd)
except IOError as e:
self.module.fail_json(
msg='Cannot close the temporary plugin file %s.' % tmp_f,
details=to_native(e))
# Move the file into the right place
self.module.atomic_move(tmp_f, f)
def uninstall(self):
changed = False
# Perform the action
if self.is_installed:
if not self.module.check_mode:
self._pm_query('doUninstall', 'Uninstallation')
changed = True
return changed
def pin(self):
return self._pinning('pin')
def unpin(self):
return self._pinning('unpin')
def _pinning(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'pin' and not self.is_pinned or
action == 'unpin' and self.is_pinned):
# Perform the action
if not self.module.check_mode:
self._pm_query(action, "%sning" % action.capitalize())
changed = True
return changed
def enable(self):
return self._enabling('enable')
def disable(self):
return self._enabling('disable')
def _enabling(self, action):
changed = False
# Check if the plugin is enabled/disabled
if (
action == 'enable' and not self.is_enabled or
action == 'disable' and self.is_enabled):
# Perform the action
if not self.module.check_mode:
self._pm_query(
"make%sd" % action.capitalize(),
"%sing" % action[:-1].capitalize())
changed = True
return changed
def _pm_query(self, action, msg):
url = "%s/pluginManager/plugin/%s/%s" % (
self.params['url'], self.params['name'], action)
# Send the request
self._get_url_data(
url,
msg_status="Plugin not found. %s" % url,
msg_exception="%s has failed." % msg,
method="POST")
def main():
# Module arguments
argument_spec = url_argument_spec()
argument_spec.update(
group=dict(type='str', default='jenkins'),
jenkins_home=dict(type='path', default='/var/lib/jenkins'),
mode=dict(default='0644', type='raw'),
name=dict(type='str', required=True),
owner=dict(type='str', default='jenkins'),
state=dict(
choices=[
'present',
'absent',
'pinned',
'unpinned',
'enabled',
'disabled',
'latest'],
default='present'),
timeout=dict(default=30, type="int"),
updates_expiration=dict(default=86400, type="int"),
updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io',
'http://mirrors.jenkins.io']),
update_json_url_segment=dict(type="list", elements="str", default=['update-center.json',
'updates/update-center.json']),
latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']),
versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']),
url=dict(default='http://localhost:8080'),
url_password=dict(no_log=True),
version=dict(),
with_dependencies=dict(default=True, type='bool'),
)
# Module settings
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Force basic authentication
module.params['force_basic_auth'] = True
# Convert timeout to float
try:
module.params['timeout'] = float(module.params['timeout'])
except ValueError as e:
module.fail_json(
msg='Cannot convert %s to float.' % module.params['timeout'],
details=to_native(e))
# Set version to latest if state is latest
if module.params['state'] == 'latest':
module.params['state'] = 'present'
module.params['version'] = 'latest'
# Create some shortcuts
name = module.params['name']
state = module.params['state']
# Initial change state of the task
changed = False
# Instantiate the JenkinsPlugin object
jp = JenkinsPlugin(module)
# Perform action depending on the requested state
if state == 'present':
changed = jp.install()
elif state == 'absent':
changed = jp.uninstall()
elif state == 'pinned':
changed = jp.pin()
elif state == 'unpinned':
changed = jp.unpin()
elif state == 'enabled':
changed = jp.enable()
elif state == 'disabled':
changed = jp.disable()
# Print status of the change
module.exit_json(changed=changed, plugin=name, state=state)
if __name__ == '__main__':
main()
|
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xlrd
from openpyxl import load_workbook
import re
import collections
import json
LAB_URLS = {'adolfo-ferrando':['Lab Web Site', 'http://ferrandolab.org/'],
'christine-chio':['Lab Web Site', 'https://chiolab.com/'],
'katia-basso':['Lab Web Site', 'https://rdf-lab.org/']}
def sort_names(names):
name_map = collections.defaultdict(lambda: collections.defaultdict(list))
for name in names:
parts = name.split(' ')
last = parts[-1]
first = ' '.join(parts[:-1])
name_map[last][first].append(name)
ret = []
for last in sorted(name_map):
for first in sorted(name_map[last]):
ret.extend(name_map[last][first])
return ret
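# A quick illustration of the ordering (hypothetical names), sorted by last
# name and then first name:
# sort_names(['Grace Hopper', 'Charles Babbage', 'Ada Lovelace'])
# -> ['Charles Babbage', 'Grace Hopper', 'Ada Lovelace']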
df = pd.read_csv('ICG_Directory_3.13.20.txt', sep='\t', keep_default_na=False)
id_map = {}
lab_map = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict()))
people_map = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(str))))
faclabmap = {}
new_lab = False
lab_group = 'Director'
for i in range(15, df.shape[0]):
name = df.iloc[i, 0]
name = name.strip()
name = re.sub(r' +', ' ', name)
if name == '':
new_lab = True
continue
nl = name.lower()
letters = []
if 'md' in nl:
letters.append('MD')
if 'phd' in nl:
letters.append('PhD')
name = re.sub(r' *, *', ',', name)
name = name.replace(',PhD', '')
name = name.replace(' PhD', '')
name = name.replace(',MD', '')
alt = ''
matcher = re.search(r'\((.+?)\)', name)
if matcher:
alt = matcher.group(1)
name = re.sub(r' *\(.+?\) *', '', name)
names = name.split(',')
first_name = names[-1]
last_name = names[0]
formatted_name = '{} {}'.format(first_name, last_name)
id = '{}-{}'.format(first_name.lower(), last_name.lower())
id = id.replace('\'', '')
id = id.replace(' ', '-')
id = id.replace('.', '')
if new_lab:
current_lab = {'id':id,
'name':formatted_name,
'Faculty':[],
'Research Staff':[],
'Graduate Students':[],
'Students':[]}
new_lab = False
if i == 16:
lab_group = 'Director'
else:
lab_group = 'Faculty' #'Principal Investigators'
id_map[formatted_name] = id
title = df.iloc[i, 1]
title = title.replace('Prof ', 'Professor ')
title = title.replace('Assoc ', 'Associate ')
title = title.replace('GRA', 'Graduate Student')
print(formatted_name, title, lab_group)
if 'Prof' in title:
# Map multiple faculty to the same lab if necessary
lab_map[lab_group][formatted_name] = current_lab
current_lab['Faculty'].append(formatted_name)
elif 'Graduate' in title:
current_lab['Graduate Students'].append(formatted_name)
elif 'Student' in title:
current_lab['Students'].append(formatted_name)
else:
current_lab['Research Staff'].append(formatted_name)
phone = df.iloc[i, 2]
phone = phone.replace('cell: ', '')
fax = df.iloc[i, 3]
email = df.iloc[i, 4]
if '@' not in email:
email = '{}@cumc.columbia.edu'.format(email)
uni = re.sub(r'@.+', '', email)
room = df.iloc[i, 5]
url = LAB_URLS.get(id, [])
#
# Create markdown
#
g = 'Research Staff'
#if 'Research' in title or 'Postdoc' in title or 'Instructor' in title:
# t = 'Research Staff'
if 'Student' in title:
g = 'Students'
if 'GRA' in title or 'Grad' in title:
g = 'Graduate Students'
if 'Professor' in title:
g = 'Faculty'
faclabmap[id] = current_lab['id']
# f = open('people/{}.md'.format(id), 'w')
# print('---', file=f)
# print('id: "{}"'.format(id), file=f)
# print('firstName: "{}"'.format(first_name), file=f)
# print('lastName: "{}"'.format(last_name), file=f)
# print('postNominalLetters: "{}"'.format(' '.join(letters)), file=f)
# print('titles: ["{}"]'.format(title), file=f)
# print('phone: "{}"'.format(phone), file=f)
# print('fax: "{}"'.format(fax), file=f)
# print('email: "{}"'.format(email), file=f)
# print('room: "{}"'.format(room), file=f)
# print('researchAreas: []', file=f)
# print('pubmed: "https://pubmed.ncbi.nlm.nih.gov/?term={}+{}%5BAuthor%5D"'.format(last_name, first_name[0]), file=f)
# if (len(url) > 0):
# print('url: "{}::{}"'.format(url[0], url[1]), file=f)
# else:
# print('url: ""', file=f)
# print('tags: ["page-format::short", "publication-format::recent"]'.format(url), file=f)
# print('---', file=f)
# f.close()
people_map[g][last_name][first_name] = id
#
# Sorted map of labs to people
#
GROUPS = ['Director', 'Faculty']
SUB_GROUPS = ['Faculty', 'Research Staff', 'Graduate Students', 'Students']
all_groups = []
all_lab_map = {}
for g in GROUPS:
group = {'name':g, 'people': []}
faculty_names = sort_names(lab_map[g])
for name in faculty_names:
# f = open('faculty/{}.md'.format(id_map[name]), 'w')
# print('---', file=f)
# print('id: "{}"'.format(id_map[name]), file=f)
# print('labId: "{}"'.format(lab_map[g][name]['id']), file=f)
# print('name: "{}"'.format(name), file=f)
# print('group: "{}"'.format(g), file=f)
# print('phone: "{}"'.format(''), file=f)
# print('fax: "{}"'.format(''), file=f)
# print('email: "{}"'.format(''), file=f)
# print('room: "{}"'.format(''), file=f)
# print('url: ""'.format(''), file=f)
# print('tags: []'.format(''), file=f)
# print('---', file=f)
# f.close()
lab = {'id':id_map[name], 'name':name, 'groups':[]} #, 'divisions': []}
for sg in SUB_GROUPS:
member_names = sort_names(lab_map[g][name][sg])
#division['people'] = [id_map[name] for name in member_names]
lab['groups'].append({'name':sg, 'people':[id_map[name] for name in member_names]})
#lab['divisions'].append(division)
#group['faculty'].append({'personId':id_map[name], 'labId':lab_map[g][name]['id']})
group['people'].append({'person':id_map[name], 'lab':faclabmap[id_map[name]]})
# Lab names only, not the faculty mapping to the lab
if name == lab_map[g][name]['name']:
all_lab_map[lab_map[g][name]['name']] = lab
all_groups.append(group)
all_labs = [all_lab_map[name] for name in sort_names(all_lab_map)]
with open('faculty.json', 'w') as f:
json.dump(all_groups, f, indent=2)
with open('labs.json', 'w') as f:
json.dump(all_labs, f, indent=2)
sorted_people = collections.defaultdict(list)
for sg in SUB_GROUPS:
for ln in sorted(people_map[sg]):
for fn in sorted(people_map[sg][ln]):
sorted_people[sg].append(people_map[sg][ln][fn])
with open('people.json', 'w') as f:
json.dump(sorted_people, f, indent=2)
|
import csv
with open('TESTE.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
next(csv_reader)  # skip the header row
print('{:^10}|{:^46}| {:^58}|'.format('NF-e', 'CHAVE NF-E', 'RAZÃO SOCIAL'))
print('{:^10}|{:^46}| {:^58}|'.format('-' * 8, '-' * 44 , '-' * 57))
for c in csv_reader:
print('{:^10}|{:^46}| {:<58}|'.format(c[4], c[15],c[7]))
|
import keras
from keras.layers import Activation
from keras.layers import Conv2D, BatchNormalization, Dense, Flatten, Reshape
class CNN:
def __init__(self):
self.batch_size = 64
self.epoch = 2
def train(self, x_train, y_train):
self.model = keras.models.Sequential()
self.model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same', input_shape=(9, 9, 1)))
self.model.add(BatchNormalization())
self.model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
self.model.add(BatchNormalization())
self.model.add(Conv2D(128, kernel_size=(1, 1), activation='relu', padding='same'))
self.model.add(Flatten())
self.model.add(Dense(81*9))
self.model.add(Reshape((-1, 9)))
self.model.add(Activation('softmax'))
adam = keras.optimizers.Adam(lr=0.001)
self.model.compile(loss='sparse_categorical_crossentropy', optimizer=adam)
print(self.model.fit(x_train, y_train, batch_size=self.batch_size, epochs=self.epoch))
self.model.save('cnn.model')
if __name__ == "__main__":
pass
|
import binascii
from flask import Flask, jsonify, render_template
import Crypto
import Crypto.Random
from Crypto.PublicKey import RSA
class Transaction:
def __init__(self, sender_address, sender_private_key, recipient_address, value):
self.sender_address = sender_address
self.sender_private_key = sender_private_key
self.recipient_address = recipient_address
self.value = value
app = Flask(__name__)
@app.route('/')
def index():
return render_template('wallet_blockchain.html')
@app.route('/generate/transaction', methods=['POST'])
def generate_transaction():
return 'Done!'
@app.route('/wallet/new')
def new_wallet():
random_gen = Crypto.Random.new().read
private_key = RSA.generate(1024, random_gen)
public_key = private_key.publickey()
response = {
'private_key': binascii.hexlify(private_key.export_key(format='DER')).decode('ascii'),
'public_key': binascii.hexlify(public_key.export_key(format='DER')).decode('ascii')
}
return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
# node to be replaced by urls
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=8081, type=int, help="port to listen to")
args = parser.parse_args()
port = args.port
app.run(host='127.0.0.1', port=port, debug=True)
|
import numpy as np
import argparse
def gene_conditional_probs(X,thresh):
CP = np.eye(X.shape[0])
for i in range(X.shape[0]):
a = (X[i] > thresh)
for j in range(X.shape[0]):
b = (X[j] > thresh)
CP[i,j] = np.average(a*b)/np.average(a)
return CP
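# A minimal sketch of what gene_conditional_probs computes, on a toy matrix
# (hypothetical values; CP[i, j] estimates P(gene j > thresh | gene i > thresh)):
def _toy_example():
    X = np.array([[12, 3, 15, 20],   # gene 0 exceeds thresh=10 in 3 of 4 cells
                  [11, 2, 18, 1]])   # gene 1 exceeds thresh=10 in 2 of 4 cells
    CP = gene_conditional_probs(X, thresh=10)
    # CP[0, 1] == 2/3: of the 3 cells where gene 0 is on, gene 1 is also on in 2
    return CP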
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--datapath', help='Path to data')
parser.add_argument('--savepath', help='Path to save')
parser.add_argument('--threshold', help='Expression threshold',type=float,default=10)
args,_ = parser.parse_known_args()
X = np.load(args.datapath)
cond_prob = gene_conditional_probs(X,args.threshold)
np.save('%s/conditional_probability.npy' % args.savepath,cond_prob)
|
from .server import *
# import gzip
# from locale import CODESET
# import urllib
# from datetime import datetime
# import pika, sys, os
# import nexradaws
# import pyart
# import base64
# import io
# import pytz
# from matplotlib import pyplot as plt
# from rest_framework.utils import json
# from rest_framework.views import APIView
# from rest_framework.response import Response
# # Create your views here.
# class PlotAPIView(APIView):
# def fetchData(self, files):
# templocation = 'aws_files'
# conn = nexradaws.NexradAwsInterface()
# central_timezone = pytz.timezone('US/Central')
# print(files)
# file_list = files.split("/")
# print("Files list=",file_list)
# file = urllib.request.urlretrieve('https://noaa-nexrad-level2.s3.amazonaws.com/' + files, "aws_files/"+file_list[4])
# if 'gz' in file_list[4]:
# filename = gzip.open('aws_files/' + file_list[4])
# else:
# filename = 'aws_files/' + file_list[4]
# return filename
# def createGraph(self, filename):
# filename = self.fetchData(filename)
# radar = pyart.io.read_nexrad_archive(filename)
# display = pyart.graph.RadarDisplay(radar)
# fig = plt.figure(figsize=(6, 5))
# # plot super resolution reflectivity
# ax = fig.add_subplot(111)
# display.plot('reflectivity', 0, title='NEXRAD Reflectivity', vmin=-32, vmax=64, colorbar_label='', ax=ax)
# display.plot_range_ring(radar.range['data'][-1] / 1000., ax=ax)
# display.set_limits(xlim=(-500, 500), ylim=(-500, 500), ax=ax)
# return plt
# #The input received from the MQ will be passed to this function
# def post(self, request):
# b64 = []
# json_str = json.dumps(request.data)
# json_data = json.loads(json_str)
# #uri = json_data['uri']
# for i in range(len(json_data)):
# fname = json_data[i]
# print(fname)
# plt = self.createGraph(fname)
# flike = io.BytesIO()
# plt.savefig(flike)
# b64.append(base64.b64encode(flike.getvalue()).decode())
# """resp = {
# 'id':json_data['entryId'],
# 'uri':b64
# }"""
# # Add the mq response code
# #return Response(resp)
# #Remove this line if not required
# return Response(b64,content_type='image/jpg')
# """"
# RABBITMQ CODE
# """""
# # establish connection with rabbitmq server
# connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
# channel = connection.channel()
# #create/ declare queue
# channel.queue_declare(queue='plot_queue')
# # subscribe a callback function to the queue. This callback function is called by the pika library
# # It does the work and sends the response back.
# def on_request(ch, method, props, body):
# # our message content will be in body
# n = int(body)
# print(" [.] fib(%s)" % n)
# # instead of fib here our plot graph function should be called and response should be saved here
# response = fib(n)
# ch.basic_publish(exchange='',
# routing_key=props.reply_to,
# properties=pika.BasicProperties(correlation_id = \
# props.correlation_id),
# body=str(response))
# ch.basic_ack(delivery_tag=method.delivery_tag)
# # We might want to run more than one server process.
# # In order to spread the load equally over multiple servers we need to set the prefetch_count setting.
# channel.basic_qos(prefetch_count=1)
# # We declare a callback "on_request" for basic_consume, the core of the RPC server. It's executed when the request is received.
# channel.basic_consume(queue='plot_queue', on_message_callback=on_request)
# print(" [x] Awaiting RPC requests")
# channel.start_consuming()
# print(' [*] Waiting for messages. To exit press CTRL+C')
# channel.start_consuming()
# def fib(n):
# if n == 0:
# return 0
# elif n == 1:
# return 1
# else:
# return fib(n - 1) + fib(n - 2)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from xml.etree import ElementTree
from trac.test import EnvironmentStub, MockRequest
from trac.mimeview.txtl import TextileRenderer, has_textile
from trac.web.chrome import web_context
class TextileRendererTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[TextileRenderer])
self.env.config.set('wiki', 'safe_schemes', 'https, http, data')
self.renderer = TextileRenderer(self.env)
self.req = MockRequest(self.env)
self.context = web_context(self.req)
def tearDown(self):
self.env.reset_db()
def _render(self, text):
result = self.renderer.render(self.context, 'textile', text)
self.assertEqual(unicode, type(result))
return result
def _parse_xml(self, source):
return ElementTree.fromstring(source.encode('utf-8'))
def test_image(self):
result = self._render(u"""\
!https://example.org/foo.png! uníćode
!//example.net/foo.png! uníćode
!/path/to/foo.png! uníćode
!foo.png! uníćode
!data:image/png,foo! uníćode
""")
tree = self._parse_xml(result)
elements = tree.findall('img')
self.assertEqual(elements[0].get('src'), 'https://example.org/foo.png')
self.assertEqual(elements[0].get('crossorigin'), 'anonymous')
self.assertEqual(elements[1].get('src'), '//example.net/foo.png')
self.assertEqual(elements[1].get('crossorigin'), 'anonymous')
self.assertEqual(elements[2].get('src'), '/path/to/foo.png')
self.assertEqual(elements[2].get('crossorigin'), None)
self.assertEqual(elements[3].get('src'), 'foo.png')
self.assertEqual(elements[3].get('crossorigin'), None)
self.assertIn(elements[4].get('src'), ['data:image/png,foo', '#'])
self.assertEqual(elements[4].get('crossorigin'), None)
def test_style(self):
result = self._render(u"""\
*{background:url(https://example.org/foo.png)}uníćode*
*{background:url(//example.net/foo.png) }uníćode*
*{background:url(/path/to/foo.png) }uníćode*
*{background:url(./foo.png) }uníćode*
*{background:url(foo.png) }uníćode*
*{background:url(data:image/png,foo) }uníćode*
""")
self.assertNotIn('url(https://example.org/foo.png)', result)
self.assertNotIn('url(//example.net/foo.png)', result)
self.assertIn('url(/path/to/foo.png)', result)
self.assertIn('url(./foo.png)', result)
self.assertIn('url(foo.png)', result)
self.assertIn('url(data:image/png,foo)', result)
def test_html(self):
result = self._render(u"""\
<a href="ftp://example.org/">unsafe</a>
<img src="//example.org/foo.png" />
<span style="background-image:url(http://example.org/foo.png)">unsafe</span>
""")
self.assertNotIn('href="ftp://', result)
self.assertNotIn('<img src="//example.org/foo.png" />', result)
self.assertIn('<img src="//example.org/foo.png" '
'crossorigin="anonymous" />', result)
self.assertNotIn('url(http://example.org/foo.png)', result)
self.assertIn('<span>unsafe</span>', result)
def test_suite():
suite = unittest.TestSuite()
if has_textile:
suite.addTest(unittest.makeSuite(TextileRendererTestCase))
else:
print('SKIP: mimeview/tests/txtl (no textile installed)')
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
import unittest
from unittest.mock import Mock
from kaggle_gcp import KaggleKernelCredentials, init_gcs
from test.support import EnvironmentVarGuard
from google.cloud import storage
def _make_credentials():
import google.auth.credentials
return Mock(spec=google.auth.credentials.Credentials)
class TestStorage(unittest.TestCase):
def test_version(self):
self.assertIsNotNone(storage.__version__)
def test_ctr(self):
credentials = _make_credentials()
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'GCS')
with env:
init_gcs()
client = storage.Client(project="xyz", credentials=credentials)
self.assertEqual(client.project, "xyz")
self.assertNotIsInstance(client._credentials, KaggleKernelCredentials)
self.assertIsNotNone(client._credentials)
def test_anonymous_client(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'GCS')
with env:
init_gcs()
anonymous = storage.Client.create_anonymous_client()
self.assertIsNotNone(anonymous)
def test_default_credentials_gcs_enabled(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'GCS')
with env:
init_gcs()
client = storage.Client(project="xyz")
self.assertIsInstance(client._credentials, KaggleKernelCredentials)
self.assertTrue(client._connection.user_agent.startswith("kaggle-gcp-client/1.0"))
def test_monkeypatching_idempotent(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'GCS')
with env:
client1 = storage.Client.__init__
init_gcs()
client2 = storage.Client.__init__
self.assertEqual(client1, client2)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'lundberg'
import sys
import os
import argparse
from collections import defaultdict
from apiclient import NIApiClient
USER = os.environ.get('USER', '')
APIKEY = os.environ.get('API_KEY', '')
BASE_URL = os.environ.get('BASE_URL', 'http://localhost')
VERBOSE = False
def get_host_scan(output_file):
client = NIApiClient(BASE_URL, USER, APIKEY)
for host in client.get_host_scan():
tcp_ports, udp_ports = '', ''
if host.get('tcp_ports', None):
tcp_ports = 'T:{},'.format(','.join(host['tcp_ports']))
if host.get('udp_ports', None):
udp_ports = 'U:{}'.format(','.join(host['udp_ports']))
if tcp_ports or udp_ports:
for ip_address in host.get('ip_addresses', []):
output_file.writelines('{ip} {tcp}{udp}\n'.format(ip=ip_address, tcp=tcp_ports, udp=udp_ports))
# Deprecated old version that traverses hosts and gets dependencies. Use get_host_scan instead.
def get_hosts(output_file):
client = NIApiClient(BASE_URL, USER, APIKEY)
for host in client.get_type('host', headers=client.create_headers()):
if host['node'].get('operational_state', 'Not set') != 'Decommissioned':
if VERBOSE:
print('Getting ports for %s...' % host['node_name'], end='')
ports = defaultdict(list)
for rel in client.get_relationships(host, relationship_type='Depends_on', headers=client.create_headers()):
protocol = rel['properties'].get('protocol', None)
port = rel['properties'].get('port', None)
if protocol and port:
ports[protocol].append(port)
tcp_ports, udp_ports = '', ''
if 'tcp' in ports:
tcp_ports = 'T:%s,' % ','.join([str(i) for i in set(ports['tcp'])])
if 'udp' in ports:
udp_ports = 'U:%s' % ','.join([str(i) for i in set(ports['udp'])])
if tcp_ports or udp_ports:
for ip_address in host['node'].get('ip_addresses', []):
output_file.writelines('%s %s%s\n' % (ip_address, tcp_ports, udp_ports))
if VERBOSE:
print('done.')
def main():
# User friendly usage output
parser = argparse.ArgumentParser()
parser.add_argument('--output', '-O')
parser.add_argument('--verbose', '-V', action='store_true', default=False)
args = parser.parse_args()
if args.verbose:
global VERBOSE
VERBOSE = True
if args.output:
with open(args.output, 'w') as f:
get_host_scan(f)
else:
get_host_scan(sys.stdout)
return 0
if __name__ == '__main__':
main()
|
from hacker_news_assets.resources import RESOURCES_LOCAL, RESOURCES_PROD, RESOURCES_STAGING
from dagster import AssetGroup, schedule_from_partitions
from . import assets
core_assets_prod = AssetGroup.from_package_module(
package_module=assets, resource_defs=RESOURCES_PROD
).prefixed("core")
core_assets_staging = AssetGroup.from_package_module(
package_module=assets, resource_defs=RESOURCES_STAGING
).prefixed("core")
core_assets_local = AssetGroup.from_package_module(
package_module=assets, resource_defs=RESOURCES_LOCAL
).prefixed("core")
RUN_TAGS = {
"dagster-k8s/config": {
"container_config": {
"resources": {
"requests": {"cpu": "500m", "memory": "2Gi"},
}
},
}
}
core_assets_schedule_prod = schedule_from_partitions(
core_assets_prod.build_job(name="core_job", tags=RUN_TAGS)
)
core_assets_schedule_staging = schedule_from_partitions(
core_assets_staging.build_job(name="core_job", tags=RUN_TAGS)
)
core_assets_schedule_local = schedule_from_partitions(
core_assets_local.build_job(name="core_job", tags=RUN_TAGS)
)
core_definitions_prod = [core_assets_prod, core_assets_schedule_prod]
core_definitions_staging = [core_assets_staging, core_assets_schedule_staging]
core_definitions_local = [core_assets_local, core_assets_schedule_local]
|
# -*- coding: utf-8 -*-
from vecto.data import Dataset
from vecto.embeddings import load_from_dir
from vecto.benchmarks.analogy.io import get_pairs
from numpy import vstack
from numpy.linalg import norm
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import argparse
# import logging
# logger = logging.getLogger(__name__)
def _parse(args):
embs = load_from_dir(args.embs)
dataset = Dataset(args.data)
visualize_relations(embs, dataset)
def visualize_relations(embs, dataset):
# format pairs (wordA, [wordB])
pairs = []
for filename in dataset.file_iterator():
pairs += get_pairs(filename)
offsets = []
# all pairs of related words
related_words = [(wa,wb) for (wa, wsb) in pairs for wb in wsb]
for wa,wb in related_words:
# skip the pair if either word is missing from the vocabulary
if embs.vocabulary.get_id(wa) < 0: continue
if embs.vocabulary.get_id(wb) < 0: continue
# normed vectors offset
va = embs.get_vector(wa)
vb = embs.get_vector(wb)
# offsets.append(va - vb)
offsets.append(va/norm(va) - vb/norm(vb))
# prepare frame
fig, ax = plt.subplots(1,2)
fig.suptitle(dataset.path)
# add mean to end of list
offsets.append(vstack(offsets).mean(axis=0))
# with t-sne
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_result = tsne.fit_transform(offsets.copy())
ax[0].scatter(tsne_result[:,0],tsne_result[:,1])
ax[0].scatter(tsne_result[-1,0],tsne_result[-1,1])
ax[0].set_title("t-SNE Mean: {}".format(tsne_result[-1]))
# with pca
pca = PCA(n_components=2)
pca_result = pca.fit_transform(offsets.copy())
ax[1].scatter(pca_result[:,0], pca_result[:,1])
ax[1].scatter(pca_result[-1,0], pca_result[-1,1])
ax[1].set_title("PCA Mean: {}".format(pca_result[-1]))
plt.show()
# sets parser and interface function
parser = argparse.ArgumentParser()
# sets the user options
parser.add_argument("embs", help="embeddings dir path")
parser.add_argument("data", help="dataset dir path")
# parser.add_argument("data", help="dataset files", nargs="+")
# calls the parser
_parse(parser.parse_args())
|
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as tf
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True):
if isReLU:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2, bias=True),
nn.LeakyReLU(0.1, inplace=True)
)
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2, bias=True)
)
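# Note: with padding = ((kernel_size - 1) * dilation) // 2 and stride 1, these
# convolutions preserve spatial resolution for odd kernel sizes; e.g.
# kernel_size=3 with dilation=2 pads by 2.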
def upsample_factor2(inputs, target_as):
inputs = tf.interpolate(inputs, scale_factor=2, mode="nearest")
_, _, h, w = target_as.size()
if inputs.size(2) != h or inputs.size(3) != w:
return tf.interpolate(inputs, [h, w], mode="bilinear", align_corners=False)
else:
return inputs
class OccUpsampleNetwork(nn.Module):
def __init__(self, ch_in, ch_out):
super(OccUpsampleNetwork, self).__init__()
self.feat_dim = 32
self.init_conv = conv(ch_in, self.feat_dim)
self.res_convs = nn.Sequential(
conv(self.feat_dim, self.feat_dim),
conv(self.feat_dim, self.feat_dim, isReLU=False)
)
self.res_end_conv = conv(self.feat_dim, self.feat_dim)
self.mul_const = 0.1
self.out_convs = conv(self.feat_dim, ch_out)
def forward(self, occ, x):
occ = upsample_factor2(occ, x)
x_in = torch.cat([occ, x], dim=1)
x_init = self.init_conv(x_in)
x_res = x_init
x_res = x_res + self.res_convs(x_res) * self.mul_const
x_res = x_res + self.res_convs(x_res) * self.mul_const
x_res = x_res + self.res_convs(x_res) * self.mul_const
x_init = x_init + self.res_end_conv(x_res)
return self.out_convs(x_init) + occ
def subtract_mean(input):
return input - input.mean(2).mean(2).unsqueeze(2).unsqueeze(2).expand_as(input)
class RefineFlow(nn.Module):
def __init__(self, ch_in):
super(RefineFlow, self).__init__()
self.kernel_size = 3
self.pad_size = 1
self.pad_ftn = nn.ReplicationPad2d(self.pad_size)
self.convs = nn.Sequential(
conv(ch_in, 128, 3, 1, 1),
conv(128, 128, 3, 1, 1),
conv(128, 64, 3, 1, 1),
conv(64, 64, 3, 1, 1),
conv(64, 32, 3, 1, 1),
conv(32, 32, 3, 1, 1),
conv(32, self.kernel_size * self.kernel_size, 3, 1, 1)
)
self.softmax_feat = nn.Softmax(dim=1)
self.unfold_flow = nn.Unfold(kernel_size=(self.kernel_size, self.kernel_size))
self.unfold_kernel = nn.Unfold(kernel_size=(1, 1))
def forward(self, flow, diff_img, feature):
b, _, h, w = flow.size()
flow_m = subtract_mean(flow)
norm2_img = torch.norm(diff_img, p=2, dim=1, keepdim=True)
feat = self.convs(torch.cat([flow_m, norm2_img, feature], dim=1))
feat_kernel = self.softmax_feat(-feat ** 2)
flow_x = flow[:, 0].unsqueeze(1)
flow_y = flow[:, 1].unsqueeze(1)
flow_x_unfold = self.unfold_flow(self.pad_ftn(flow_x))
flow_y_unfold = self.unfold_flow(self.pad_ftn(flow_y))
feat_kernel_unfold = self.unfold_kernel(feat_kernel)
flow_out_x = torch.sum(flow_x_unfold * feat_kernel_unfold, dim=1).unsqueeze(1).view(b, 1, h, w)
flow_out_y = torch.sum(flow_y_unfold * feat_kernel_unfold, dim=1).unsqueeze(1).view(b, 1, h, w)
return torch.cat([flow_out_x, flow_out_y], dim=1)
class RefineOcc(nn.Module):
def __init__(self, ch_in):
super(RefineOcc, self).__init__()
self.kernel_size = 3
self.pad_size = 1
self.pad_ftn = nn.ReplicationPad2d(self.pad_size)
self.convs = nn.Sequential(
conv(ch_in, 128, 3, 1, 1),
conv(128, 128, 3, 1, 1),
conv(128, 64, 3, 1, 1),
conv(64, 64, 3, 1, 1),
conv(64, 32, 3, 1, 1),
conv(32, 32, 3, 1, 1),
conv(32, self.kernel_size * self.kernel_size, 3, 1, 1)
)
self.softmax_feat = nn.Softmax(dim=1)
self.unfold_occ = nn.Unfold(kernel_size=(self.kernel_size, self.kernel_size))
self.unfold_kernel = nn.Unfold(kernel_size=(1, 1))
def forward(self, occ, feat1, feat2):
b, _, h, w = occ.size()
feat = self.convs(torch.cat([occ, feat1, feat2], dim=1))
feat_kernel = self.softmax_feat(-feat ** 2)
occ_unfold = self.unfold_occ(self.pad_ftn(occ))
feat_kernel_unfold = self.unfold_kernel(feat_kernel)
occ_out = torch.sum(occ_unfold * feat_kernel_unfold, dim=1).unsqueeze(1).view(b, 1, h, w)
return occ_out
|
#!/usr/bin/env python3
import json
import os
import sys
from collections import defaultdict
from argparse import ArgumentParser
class cached_mutable_property:
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
methods. (e.g. url = cached_property(get_absolute_url, name='url') )
"""
def __init__(self, fget, fset=None, fdel=None, name=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
self.__doc__ = getattr(fget, '__doc__')
self.name = name or fget.__name__
def __get__(self, instance, cls=None):
if instance is None:
return self
if self.name in instance.__dict__:
return instance.__dict__[self.name]
else:
res = instance.__dict__[self.name] = self.fget(instance)
return res
def __set__(self, instance, value):
if self.fset:
self.fset(instance, value)
instance.__dict__[self.name] = value
else:
raise AttributeError("read-only attribute")
def __delete__(self, instance):
if self.fdel:
self.fdel(instance)
del instance.__dict__[self.name]
else:
raise AttributeError("can not be deleted")
def setter(self, fset):
self.fset = fset
return self
def deleter(self, fdel):
self.fdel = fdel
return self
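# A minimal usage sketch (hypothetical class) showing the cached get/set/delete cycle:
class _Example:
    @cached_mutable_property
    def value(self):
        print("computing...")   # runs only on first access; result is then cached
        return 42

    @value.setter
    def value(self, new):
        pass                    # the descriptor stores `new` in the instance dict

    @value.deleter
    def value(self):
        pass                    # `del obj.value` drops the cached entry

# e.g. e = _Example(); e.value; e.value  -> "computing..." is printed only once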
def parse_args():
parser = ArgumentParser()
parser.add_argument("INFILE")
parser.add_argument("OUTDIR")
return parser.parse_args()
class EventCache:
BATCH_SIZE = 5000
def __init__(self, file):
self.file = file
@cached_mutable_property
def _cache(self):
return defaultdict(lambda: [])
@_cache.deleter
def _cache(self):
pass
def aggregate_events(self, to):
self._ensure_dir(to)
with open(self.file, "r") as f:
i = 0
for line in f:
if i >= self.BATCH_SIZE:
self._flush(to)
i = 0
try:
event_name, source_id, target_id = line.strip().split()
except ValueError:
continue
else:
self._cache[event_name].append(
(event_name, source_id, target_id)
)
i += 1
self._flush(to)
def _flush(self, to):
for name, events in self._cache.items():
with open(os.path.join(to, name), "a") as f:
for e in events:
f.write(" ".join(e))
f.write("\n")
del self._cache
def _ensure_dir(self, dir_):
os.makedirs(dir_, exist_ok=True)
def main():
args = parse_args()
cache = EventCache(file=args.INFILE)
cache.aggregate_events(to=args.OUTDIR)
return 0
if __name__ == "__main__":
sys.exit(main())
|
items_dict={'1':'Apple', '2':'Orange','3':'Banana', '4':'Guava','5':'Mango',
'6' : 'Peach','7':'Ladies Finger','8':'Peas','9':'Carrot','10':'Radish',
'11':'Broccoli','12':'Mushroom','13':'Maggie','14':'Lays', '15':'Doritos',
'16':'Pringles','17':'Oreos','18':'Bourbon','19':'Coke','20':'Pepsi','21':'Frooti',
'22':'Maaza','23':'Mountain Dew','24':'Milk','25':'Cheese','26':'Curd','27':'Dairy Milk',
'28':'Baguette','29':'Croissant','30':'Hard Bread','31':'Sour Dough Bread','32':'Multi Grain Bread','33':'Soulfull',
'34':'Corn Flakes','35':'Chocos'}
bought_together_sets= [{'1','2','3'},
{'4','5','6'},
{'7','8','9'},
{'10','11','12'},
{'13','14','15'},
{'16','17','18'},
{'20','21','22'},
{'24','25','26'},
{'27','28','29'},
{'30','31','32'},
{'33','34','35'}]
keep_running_loop="y"
user_choice=1
price_dict={"1":5,"2": 10,"3":20,"4":20, "5": 10, "6":10, "7": 20, "8":20, "9": 30, "10": 35, "11": 40, "12": 20, "13": 10, "14": 10, "15": 20,
"16": 10, "17": 10, "18": 15, "19": 15, "20":35, "21":10, "22":10, "23":15, "24":25, "25": 15, "26":25, "27": 5, "28": 55, "29":50, "30": 45,
"31": 35, "32": 25, "33": 20, "34": 20, "35": 20,}
total_price = 0
cart = []
print("welcome to Fast Mart!")
while(keep_running_loop=="y"):
for item in items_dict.keys():
print(item,": ",items_dict[item])
user_choice = input("Please pick an item! \n")
if(user_choice not in items_dict.keys()):
print("Hmm, this seems to be an invalid choice!")
continue
cart.append(items_dict[user_choice])
print("Items currently in your cart are: ", cart)
total_price = total_price + price_dict[user_choice]
for sets in bought_together_sets:
if(user_choice in sets):
print("Users who bought "+items_dict[user_choice]+" also bought:")
for item in sets:
if(item!=user_choice):
print(items_dict[item]," Product Code: ",item)
keep_running_loop=input("Add more items? y for yes, n for no.")
print("Thanks for shopping with Fast Mart! Your total is ",total_price, " rupees.")
|
# CodingGears.io
# shutil - High-level file operations
import shutil
# TODO: Copy file shutil.copyfile (destination path must be file)
# shutil.copyfile("/home/training/sample/cities.txt", "/home/training/cities.txt.bk")
# TODO: Copy file shutil.copy (destination path can be file or directory)
# shutil.copy("/home/training/sample/cities.txt", "/home/training")
# TODO: Move file shutil.move
shutil.move("/home/training/sample/animals.txt", "/home/training")
shutil.move("/home/training/sample/numbers.txt", "/home/training/numbers123.txt")
|
from django.shortcuts import render
# Create your views here.
from .forms import BuscarViajeForm
from django.db.models import Q,F
from django.db.models.expressions import RawSQL
from Nucleo.models import Viaje
from django.http import HttpResponseRedirect
from django.views.generic import ListView
class BuscarList(ListView):
model = Viaje
template_name = "classroom/buscar/listarbusqueda.html"
def get_queryset(self):
origen =self.request.GET.get('origen')
destino = self.request.GET.get('destino')
fecha = self.request.GET.get('fecha')
print(fecha)
viajes = Viaje.objects.all().filter(tiempo_inicio__date=fecha, realizado=False)
borrar = []
for v in viajes:
if not v.tramos.filter(ciudad_origen=origen).exists() and not v.tramos.filter(ciudad_destino=destino).exists():
borrar.append(v.pk)
for pk in borrar:
viajes=viajes.exclude(pk=pk)
return viajes.filter(tramos__ciudad_origen=origen).annotate(lahora = F('tramos__hora_inicio')).annotate(
mi_origen=RawSQL("SELECT '%s'"%(origen,), ())).annotate(mi_destino=RawSQL("SELECT '%s'"%(destino,), ()))
def Buscar(request):
form = BuscarViajeForm(request.POST)
if request.method == "POST" and form.is_valid():
origen = form.cleaned_data['ciudad_origen']
destino = form.cleaned_data['ciudad_destino']
fecha = form.cleaned_data['fecha']
viajes = Viaje.objects.all()
existio = False
for v in viajes:
if v.tramos.filter(Q(ciudad_origen=origen)|Q(ciudad_destino=destino)).count()==2:
existio=True
break
return HttpResponseRedirect('/buscar/listar?origen=%s&destino=%s&fecha=%s'%(origen,destino,fecha))
else:
form = BuscarViajeForm()
return render(request,'classroom/buscar/buscar.html',{'form':form})
|
import logging
import os
import subprocess
import sys
import tempfile
def save(doc, outpath, outformat=None):
"""
Saves document `doc` to a file at `outpath`. By default, this file
will be in SVG format; if it ends with .pdf or .png, or if outformat
is specified, the document will be converted to PDF or PNG if possible.
Conversion to PDF and PNG requires rsvg-convert (provided by librsvg),
inkscape, or webkitToPDF (PDF conversion only).
Attributes:
doc: the :py:class:`genomeview.Document` to be saved
outpath: a string specifying the file to save to; file extensions of
.pdf or .png will change the default output format
outformat: override the file format; must be one of "pdf", "png", or
(the default) "svg"
"""
if isinstance(outpath, bytes):
outpath = outpath.decode()
if outformat is None:
if outpath.lower().endswith(".pdf"):
outformat = "pdf"
elif outpath.lower().endswith(".png"):
outformat = "png"
else:
outformat = "svg"
if outformat == "svg":
with open(outpath, "w") as outf:
render_to_file(doc, outf)
else:
# render to a temporary file then convert to PDF or PNG
with tempfile.TemporaryDirectory() as outdir:
temp_svg_path = os.path.join(outdir, "temp.svg")
with open(temp_svg_path, "w") as outf:
render_to_file(doc, outf)
convert_svg(temp_svg_path, outpath, outformat)
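# A minimal usage sketch (assuming `doc` is a genomeview.Document built elsewhere):
#
#     save(doc, "tracks.svg")                    # written directly as SVG
#     save(doc, "tracks.pdf")                    # converted via an available backend
#     save(doc, "tracks.out", outformat="png")   # extension ignored, format forced to PNG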
def render_to_file(doc, outf):
"""
Renders the document as an svg to a file-like object.
"""
for l in doc.render():
outf.write(l + "\n")
#############################################################################
######################### low-level functionality ###########################
#############################################################################
def convert_svg(inpath, outpath, outformat):
converter = _getExportConverter(outformat)
if converter == "webkittopdf":
exportData = _convertSVG_webkitToPDF(inpath, outpath, outformat)
elif converter == "librsvg":
exportData = _convertSVG_rsvg_convert(inpath, outpath, outformat)
elif converter == "inkscape":
exportData = _convertSVG_inkscape(inpath, outpath, outformat)
return exportData
def _getExportConverter(exportFormat, requested_converter=None):
if requested_converter == "webkittopdf" and exportFormat=="png":
logging.error("webkitToPDF does not support export to PNG; use librsvg or inkscape instead, or "
"export to PDF")
sys.exit(1)
if exportFormat == "png" and requested_converter is None:
return "librsvg"
if requested_converter == "rsvg-convert":
return "librsvg"
if requested_converter in [None, "webkittopdf"]:
if _checkWebkitToPDF():
return "webkittopdf"
if requested_converter in [None, "librsvg"]:
if _checkRSVGConvert():
return "librsvg"
if requested_converter in [None, "inkscape"]:
if _checkInkscape():
return "inkscape"
raise Exception("No converter found for conversion to {}".format(exportFormat))
return None
def _checkWebkitToPDF():
try:
subprocess.check_call("webkitToPDF", stderr=subprocess.PIPE, shell=True)
return True
except subprocess.CalledProcessError:
return False
def _checkRSVGConvert():
try:
subprocess.check_call("rsvg-convert -v", stdout=subprocess.PIPE, shell=True)
return True
except subprocess.CalledProcessError:
return False
def _checkInkscape():
try:
subprocess.check_call("inkscape --version", stdout=subprocess.PIPE, shell=True)
return True
except subprocess.CalledProcessError:
return False
def _convertSVG_webkitToPDF(inpath, outpath, outformat):
if outformat.lower() != "pdf":
return None
try:
cmd = "webkitToPDF {} {}".format(inpath, outpath)
subprocess.check_call(cmd, shell=True)#, stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
return None
return open(outpath, "rb").read()
def _convertSVG_inkscape(inpath, outpath, outformat):
options = ""
outformat = outformat.lower()
if outformat == "png":
options = "--export-dpi 150 --export-background white"
try:
subprocess.check_call("inkscape {} {} --export-{}={}".format(options, inpath, outformat, outpath),
shell=True)
except subprocess.CalledProcessError as e:
print("EXPORT ERROR:", str(e))
return open(outpath, "rb").read()
def _convertSVG_rsvg_convert(inpath, outpath, outformat):
options = ""
outformat = outformat.lower()
if outformat == "png":
options = "-a --background-color white"
try:
subprocess.check_call("rsvg-convert -f {} {} -o {} {}".format(outformat, options, outpath, inpath), shell=True)
except subprocess.CalledProcessError as e:
print("EXPORT ERROR:", str(e))
return open(outpath, "rb").read()
|
#!/usr/bin/env python3
# Turns on pdb for opcode replacement
# Note: this only works properly on Unix systems.
debug_on = False
# Imports
import sys
import json
def error(message):
"""Function called when there's an error."""
#raise SyntaxError(message)
print("jlm2asq.py:", message, file=sys.stderr)
exit()
#Start
t = sys.stdin.read()
t = t.split("\nHLT\n")
if len(t) != 2:
error("Not a valid JLM program. Check for HLT.")
try:
stuff = json.loads(t[1])
except:
error("variables initialized incorrectly.")
program = t[0].split("\n")
program.append("HLT")
#Changing stuff
stuff["Z"] = 0
stuff["NEGONE"] = -1
stuff["ASCIIZ"] = 48
#Getting line #s of labels
op_len = {
"SUBLEQ":1,
"ADD":3,
"SUB":1,
"MOV":4,
"OUT":4,
"INP":5,
"#":0,
"GOTO":1,
"BRP":2,
"BRZ":7,
"NOP":1,
"HLT":1
}
lbl_locs = {}
cur_line = 0
for i in range(len(program)):
line = program[i]
line = line.split()
opcode = line[0]
if opcode == "LBL":
lbl_locs[line[1]] = str(cur_line)
program[i] = "# LBL "+line[1]
elif opcode in op_len.keys():
cur_line += op_len[opcode]
else:
error("Line "+str(i)+": "+opcode+" is not a valid opcode.")
#Defining Non-Branching Functions
def subleq(a, b, lbl):
"""Fundamental Operation!
Subtracts a from b and goes to c if results is less than or equal to zero;
otherwise it goes to the next instruction."""
global lbl_locs
try:
c = lbl_locs[lbl]
except KeyError:
error("%s is not a valid label." % lbl)
return "{a} {b} {c}\n".format(a=a, b=b, c=c)
def add(a, b):
"""Adds a and b and stores the result in a."""
return "{b} Z ?+1\nZ {a} ?+1\nZ Z ?+1\n".format(a=a, b=b)
def sub(a, b):
"""Subtracts b from a and stores the result in a."""
return "{b} {a} ?+1\n".format(a=a, b=b)
def mov(a, b):
"""Copies b to a."""
return sub(a, a) + add(a, b)
def out(a):
"""Copies the value in a to the "output"(address 0xff)."""
return mov("255", a)
def inp(a):
"""Copies the value in the "input" (address 0xfe) to a once user enters something on keyboard."""
return mov(a,"254")+"Z "+a+" ?-4\n"
def hlt():
"""Halts the program for coffee break. ALWAYS and only at the very end of the program!"""
return "\n0\n"
def cmt():
"""For comments."""
return ""
#Branch Functions
def goto(lbl):
"""Unconditionally goes to label lbl."""
global lbl_locs
try:
q = lbl_locs[lbl]
except KeyError:
raise Exception("%s is not a valid label." % q)
t = "Z Z {q}\n".format(q=q)
return t
def brp(a, lbl):
"""Goes to label lbl if a>0. Otherwise, continues as normal."""
global lbl_locs
try:
lbl = lbl_locs[lbl]
except KeyError:
error("%s is not a valid label." % lbl)
return "Z {a} ?+2\nZ Z {lbl}\n".format(a=a, lbl=lbl)
def brz(a, lbl):
"""Goes to label lbl if a=0. Otherwise, continues as normal."""
global lbl_locs
try:
lbl = lbl_locs[lbl]
except KeyError:
error("%s is not a valid label." % lbl)
return ("Z {a} ?+2\nZ Z ?+6\n{a} Z ?+2\nZ Z ?+4\nNEGONE Z ?+2\nZ Z {lbl}\n".format(a=a, lbl=lbl))+sub("Z","Z")
#Replacing opcodes
if debug_on:
sys.stdin = open("/dev/tty")
import pdb
pdb.set_trace()
output = ""
for line in program:
lip = line.split()
opcode = lip[0]
try:
if opcode == "SUBLEQ":
output += subleq(lip[1], lip[2], lip[3])
elif opcode == "ADD":
output += add(lip[1], lip[2])
elif opcode == "SUB":
output += sub(lip[1], lip[2])
elif opcode == "MOV":
output += mov(lip[1], lip[2])
elif opcode=="GOTO":
output += goto(lip[1])
elif opcode == "OUT":
output += out(lip[1])
elif opcode == "INP":
output += inp(lip[1])
elif opcode=="BRP":
output += brp(lip[1], lip[2])
elif opcode == "BRZ":
output += brz(lip[1], lip[2])
elif opcode == "NOP":
output += "Z Z ?+1\n"
elif opcode == "#":
output += cmt()
elif opcode == "HLT":
output += "0\n"
else:
error("Invalid opcode: %s" % lip[0])
except IndexError:
error("Wrong number of operands: %s" % line)
output += json.dumps(stuff)
print(output)
|
"""Tests for the module :mod:`esmvaltool.diag_scripts.shared.io`."""
import os
from collections import OrderedDict
from copy import deepcopy
import iris
import mock
import numpy as np
import pytest
import yaml
from esmvaltool.diag_scripts.shared import io
with open(os.path.join(os.path.dirname(__file__), 'configs',
'test_io.yml')) as file_:
CONFIG = yaml.safe_load(file_)
@pytest.mark.parametrize('data', CONFIG['_has_necessary_attributes'])
@mock.patch.object(io, 'logger', autospec=True)
def test_has_necessary_attributes(mock_logger, data):
"""Test attribute checks."""
for log_level in ('debug', 'info', 'warning', 'error', 'exception'):
metadata = data['input']
kwargs = data.get('kwargs', {})
has_atts = io._has_necessary_attributes(metadata,
log_level=log_level,
**kwargs)
assert has_atts == data['output']
logger_func = getattr(mock_logger, log_level)
if has_atts:
logger_func.assert_not_called()
else:
logger_func.assert_called()
mock_logger.reset_mock()
CFG = {
'input_files': [
'metadata.yml',
'test_metadata.yml',
'valid/dir/1',
'valid/dir/2',
],
'other_attr':
'I am not used!',
}
ROOT_DIR = '/root/to/something'
PATTERNS_FOR_ALL_ANCESTORS = [
(None, [
os.path.join(ROOT_DIR, 'test.nc'),
os.path.join(ROOT_DIR, 'egg.yml'),
os.path.join(ROOT_DIR, 'root2', 'x.nc'),
os.path.join(ROOT_DIR, 'root2', 'y.png'),
os.path.join(ROOT_DIR, 'root3', 'egg.nc'),
os.path.join(ROOT_DIR, 'test_1.nc'),
os.path.join(ROOT_DIR, 'test_2.yml'),
os.path.join(ROOT_DIR, 'root4', 'egg.nc'),
]),
('*', [
os.path.join(ROOT_DIR, 'test.nc'),
os.path.join(ROOT_DIR, 'egg.yml'),
os.path.join(ROOT_DIR, 'root2', 'x.nc'),
os.path.join(ROOT_DIR, 'root2', 'y.png'),
os.path.join(ROOT_DIR, 'root3', 'egg.nc'),
os.path.join(ROOT_DIR, 'test_1.nc'),
os.path.join(ROOT_DIR, 'test_2.yml'),
os.path.join(ROOT_DIR, 'root4', 'egg.nc'),
]),
('*.nc', [
os.path.join(ROOT_DIR, 'test.nc'),
os.path.join(ROOT_DIR, 'root2', 'x.nc'),
os.path.join(ROOT_DIR, 'root3', 'egg.nc'),
os.path.join(ROOT_DIR, 'test_1.nc'),
os.path.join(ROOT_DIR, 'root4', 'egg.nc'),
]),
('test*', [
os.path.join(ROOT_DIR, 'test.nc'),
os.path.join(ROOT_DIR, 'test_1.nc'),
os.path.join(ROOT_DIR, 'test_2.yml'),
]),
('*.yml', [
os.path.join(ROOT_DIR, 'egg.yml'),
os.path.join(ROOT_DIR, 'test_2.yml'),
]),
('egg.nc*', [
os.path.join(ROOT_DIR, 'root3', 'egg.nc'),
os.path.join(ROOT_DIR, 'root4', 'egg.nc'),
]),
]
@pytest.mark.parametrize('pattern,output', PATTERNS_FOR_ALL_ANCESTORS)
@mock.patch('esmvaltool.diag_scripts.shared.io.os.walk', autospec=True)
def test_get_all_ancestor_files(mock_walk, pattern, output):
"""Test retrieving of ancestor files."""
input_dirs = [
[
(ROOT_DIR, ['dir', '__pycache__'], ['test.nc', 'egg.yml']),
(os.path.join(ROOT_DIR, 'root2'), ['d'], ['x.nc', 'y.png']),
(os.path.join(ROOT_DIR, 'root3'), [], ['egg.nc']),
],
[
(ROOT_DIR, ['dir', '__pycache__'], ['test_1.nc', 'test_2.yml']),
(os.path.join(ROOT_DIR, 'root4'), ['d2'], ['egg.nc']),
],
]
mock_walk.side_effect = input_dirs
files = io.get_all_ancestor_files(CFG, pattern=pattern)
assert files == output
PATTERNS_FOR_SINGLE_ANCESTOR = [
([], None, True),
(['I/am/a/cool/file.nc'], 'I/am/a/cool/file.nc', False),
(['I/am/a/cool/file.nc', 'oh/no/file_2.nc'], 'I/am/a/cool/file.nc', True),
]
@pytest.mark.parametrize('files,output,logger', PATTERNS_FOR_SINGLE_ANCESTOR)
@mock.patch.object(io, 'get_all_ancestor_files', autospec=True)
@mock.patch.object(io, 'logger', autospec=True)
def test_get_ancestor_file(mock_logger, mock_get_all_ancestors, files, output,
logger):
"""Test retrieving of single ancestor file."""
mock_get_all_ancestors.return_value = files
returned_file = io.get_ancestor_file(CFG, pattern='*')
assert returned_file == output
if logger:
mock_logger.warning.assert_called()
else:
mock_logger.warning.assert_not_called()
LONG_NAME = 'Loooong name'
SHORT_NAME = 'var'
STANDARD_NAME = 'air_temperature'
UNITS = 'K'
@pytest.mark.parametrize('root', [None, '*'])
@mock.patch.object(io, 'get_all_ancestor_files', autospec=True)
@mock.patch.object(io, 'logger', autospec=True)
@mock.patch('esmvaltool.diag_scripts.shared.io.iris.load_cube', autospec=True)
@mock.patch('esmvaltool.diag_scripts.shared.io.os.walk', autospec=True)
def test_netcdf_to_metadata(mock_walk, mock_load_cube, mock_logger,
mock_get_all_ancestors, root):
"""Test cube to metadata."""
attrs = [
{
'dataset': 'model',
'filename': 'path/to/model1.nc',
'project': 'CMIP42',
},
{
'dataset': 'model',
'filename': 'path/to/model1.yml',
'project': 'CMIP42',
},
{
'dataset': 'model',
'filename': 'path/to/model2.nc',
},
{
'dataset': 'model',
'filename': 'path/to/model3.nc',
'project': 'CMIP42',
},
{
'dataset': 'model',
'filename': 'path/to/model4.nc',
'project': 'CMIP42',
},
]
var_attrs = [
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'units': UNITS,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'units': UNITS,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'standard_name': STANDARD_NAME,
'units': UNITS,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'standard_name': None,
'units': UNITS,
},
]
cubes = [
iris.cube.Cube(0, attributes=attrs[0], **var_attrs[0]),
iris.cube.Cube(0, attributes=attrs[2], **var_attrs[2]),
iris.cube.Cube(0, attributes=attrs[3], **var_attrs[3]),
iris.cube.Cube(0, attributes=attrs[4], **var_attrs[4]),
]
walk_output = [
('path/to', [], ['model1.nc', 'model1.yml']),
('path/to', ['d'], ['model2.nc', 'model3.nc', 'model4.nc']),
]
output = deepcopy([{**attrs[i], **var_attrs[i]} for i in (0, 3, 4)])
for out in output:
out['short_name'] = out.pop('var_name')
out.setdefault('standard_name', None)
mock_get_all_ancestors.return_value = [a['filename'] for a in attrs]
mock_walk.return_value = walk_output
mock_load_cube.side_effect = cubes
metadata = io.netcdf_to_metadata({}, pattern=root, root=root)
assert metadata == output
mock_logger.warning.assert_called()
ATTRS_IN = [
{
'dataset': 'a',
'filename': 'path/to/model1.nc',
'project': 'CMIP42',
'bool': True,
},
{
'dataset': 'b',
'filename': 'path/to/model2.nc',
'project': 'CMIP42',
},
{
'dataset': 'c',
'filename': 'path/to/model3.nc',
},
{
'dataset': 'd',
'filename': 'path/to/model4.nc',
'project': 'CMIP42',
},
]
ATTRS_OUT = [
{
'dataset': 'a',
'filename': 'path/to/model1.nc',
'project': 'CMIP42',
'bool': 'True',
},
{
'dataset': 'b',
'filename': 'path/to/model2.nc',
'project': 'CMIP42',
},
{},
{
'dataset': 'd',
'filename': 'path/to/model4.nc',
'project': 'CMIP42',
},
]
VAR_ATTRS_IN = [
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'units': UNITS,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'units': UNITS,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'standard_name': STANDARD_NAME,
'units': UNITS,
},
]
VAR_ATTRS_OUT = [
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'standard_name': None,
'units': UNITS,
},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'standard_name': None,
'units': UNITS,
},
{},
{
'long_name': LONG_NAME,
'var_name': SHORT_NAME,
'standard_name': STANDARD_NAME,
'units': UNITS,
},
]
ADD_ATTRS = {'project': 'PROJECT', 'attr': 'test'}
ADD_VAR_ATTRS = {'standard_name': STANDARD_NAME, 'var_name': 'test'}
CUBES_IN = [
iris.cube.Cube(0, attributes=ADD_ATTRS, **ADD_VAR_ATTRS) for _ in range(4)
]
OUTPUT = [
iris.cube.Cube(0,
attributes={
**ADD_ATTRS,
**ATTRS_OUT[idx]
},
**{
**ADD_VAR_ATTRS,
**VAR_ATTRS_OUT[idx]
}) for idx in range(4)
]
OUTPUT[2] = None
METADATA_TO_NETCDF = zip(ATTRS_IN, VAR_ATTRS_IN, CUBES_IN, OUTPUT)
@pytest.mark.parametrize('attrs,var_attrs,cube,output', METADATA_TO_NETCDF)
@mock.patch.object(io, 'iris_save', autospec=True)
@mock.patch.object(io, 'logger', autospec=True)
def test_metadata_to_netcdf(mock_logger, mock_save, attrs, var_attrs, cube,
output):
"""Test metadata to cube."""
wrong_name = 'I_am_an_invalid_standard_name'
metadata = deepcopy({**attrs, **var_attrs})
metadata['short_name'] = metadata.pop('var_name')
if metadata['dataset'] == 'a':
metadata['standard_name'] = wrong_name
io.metadata_to_netcdf(cube, metadata)
if metadata.get('standard_name') == wrong_name:
mock_logger.debug.assert_called()
else:
mock_logger.debug.assert_not_called()
if output is None:
mock_logger.warning.assert_called()
assert not mock_save.called
else:
mock_logger.warning.assert_not_called()
save_args = (output, attrs['filename'])
assert mock_save.call_args_list == [mock.call(*save_args)]
PATH = 'path/to/super/cube'
VAR_ATTRS_NEW = [
{
'long_name': 'I do not have units :(',
'short_name': 'sad',
},
{
'long_name': 'Long name',
'short_name': 'var',
'units': '1',
},
{
'short_name': SHORT_NAME,
'long_name': LONG_NAME,
'standard_name': STANDARD_NAME,
'units': UNITS,
},
]
ATTRS_NEW = [
{},
{},
{
'test': '123',
'answer': 42,
},
]
ATTRIBUTES_FOR_1D_CUBE = zip(VAR_ATTRS_NEW, ATTRS_NEW)
@pytest.mark.parametrize('var_attrs,attrs', ATTRIBUTES_FOR_1D_CUBE)
@mock.patch.object(io, 'iris_save', autospec=True)
@mock.patch.object(io, 'logger', autospec=True)
def test_save_1d_data(mock_logger, mock_save, var_attrs, attrs):
"""Test saving of 1 dimensional data."""
coord_name = 'inclination'
data = [
np.ma.masked_invalid([1.0, np.nan, -1.0]),
np.arange(2.0) + 100.0,
np.ma.masked_invalid([33.0, 22.0, np.nan, np.nan, -77.0]),
]
coords = [
iris.coords.DimCoord(np.arange(3.0) - 3.0, long_name=coord_name),
iris.coords.DimCoord(np.arange(2.0) + 2.0, long_name=coord_name),
iris.coords.DimCoord(np.array([-7.0, -3.0, -2.71, 3.0, 314.15]),
long_name=coord_name),
]
cubes = OrderedDict([
('model1',
iris.cube.Cube(data[0],
var_name='xy',
units='kg',
attributes={'hi': '!'},
dim_coords_and_dims=[(coords[0], 0)])),
('model2',
iris.cube.Cube(data[1],
var_name='zr',
units='1',
attributes={},
dim_coords_and_dims=[(coords[1], 0)])),
('model3',
iris.cube.Cube(data[2],
var_name='wa',
units='unknown',
attributes={'very': 'long cube'},
dim_coords_and_dims=[(coords[2], 0)])),
])
dataset_dim = iris.coords.AuxCoord(list(cubes.keys()), long_name='dataset')
dim_1 = coords[0].copy([-7.0, -3.0, -2.71, -2.0, -1.0, 2.0, 3.0, 314.15])
output_data = np.ma.masked_invalid(
[[np.nan, 1.0, np.nan, np.nan, -1.0, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, 100.0, 101.0, np.nan],
[33.0, 22.0, np.nan, np.nan, np.nan, np.nan, np.nan, -77.0]])
output_dims = [(dataset_dim, 0), (dim_1, 1)]
# Without cubes
io.save_1d_data({}, PATH, coord_name, var_attrs, attrs)
mock_logger.warning.assert_called()
assert not mock_save.called
mock_logger.reset_mock()
mock_save.reset_mock()
# With cubes
io.save_1d_data(cubes, PATH, coord_name, var_attrs, attrs)
iris_var_attrs = deepcopy(var_attrs)
iris_var_attrs['var_name'] = iris_var_attrs.pop('short_name')
new_cube = iris.cube.Cube(output_data,
aux_coords_and_dims=output_dims,
attributes=attrs,
**iris_var_attrs)
if 'units' not in var_attrs:
mock_logger.warning.assert_called()
assert not mock_save.called
else:
mock_logger.warning.assert_not_called()
assert mock_save.call_args_list == [mock.call(new_cube, PATH)]
CUBELIST = [
iris.cube.Cube(1),
iris.cube.Cube(2, attributes={
'filename': 'a',
'x': 'y',
}),
]
CUBELIST_OUT = [
iris.cube.Cube(1, attributes={'filename': PATH}),
iris.cube.Cube(2, attributes={
'filename': PATH,
'x': 'y',
}),
]
CUBES_TO_SAVE = [
(iris.cube.Cube(0), iris.cube.Cube(0, attributes={'filename': PATH})),
(CUBELIST, CUBELIST_OUT),
(iris.cube.CubeList(CUBELIST), iris.cube.CubeList(CUBELIST_OUT)),
]
@pytest.mark.parametrize('source,output', CUBES_TO_SAVE)
@mock.patch('esmvaltool.diag_scripts.shared.io.iris.save', autospec=True)
@mock.patch.object(io, 'logger', autospec=True)
def test_iris_save(mock_logger, mock_save, source, output):
"""Test iris save function."""
io.iris_save(source, PATH)
assert mock_save.call_args_list == [mock.call(output, PATH)]
mock_logger.info.assert_called_once()
AUX_COORDS = [
None,
None,
iris.coords.AuxCoord([2, 3, 5], long_name='Primes!'),
]
ATTRIBUTES_FOR_SCALAR_CUBE = zip(VAR_ATTRS_NEW, ATTRS_NEW, AUX_COORDS)
@pytest.mark.parametrize('var_attrs,attrs,aux_coord',
ATTRIBUTES_FOR_SCALAR_CUBE)
@mock.patch.object(io, 'iris_save', autospec=True)
@mock.patch.object(io, 'logger', autospec=True)
def test_save_scalar_data(mock_logger, mock_save, var_attrs, attrs, aux_coord):
"""Test saving of scalar data."""
data = OrderedDict([
('model1', np.nan),
('model2', 1.0),
('model3', 3.14),
])
dataset_dim = iris.coords.AuxCoord(list(data.keys()), long_name='dataset')
output_data = np.ma.masked_invalid([np.nan, 1.0, 3.14])
# Without data
io.save_scalar_data({}, PATH, var_attrs)
mock_logger.warning.assert_called()
assert not mock_save.called
mock_logger.reset_mock()
mock_save.reset_mock()
# With data
io.save_scalar_data(data, PATH, var_attrs, aux_coord, attrs)
iris_var_attrs = deepcopy(var_attrs)
iris_var_attrs['var_name'] = iris_var_attrs.pop('short_name')
new_cube = iris.cube.Cube(output_data,
aux_coords_and_dims=[(dataset_dim, 0)],
attributes=attrs,
**iris_var_attrs)
if aux_coord is not None:
new_cube.add_aux_coord(aux_coord, 0)
if 'units' not in var_attrs:
mock_logger.warning.assert_called()
assert not mock_save.called
else:
mock_logger.warning.assert_not_called()
assert mock_save.call_args_list == [mock.call(new_cube, PATH)]
|
import numpy as np
import math
# Draw 10 samples from a normal distribution (mean 3, std 1), clamp each value
# to the range [1, 5], and round down to the nearest integer.
arr = np.random.normal(3, 1, 10)
res = []
for value in arr:
    res.append(math.floor(max(1, min(value, 5))))
print(res)
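# Equivalent vectorized form (a sketch; assumes the goal is simply clamping to
# [1, 5] and flooring, as in the loop above):
res_vectorized = np.floor(np.clip(arr, 1, 5)).astype(int).tolist()
print(res_vectorized)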
|
import grpc
from koapy.grpc import KiwoomOpenApiService_pb2_grpc
from koapy.grpc.KiwoomOpenApiServiceClientStubWrapper import KiwoomOpenApiServiceClientStubWrapper
from koapy.config import config
class KiwoomOpenApiServiceClient:
def __init__(self, host=None, port=None):
self._host = host or config.get_string('koapy.grpc.host', 'localhost')
self._port = port or config.get('koapy.grpc.port')
if self._port is None:
raise ValueError('Port is None')
self._target = self._host + ':' + str(self._port)
self._channel = grpc.insecure_channel(self._target)
self._stub = KiwoomOpenApiService_pb2_grpc.KiwoomOpenApiServiceStub(self._channel)
self._stub_wrapped = KiwoomOpenApiServiceClientStubWrapper(self._stub)
def is_ready(self, timeout=None):
if timeout is None:
timeout = config.get_int('koapy.grpc.client.is_ready.timeout', 5)
try:
grpc.channel_ready_future(self._channel).result(timeout=timeout)
return True
except grpc.FutureTimeoutError:
return False
def get_original_stub(self):
return self._stub
def get_stub(self):
return self._stub_wrapped
def close(self):
return self._channel.close()
def __getattr__(self, name):
return getattr(self._stub_wrapped, name)
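# Example usage (a sketch; assumes a KiwoomOpenApiService gRPC server is already
# listening on the configured host/port):
#
#     client = KiwoomOpenApiServiceClient()
#     if client.is_ready(timeout=5):
#         stub = client.get_stub()  # wrapped stub with the convenience helpers
#     client.close()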
|
import re
import pypeg2 as peg
from .grammar_util import IntNumMixin
class LevelType:
""" A level type, such as "FOUR_LEVEL" or "EIGHT_LEVEL". Matches quotes as well. """
grammar = "\"", peg.attr("name", re.compile(r"([A-Z]|_|\+)*")), "\""
class ScanCodeName:
""" A key code such as <UP> or <DOWN> """
grammar = "<", peg.attr("name", re.compile(r"([A-Z]|\+|-|[0-9])+")), ">"
@staticmethod
def from_name(name):
return peg.parse("<" + name + ">", ScanCodeName)
class KeySym:
""" Key symbol such as bracketleft, braceleft, 1, 2, 3, a, b, A, B... """
grammar = peg.attr("name", peg.word)
class KeySymList(peg.List):
""" A list of key symbols in brackets """
grammar = "[", peg.optional(peg.csl(KeySym)), "]"
@staticmethod
def from_strings(strings: list):
levels = KeySymList()
for s in strings:
k = KeySym()
k.name = s
levels.append(k)
return levels
class ActionsList(peg.List):
""" A list of actions in brackets """
pass
class KeyDescSymbols(IntNumMixin):
""" inside a key definition inside key_symbols { ... }, this is the part symbols[Group1]=[...]"""
grammar = "symbols", "[", ["Group", "group"], peg.attr("__num", re.compile(r"[0-9]+")), "]", \
"=", peg.attr("levels", KeySymList)
class KeyDescOverlay(IntNumMixin):
""" inside a key definition inside key_symbols { ... }, this is the part overlayX=<...>"""
grammar = "overlay", peg.attr("__num", re.compile(r"[0-9]")), "=", peg.attr("key_code", ScanCodeName)
class KeyDescActions():
""" inside a key definition inside key_symbols { ... }, this is the part actions[Group1]= [] """
grammar = "actions", "[", ["Group", "group"], peg.attr("__num", re.compile(r"[0-9]+")), "]", \
"=", peg.attr("actions", ActionsList)
class KeyDescription(peg.List):
""" A key definition in the xkb_symbols { ... } section """
_contained_grammars = [KeyDescSymbols, KeyDescActions, KeyDescOverlay, ("type", "=", LevelType),
peg.attr("short_levels", KeySymList)]
grammar = "key", peg.blank, peg.attr("key_code", ScanCodeName), peg.blank, "{", peg.endl, \
peg.indent(
_contained_grammars,
peg.maybe_some(",", peg.endl, _contained_grammars),
peg.optional(","), peg.endl
), "}", ";", peg.endl
@property
def type(self):
for e in self:
if isinstance(e, LevelType):
return e
return None
def convert_short_keysym_list(self, group_nums: list):
assert hasattr(self, "short_levels")
for group_num in group_nums:
self.set_symbols_group(group_num, self.short_levels)
delattr(self, "short_levels")
def get_overlays(self):
overlays = []
for child in self:
if isinstance(child, KeyDescOverlay):
overlays.append(child)
return overlays
    def get_overlay_nums(self):
        # Return the numbers of all overlays defined for this key.
        return [overlay.num for overlay in self.get_overlays()]
def get_overlay_by_num(self, num: int):
return [child for child in self if isinstance(child, KeyDescOverlay) and child.num == num][0]
def remove_overlay_by_num(self, num: int):
self[:] = [child for child in self if not isinstance(child, KeyDescOverlay) or child.num != num]
def set_overlay(self, num: int, key_code: ScanCodeName):
self.remove_overlay_by_num(num)
overlay = KeyDescOverlay()
overlay.num = num
overlay.key_code = key_code
self.append(overlay)
def get_symbols_groups(self):
groups = []
for child in self:
if isinstance(child, KeyDescSymbols):
groups.append(child)
return groups
    def get_symbols_group_nums(self):
        # Return the group numbers of all symbols groups defined for this key.
        return [syms.num for syms in self.get_symbols_groups()]
def get_symbols_group_by_num(self, num: int):
return [child for child in self if isinstance(child, KeyDescSymbols) and child.num == num][0]
def remove_symbols_group_by_num(self, num: int):
self[:] = [child for child in self if not isinstance(child, KeyDescSymbols) or child.num != num]
def set_symbols_group(self, num: int, levels: KeySymList):
self.remove_symbols_group_by_num(num)
group = KeyDescSymbols()
group.num = num
group.levels = levels
self.append(group)
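# Example (a sketch built only from the helpers defined above):
#
#     key_code = ScanCodeName.from_name("AD01")      # parses "<AD01>"
#     levels = KeySymList.from_strings(["q", "Q"])   # one group's symbols
#
# A parsed KeyDescription could then be updated with, e.g.,
# desc.set_symbols_group(1, levels) or desc.set_overlay(1, key_code).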
|
# Program to swap two numbers without using a third variable
print("#Program to swap two no. without using third variable")
a = int(input("Enter the First Number :-"))
b = int(input("Enter the Second Number :-"))
a = a + b
b = a - b
a = a - b
print('Value of first no. after swapping is :', a, '\n Value of second no. after swapping :', b)
# Program to swap two numbers using a third variable
print("#Program to swap two no. using third variable")
c = int(input("Enter the First Number :-"))
d = int(input("Enter the Second Number :-"))
temp = c
c = d
d = temp
print('Value of first no. after swapping is :', c, '\n Value of second no. after swapping :', d)
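# Note: Python can also swap two variables in a single step with tuple unpacking: a, b = b, a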
|
import os
import sys
base = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
sys.path.append(base)
import random
import math
import torch
def sample_mask(im_size: int, window_size: int):
"""
Args:
- im_size: size of image
- window_size: size of window. if -1, return full size mask
"""
assert im_size >= 2
assert (1 <= window_size <= im_size) or (window_size == -1)
# if window_size == -1, return all True mask.
if window_size == -1:
return torch.ones(im_size, im_size, dtype=torch.bool)
mask = torch.zeros(im_size, im_size, dtype=torch.bool) # all elements are False
# sample window center. if window size is odd, sample from pixel position. if even, sample from grid position.
window_center_h = random.randrange(0, im_size) if window_size % 2 == 1 else random.randrange(0, im_size + 1)
window_center_w = random.randrange(0, im_size) if window_size % 2 == 1 else random.randrange(0, im_size + 1)
for idx_h in range(window_size):
for idx_w in range(window_size):
h = window_center_h - math.floor(window_size / 2) + idx_h
w = window_center_w - math.floor(window_size / 2) + idx_w
if (0 <= h < im_size) and (0 <= w < im_size):
mask[h, w] = True
return mask
if __name__ == '__main__':
print(sample_mask(6, 2))
print(sample_mask(6, 3))
print(sample_mask(5, 2))
print(sample_mask(5, 3))
|
import re
import torch
import importlib
import numpy as np
#from matplotlib import pyplot as plt
#from TTS.tts.utils.visual import plot_spectrogram
def interpolate_vocoder_input(scale_factor, spec):
"""Interpolate spectrogram by the scale factor.
It is mainly used to match the sampling rates of
the tts and vocoder models.
Args:
scale_factor (float): scale factor to interpolate the spectrogram
spec (np.array): spectrogram to be interpolated
Returns:
torch.tensor: interpolated spectrogram.
"""
print(" > before interpolation :", spec.shape)
spec = torch.tensor(spec).unsqueeze(0).unsqueeze(0) # pylint: disable=not-callable
spec = torch.nn.functional.interpolate(spec,
scale_factor=scale_factor,
recompute_scale_factor=True,
mode='bilinear',
align_corners=False).squeeze(0)
print(" > after interpolation :", spec.shape)
return spec
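# Example (a sketch; assumes `mel` is a [num_mels, T] numpy spectrogram and the
# vocoder expects a different sampling rate than the TTS model produced):
#
#     scale_factor = [1, vocoder_sample_rate / tts_sample_rate]
#     vocoder_input = interpolate_vocoder_input(scale_factor, mel)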
def plot_results(y_hat, y, ap, global_step, name_prefix):
""" Plot vocoder model results """
# select an instance from batch
y_hat = y_hat[0].squeeze(0).detach().cpu().numpy()
y = y[0].squeeze(0).detach().cpu().numpy()
spec_fake = ap.melspectrogram(y_hat).T
spec_real = ap.melspectrogram(y).T
spec_diff = np.abs(spec_fake - spec_real)
# plot figure and save it
# fig_wave = plt.figure()
# plt.subplot(2, 1, 1)
# plt.plot(y)
# plt.title("groundtruth speech")
# plt.subplot(2, 1, 2)
# plt.plot(y_hat)
# plt.title(f"generated speech @ {global_step} steps")
# plt.tight_layout()
# plt.close()
# figures = {
# name_prefix + "spectrogram/fake": plot_spectrogram(spec_fake),
# name_prefix + "spectrogram/real": plot_spectrogram(spec_real),
# name_prefix + "spectrogram/diff": plot_spectrogram(spec_diff),
# name_prefix + "speech_comparison": fig_wave,
# }
# return figures
def to_camel(text):
text = text.capitalize()
return re.sub(r'(?!^)_([a-zA-Z])', lambda m: m.group(1).upper(), text)
def setup_wavernn(c):
print(" > Model: WaveRNN")
MyModel = importlib.import_module("TTS.vocoder.models.wavernn")
MyModel = getattr(MyModel, "WaveRNN")
model = MyModel(
rnn_dims=c.wavernn_model_params['rnn_dims'],
fc_dims=c.wavernn_model_params['fc_dims'],
mode=c.mode,
mulaw=c.mulaw,
pad=c.padding,
use_aux_net=c.wavernn_model_params['use_aux_net'],
use_upsample_net=c.wavernn_model_params['use_upsample_net'],
upsample_factors=c.wavernn_model_params['upsample_factors'],
feat_dims=c.audio['num_mels'],
compute_dims=c.wavernn_model_params['compute_dims'],
res_out_dims=c.wavernn_model_params['res_out_dims'],
num_res_blocks=c.wavernn_model_params['num_res_blocks'],
hop_length=c.audio["hop_length"],
sample_rate=c.audio["sample_rate"],
)
return model
def setup_generator(c):
print(" > Generator Model: {}".format(c.generator_model))
MyModel = importlib.import_module('TTS.vocoder.models.' +
c.generator_model.lower())
MyModel = getattr(MyModel, to_camel(c.generator_model))
if c.generator_model.lower() in 'melgan_generator':
model = MyModel(
in_channels=c.audio['num_mels'],
out_channels=1,
proj_kernel=7,
base_channels=512,
upsample_factors=c.generator_model_params['upsample_factors'],
res_kernel=3,
num_res_blocks=c.generator_model_params['num_res_blocks'])
if c.generator_model in 'melgan_fb_generator':
pass
if c.generator_model.lower() in 'multiband_melgan_generator':
model = MyModel(
in_channels=c.audio['num_mels'],
out_channels=4,
proj_kernel=7,
base_channels=384,
upsample_factors=c.generator_model_params['upsample_factors'],
res_kernel=3,
num_res_blocks=c.generator_model_params['num_res_blocks'])
if c.generator_model.lower() in 'fullband_melgan_generator':
model = MyModel(
in_channels=c.audio['num_mels'],
out_channels=1,
proj_kernel=7,
base_channels=512,
upsample_factors=c.generator_model_params['upsample_factors'],
res_kernel=3,
num_res_blocks=c.generator_model_params['num_res_blocks'])
if c.generator_model.lower() in 'parallel_wavegan_generator':
model = MyModel(
in_channels=1,
out_channels=1,
kernel_size=3,
num_res_blocks=c.generator_model_params['num_res_blocks'],
stacks=c.generator_model_params['stacks'],
res_channels=64,
gate_channels=128,
skip_channels=64,
aux_channels=c.audio['num_mels'],
dropout=0.0,
bias=True,
use_weight_norm=True,
upsample_factors=c.generator_model_params['upsample_factors'])
if c.generator_model.lower() in 'wavegrad':
model = MyModel(
in_channels=c['audio']['num_mels'],
out_channels=1,
use_weight_norm=c['model_params']['use_weight_norm'],
x_conv_channels=c['model_params']['x_conv_channels'],
y_conv_channels=c['model_params']['y_conv_channels'],
dblock_out_channels=c['model_params']['dblock_out_channels'],
ublock_out_channels=c['model_params']['ublock_out_channels'],
upsample_factors=c['model_params']['upsample_factors'],
upsample_dilations=c['model_params']['upsample_dilations'])
return model
def setup_discriminator(c):
print(" > Discriminator Model: {}".format(c.discriminator_model))
if 'parallel_wavegan' in c.discriminator_model:
MyModel = importlib.import_module(
'TTS.vocoder.models.parallel_wavegan_discriminator')
else:
MyModel = importlib.import_module('TTS.vocoder.models.' +
c.discriminator_model.lower())
MyModel = getattr(MyModel, to_camel(c.discriminator_model.lower()))
if c.discriminator_model in 'random_window_discriminator':
model = MyModel(
cond_channels=c.audio['num_mels'],
hop_length=c.audio['hop_length'],
uncond_disc_donwsample_factors=c.
discriminator_model_params['uncond_disc_donwsample_factors'],
cond_disc_downsample_factors=c.
discriminator_model_params['cond_disc_downsample_factors'],
cond_disc_out_channels=c.
discriminator_model_params['cond_disc_out_channels'],
window_sizes=c.discriminator_model_params['window_sizes'])
if c.discriminator_model in 'melgan_multiscale_discriminator':
model = MyModel(
in_channels=1,
out_channels=1,
kernel_sizes=(5, 3),
base_channels=c.discriminator_model_params['base_channels'],
max_channels=c.discriminator_model_params['max_channels'],
downsample_factors=c.
discriminator_model_params['downsample_factors'])
if c.discriminator_model == 'residual_parallel_wavegan_discriminator':
model = MyModel(
in_channels=1,
out_channels=1,
kernel_size=3,
num_layers=c.discriminator_model_params['num_layers'],
stacks=c.discriminator_model_params['stacks'],
res_channels=64,
gate_channels=128,
skip_channels=64,
dropout=0.0,
bias=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
)
if c.discriminator_model == 'parallel_wavegan_discriminator':
model = MyModel(
in_channels=1,
out_channels=1,
kernel_size=3,
num_layers=c.discriminator_model_params['num_layers'],
conv_channels=64,
dilation_factor=1,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
bias=True
)
return model
# def check_config(c):
# c = None
# pass
|
from django.db import models
from phone_field import PhoneField
from django.core.validators import RegexValidator
from django.utils import timezone
# Create your models here.
# Payment method choices as (value, label) pairs, the sequence form Django's `choices` expects.
METHOD = (
    ("Khalti", "Khalti"),
    ("E-Sewa", "E-Sewa"),
)
class Post(models.Model):
    province = models.CharField(max_length=100, null=True, blank=True)
    district = models.CharField(max_length=100, null=True, blank=True)
owners_name = models.CharField(max_length=200)
Address = models.CharField(max_length=200)
phone_regex = RegexValidator(
regex=r'^\+?1?\d{10}$', message="Phone number must be 10 digits and entered in the format: '98XXXXXXXX'.")
contact_number = models.CharField(
validators=[phone_regex], max_length=13) # validators should be a list
location = models.URLField(max_length=200, null=True)
amount = models.PositiveIntegerField(null=True)
length = models.PositiveIntegerField(null=True)
Area = models.PositiveIntegerField(null=True)
frontview = models.ImageField(upload_to='images/')
leftsideview = models.ImageField(upload_to='images/')
backsideview = models.ImageField(upload_to='images/')
rightsideview = models.ImageField(upload_to='images/')
    payment_verification_slip = models.ImageField(upload_to='images/')
    citizenship_photo = models.ImageField(upload_to='images/', null=True, blank=True)
    land_ownership_document_photo = models.ImageField(upload_to='images/', null=True, blank=True)
    land_map_photo = models.ImageField(upload_to='images/', null=True, blank=True)
created_date = models.DateTimeField(
default=timezone.now, blank=True, null=True)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.owners_name
|
"""Utilities for the-wizz library. Contains file loading/closing, cosmology,
and setting the verbosity of the outputs.
"""
from astropy.cosmology import WMAP5
from astropy.io import fits
import h5py
import numpy as np
def file_checker_loader(input_file_name):
"""Utility function for checking the existence of a file and loading the
file with the proper format. Currently checks for FITS files.
----------------------------------------------------------------------------
Args:
sample_file_name: name of file on disk to load
Returns:
open file object data
"""
try:
file_handle = open(input_file_name)
file_handle.close()
except IOError:
print("IOError: File %s not found. the-wizz is exiting." %
input_file_name)
raise IOError("File not found.")
if input_file_name.endswith('fit') or input_file_name.endswith('fits') or \
input_file_name.endswith('gz') or input_file_name.endswith('cat'):
hdu_list = fits.open(input_file_name)
data = hdu_list[1].data
return data
elif input_file_name.endswith('hdf5') or input_file_name.endswith('dat'):
hdf5_file = h5py.File(input_file_name, 'r')
return hdf5_file
    else:
        print("File type not currently supported. Try again later. "
              "the-wizz is exiting.")
        raise IOError
def create_hdf5_file(hdf5_file_name, args):
# TODO:
# Decide if I want to use libver latest or not. Could be more stable
# if we use the "earliest" version. Will have to speed test saving
# and loading of the pairs.
"""Convenience function for creating an HDF5 file with attributes set in
input_flags. Saves the current input flags to the group input_flags for
later reference
----------------------------------------------------------------------------
Args:
hdf5_file_name: string name of the HDF5 file to create
args: argparse ArgumentParser.parse_args object from input_flags
Returns:
open HDF5 file object
"""
hdf5_file = h5py.File(hdf5_file_name, 'w-', libver='latest')
if args is not None:
flag_grp = hdf5_file.create_group('input_flags')
for arg in vars(args):
kwargs = {}
            if isinstance(getattr(args, arg), str):
                # Use a variable-length string dtype for string-valued flags.
                kwargs["dtype"] = h5py.special_dtype(vlen=str)
if getattr(args, arg) is None:
flag_grp.attrs.create(
arg, 'None', dtype=h5py.special_dtype(vlen=str))
else:
flag_grp.attrs.create(arg, getattr(args, arg), **kwargs)
return hdf5_file
def create_ascii_file(ascii_file_name, args):
"""Convenience function for creating an output ascii file. This method
writes the current state of the input_flags arguments to the header of the
file and returns an open Python file handle object. The method will over
write any file it is given so use with caution.
----------------------------------------------------------------------------
Args:
ascii_file_name: string name of the file to write too
args: argparse ArgumentParser.parse_args object from input_flags
Returns:
open Python file object
"""
ascii_file = open(ascii_file_name, 'w')
ascii_file.writelines('# input_flags:\n')
for arg in vars(args):
ascii_file.writelines('#\t%s : %s\n' % (arg, getattr(args, arg)))
return ascii_file
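# Example usage (a sketch; the flag names below are hypothetical stand-ins for
# whatever input_flags actually defines):
#
#     import argparse
#     args = argparse.Namespace(n_z_bins=10, use_inverse_weighting=True)
#     hdf5_file = create_hdf5_file('pair_counts.hdf5', args)
#     ascii_file = create_ascii_file('recovery.ascii', args)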
|
from django.db import models
from django.contrib.auth import get_user_model
from django.db.models import Q
class Activity(models.Model):
title = models.CharField(max_length=200)
pub_date = models.DateTimeField(verbose_name="date published", auto_now_add=True)
enrolled_users = models.ManyToManyField(get_user_model(), related_name="activities", blank=True)
banned_users = models.ManyToManyField(get_user_model(), related_name="bannedActivities", blank=True)
entity = models.ForeignKey(get_user_model(), null=True, on_delete=models.CASCADE, limit_choices_to={'groups__name':'EntidadPublicoPrivada'})
    restricted_entry = models.BooleanField(null=True)  # This functionality still needs to be added (remember to run makemigrations -> migrate if you implement it)
description = models.CharField(max_length=1000)
def __str__(self):
return self.title
class Meta:
        # How this model is shown in the admin site
verbose_name = "Actividad"
verbose_name_plural = "Actividades"
class ActivityRequest(models.Model):
requester = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, limit_choices_to=Q(groups__name='Estudiante'))
activity = models.ForeignKey(Activity, on_delete=models.CASCADE)
pub_date = models.DateTimeField(verbose_name="date published", auto_now_add=True, null=True)
class Meta:
        # How this model is shown in the admin site
verbose_name = "Solicitud de Actividad"
verbose_name_plural = "Solicitudes de Actividades"
unique_together = ('requester', 'activity')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tedega_share import (
init_logger,
get_logger,
monitor_connectivity,
monitor_system
)
from tedega_view import (
create_application,
config_view_endpoint
)
from tedega_storage.rdbms import (
BaseItem,
RDBMSStorageBase,
init_storage,
get_storage
)
########################################################################
# Model #
########################################################################
class Ping(BaseItem, RDBMSStorageBase):
__tablename__ = "pings"
########################################################################
# Controller #
########################################################################
@config_view_endpoint(path="/pings", method="GET", auth=None)
def ping():
data = {}
log = get_logger()
with get_storage() as storage:
factory = Ping.get_factory(storage)
item = factory.create()
storage.create(item)
items = storage.read(Ping)
data["total"] = len(items)
data["data"] = [item.get_values() for item in items]
log.info("Let's log something")
return data
def build_app(servicename):
    # Define things we want to happen on application creation. We want to:
    # 1. Initialise our fluent logger.
    # 2. Initialise the storage.
    # 3. Start monitoring our service's connectivity to the "outside".
    # 4. Start monitoring the system every 10 sec (CPU, RAM, DISK).
run_on_init = [(init_logger, servicename),
(init_storage, None),
(monitor_connectivity, [("www.google.com", 80)]),
(monitor_system, 10)]
application = create_application(servicename, run_on_init=run_on_init)
return application
if __name__ == "__main__":
application = build_app("tedega_examples")
application.run()
|
# -*- coding: utf-8 -*-
# yapf: disable
"""
Module to make available commonly used click arguments.
"""
from __future__ import absolute_import
import click
from aiida.cmdline.params import types
from aiida.cmdline.params.arguments.overridable import OverridableArgument
CALCULATION = OverridableArgument('calculation', type=types.CalculationParamType())
CALCULATIONS = OverridableArgument('calculations', nargs=-1, type=types.CalculationParamType())
CODE = OverridableArgument('code', type=types.CodeParamType())
CODES = OverridableArgument('codes', nargs=-1, type=types.CodeParamType())
COMPUTER = OverridableArgument('computer', type=types.ComputerParamType())
COMPUTERS = OverridableArgument('computers', nargs=-1, type=types.ComputerParamType())
DATUM = OverridableArgument('datum', type=types.DataParamType())
DATA = OverridableArgument('data', nargs=-1, type=types.DataParamType())
GROUP = OverridableArgument('group', type=types.GroupParamType())
GROUPS = OverridableArgument('groups', nargs=-1, type=types.GroupParamType())
NODE = OverridableArgument('node', type=types.NodeParamType())
NODES = OverridableArgument('nodes', nargs=-1, type=types.NodeParamType())
PROCESS = OverridableArgument('process', type=types.CalculationParamType())
PROCESSES = OverridableArgument('processes', nargs=-1, type=types.CalculationParamType())
INPUT_FILE = OverridableArgument('input_file', metavar='INPUT_FILE', type=click.Path(exists=True))
OUTPUT_FILE = OverridableArgument('output_file', metavar='OUTPUT_FILE', type=click.Path())
LABEL = OverridableArgument('label')
USER = OverridableArgument('user', metavar='USER', type=types.UserParamType())
PROFILE_NAME = OverridableArgument('profile_name', type=click.STRING)
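# Example (a sketch; these OverridableArgument instances are typically applied as
# decorators on click commands, optionally overriding their stored defaults):
#
#     @click.command()
#     @CODE(required=False)
#     @NODES()
#     def my_command(code, nodes):
#         ...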
|
# MD-TPM -> Module Dependency to be cleared at time of module (Text Processing Module) integration.
# Imports
import numpy as np
import re
from collections import defaultdict, OrderedDict
from collections import namedtuple
import spacy
from spacy.tokens import Token
import warnings
import Functions as F
from Functions import global_variables as gv
from Functions.logging import print_count_pairs, timefn
# moved print_count_pairs fn to logging.py
@timefn
def generate_structured_data_from_text(text, threshold_value=0.5,
max_connections_value=5):
"""Returns cytoscape compatible json structure"""
text = "\n" + text + "\n"
    preprocessed_text = F.preprocess(text, gv.PREPROCESSING_PIPELINE)
    F.print_param(preprocessed_text[:50] + "." * 5 + preprocessed_text[-50:])
    F.print_param(f'Threshold : {threshold_value}\nMax Connections : {max_connections_value}')
# processed_text = re.sub('\\n+', '\\n', '\n' + preprocessed_text + '\n')
pairs = F.make_pairs(preprocessed_text)
F.set_index() # only when DOC object is set
weight_matrix = np.array([0.3, 0.5, 0.2]) # cs, wd, fr
print_count_pairs(pairs)
    a = F.assign_values(pairs, weight_matrix=weight_matrix)
g = F.make_graph(a, threshold=threshold_value, max_connections=max_connections_value)
gv.TREE, gv.ROOT = F.make_tree(g)
F.print_param(f'Root Node : {gv.ROOT}')
F.detect_cycle(gv.TREE, gv.ROOT)
standard_dict = F.make_a_node_dict(gv.ROOT)
cytoscape_dict = F.transform_data(standard_dict)
with open('run_detail.txt', 'w') as log_file:
log_file.write(F.print_log("Return Values", gv.RETURN_LOG_FILE))
log_file.write(F.print_log("Function Time", gv.TIME_LOG))
log_file.write(F.print_log("Function Count", repr(gv.FUNCTION_COUNT)))
return cytoscape_dict, preprocessed_text
if __name__ == "__main__":
sample_text = """
In botany, a tree is a perennial plant with an elongated stem, or trunk, supporting branches and leaves in most species. In some usages, the definition of a tree may be narrower, including only woody plants with secondary growth, plants that are usable as lumber or plants above a specified height. Trees are not a taxonomic group but include a variety of plant species that have independently evolved a woody trunk and branches as a way to tower above other plants to compete for sunlight. Trees tend to be long-lived, some reaching several thousand years old. In wider definitions, the taller palms, tree ferns, bananas, and bamboos are also trees. Trees have been in existence for 370 million years. It is estimated that there are just over 3 trillion mature trees in the world.[1]
A tree typically has many secondary branches supported clear of the ground by the trunk. This trunk typically contains woody tissue for strength, and vascular tissue to carry materials from one part of the tree to another. For most trees it is surrounded by a layer of bark which serves as a protective barrier. Below the ground, the roots branch and spread out widely; they serve to anchor the tree and extract moisture and nutrients from the soil. Above ground, the branches divide into smaller branches and shoots. The shoots typically bear leaves, which capture light energy and convert it into sugars by photosynthesis, providing the food for the tree's growth and development.
Trees usually reproduce using seeds. Flowers and fruit may be present, but some trees, such as conifers, instead have pollen cones and seed cones. Palms, bananas, and bamboos also produce seeds, but tree ferns produce spores instead.
"""
    result = generate_structured_data_from_text(sample_text)
|
# Depth-first search
def dfs(node, explored):
    """Recursively visit `node` and everything reachable from it; returns the visit order."""
    if node in explored:
        return explored
    explored.append(node)
    for nei in node.neighbors:
        explored = dfs(nei, explored)
    return explored
# Breadth-first search
def bfs(start, goal):
    """Return the shortest path from `start` to `goal` as a list of nodes (None if unreachable)."""
    explored = []
    queue_paths = [[start]]
    while queue_paths:
        path = queue_paths.pop(0)  # take the oldest partial path (FIFO)
        node = path[-1]
        if node not in explored:
            explored.append(node)
            for nei in node.neighbors:
                new_path = list(path)
                new_path.append(nei)
                if nei == goal:
                    return new_path
                queue_paths.append(new_path)
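# Minimal usage sketch (hypothetical Node class; the searches above only assume
# each node exposes a `neighbors` list):
class Node:
    def __init__(self, name):
        self.name = name
        self.neighbors = []
if __name__ == "__main__":
    a, b, c = Node("a"), Node("b"), Node("c")
    a.neighbors = [b]
    b.neighbors = [a, c]
    c.neighbors = [b]
    print([n.name for n in dfs(a, [])])   # visit order: ['a', 'b', 'c']
    print([n.name for n in bfs(a, c)])    # shortest path: ['a', 'b', 'c']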
|
from django import forms
from .models import *
from rest_framework import serializers
class UploadForm(forms.ModelForm):
class Meta:
model = Project
fields = ('name','view','description','link')
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user_id']
class VoteForm(forms.ModelForm):
class Meta:
model = Vote
        exclude = ['user', 'project']
|
import os,sys
import xml.etree.ElementTree as ET
def findObjects(xmlFile, category):
boxes = []
tree = ET.parse(xmlFile)
for child in tree.getroot().findall('object'):
if child.find('name').text == category: # and child.find('difficult').text != '1':
bn = child.find('bndbox')
            # map() returns an iterator in Python 3, so materialize it before indexing.
            box = list(map(float, [bn.find('xmin').text, bn.find('ymin').text, bn.find('xmax').text, bn.find('ymax').text]))
            area = (box[2] - box[0]) * (box[3] - box[1])
# Skip small objects
#if area >= 400.0:
boxes.append( box )
return boxes
## MAIN PROGRAM
def mainProgram():
if len(sys.argv) < 5:
        print('Use: extractPascalBoxes.py trainvalList category xmlDir bboxOutput')
sys.exit()
category = sys.argv[2]
xmlDir = sys.argv[3]
outputFile = sys.argv[4]
imageList = [x.split() for x in open(sys.argv[1])]
out = open(outputFile,'w')
allBoxes = dict()
for img in imageList:
if img[1] == "1":
allBoxes[img[0]] = []
boxes = findObjects(xmlDir+'/'+img[0]+'.xml', category)
for box in boxes:
out.write(img[0]+' '+' '.join(map(str,map(int,box)))+'\n')
out.close()
if __name__ == "__main__":
mainProgram()
|
from typing import Dict, Any, List
import pytest
from checkov.common.bridgecrew.bc_source import SourceType
from checkov.common.bridgecrew.platform_integration import BcPlatformIntegration, bc_integration
@pytest.fixture()
def mock_bc_integration() -> BcPlatformIntegration:
bc_integration.bc_api_key = "abcd1234-abcd-1234-abcd-1234abcd1234"
bc_integration.setup_bridgecrew_credentials(
repo_id="bridgecrewio/checkov",
skip_fixes=True,
skip_download=True,
source=SourceType("Github", False),
source_version="1.0",
repo_branch="master",
)
return bc_integration
@pytest.fixture()
def scan_result() -> List[Dict[str, Any]]:
return [
{
"repository": "/path/to/requirements.txt",
"passed": True,
"packages": [
{
"type": "python",
"name": "requests",
"version": "2.26.0",
"path": "/path/to/requirements.txt",
},
{
"type": "python",
"name": "django",
"version": "1.2",
"path": "/path/to/requirements.txt",
},
{
"type": "python",
"name": "flask",
"version": "0.6",
"path": "/path/to/requirements.txt",
},
],
"complianceIssues": None,
"complianceDistribution": {"critical": 0, "high": 0, "medium": 0, "low": 0, "total": 0},
"vulnerabilities": [
{
"id": "CVE-2019-19844",
"status": "fixed in 3.0.1, 2.2.9, 1.11.27",
"cvss": 9.8,
"vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover. A suitably crafted email address (that is equal to an existing user\\'s email address after case transformation of Unicode characters) would allow an attacker to be sent a password reset token for the matched user account. (One mitigation in the new releases is to send password reset tokens only to the registered user email address.)",
"severity": "critical",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
"riskFactors": ["Attack complexity: low", "Attack vector: network", "Critical severity", "Has fix"],
"impactedVersions": ["<1.11.27"],
"publishedDate": "2019-12-18T20:15:00+01:00",
"discoveredDate": "2019-12-18T19:15:00Z",
"fixDate": "2019-12-18T20:15:00+01:00",
},
{
"id": "CVE-2016-6186",
"status": "fixed in 1.9.8, 1.8.14",
"cvss": 6.1,
"vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N",
"description": "Cross-site scripting (XSS) vulnerability in the dismissChangeRelatedObjectPopup function in contrib/admin/static/admin/js/admin/RelatedObjectLookups.js in Django before 1.8.14, 1.9.x before 1.9.8, and 1.10.x before 1.10rc1 allows remote attackers to inject arbitrary web script or HTML via vectors involving unsafe usage of Element.innerHTML.",
"severity": "medium",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2016-6186",
"riskFactors": [
"Attack complexity: low",
"Attack vector: network",
"Exploit exists",
"Has fix",
"Medium severity",
],
"impactedVersions": ["<=1.8.13"],
"publishedDate": "2016-08-05T17:59:00+02:00",
"discoveredDate": "2016-08-05T15:59:00Z",
"fixDate": "2016-08-05T17:59:00+02:00",
},
{
"id": "CVE-2016-7401",
"status": "fixed in 1.9.10, 1.8.15",
"cvss": 7.5,
"vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N",
"description": "The cookie parsing code in Django before 1.8.15 and 1.9.x before 1.9.10, when used on a site with Google Analytics, allows remote attackers to bypass an intended CSRF protection mechanism by setting arbitrary cookies.",
"severity": "high",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2016-7401",
"riskFactors": ["High severity", "Attack complexity: low", "Attack vector: network", "Has fix"],
"impactedVersions": ["<=1.8.14"],
"publishedDate": "2016-10-03T20:59:00+02:00",
"discoveredDate": "2016-10-03T18:59:00Z",
"fixDate": "2016-10-03T20:59:00+02:00",
},
{
"id": "CVE-2021-33203",
"status": "fixed in 3.2.4, 3.1.12, 2.2.24",
"cvss": 4.9,
"vector": "CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:U/C:H/I:N/A:N",
"description": "Django before 2.2.24, 3.x before 3.1.12, and 3.2.x before 3.2.4 has a potential directory traversal via django.contrib.admindocs. Staff members could use the TemplateDetailView view to check the existence of arbitrary files. Additionally, if (and only if) the default admindocs templates have been customized by application developers to also show file contents, then not only the existence but also the file contents would have been exposed. In other words, there is directory traversal outside of the template root directories.",
"severity": "medium",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2021-33203",
"riskFactors": [
"Attack complexity: low",
"Attack vector: network",
"Has fix",
"Medium severity",
"Recent vulnerability",
],
"impactedVersions": ["<2.2.24"],
"publishedDate": "2021-06-08T20:15:00+02:00",
"discoveredDate": "2021-06-08T18:15:00Z",
"fixDate": "2021-06-08T20:15:00+02:00",
},
{
"id": "CVE-2019-1010083",
"status": "fixed in 1.0",
"cvss": 7.5,
"vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H",
"description": "The Pallets Project Flask before 1.0 is affected by: unexpected memory usage. The impact is: denial of service. The attack vector is: crafted encoded JSON data. The fixed version is: 1. NOTE: this may overlap CVE-2018-1000656.",
"severity": "high",
"packageName": "flask",
"packageVersion": "0.6",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2019-1010083",
"riskFactors": [
"Attack complexity: low",
"Attack vector: network",
"DoS",
"Has fix",
"High severity",
],
"impactedVersions": ["<1.0"],
"publishedDate": "2019-07-17T16:15:00+02:00",
"discoveredDate": "2019-07-17T14:15:00Z",
"fixDate": "2019-07-17T16:15:00+02:00",
},
{
"id": "CVE-2018-1000656",
"status": "fixed in 0.12.3",
"cvss": 7.5,
"vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H",
"description": "The Pallets Project flask version Before 0.12.3 contains a CWE-20: Improper Input Validation vulnerability in flask that can result in Large amount of memory usage possibly leading to denial of service. This attack appear to be exploitable via Attacker provides JSON data in incorrect encoding. This vulnerability appears to have been fixed in 0.12.3. NOTE: this may overlap CVE-2019-1010083.",
"severity": "high",
"packageName": "flask",
"packageVersion": "0.6",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2018-1000656",
"riskFactors": [
"Attack complexity: low",
"Attack vector: network",
"DoS",
"Has fix",
"High severity",
],
"impactedVersions": ["<0.12.3"],
"publishedDate": "2018-08-20T21:31:00+02:00",
"discoveredDate": "2018-08-20T19:31:00Z",
"fixDate": "2018-08-20T21:31:00+02:00",
},
],
"vulnerabilityDistribution": {"critical": 1, "high": 3, "medium": 2, "low": 0, "total": 6},
},
{
"repository": "/path/to/sub/requirements.txt",
"passed": True,
"packages": [
{
"type": "python",
"name": "requests",
"version": "2.26.0",
"path": "/path/to/sub/requirements.txt",
}
],
"complianceIssues": None,
"complianceDistribution": {"critical": 0, "high": 0, "medium": 0, "low": 0, "total": 0},
"vulnerabilities": None,
"vulnerabilityDistribution": {"critical": 0, "high": 0, "medium": 0, "low": 0, "total": 0},
},
{
"repository": "/path/to/go.sum",
"passed": True,
"packages": [
{
"type": "go",
"name": "github.com/jstemmer/go-junit-report",
"version": "v0.0.0-20190106144839-af01ea7f8024",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/modern-go/reflect2",
"version": "v1.0.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/uber/jaeger-lib",
"version": "v2.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/kisielk/errcheck",
"version": "v1.5.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/gogo/protobuf",
"version": "v1.3.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/ghodss/yaml",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/NYTimes/gziphandler",
"version": "v0.0.0-20170623195520-56545f4a5d46",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/fsnotify.v1",
"version": "v1.4.7",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/client9/misspell",
"version": "v0.3.4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/Azure/go-autorest/autorest/date",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/text",
"version": "v0.3.6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/uber/jaeger-client-go",
"version": "v2.16.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/golang/protobuf",
"version": "v1.4.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/go-multierror",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/api",
"version": "v0.18.6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/PuerkitoBio/purell",
"version": "v1.1.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/yuin/gopher-lua",
"version": "v0.0.0-20200603152657-dc2b0ca8b37e",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/PuerkitoBio/urlesc",
"version": "v0.0.0-20170810143723-de5bf2ad4578",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/julienschmidt/httprouter",
"version": "v1.2.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/coreos/go-oidc",
"version": "v2.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/spf13/afero",
"version": "v1.2.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/smartystreets/assertions",
"version": "v1.1.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/Azure/go-autorest/tracing",
"version": "v0.5.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/tools",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/mod",
"version": "v0.3.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/alecthomas/kingpin.v2",
"version": "v2.2.6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/fsnotify/fsnotify",
"version": "v1.4.9",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/kr/pretty",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-openapi/jsonpointer",
"version": "v0.19.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/go-uuid",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/opentracing/opentracing-go",
"version": "v1.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/memberlist",
"version": "v0.1.4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/docker/spdystream",
"version": "v0.0.0-20160310174837-449fdfce4d96",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-kit/kit",
"version": "v0.8.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/miekg/dns",
"version": "v1.1.41",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/crypto",
"version": "v0.0.0-20200622213623-75b288015ac9",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/onsi/gomega",
"version": "v1.10.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/uber-go/atomic",
"version": "v1.4.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/mobile",
"version": "v0.0.0-20190312151609-d3739f865fa6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "google.golang.org/protobuf",
"version": "v1.23.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-ole/go-ole",
"version": "v1.2.4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/golang/groupcache",
"version": "v0.0.0-20190702054246-869f871628b6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/oauth2",
"version": "v0.0.0-20190604053449-0f29369cfe45",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "google.golang.org/api",
"version": "v0.4.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/yookoala/gofast",
"version": "v0.6.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/kr/logfmt",
"version": "v0.0.0-20140226030751-b84e30acd515",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-openapi/spec",
"version": "v0.19.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/ini.v1",
"version": "v1.38.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/chzyer/test",
"version": "v0.0.0-20180213035817-a1ea475d72b1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/lint",
"version": "v0.0.0-20190313153728-d0100b6bd8b3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/prometheus/common",
"version": "v0.4.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/pquerna/cachecontrol",
"version": "v0.0.0-20180517163645-1555304b9b35",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "modernc.org/cc",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/code-generator",
"version": "v0.17.6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "google.golang.org/grpc",
"version": "v1.22.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "honnef.co/go/tools",
"version": "v0.0.0-20190523083050-ea95bdfd59fc",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "sigs.k8s.io/yaml",
"version": "v1.2.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-logr/logr",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/shirou/gopsutil",
"version": "v3.21.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/peterbourgon/diskv",
"version": "v2.0.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/yuin/goldmark",
"version": "v1.2.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/image",
"version": "v0.0.0-20190227222117-0694c2d4d067",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/sarslanhan/cronmask",
"version": "v0.0.0-20190709075623-766eca24d011",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/armon/go-metrics",
"version": "v0.0.0-20180917152333-f0300d1749da",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/tomb.v1",
"version": "v1.0.0-20141024135613-dd632973f1e7",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/cenkalti/backoff",
"version": "v2.2.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/googleapis/gnostic",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/StackExchange/wmi",
"version": "v0.0.0-20190523213315-cbe66965904d",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/sony/gobreaker",
"version": "v0.4.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/term",
"version": "v0.0.0-20201126162022-7de9c90e9dd1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "go.uber.org/atomic",
"version": "v1.4.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/spf13/pflag",
"version": "v1.0.5",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "sigs.k8s.io/structured-merge-diff/v3",
"version": "v3.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/mailru/easyjson",
"version": "v0.7.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/exp",
"version": "v0.0.0-20190312203227-4b39c73a6495",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/pascaldekloe/goe",
"version": "v0.0.0-20180627143212-57f6aae5913c",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "layeh.com/gopher-json",
"version": "v0.0.0-20190114024228-97fed8db8427",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/golang/glog",
"version": "v0.0.0-20160126235308-23def4e6c14b",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "go.opentelemetry.io/otel",
"version": "v0.13.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/opentracing/basictracer-go",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/klog",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/sys",
"version": "v0.0.0-20210415045647-66c3f260301c",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "modernc.org/mathutil",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/felixge/httpsnoop",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "sigs.k8s.io/structured-merge-diff/v2",
"version": "v2.0.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/emicklei/go-restful",
"version": "v2.9.5",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/instana/go-sensor",
"version": "v1.4.16",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/remyoudompheng/bigfft",
"version": "v0.0.0-20170806203942-52369c62f446",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/json-iterator/go",
"version": "v1.1.8",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/lightstep/lightstep-tracer-common/golang/gogo",
"version": "v0.0.0-20210210170715-a8dfcb80d3a7",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/square/go-jose.v2",
"version": "v2.3.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/kisielk/gotool",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/google/uuid",
"version": "v1.1.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/stretchr/testify",
"version": "v1.6.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "cloud.google.com/go",
"version": "v0.38.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "modernc.org/xc",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/tidwall/match",
"version": "v1.0.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/net",
"version": "v0.0.0-20210415231046-e915ea6b2b7d",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/google/go-cmp",
"version": "v0.5.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/chzyer/logex",
"version": "v1.1.10",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/chzyer/readline",
"version": "v0.0.0-20180603132655-2972be24d48e",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/errwrap",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/codahale/hdrhistogram",
"version": "v0.0.0-20161010025455-3a0bb77429bd",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "google.golang.org/appengine",
"version": "v1.5.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/tklauser/numcpus",
"version": "v0.2.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/aryszka/jobqueue",
"version": "v0.0.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/golang/mock",
"version": "v1.2.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/tklauser/go-sysconf",
"version": "v0.3.5",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/mxk/go-flowrate",
"version": "v0.0.0-20140419014527-cca7078d478f",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/looplab/fsm",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/tidwall/gjson",
"version": "v1.7.4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/dgrijalva/jwt-go",
"version": "v3.2.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/prometheus/client_model",
"version": "v0.0.0-20190129233127-fd36f4220a90",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/sanity-io/litter",
"version": "v1.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/kr/pty",
"version": "v1.1.5",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/gengo",
"version": "v0.0.0-20190822140433-26a664648505",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/oklog/ulid",
"version": "v1.3.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/munnerz/goautoneg",
"version": "v0.0.0-20120707110453-a547fc61f48d",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/go-sockaddr",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/go-immutable-radix",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "google.golang.org/genproto",
"version": "v0.0.0-20190530194941-fb225487d101",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-restit/lzjson",
"version": "v0.0.0-20161206095556-efe3c53acc68",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hpcloud/tail",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/BurntSushi/xgb",
"version": "v0.0.0-20160522181843-27f122750802",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/Azure/go-autorest/autorest/mocks",
"version": "v0.2.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/client-go",
"version": "v0.17.6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/beorn7/perks",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/nxadm/tail",
"version": "v1.4.4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gonum.org/v1/gonum",
"version": "v0.0.0-20190331200053-3d26580ed485",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/BurntSushi/toml",
"version": "v0.3.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/stretchr/objx",
"version": "v0.2.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/cespare/xxhash/v2",
"version": "v2.1.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "modernc.org/golex",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/kube-openapi",
"version": "v0.0.0-20200410145947-bcb3869e6f29",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/mwitkow/go-conntrack",
"version": "v0.0.0-20161129095857-cc309e4a2223",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gonum.org/v1/netlib",
"version": "v0.0.0-20190331212654-76723241ea4e",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/google/btree",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/gophercloud/gophercloud",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/imdario/mergo",
"version": "v0.3.5",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/sync",
"version": "v0.0.0-20210220032951-036812b2e83c",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/konsorten/go-windows-terminal-sequences",
"version": "v1.0.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/google/pprof",
"version": "v0.0.0-20181206194817-3ea8567a2e57",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/xerrors",
"version": "v0.0.0-20200804184101-5ec99f83aff1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/evanphx/json-patch",
"version": "v4.2.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-stack/stack",
"version": "v1.8.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/matttproud/golang_protobuf_extensions",
"version": "v1.0.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/yaml.v2",
"version": "v2.4.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/yaml.v3",
"version": "v3.0.0-20200313102051-9f266ea9e77c",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/inf.v0",
"version": "v0.9.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "gopkg.in/check.v1",
"version": "v1.0.0-20180628173108-788fd7840127",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/prometheus/procfs",
"version": "v0.0.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/szuecs/rate-limit-buffer",
"version": "v0.7.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-redis/redis/v8",
"version": "v8.3.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/googleapis/gax-go/v2",
"version": "v2.0.4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-openapi/swag",
"version": "v0.19.5",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/google/gofuzz",
"version": "v1.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/onsi/ginkgo",
"version": "v1.14.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/jtolds/gls",
"version": "v4.20.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-openapi/jsonreference",
"version": "v0.19.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/smartystreets/goconvey",
"version": "v1.6.4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/alecthomas/units",
"version": "v0.0.0-20151022065526-2efee857e7cf",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/apimachinery",
"version": "v0.18.6",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/modern-go/concurrent",
"version": "v0.0.0-20180306012644-bacd9c7ef1dd",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/elazarl/goproxy",
"version": "v0.0.0-20180725130230-947c36da3153",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/gopherjs/gopherjs",
"version": "v0.0.0-20200217142428-fce0ec30dd00",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/pmezard/go-difflib",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/go-msgpack",
"version": "v0.5.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/prometheus/client_golang",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/alecthomas/template",
"version": "v0.0.0-20160405071501-a0175ee3bccc",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/cjoudrey/gluaurl",
"version": "v0.0.0-20161028222611-31cbb9bef199",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/tidwall/pretty",
"version": "v1.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/dgryski/go-rendezvous",
"version": "v0.0.0-20200823014737-9f7001d12a5f",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/sean-/seed",
"version": "v0.0.0-20170313163322-e2103e2c3529",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/lightstep/lightstep-tracer-go",
"version": "v0.24.1-0.20210318180546-a67254760a58",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "go.opencensus.io",
"version": "v0.22.3",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/Azure/go-autorest/logger",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/kr/text",
"version": "v0.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/google/martian",
"version": "v2.1.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/gregjones/httpcache",
"version": "v0.0.0-20180305231024-9cad4c3443a7",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "k8s.io/utils",
"version": "v0.0.0-20191114184206-e782cd3c129f",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/sirupsen/logrus",
"version": "v1.4.2",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/rcrowley/go-metrics",
"version": "v0.0.0-20181016184325-3113b8401b8a",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/go-logfmt/logfmt",
"version": "v0.3.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/davecgh/go-spew",
"version": "v1.1.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/Azure/go-autorest/autorest",
"version": "v0.9.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "golang.org/x/time",
"version": "v0.0.0-20190308202827-9d24e82272b4",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/hashicorp/golang-lru",
"version": "v0.5.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "modernc.org/strutil",
"version": "v1.0.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/cjoudrey/gluahttp",
"version": "v0.0.0-20190104103309-101c19a37344",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/Azure/go-autorest/autorest/adal",
"version": "v0.5.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/pkg/errors",
"version": "v0.8.1",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/szuecs/routegroup-client",
"version": "v0.17.7",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/abbot/go-http-auth",
"version": "v0.4.0",
"path": "/path/to/go.sum",
},
{
"type": "go",
"name": "github.com/dimfeld/httppath",
"version": "v0.0.0-20170720192232-ee938bf73598",
"path": "/path/to/go.sum",
},
],
"complianceIssues": None,
"complianceDistribution": {"critical": 0, "high": 0, "medium": 0, "low": 0, "total": 0},
"vulnerabilities": [
{
"id": "CVE-2020-29652",
"status": "fixed in v0.0.0-20201216223049-8b5274cf687f",
"cvss": 7.5,
"vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H",
"description": "A nil pointer dereference in the golang.org/x/crypto/ssh component through v0.0.0-20201203163018-be400aefbc4c for Go allows remote attackers to cause a denial of service against SSH servers.",
"severity": "high",
"packageName": "golang.org/x/crypto",
"packageVersion": "v0.0.0-20200622213623-75b288015ac9",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-29652",
"riskFactors": [
"Has fix",
"High severity",
"Attack complexity: low",
"Attack vector: network",
"DoS",
],
"impactedVersions": ["<v0.0.0-20201216223049-8b5274cf687f"],
"publishedDate": "2020-12-17T06:15:00+01:00",
"discoveredDate": "2020-12-17T05:15:00Z",
"fixDate": "2020-12-17T06:15:00+01:00",
},
{
"id": "CVE-2020-26160",
"status": "fixed in v4.0.0-preview1",
"cvss": 7.7,
"vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N",
"description": 'jwt-go before 4.0.0-preview1 allows attackers to bypass intended access restrictions in situations with []string{} for m[\\"aud\\"] (which is allowed by the specification). Because the type assertion fails, \\"\\" is the value of aud. This is a security problem if the JWT token is presented to a service that lacks its own audience check.',
"severity": "high",
"packageName": "github.com/dgrijalva/jwt-go",
"packageVersion": "v3.2.0",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-26160",
"riskFactors": ["High severity", "Attack complexity: low", "Attack vector: network", "Has fix"],
"impactedVersions": ["<v4.0.0-preview1"],
"publishedDate": "2020-09-30T20:15:00+02:00",
"discoveredDate": "2020-09-30T18:15:00Z",
"fixDate": "2020-09-30T20:15:00+02:00",
},
],
"vulnerabilityDistribution": {"critical": 0, "high": 2, "medium": 0, "low": 0, "total": 2},
},
]
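# A minimal sketch of how a scan-result structure like the fixture above might
# be consumed, added for illustration only. The variable name `scan_results`,
# the function name, and the severity threshold are assumptions and not part
# of the original fixture; the keys ("vulnerabilities", "severity",
# "packageName", "id", "cvss") are taken from the data above.
def summarize_vulnerabilities(scan_results, min_severity="high"):
    """Return (package, CVE id, CVSS score) tuples at or above a severity."""
    severity_rank = {"low": 0, "medium": 1, "high": 2, "critical": 3}
    threshold = severity_rank[min_severity]
    findings = []
    for result in scan_results:
        # Each result may carry a list of findings keyed by "vulnerabilities".
        for vuln in result.get("vulnerabilities") or []:
            if severity_rank.get(vuln["severity"], 0) >= threshold:
                findings.append((vuln["packageName"], vuln["id"], vuln["cvss"]))
    return findings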
|
print('hellopyhon')
print('hhl')
print('1223')
|