Dataset schema (29 columns; ⌀ marks columns that may be null):
- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 distinct values
- lang: string, 1 distinct value
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
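A minimal sketch of how rows with this schema might be loaded and filtered, assuming the table has been exported to a local Parquet file; the file name is a placeholder, and only columns listed in the schema above are used:

```python
import pandas as pd

# Placeholder path: any Parquet export of the table above would work here.
df = pd.read_parquet("code_dataset_sample.parquet")

# Keep "code-like" Python rows: mostly alphanumeric content, no extreme line lengths.
subset = df[
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.5)
    & (df["max_line_length"] < 200)
]
print(subset[["max_stars_repo_name", "size", "avg_line_length"]].head())
```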
hexsha: b674bd637a73cb01f4ac5b970788f18a3d517d5a | size: 1,937 | ext: py | lang: Python
max_stars: internal/notes/builtin-SAVE/packages/cups/package.py @ HPCToolkit/hpctest (head 5ff4455582bf39e75530a31badcf6142081b386b), licenses ["BSD-3-Clause"], count 1, events 2019-01-17T20:07:19.000Z to 2019-01-17T20:07:19.000Z
max_issues: same path/repo/head/licenses as max_stars, count null, events null to null
max_forks: same path/repo/head/licenses as max_stars, count 2, events 2019-08-06T18:13:57.000Z to 2021-11-05T18:19:49.000Z
content:
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Cups(AutotoolsPackage):
"""CUPS is the standards-based, open source printing system developed by
Apple Inc. for macOS and other UNIX-like operating systems. CUPS uses the
Internet Printing Protocol (IPP) to support printing to local and network
printers. This provides the core CUPS libraries, not a complete CUPS
install."""
homepage = "https://www.cups.org/"
url = "https://github.com/apple/cups/releases/download/v2.2.3/cups-2.2.3-source.tar.gz"
version('2.2.3', '006a8156680a516e43c59034e31df8bf')
depends_on('gnutls')
def configure_args(self):
args = ['--enable-gnutls', '--with-components=core']
return args
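For context on what the configure_args above amount to: Spack's Autotools build phases end up running something roughly like the sketch below. This is an editor's illustration, not Spack's actual internals, and the prefix path is a placeholder.

```python
import subprocess

configure_args = ['--enable-gnutls', '--with-components=core']
# Roughly what the Autotools build boils down to for this package:
subprocess.check_call(['./configure', '--prefix=/placeholder/prefix'] + configure_args)
subprocess.check_call(['make'])
subprocess.check_call(['make', 'install'])
```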
avg_line_length: 43.044444 | max_line_length: 91 | alphanum_fraction: 0.681982

hexsha: 5b5be4a587d8b148bfbe0932a95778ad1731336c | size: 10,037 | ext: py | lang: Python
max_stars: tests/components/sensor/test_dyson.py @ robin13/home-assistant (head 4976569e304c23975d34ec88e2dfb94e84ab1f1c), licenses ["Apache-2.0"], count 2, events 2020-08-29T07:24:56.000Z to 2020-10-27T21:47:35.000Z
max_issues: same path/repo/head/licenses as max_stars, count 6, events 2021-02-08T20:25:50.000Z to 2022-03-11T23:27:53.000Z
max_forks: same path/repo/head/licenses as max_stars, count 3, events 2018-09-14T07:34:09.000Z to 2018-09-29T12:57:10.000Z
content:
"""Test the Dyson sensor(s) component."""
import unittest
from unittest import mock
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT, \
STATE_OFF
from homeassistant.components.sensor import dyson
from tests.common import get_test_home_assistant
from libpurecoollink.dyson_pure_cool_link import DysonPureCoolLink
def _get_device_without_state():
"""Return a valid device provide by Dyson web services."""
device = mock.Mock(spec=DysonPureCoolLink)
device.name = "Device_name"
device.state = None
device.environmental_state = None
return device
def _get_with_state():
"""Return a valid device with state values."""
device = mock.Mock()
device.name = "Device_name"
device.state = mock.Mock()
device.state.filter_life = 100
device.environmental_state = mock.Mock()
device.environmental_state.dust = 5
device.environmental_state.humidity = 45
device.environmental_state.temperature = 295
device.environmental_state.volatil_organic_compounds = 2
return device
def _get_with_standby_monitoring():
"""Return a valid device with state but with standby monitoring disable."""
device = mock.Mock()
device.name = "Device_name"
device.state = mock.Mock()
device.state.filter_life = 100
device.environmental_state = mock.Mock()
device.environmental_state.dust = 5
device.environmental_state.humidity = 0
device.environmental_state.temperature = 0
device.environmental_state.volatil_organic_compounds = 2
return device
class DysonTest(unittest.TestCase):
"""Dyson Sensor component test class."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component_with_no_devices(self):
"""Test setup component with no devices."""
self.hass.data[dyson.DYSON_DEVICES] = []
add_entities = mock.MagicMock()
dyson.setup_platform(self.hass, None, add_entities)
add_entities.assert_called_with([])
def test_setup_component(self):
"""Test setup component with devices."""
def _add_device(devices):
assert len(devices) == 5
assert devices[0].name == "Device_name filter life"
assert devices[1].name == "Device_name dust"
assert devices[2].name == "Device_name humidity"
assert devices[3].name == "Device_name temperature"
assert devices[4].name == "Device_name air quality"
device_fan = _get_device_without_state()
device_non_fan = _get_with_state()
self.hass.data[dyson.DYSON_DEVICES] = [device_fan, device_non_fan]
dyson.setup_platform(self.hass, None, _add_device)
def test_dyson_filter_life_sensor(self):
"""Test filter life sensor with no value."""
sensor = dyson.DysonFilterLifeSensor(self.hass,
_get_device_without_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertIsNone(sensor.state)
self.assertEqual(sensor.unit_of_measurement, "hours")
self.assertEqual(sensor.name, "Device_name filter life")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
sensor.on_message('message')
def test_dyson_filter_life_sensor_with_values(self):
"""Test filter sensor with values."""
sensor = dyson.DysonFilterLifeSensor(self.hass, _get_with_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, 100)
self.assertEqual(sensor.unit_of_measurement, "hours")
self.assertEqual(sensor.name, "Device_name filter life")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
sensor.on_message('message')
def test_dyson_dust_sensor(self):
"""Test dust sensor with no value."""
sensor = dyson.DysonDustSensor(self.hass,
_get_device_without_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertIsNone(sensor.state)
self.assertEqual(sensor.unit_of_measurement, 'level')
self.assertEqual(sensor.name, "Device_name dust")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_dust_sensor_with_values(self):
"""Test dust sensor with values."""
sensor = dyson.DysonDustSensor(self.hass, _get_with_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, 5)
self.assertEqual(sensor.unit_of_measurement, 'level')
self.assertEqual(sensor.name, "Device_name dust")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_humidity_sensor(self):
"""Test humidity sensor with no value."""
sensor = dyson.DysonHumiditySensor(self.hass,
_get_device_without_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertIsNone(sensor.state)
self.assertEqual(sensor.unit_of_measurement, '%')
self.assertEqual(sensor.name, "Device_name humidity")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_humidity_sensor_with_values(self):
"""Test humidity sensor with values."""
sensor = dyson.DysonHumiditySensor(self.hass, _get_with_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, 45)
self.assertEqual(sensor.unit_of_measurement, '%')
self.assertEqual(sensor.name, "Device_name humidity")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_humidity_standby_monitoring(self):
"""Test humidity sensor while device is in standby monitoring."""
sensor = dyson.DysonHumiditySensor(self.hass,
_get_with_standby_monitoring())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, STATE_OFF)
self.assertEqual(sensor.unit_of_measurement, '%')
self.assertEqual(sensor.name, "Device_name humidity")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_temperature_sensor(self):
"""Test temperature sensor with no value."""
sensor = dyson.DysonTemperatureSensor(self.hass,
_get_device_without_state(),
TEMP_CELSIUS)
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertIsNone(sensor.state)
self.assertEqual(sensor.unit_of_measurement, '°C')
self.assertEqual(sensor.name, "Device_name temperature")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_temperature_sensor_with_values(self):
"""Test temperature sensor with values."""
sensor = dyson.DysonTemperatureSensor(self.hass,
_get_with_state(),
TEMP_CELSIUS)
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, 21.9)
self.assertEqual(sensor.unit_of_measurement, '°C')
self.assertEqual(sensor.name, "Device_name temperature")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
sensor = dyson.DysonTemperatureSensor(self.hass,
_get_with_state(),
TEMP_FAHRENHEIT)
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, 71.3)
self.assertEqual(sensor.unit_of_measurement, '°F')
self.assertEqual(sensor.name, "Device_name temperature")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_temperature_standby_monitoring(self):
"""Test temperature sensor while device is in standby monitoring."""
sensor = dyson.DysonTemperatureSensor(self.hass,
_get_with_standby_monitoring(),
TEMP_CELSIUS)
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, STATE_OFF)
self.assertEqual(sensor.unit_of_measurement, '°C')
self.assertEqual(sensor.name, "Device_name temperature")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_air_quality_sensor(self):
"""Test air quality sensor with no value."""
sensor = dyson.DysonAirQualitySensor(self.hass,
_get_device_without_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertIsNone(sensor.state)
self.assertEqual(sensor.unit_of_measurement, 'level')
self.assertEqual(sensor.name, "Device_name air quality")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
def test_dyson_air_quality_sensor_with_values(self):
"""Test air quality sensor with values."""
sensor = dyson.DysonAirQualitySensor(self.hass, _get_with_state())
sensor.entity_id = "sensor.dyson_1"
self.assertFalse(sensor.should_poll)
self.assertEqual(sensor.state, 2)
self.assertEqual(sensor.unit_of_measurement, 'level')
self.assertEqual(sensor.name, "Device_name air quality")
self.assertEqual(sensor.entity_id, "sensor.dyson_1")
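The Celsius and Fahrenheit assertions above follow from the 295 K reading set in _get_with_state(); as an editor's side note (not part of the original test file), the arithmetic works out as follows, with the sensor presumably rounding to one decimal place:

```python
kelvin = 295                            # environmental_state.temperature in _get_with_state()
celsius = kelvin - 273.15               # about 21.85, reported as 21.9 °C
fahrenheit = kelvin * 9 / 5 - 459.67    # about 71.33, reported as 71.3 °F
print(round(celsius, 2), round(fahrenheit, 2))
```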
avg_line_length: 44.411504 | max_line_length: 79 | alphanum_fraction: 0.662549

hexsha: f5f66f45028ef509d9fdb192292fca7655c555ea | size: 1,074 | ext: py | lang: Python
max_stars: src/main/tools/dbpy/FotechUtils/dbHelper.py @ inqwell/inq (head 31ce4cd6b9b123b1ec4462905ccbcf7c00d6efc3), licenses ["BSD-3-Clause"], count 1, events 2016-09-25T16:41:57.000Z to 2016-09-25T16:41:57.000Z
max_issues: same path/repo/head/licenses as max_stars, count null, events null to null
max_forks: same path/repo/head/licenses as max_stars, count 2, events 2016-09-25T16:48:49.000Z to 2020-05-26T20:00:33.000Z
content:
#
# $Header: /home/inqwell/cvsroot/dev/scripts/python/FotechUtils/dbHelper.py,v 1.1 2009/05/22 22:16:31 sanderst Exp $
#
import os
import KBC.fotech
from Util import db
from FotechUtils import LogHelper
from FotechUtils import dbUtils
from FotechUtils.FotechRoot import FOTechRoot
from FotechUtils.pwHelper import get_password_file
log = LogHelper.getLogger( __name__ )
"""
The main 'database.xml' file used by Front Office
"""
def get_database_xml():
return FOTechRoot.make_relative_path( "etc", "database.xml" )
"""
The system names defined in the database.xml files for Front Office
"""
FDESystem = 'fdenterprise'
def get_fde_connection( level, access = 'read' ):
return get_any_connection( get_database_xml(), get_password_file(), level, access, FDESystem )
def get_any_connection( db_file, pw_file, level, access, system ):
"""
Util method to get a connection
"""
log.info("FOTech DBHelper.getConnection: %s, %s" % (db_file, pw_file))
return dbUtils.getConnection( db_file, system, level, access, pwdfile = pw_file )
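A short usage sketch for the helpers above (editor's addition). It assumes the FotechUtils package is importable and that 'uat' is a valid level name; the real level names are defined in database.xml, not here.

```python
from FotechUtils import dbHelper

# 'uat' is a placeholder level; access defaults to 'read' as in get_fde_connection().
conn = dbHelper.get_fde_connection('uat', access='read')
```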
avg_line_length: 27.538462 | max_line_length: 116 | alphanum_fraction: 0.735568

hexsha: e9866122e9d000e996b74af5f485f1b23689321e | size: 769 | ext: py | lang: Python
max_stars: tools/win32build/doall.py @ ivanov/numpy (head 6d2665626e40f346bb5af8d780579f5a429ff9ba), licenses ["BSD-3-Clause"], count null, events null to null
max_issues: same path/repo/head/licenses as max_stars, count null, events null to null
max_forks: same path/repo/head/licenses as max_stars, count null, events null to null
content:
from __future__ import division
import subprocess
import os
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--pyver", dest="pyver",
help = "Python version (2.4, 2.5, etc...)")
opts, args = parser.parse_args()
pyver = opts.pyver
if not pyver:
pyver = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py', '-p', pyver])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', pyver],
cwd = 'bootstrap-%s' % pyver)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'],
cwd = 'bootstrap-%s' % pyver)
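The script is driven entirely by its -p/--pyver option; a small sketch of invoking it programmatically (editor's addition, assuming it is run from the tools/win32build directory with the expected bootstrap layout in place):

```python
import subprocess
import sys

# Equivalent to running: python doall.py -p 2.5
subprocess.check_call([sys.executable, 'doall.py', '--pyver', '2.5'],
                      cwd='tools/win32build')
```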
avg_line_length: 27.464286 | max_line_length: 74 | alphanum_fraction: 0.583875

hexsha: 71f6803ba986621dd3026d83437940305673349b | size: 6,473 | ext: py | lang: Python
max_stars: devel/.private/hector_uav_msgs/lib/python2.7/dist-packages/hector_uav_msgs/msg/_VelocityXYCommand.py @ arijitnoobstar/UAVProjectileCatcher (head 3c1bed80df167192cb4b971b58c891187628142e), licenses ["Apache-2.0"], count 10, events 2021-03-15T03:58:06.000Z to 2021-12-30T15:33:38.000Z
max_issues: Chapter_7_code/devel/.private/hector_uav_msgs/lib/python2.7/dist-packages/hector_uav_msgs/msg/_VelocityXYCommand.py @ crepuscularlight/ROSbyExample (head fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c), licenses ["MIT"], count 1, events 2021-09-09T15:29:31.000Z to 2021-09-09T15:29:31.000Z
max_forks: same path/repo/head/licenses as max_issues, count 4, events 2021-03-06T09:35:58.000Z to 2021-05-24T14:34:11.000Z
content:
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from hector_uav_msgs/VelocityXYCommand.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class VelocityXYCommand(genpy.Message):
_md5sum = "7b4d52af2aa98221d9bb260976d6a201"
_type = "hector_uav_msgs/VelocityXYCommand"
_has_header = True # flag to mark the presence of a Header object
_full_text = """Header header
float32 x
float32 y
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
"""
__slots__ = ['header','x','y']
_slot_types = ['std_msgs/Header','float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,x,y
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(VelocityXYCommand, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
else:
self.header = std_msgs.msg.Header()
self.x = 0.
self.y = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2f().pack(_x.x, _x.y))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2f().pack(_x.x, _x.y))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2f = None
def _get_struct_2f():
global _struct_2f
if _struct_2f is None:
_struct_2f = struct.Struct("<2f")
return _struct_2f
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
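A brief usage sketch for the generated message class above (editor's addition). It assumes a ROS/genpy environment where the generated hector_uav_msgs package is importable:

```python
from io import BytesIO

from hector_uav_msgs.msg import VelocityXYCommand

# Keyword arguments, as the constructor docstring recommends.
msg = VelocityXYCommand(x=1.0, y=-0.5)
msg.header.frame_id = 'base_link'

buff = BytesIO()
msg.serialize(buff)                      # pack header, x and y

decoded = VelocityXYCommand()
decoded.deserialize(buff.getvalue())     # round-trip back into a message
print(decoded.x, decoded.y)
```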
avg_line_length: 33.890052 | max_line_length: 145 | alphanum_fraction: 0.641588

hexsha: 46e28ab8405eab40b70bec6310aa483a1d96e3ba | size: 9,065 | ext: py | lang: Python
max_stars: utils.py @ ZRunner/ZBot-test (head 6b1b906807405ab73c721aa92cd7230a8636e61c), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head/licenses as max_stars, count null, events null to null
max_forks: same path/repo/head/licenses as max_stars, count null, events null to null
content:
import argparse
import glob
import logging
import os
import sys
from logging.handlers import RotatingFileHandler
from typing import TYPE_CHECKING
import discord
import mysql
from discord.ext import commands
from fcts import cryptage, tokens # pylint: disable=no-name-in-module
if TYPE_CHECKING:
from libs.classes import Zbot
OUTAGE_REASON = {
'fr': "Un des datacenters de notre hébergeur OVH a pris feu, rendant ,inaccessible le serveur et toutes ses données. Une vieille sauvegarde de la base de donnée sera peut-être utilisée ultérieurement. Plus d'informations sur https://zbot.statuspage.io/",
'en': "One of the datacenters of our host OVH caught fire, making the server and all its data inaccessible. An old backup of the database may be used later. More information on https://zbot.statuspage.io/"
}
async def get_prefix(bot:"Zbot", msg: discord.Message) -> list:
"""Get the correct bot prefix from a message
Prefix can change based on guild, but the bot mention will always be an option"""
prefixes = [await bot.prefix_manager.get_prefix(msg.guild)]
if msg.guild is None:
prefixes.append("")
return commands.when_mentioned_or(*prefixes)(bot, msg)
def flatten_list(first_list: list) -> list:
return [item for sublist in first_list for item in sublist]
def setup_bot_logger():
"""Create the logger module for the bot, used for logs"""
# grab the main logger
log = logging.getLogger("runner")
# define a formatter
log_format = logging.Formatter("%(asctime)s %(levelname)s: %(message)s", datefmt="[%d/%m/%Y %H:%M]")
# format example: [08/11/2018 14:46] WARNING: Rss fetch_rss_flux l.288 : Cannot get the RSS flux because of the following error: (followed by the traceback)
# log to a file
file_handler = RotatingFileHandler("logs/debug.log", maxBytes=1e6, backupCount=2, delay=True)
# all logs at DEBUG level and above are sent to the file
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_format)
# log to the console
stream_handler = logging.StreamHandler(sys.stdout)
# all logs at INFO level and above are sent to the console
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(log_format)
# suppose you want to collect errors in an error-tracking service such as Sentry
#sentry_handler = x
#sentry_handler.setLevel(logging.ERROR) # we only want errors and above, nothing below
#sentry_handler.setFormatter(format)
log.addHandler(file_handler)
log.addHandler(stream_handler)
#log.addHandler(sentry_handler)
log.setLevel(logging.DEBUG)
return log
def setup_database_logger():
"Create the logger module for database access"
log = logging.getLogger("database")
log_format = logging.Formatter("%(asctime)s %(levelname)s: [SQL] %(message)s", datefmt="[%d/%m/%Y %H:%M]")
file_handler = RotatingFileHandler("logs/sql-debug.log", maxBytes=2e6, backupCount=2, delay=True)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_format)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(log_format)
log.addHandler(file_handler)
log.addHandler(stream_handler)
log.setLevel(logging.DEBUG)
return log
def setup_start_parser():
"Create a parser for the command-line interface"
parser = argparse.ArgumentParser()
parser.add_argument('--token', '-t', help="The bot token to use", required=True)
parser.add_argument('--no-main-loop', help="Deactivate the bot main loop",
action="store_false", dest="event_loop")
parser.add_argument('--no-rss', help="Disable any RSS feature (loop and commands)",
action="store_false", dest="rss_features")
return parser
def parse_crypted_file(bot: "Zbot"):
"Parse the secret file containing all types of tokens and private things"
with open('fcts/requirements', 'r') as file:
lines = file.read().split('\n')
# remove comments, empty lines and all
for line in lines:
if line.startswith("//") or line == '':
lines.remove(line)
while '' in lines:
lines.remove('')
# database
for i, line in enumerate(['user', 'password', 'host', 'database1', 'database2']):
bot.database_keys[line] = cryptage.uncrypte(lines[i])
# misc APIs
bot.others['botsondiscord'] = cryptage.uncrypte(lines[6])
bot.others['discordbotsgroup'] = cryptage.uncrypte(lines[7])
bot.others['bitly'] = cryptage.uncrypte(lines[8])
bot.others['twitter'] = {'consumer_key': cryptage.uncrypte(lines[9]),
'consumer_secret': cryptage.uncrypte(lines[10]),
'access_token_key': cryptage.uncrypte(lines[11]),
'access_token_secret': cryptage.uncrypte(lines[12])}
bot.others['discordlist.space'] = cryptage.uncrypte(lines[13])
bot.others['discordboats'] = cryptage.uncrypte(lines[14])
bot.others['discordextremelist'] = cryptage.uncrypte(lines[15])
bot.others['statuspage'] = cryptage.uncrypte(lines[16])
bot.others['nasa'] = cryptage.uncrypte(lines[17])
bot.others['random_api_token'] = cryptage.uncrypte(lines[18])
bot.others['google_api'] = cryptage.uncrypte(lines[19])
bot.dbl_token = tokens.get_dbl_token()
def load_sql_connection(bot: "Zbot"):
"Load the connection to the database, preferably in local mode"
try:
try:
cnx = mysql.connector.connect(user=bot.database_keys['user'],
password=bot.database_keys['password'],
host="127.0.0.1",
database=bot.database_keys['database1'])
except (mysql.connector.InterfaceError, mysql.connector.ProgrammingError):
bot.log.warning("Unable to access local dabatase - attempt via IP")
cnx = mysql.connector.connect(user=bot.database_keys['user'],
password=bot.database_keys['password'],
host=bot.database_keys['host'],
database=bot.database_keys['database1'])
else:
bot.log.info("Database connected locally")
bot.database_keys['host'] = '127.0.0.1'
cnx.close()
except Exception as err:
bot.log.error("---- UNABLE TO REACH THE DATABASE ----")
bot.log.error(err)
bot.database_online = False
async def load_cogs(bot: "Zbot"):
"Load the bot modules"
initial_extensions = ['fcts.languages',
'fcts.admin',
'fcts.aide',
'fcts.antiscam',
'fcts.bot_events',
'fcts.bot_stats',
'fcts.cases',
'fcts.embeds',
'fcts.emojis',
'fcts.errors',
'fcts.events',
'fcts.fun',
'fcts.info',
'fcts.library',
'fcts.minecraft',
'fcts.moderation',
'fcts.morpions',
'fcts.partners',
'fcts.perms',
'fcts.reloads',
'fcts.roles_react',
'fcts.rss',
'fcts.s_backups',
'fcts.serverlogs',
'fcts.servers',
'fcts.tickets',
'fcts.timers',
# 'fcts.translations',
'fcts.users',
'fcts.utilities',
'fcts.voices',
'fcts.welcomer',
'fcts.xp'
]
# Here we load our extensions(cogs) listed above in [initial_extensions]
count = 0
for extension in initial_extensions:
try:
await bot.load_extension(extension)
except discord.DiscordException:
bot.log.critical('Failed to load extension %s', extension, exc_info=True)
count += 1
if count > 0:
bot.log.critical("%s modules not loaded\nEnd of program", count)
sys.exit()
async def count_code_lines():
"""Count lines of Python code in the current folder
Comments and empty lines are ignored."""
count = 0
path = os.path.dirname(__file__)+'/**/*.py'
for filename in glob.iglob(path, recursive=True):
if '/env/' in filename or not filename.endswith('.py'):
continue
with open(filename, 'r', encoding='utf-8') as file:
for line in file.read().split("\n"):
cleaned_line = line.strip()
if len(cleaned_line) > 2 and not cleaned_line.startswith('#'):
count += 1
return count
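A short sketch of how the helpers above are typically driven (editor's addition). It assumes the module imports cleanly, i.e. that discord.py, mysql-connector and the fcts package are installed, that a logs/ directory exists, and that the token value is a placeholder:

```python
import asyncio

import utils  # the module above

parser = utils.setup_start_parser()
args = parser.parse_args(['--token', 'PLACEHOLDER_TOKEN', '--no-rss'])
print(args.event_loop, args.rss_features)    # True, False

logger = utils.setup_bot_logger()            # logs to logs/debug.log and stdout
total = asyncio.run(utils.count_code_lines())
logger.info("%d lines of Python code", total)
```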
avg_line_length: 42.359813 | max_line_length: 258 | alphanum_fraction: 0.609156

hexsha: 988c8aaf033c0c5e15f6254e5acc10faf07e566b | size: 199 | ext: py | lang: Python
max_stars: application/__init__.py @ Naoto-Ida/PiStats-Server (head 67bc7224ccf77a3e46bc07a9781bf989d4631849), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head/licenses as max_stars, count null, events null to null
max_forks: same path/repo/head/licenses as max_stars, count null, events null to null
content:
from flask import Flask
from flask_bootstrap import Bootstrap
app = Flask(__name__)
Bootstrap(app)
import application.index
import application.cpu
import application.disk
import application.memory
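The package above wires up a Flask app with Bootstrap and imports its view modules for their route side effects; one minimal way to serve it locally (editor's sketch, with illustrative development-server settings):

```python
# run_dev.py (editor's sketch, local development only)
from application import app

if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000, debug=True)
```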
avg_line_length: 18.090909 | max_line_length: 37 | alphanum_fraction: 0.844221

hexsha: 64325f603e48b2e1f18f8addfaaf255e3fde410f | size: 4,067 | ext: py | lang: Python
max_stars: Solid/StochasticHillClimb.py @ Yamp/Solid (head 96e7f5de2e7a481aadef93add9cfa3a8270705b3), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head/licenses as max_stars, count null, events null to null
max_forks: same path/repo/head/licenses as max_stars, count null, events null to null
content:
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from math import exp
from random import random
class StochasticHillClimb(metaclass=ABCMeta):
"""
Conducts stochastic hill climb
"""
initial_state = None
current_state = None
best_state = None
cur_steps = 0
max_steps = None
best_objective = None
max_objective = None
temp = None
def __init__(self, initial_state, temp, max_steps, max_objective=None):
"""
:param initial_state: initial state of hill climbing
:param max_steps: maximum steps to run hill climbing for
:param temp: temperature in probabilistic acceptance of transition
:param max_objective: objective function to stop algorithm once reached
"""
self.initial_state = initial_state
if isinstance(max_steps, int) and max_steps > 0:
self.max_steps = max_steps
else:
raise ValueError('Max steps must be a positive integer')
if max_objective is not None:
if isinstance(max_objective, (float, int)):
self.max_objective = float(max_objective)
else:
raise ValueError('Maximum objective must be a numeric type')
if isinstance(temp, (float, int)):
self.temp = float(temp)
else:
raise ValueError('Temperature must be a numeric type')
def __str__(self):
return ('STOCHASTIC HILL CLIMB: \n' +
'CURRENT STEPS: %d \n' +
'BEST OBJECTIVE: %f \n' +
'BEST STATE: %s \n\n') % \
(self.cur_steps, self.best_objective, str(self.best_state))
def __repr__(self):
return self.__str__()
def _clear(self):
"""
Resets the variables that are altered on a per-run basis of the algorithm
:return: None
"""
self.cur_steps = 0
self.current_state = None
self.best_state = None
self.best_objective = None
@abstractmethod
def _neighbor(self):
"""
Returns a random member of the neighborhood of the current state
:return: a random neighbor, given access to self.current_state
"""
pass
@abstractmethod
def _objective(self, state):
"""
Evaluates a given state
:param state: a state
:return: objective function value of state
"""
pass
def _accept_neighbor(self, neighbor):
"""
Probabilistically determines whether or not to accept a transition to a neighbor
:param neighbor: a state
:return: boolean indicating whether or not transition was accepted
"""
try:
p = 1. / (1 + (exp((self._objective(self.current_state) - self._objective(neighbor)) / self.temp)))
except OverflowError:
return True
return True if p >= 1 else p >= random()
def run(self, verbose=True):
"""
Conducts hill climb
:param verbose: indicates whether or not to print progress regularly
:return: best state and best objective function value
"""
self._clear()
self.current_state = self.initial_state
for i in range(self.max_steps):
self.cur_steps += 1
if ((i + 1) % 100 == 0) and verbose:
print(self)
neighbor = self._neighbor()
if self._accept_neighbor(neighbor):
self.current_state = neighbor
if self._objective(self.current_state) > (self.best_objective or 0):
self.best_objective = self._objective(self.current_state)
self.best_state = deepcopy(self.current_state)
if self.max_objective is not None and (self.best_objective or 0) > self.max_objective:
print("TERMINATING - REACHED MAXIMUM OBJECTIVE")
return self.best_state, self.best_objective
print("TERMINATING - REACHED MAXIMUM STEPS")
return self.best_state, self.best_objective
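Because the class is abstract, it only becomes useful once _neighbor and _objective are supplied; a minimal concrete subclass with an arbitrary one-dimensional objective (editor's illustration, not part of the library):

```python
from random import uniform

class QuadraticClimb(StochasticHillClimb):
    """Climbs toward the maximum of f(x) = 100 - (x - 3)^2."""

    def _neighbor(self):
        # A random member of the neighborhood: a small perturbation of the current state.
        return self.current_state + uniform(-0.5, 0.5)

    def _objective(self, state):
        return 100 - (state - 3) ** 2

climb = QuadraticClimb(initial_state=0.0, temp=5, max_steps=2000)
best_state, best_objective = climb.run(verbose=False)
print(best_state, best_objective)   # best_state should end up near 3
```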
avg_line_length: 30.810606 | max_line_length: 111 | alphanum_fraction: 0.605606

hexsha: e98f7f518eb25c1a0e126b0bb4c5d0c8ff489f2d | size: 212 | ext: py | lang: Python
max_stars: admin_list_controls/tests/utils.py @ murray3k/wagtail-admin-list-controls (head ad162fe70b9795937777b2dd6a01261deb394fdb), licenses ["MIT"], count 30, events 2020-03-11T06:45:31.000Z to 2022-01-24T13:54:17.000Z
max_issues: same path/repo/head/licenses as max_stars, count 14, events 2020-03-09T20:54:27.000Z to 2021-09-03T00:42:05.000Z
max_forks: same path/repo/head/licenses as max_stars, count 3, events 2020-03-09T14:17:38.000Z to 2021-05-18T09:16:50.000Z
content:
from django.test import TestCase
class BaseTestCase(TestCase):
def assertObjectSerializesTo(self, obj, subset):
serialized = obj.serialize()
self.assertDictContainsSubset(subset, serialized)
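A minimal sketch of how this helper is meant to be used (editor's addition; the fake control below stands in for any admin_list_controls object that exposes a serialize() method):

```python
class _FakeControl:
    def serialize(self):
        return {'object_name': 'fake_control', 'children': []}

class FakeControlTests(BaseTestCase):
    def test_serialized_output_contains_expected_keys(self):
        # Passes as long as the expected subset appears in the serialized dict.
        self.assertObjectSerializesTo(_FakeControl(), {'object_name': 'fake_control'})
```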
avg_line_length: 30.285714 | max_line_length: 57 | alphanum_fraction: 0.75

hexsha: d870f62103cdbfee9479b651ca7889f215baeabc | size: 105,050 | ext: py | lang: Python
max_stars: core/domain/user_services_test.py @ EishaMazhar/oppia (head ab4f3cf20764b27f567798e4b1184471aaf7f73b), licenses ["Apache-2.0"], count null, events null to null
max_issues: same path/repo/head/licenses as max_stars, count 1, events 2020-05-27T06:08:17.000Z to 2020-05-27T06:08:17.000Z
max_forks: same path/repo/head/licenses as max_stars, count 1, events 2018-03-20T14:12:31.000Z to 2018-03-20T14:12:31.000Z
content:
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.user_services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import logging
import os
from constants import constants
from core.domain import collection_services
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import user_domain
from core.domain import user_jobs_continuous
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import requests_mock
import utils
(user_models,) = models.Registry.import_models([models.NAMES.user])
class MockUserStatsAggregator(
user_jobs_continuous.UserStatsAggregator):
"""A modified UserStatsAggregator that does not start a new
batch job when the previous one has finished.
"""
@classmethod
def _get_batch_job_manager_class(cls):
return MockUserStatsMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
pass
class MockUserStatsMRJobManager(
user_jobs_continuous.UserStatsMRJobManager):
@classmethod
def _get_continuous_computation_class(cls):
return MockUserStatsAggregator
class UserServicesUnitTests(test_utils.GenericTestBase):
"""Test the user services methods."""
def setUp(self):
super(UserServicesUnitTests, self).setUp()
schema_version = 1
self.modifiable_user_data = user_domain.ModifiableUserData(
'display_alias', '12345', [constants.DEFAULT_LANGUAGE_CODE],
None, None, schema_version, 'user_id'
)
self.modifiable_new_user_data = user_domain.ModifiableUserData(
'display_alias3', '12345', [constants.DEFAULT_LANGUAGE_CODE],
None, None, schema_version
)
def test_is_user_id_valid(self):
self.assertTrue(
user_services.is_user_id_valid(feconf.SYSTEM_COMMITTER_ID))
self.assertTrue(
user_services.is_user_id_valid(feconf.MIGRATION_BOT_USER_ID))
self.assertTrue(
user_services.is_user_id_valid(feconf.SUGGESTION_BOT_USER_ID))
self.assertTrue(user_services.is_user_id_valid('uid_%s' % ('a' * 32)))
self.assertFalse(
user_services.is_user_id_valid('uid_%s%s' % ('a' * 31, 'A')))
self.assertFalse(user_services.is_user_id_valid('uid_%s' % ('a' * 31)))
self.assertFalse(user_services.is_user_id_valid('a' * 36))
def test_set_and_get_username(self):
gae_id = 'someUser'
username = 'username'
with self.assertRaisesRegexp(Exception, 'User not found.'):
user_services.set_username(gae_id, username)
user_settings = user_services.create_new_user(
gae_id, 'user@example.com')
user_services.set_username(user_settings.user_id, username)
self.assertEqual(
username, user_services.get_username(user_settings.user_id))
def test_get_username_for_system_user(self):
self.assertEqual(
feconf.SYSTEM_COMMITTER_ID,
user_services.get_username(feconf.SYSTEM_COMMITTER_ID))
self.assertEqual(
feconf.MIGRATION_BOT_USERNAME,
user_services.get_username(feconf.MIGRATION_BOT_USER_ID))
def test_get_username_for_pseudonymous_id(self):
self.assertEqual(
'UserAaaaaaaa',
user_services.get_username('pid_' + 'a' * 32))
self.assertEqual(
'UserBbbbbbbb',
user_services.get_username('pid_' + 'b' * 32))
def test_get_usernames_for_pseudonymous_ids(self):
# Handle usernames that exist.
self.assertEqual(
['UserAaaaaaaa', 'UserBbbbbbbb'],
user_services.get_usernames(['pid_' + 'a' * 32, 'pid_' + 'b' * 32]))
def test_get_usernames_empty_list(self):
# Return empty list when no user id passed.
self.assertEqual([], user_services.get_usernames([]))
def test_get_usernames_system_admin(self):
# Check that system admin has correct username.
self.assertEqual(
[feconf.SYSTEM_COMMITTER_ID],
user_services.get_usernames([feconf.SYSTEM_COMMITTER_ID]))
def test_get_username_for_nonexistent_user(self):
with self.assertRaisesRegexp(
Exception,
'User with ID \'fakeUser\' not found.'
):
user_services.get_username('fakeUser')
def test_get_username_none(self):
user_id = user_services.create_new_user(
'fakeUser', 'user@example.com').user_id
self.assertEqual(None, user_services.get_username(user_id))
def test_is_username_taken_false(self):
self.assertFalse(user_services.is_username_taken('fakeUsername'))
def test_is_username_taken_true(self):
gae_id = 'someUser'
username = 'newUsername'
user_id = user_services.create_new_user(
gae_id, 'user@example.com').user_id
user_services.set_username(user_id, username)
self.assertTrue(user_services.is_username_taken(username))
def test_is_username_taken_different_case(self):
gae_id = 'someUser'
username = 'camelCase'
user_id = user_services.create_new_user(
gae_id, 'user@example.com').user_id
user_services.set_username(user_id, username)
self.assertTrue(user_services.is_username_taken('CaMeLcAsE'))
def test_set_invalid_usernames(self):
gae_id = 'someUser'
user_id = user_services.create_new_user(
gae_id, 'user@example.com').user_id
bad_usernames_with_expected_error_message = [
(' bob ', 'Usernames can only have alphanumeric characters.'),
('@', 'Usernames can only have alphanumeric characters.'),
('', 'Empty username supplied.'),
('a' * 100, 'A username can have at most 30 characters.'),
('ADMIN', 'This username is not available.'),
('admin', 'This username is not available.'),
('AdMiN2020', 'This username is not available.'),
('AbcOppiaMigrationBotXyz', 'This username is not available.'),
('OppiaMigrATIONBOTXyz', 'This username is not available.'),
('AbcOppiaSuggestionBotXyz', 'This username is not available.'),
('AAAOPPIASuggestionBotBBB', 'This username is not available.'),
('xyzOppia', 'This username is not available.'),
('oppiaXyz', 'This username is not available.'),
('abcOppiaXyz', 'This username is not available.')]
for username, error_msg in bad_usernames_with_expected_error_message:
with self.assertRaisesRegexp(utils.ValidationError, error_msg):
user_services.set_username(user_id, username)
def test_update_user_settings_for_invalid_display_alias_raises_error(self):
gae_id = 'someUser'
user_id = user_services.create_new_user(
gae_id, 'user@example.com').user_id
bad_display_aliases_with_expected_error = [
('', 'Expected display_alias to be a string, received .'),
(0, 'Expected display_alias to be a string, received 0.'),
(None, 'Expected display_alias to be a string, received None.')
]
self.modifiable_new_user_data.user_id = user_id
self.modifiable_new_user_data.pin = None
for display_alias, error_msg in bad_display_aliases_with_expected_error:
with self.assertRaisesRegexp(utils.ValidationError, error_msg):
self.modifiable_new_user_data.display_alias = display_alias
user_services.update_multiple_users_data(
[self.modifiable_new_user_data])
def test_update_user_settings_valid_display_alias_set_successfully(self):
gae_id = 'someUser'
user_id = user_services.create_new_user(
gae_id, 'user@example.com').user_id
display_alias = 'Name'
user_settings = user_services.get_user_settings(user_id)
self.assertIsNone(user_settings.display_alias)
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = None
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
user_settings = user_services.get_user_settings(user_id)
self.assertEqual(user_settings.display_alias, display_alias)
def test_create_new_user_with_invalid_emails_raises_exception(self):
bad_email_addresses_with_expected_error_message = [
('@', 'Invalid email address: @'),
('@@', 'Invalid email address: @@'),
('abc', 'Invalid email address: abc'),
('', 'No user email specified.'),
(None, 'Expected email to be a string, received None'),
(
['a', '@', 'b.com'],
r'Expected email to be a string, received '
r'\[u\'a\', u\'@\', u\'b.com\'\]')]
for email, error_msg in bad_email_addresses_with_expected_error_message:
with self.assertRaisesRegexp(utils.ValidationError, error_msg):
user_services.create_new_user('gae_id', email)
def test_create_new_user_with_invalid_email_creates_no_user_models(self):
bad_email = '@'
error_msg = 'Invalid email address: @'
with self.assertRaisesRegexp(utils.ValidationError, error_msg):
user_services.create_new_user('gae_id', bad_email)
tmp_admin_user_id = self.get_user_id_from_email(self.SUPER_ADMIN_EMAIL)
user_ids_in_user_settings = [
model.id for model in user_models.UserSettingsModel.get_all()]
user_ids_in_user_auth_details = [
model.id for model in user_models.UserAuthDetailsModel.get_all()]
user_ids_in_user_contributions = [
model.id for model in user_models.UserContributionsModel.get_all()]
self.assertEqual(user_ids_in_user_settings, [tmp_admin_user_id])
self.assertEqual(user_ids_in_user_auth_details, [tmp_admin_user_id])
self.assertEqual(user_ids_in_user_contributions, [tmp_admin_user_id])
def test_email_truncation(self):
email_addresses = [
('a@b.c', '..@b.c'),
('ab@c.d', 'a..@c.d'),
('abc@def.gh', 'a..@def.gh'),
('abcd@efg.h', 'a..@efg.h'),
('abcdefgh@efg.h', 'abcde..@efg.h'),
]
for ind, (actual_email, expected_email) in enumerate(email_addresses):
user_settings = user_services.create_new_user(
python_utils.convert_to_bytes(ind), actual_email)
self.assertEqual(user_settings.truncated_email, expected_email)
def test_get_email_from_username(self):
gae_id = 'someUser'
username = 'username'
user_email = 'user@example.com'
user_settings = user_services.create_new_user(gae_id, user_email)
user_services.set_username(user_settings.user_id, username)
self.assertEqual(
user_services.get_username(user_settings.user_id), username)
# Handle usernames that exist.
self.assertEqual(
user_services.get_email_from_username(username), user_email)
# Handle usernames in the same equivalence class correctly.
self.assertEqual(
user_services.get_email_from_username('USERNAME'), user_email)
# Return None for usernames which don't exist.
self.assertIsNone(
user_services.get_email_from_username('fakeUsername'))
def test_get_user_id_from_username(self):
gae_id = 'someUser'
username = 'username'
user_email = 'user@example.com'
user_settings = user_services.create_new_user(gae_id, user_email)
user_services.set_username(user_settings.user_id, username)
self.assertEqual(
user_services.get_username(user_settings.user_id), username)
# Handle usernames that exist.
self.assertEqual(
user_services.get_user_id_from_username(username),
user_settings.user_id)
# Handle usernames in the same equivalence class correctly.
self.assertEqual(
user_services.get_user_id_from_username('USERNAME'),
user_settings.user_id)
# Return None for usernames which don't exist.
self.assertIsNone(
user_services.get_user_id_from_username('fakeUsername'))
def test_get_user_settings_by_gae_id_for_existing_user_is_correct(self):
gae_id = 'gae_id'
email = 'user@example.com'
user_id = 'user_id'
username = 'username'
user_models.UserSettingsModel(
id=user_id,
gae_id=gae_id,
email=email,
username=username,
).put()
user_models.UserAuthDetailsModel(
id=user_id,
gae_id=gae_id
).put()
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
user_settings = user_services.get_user_settings_by_gae_id(gae_id)
self.assertEqual(user_settings_model.id, user_settings.user_id)
self.assertEqual(user_settings_model.gae_id, user_settings.gae_id)
self.assertEqual(user_settings_model.email, user_settings.email)
self.assertEqual(user_settings_model.username, user_settings.username)
def test_get_user_settings_by_gae_id_for_nonexistent_gae_id_is_none(self):
self.assertIsNone(user_services.get_user_settings_by_gae_id('gae_id_x'))
def test_get_auth_details_by_gae_id_for_nonexistent_gae_id_is_none(self):
self.assertIsNone(user_services.get_user_settings_by_gae_id('gae_id_x'))
def test_get_user_settings_by_gae_id_strict_existing_user_is_correct(self):
non_existent_user_id = 'id_x'
gae_id = 'gae_id'
email = 'user@example.com'
user_id = 'user_id'
username = 'username'
user_models.UserSettingsModel(
id=user_id,
gae_id=gae_id,
email=email,
username=username,
).put()
user_models.UserAuthDetailsModel(
id=user_id,
gae_id=gae_id
).put()
user_settings_model = user_models.UserSettingsModel.get_by_id(user_id)
user_settings = user_services.get_user_settings_by_gae_id(gae_id)
self.assertEqual(user_settings_model.id, user_settings.user_id)
self.assertEqual(user_settings_model.gae_id, user_settings.gae_id)
self.assertEqual(user_settings_model.email, user_settings.email)
self.assertEqual(user_settings_model.username, user_settings.username)
with self.assertRaisesRegexp(Exception, 'User not found.'):
user_services.get_user_settings_by_gae_id(
non_existent_user_id, strict=True)
def test_fetch_gravatar_success(self):
user_email = 'user@example.com'
gravatar_url = user_services.get_gravatar_url(user_email)
expected_gravatar_filepath = os.path.join(
self.get_static_asset_filepath(), 'assets', 'images', 'avatar',
'gravatar_example.png')
with python_utils.open_file(
expected_gravatar_filepath, 'rb', encoding=None) as f:
expected_gravatar = f.read()
with requests_mock.Mocker() as requests_mocker:
requests_mocker.get(gravatar_url, content=expected_gravatar)
gravatar = user_services.fetch_gravatar(user_email)
self.assertEqual(
gravatar, utils.convert_png_to_data_url(expected_gravatar_filepath))
def test_fetch_gravatar_failure_404(self):
user_email = 'user@example.com'
gravatar_url = user_services.get_gravatar_url(user_email)
error_messages = []
logging_mocker = self.swap(logging, 'error', error_messages.append)
with logging_mocker, requests_mock.Mocker() as requests_mocker:
requests_mocker.get(gravatar_url, status_code=404)
gravatar = user_services.fetch_gravatar(user_email)
self.assertEqual(
error_messages,
['[Status 404] Failed to fetch Gravatar from %s' % gravatar_url])
self.assertEqual(gravatar, user_services.DEFAULT_IDENTICON_DATA_URL)
def test_fetch_gravatar_failure_exception(self):
user_email = 'user@example.com'
gravatar_url = user_services.get_gravatar_url(user_email)
error_messages = []
logging_mocker = self.swap(logging, 'exception', error_messages.append)
with logging_mocker, requests_mock.Mocker() as requests_mocker:
requests_mocker.get(gravatar_url, exc=Exception)
gravatar = user_services.fetch_gravatar(user_email)
self.assertEqual(
error_messages, ['Failed to fetch Gravatar from %s' % gravatar_url])
self.assertEqual(gravatar, user_services.DEFAULT_IDENTICON_DATA_URL)
def test_default_identicon_data_url(self):
identicon_filepath = os.path.join(
self.get_static_asset_filepath(), 'assets', 'images', 'avatar',
'user_blue_72px.png')
identicon_data_url = utils.convert_png_to_data_url(identicon_filepath)
self.assertEqual(
identicon_data_url, user_services.DEFAULT_IDENTICON_DATA_URL)
def test_set_and_get_user_email_preferences(self):
gae_id = 'someUser'
username = 'username'
user_email = 'user@example.com'
user_id = user_services.create_new_user(gae_id, user_email).user_id
user_services.set_username(user_id, username)
# When UserEmailPreferencesModel is yet to be created,
# the value returned by get_email_preferences() should be True.
email_preferences = user_services.get_email_preferences(user_id)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
email_preferences = user_services.get_email_preferences(user_id)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
# The user retrieves their email preferences. This initializes
# a UserEmailPreferencesModel instance with the default values.
user_services.update_email_preferences(
user_id, feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
email_preferences = user_services.get_email_preferences(user_id)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
# The user sets their membership email preference to False.
user_services.update_email_preferences(
user_id, feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE, False, False,
False)
email_preferences = user_services.get_email_preferences(user_id)
self.assertFalse(email_preferences.can_receive_editor_role_email)
self.assertFalse(email_preferences.can_receive_feedback_message_email)
self.assertFalse(email_preferences.can_receive_subscription_email)
def test_set_and_get_user_email_preferences_for_exploration(self):
gae_id = 'someUser'
exploration_id = 'someExploration'
username = 'username'
user_email = 'user@example.com'
user_id = user_services.create_new_user(gae_id, user_email).user_id
user_services.set_username(user_id, username)
# When ExplorationUserDataModel is yet to be created, the value
# of mute_feedback_notifications and mute_suggestion_notifications
# should match the default values.
exploration_user_model = (
user_services.user_models.ExplorationUserDataModel.get(
user_id, exploration_id))
self.assertIsNone(exploration_user_model)
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertEqual(
email_preferences.mute_feedback_notifications,
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
self.assertEqual(
email_preferences.mute_suggestion_notifications,
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
# This initializes a ExplorationUserDataModel instance with
# the default mute values.
user_services.set_email_preferences_for_exploration(
user_id, exploration_id,
mute_feedback_notifications=(
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE),
mute_suggestion_notifications=(
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE))
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertEqual(
email_preferences.mute_feedback_notifications,
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
self.assertEqual(
email_preferences.mute_suggestion_notifications,
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
# This sets only mute_suggestion_notifications property to True.
# mute_feedback_notifications should remain same as before.
user_services.set_email_preferences_for_exploration(
user_id, exploration_id, mute_suggestion_notifications=True)
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertEqual(
email_preferences.mute_feedback_notifications,
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
self.assertTrue(email_preferences.mute_suggestion_notifications)
# This sets only mute_feedback_notifications property to True.
# mute_suggestion_notifications should remain same as before.
user_services.set_email_preferences_for_exploration(
user_id, exploration_id, mute_feedback_notifications=True)
email_preferences = user_services.get_email_preferences_for_exploration(
user_id, exploration_id)
self.assertTrue(email_preferences.mute_feedback_notifications)
self.assertTrue(email_preferences.mute_suggestion_notifications)
def test_get_usernames_by_role(self):
gae_ids = ['test1', 'test2', 'test3', 'test4']
usernames = ['name1', 'name2', 'name3', 'name4']
user_emails = [
'test1@email.com', 'test2@email.com',
'test3@email.com', 'test4@email.com']
user_ids = []
for gae_id, email, name in python_utils.ZIP(
gae_ids, user_emails, usernames):
user_id = user_services.create_new_user(gae_id, email).user_id
user_ids.append(user_id)
user_services.set_username(user_id, name)
user_services.update_user_role(user_ids[0], feconf.ROLE_ID_MODERATOR)
user_services.update_user_role(user_ids[1], feconf.ROLE_ID_MODERATOR)
user_services.update_user_role(user_ids[2], feconf.ROLE_ID_BANNED_USER)
user_services.update_user_role(user_ids[3], feconf.ROLE_ID_BANNED_USER)
self.assertEqual(
set(user_services.get_usernames_by_role(feconf.ROLE_ID_MODERATOR)),
set(['name1', 'name2']))
self.assertEqual(
set(user_services.get_usernames_by_role(
feconf.ROLE_ID_BANNED_USER)),
set(['name3', 'name4']))
def test_get_user_ids_by_role(self):
gae_ids = ['test1', 'test2', 'test3', 'test4']
usernames = ['name1', 'name2', 'name3', 'name4']
user_emails = [
'test1@email.com', 'test2@email.com',
'test3@email.com', 'test4@email.com']
user_ids = []
for uid, email, name in python_utils.ZIP(
gae_ids, user_emails, usernames):
user_id = user_services.create_new_user(uid, email).user_id
user_ids.append(user_id)
user_services.set_username(user_id, name)
user_services.update_user_role(user_ids[0], feconf.ROLE_ID_MODERATOR)
user_services.update_user_role(user_ids[1], feconf.ROLE_ID_MODERATOR)
user_services.update_user_role(user_ids[2], feconf.ROLE_ID_BANNED_USER)
user_services.update_user_role(user_ids[3], feconf.ROLE_ID_BANNED_USER)
self.assertEqual(
set(user_services.get_user_ids_by_role(feconf.ROLE_ID_MODERATOR)),
set([user_ids[0], user_ids[1]]))
self.assertEqual(
set(user_services.get_user_ids_by_role(
feconf.ROLE_ID_BANNED_USER)),
set([user_ids[2], user_ids[3]]))
def test_update_user_creator_dashboard_display(self):
gae_id = 'test_id'
username = 'testname'
user_email = 'test@email.com'
user_id = user_services.create_new_user(gae_id, user_email).user_id
user_services.set_username(user_id, username)
user_setting = user_services.get_user_settings(user_id)
self.assertEqual(
user_setting.creator_dashboard_display_pref,
constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD'])
user_services.update_user_creator_dashboard_display(
user_id, constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['LIST'])
user_setting = user_services.get_user_settings(user_id)
self.assertEqual(
user_setting.creator_dashboard_display_pref,
constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['LIST'])
def test_update_user_role(self):
gae_id = 'test_id'
username = 'testname'
user_email = 'test@email.com'
user_id = user_services.create_new_user(gae_id, user_email).user_id
user_services.set_username(user_id, username)
self.assertEqual(
user_services.get_user_role_from_id(user_id),
feconf.ROLE_ID_EXPLORATION_EDITOR)
user_services.update_user_role(
user_id, feconf.ROLE_ID_COLLECTION_EDITOR)
self.assertEqual(
user_services.get_user_role_from_id(user_id),
feconf.ROLE_ID_COLLECTION_EDITOR)
def test_get_all_profiles_auth_details_non_existent_id_raises_error(self):
non_existent_user_id = 'id_x'
error_msg = 'Parent user not found.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.get_all_profiles_auth_details_by_parent_user_id(
non_existent_user_id)
def test_update_user_role_from_learner_to_other_role_raises_exception(self):
gae_id = 'test_id'
user_email = 'test@email.com'
user_pin = '12345'
profile_pin = '123'
display_alias = 'display_alias'
display_alias_2 = 'display_alias_2'
user_id = user_services.create_new_user(gae_id, user_email).user_id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
user_services.create_new_profiles(
gae_id, user_email, [self.modifiable_new_user_data])
profile_user_id = (
user_services.get_all_profiles_auth_details_by_parent_user_id(
user_id)[0].user_id
)
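        # Profiles created this way are assigned the learner role by default.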
self.assertEqual(
user_services.get_user_role_from_id(profile_user_id),
feconf.ROLE_ID_LEARNER)
error_msg = 'The role of a Learner cannot be changed.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.update_user_role(
profile_user_id, feconf.ROLE_ID_EXPLORATION_EDITOR)
def test_update_user_role_from_other_role_to_learner_raises_exception(self):
gae_id = 'test_id'
user_email = 'test@email.com'
user_id = user_services.create_new_user(gae_id, user_email).user_id
self.assertEqual(
user_services.get_user_role_from_id(user_id),
feconf.ROLE_ID_EXPLORATION_EDITOR)
error_msg = 'Updating to a Learner role is not allowed.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.update_user_role(
user_id, feconf.ROLE_ID_LEARNER)
def test_create_new_user_also_creates_a_new_user_auth_details_entry(self):
new_gae_id = 'new_gae_id'
new_email = 'new@example.com'
self.assertIsNone(
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, new_gae_id)
)
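        # Creating the user should also create a corresponding
        # UserAuthDetailsModel entry.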
user_services.create_new_user(new_gae_id, new_email)
user_settings = user_services.get_user_settings_by_gae_id(
new_gae_id)
user_auth_details = user_models.UserAuthDetailsModel.get_by_id(
user_settings.user_id)
self.assertEqual(user_auth_details.gae_id, user_settings.gae_id)
def test_get_auth_details_by_user_id_for_existing_user_works_fine(self):
gae_id = 'new_gae_id'
email = 'new@example.com'
user_services.create_new_user(gae_id, email)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, gae_id)
)
user_auth_details = user_services.get_auth_details_by_user_id(
user_auth_details_model.id)
self.assertEqual(
user_auth_details.user_id, user_auth_details_model.id)
self.assertEqual(
user_auth_details.gae_id, user_auth_details_model.gae_id)
self.assertEqual(
user_auth_details.parent_user_id,
user_auth_details_model.parent_user_id
)
def test_get_auth_details_by_user_id_non_existing_user_returns_none(self):
non_existent_user_id = 'id_x'
self.assertIsNone(
user_services.get_auth_details_by_user_id(non_existent_user_id))
def test_get_auth_details_by_user_id_strict_non_existing_user_error(self):
non_existent_user_id = 'id_x'
error_msg = 'User not found'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.get_auth_details_by_user_id(
non_existent_user_id, strict=True)
def test_get_auth_details_by_gae_id_non_existing_user_returns_none(self):
non_existent_user_id = 'id_x'
self.assertIsNone(
user_services.get_auth_details_by_user_id(non_existent_user_id))
def test_create_new_profile_with_parent_user_pin_set_is_success(self):
gae_id = 'gae_id'
email = 'new@example.com'
display_alias = 'display_alias'
display_alias_2 = 'display_alias2'
user_pin = '12345'
profile_pin = '123'
user_services.create_new_user(gae_id, email)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, gae_id)
)
user_id = user_auth_details_model.id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
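        # With the parent user's PIN set, a new profile can now be created.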
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
user_services.create_new_profiles(
gae_id, email, [self.modifiable_new_user_data]
)
user_auth_details_models = (
user_services.get_all_profiles_auth_details_by_parent_user_id(
user_id)
)
self.assertEqual(len(user_auth_details_models), 1)
self.assertEqual(user_auth_details_models[0].parent_user_id, user_id)
self.assertIsNone(user_auth_details_models[0].gae_id)
def test_create_new_profile_with_parent_user_pin_not_set_raises_error(self):
gae_id = 'gae_id'
email = 'new@example.com'
display_alias = 'display_alias'
profile_pin = '123'
user_services.create_new_user(gae_id, email)
error_msg = 'Pin must be set for a full user before creating a profile.'
with self.assertRaisesRegexp(Exception, error_msg):
self.modifiable_new_user_data.display_alias = display_alias
self.modifiable_new_user_data.pin = profile_pin
user_services.create_new_profiles(
gae_id, email, [self.modifiable_new_user_data])
def test_create_multiple_new_profiles_for_same_user_works_correctly(self):
schema_version = 1
gae_id = 'gae_id'
email = 'new@example.com'
display_alias = 'display_alias'
display_alias_2 = 'display_alias2'
display_alias_3 = 'display_alias3'
user_pin = '12345'
profile_pin = '123'
user_services.create_new_user(gae_id, email)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, gae_id)
)
user_id = user_auth_details_model.id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
modifiable_new_user_data_2 = user_domain.ModifiableUserData(
display_alias_3, None, [constants.DEFAULT_LANGUAGE_CODE],
None, None, schema_version
)
user_settings_list = user_services.create_new_profiles(
gae_id, email, [
self.modifiable_new_user_data, modifiable_new_user_data_2
]
)
profile_1_id = user_settings_list[0].user_id
profile_2_id = user_settings_list[1].user_id
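        # Compare the stored auth details and settings of both profiles
        # against the expected values.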
user_auth_details_models = [
{
'id': model.id,
'gae_id': model.gae_id,
'parent_user_id': model.parent_user_id
} for model in
user_models.UserAuthDetailsModel.get_all_profiles_by_parent_user_id(
user_id)
]
expected_user_auth_output = [
{
'id': profile_1_id,
'gae_id': None,
'parent_user_id': user_id
},
{
'id': profile_2_id,
'gae_id': None,
'parent_user_id': user_id
}
]
self.assertItemsEqual(
user_auth_details_models, expected_user_auth_output)
user_settings_models = [
{
'id': model.id,
'display_alias': model.display_alias,
'pin': model.pin,
'role': model.role
} for model in
user_models.UserSettingsModel.get_multi(
[profile_1_id, profile_2_id])
]
expected_user_settings_output = [
{
'id': profile_1_id,
'display_alias': display_alias_2,
'pin': profile_pin,
'role': feconf.ROLE_ID_LEARNER
},
{
'id': profile_2_id,
'display_alias': display_alias_3,
'pin': None,
'role': feconf.ROLE_ID_LEARNER
}
]
self.assertItemsEqual(
user_settings_models, expected_user_settings_output)
def test_create_new_profile_with_nonexistent_user_raises_error(self):
non_existent_gae_id = 'gae_id_x'
non_existent_email = 'x@example.com'
profile_pin = '123'
display_alias = 'display_alias'
error_msg = 'User not found.'
with self.assertRaisesRegexp(Exception, error_msg):
self.modifiable_new_user_data.display_alias = display_alias
self.modifiable_new_user_data.pin = profile_pin
user_services.create_new_profiles(
non_existent_gae_id, non_existent_email,
[self.modifiable_new_user_data]
)
def test_create_new_profile_modifiable_user_with_user_id_raises_error(self):
gae_id = 'gae_id'
email = 'new@example.com'
display_alias = 'display_alias'
display_alias_2 = 'display_alias2'
user_pin = '12345'
profile_pin = '123'
user_services.create_new_user(gae_id, email)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, gae_id)
)
user_id = user_auth_details_model.id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
error_msg = 'User id cannot already exist for a new user.'
with self.assertRaisesRegexp(Exception, error_msg):
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
self.modifiable_new_user_data.user_id = 'user_id'
user_services.create_new_profiles(
gae_id, email, [self.modifiable_new_user_data]
)
def test_update_users_modifiable_object_user_id_not_set_raises_error(self):
gae_id = 'gae_id'
email = 'new@example.com'
display_alias = 'display_alias2'
user_pin = '12345'
user_services.create_new_user(gae_id, email)
self.modifiable_user_data.user_id = None
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
error_msg = 'Missing user ID.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.update_multiple_users_data(
[self.modifiable_user_data])
def test_update_users_for_user_with_non_existent_id_raises_error(self):
gae_id = 'gae_id'
non_existent_user_id = 'id_x'
email = 'new@example.com'
display_alias = 'display_alias2'
user_pin = '12345'
user_services.create_new_user(gae_id, email)
self.modifiable_user_data.user_id = non_existent_user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
error_msg = 'User not found.'
with self.assertRaisesRegexp(Exception, error_msg):
user_services.update_multiple_users_data(
[self.modifiable_user_data])
def test_update_users_data_for_multiple_users_works_correctly(self):
# Preparing for the test.
schema_version = 1
gae_id = 'gae_id'
email = 'new@example.com'
display_alias = 'display_alias'
display_alias_2 = 'display_alias2'
display_alias_3 = 'display_alias3'
user_pin = '12345'
profile_pin = '123'
user_services.create_new_user(gae_id, email)
user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_auth_id(
feconf.AUTH_METHOD_GAE, gae_id)
)
user_id = user_auth_details_model.id
self.modifiable_user_data.user_id = user_id
self.modifiable_user_data.pin = user_pin
self.modifiable_user_data.display_alias = display_alias
user_services.update_multiple_users_data([self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = display_alias_2
self.modifiable_new_user_data.pin = profile_pin
modifiable_new_user_data_2 = user_domain.ModifiableUserData(
display_alias_3, None, [constants.DEFAULT_LANGUAGE_CODE],
None, None, schema_version
)
user_settings_list = user_services.create_new_profiles(
gae_id, email, [
self.modifiable_new_user_data, modifiable_new_user_data_2
]
)
profile_user_ids = [
user_settings_list[0].user_id, user_settings_list[1].user_id]
self.modifiable_new_user_data.user_id = profile_user_ids[0]
modifiable_new_user_data_2.user_id = profile_user_ids[1]
# Performing the actual action.
modifiable_new_user_data_2.pin = '345'
self.modifiable_new_user_data.display_alias = 'xyz'
user_services.update_multiple_users_data(
[self.modifiable_new_user_data, modifiable_new_user_data_2])
# Post-checking.
user_auth_details_models = [
{
'id': model.id,
'gae_id': model.gae_id,
'parent_user_id': model.parent_user_id
} for model in
user_models.UserAuthDetailsModel.get_multi(profile_user_ids)
]
expected_auth_details_output = [
{
'id': profile_user_ids[0],
'gae_id': None,
'parent_user_id': user_id
},
{
'id': profile_user_ids[1],
'gae_id': None,
'parent_user_id': user_id
}
]
self.assertItemsEqual(
expected_auth_details_output, user_auth_details_models)
user_settings_models = [
{
'id': model.id,
'display_alias': model.display_alias,
'pin': model.pin
} for model in
user_models.UserSettingsModel.get_multi(profile_user_ids)
]
expected_user_settings_output = [
{
'id': profile_user_ids[0],
'display_alias': 'xyz',
'pin': profile_pin
},
{
'id': profile_user_ids[1],
'display_alias': display_alias_3,
'pin': '345'
}
]
self.assertItemsEqual(
expected_user_settings_output, user_settings_models)
def test_mark_user_for_deletion_deletes_user_settings(self):
gae_id = 'test_id'
username = 'testname'
user_email = 'test@email.com'
user_id = user_services.create_new_user(gae_id, user_email).user_id
user_services.set_username(user_id, username)
user_settings = user_services.get_user_settings_by_gae_id(gae_id)
self.assertFalse(user_settings.deleted)
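        # Marking the user for deletion should flag the settings model as
        # deleted.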
user_services.mark_user_for_deletion(user_id)
user_settings = user_services.get_user_settings_by_gae_id(gae_id)
self.assertTrue(user_settings.deleted)
def test_mark_user_for_deletion_deletes_user_auth_details_entry(self):
gae_id = 'test_id'
username = 'testname'
user_email = 'test@email.com'
user_id = user_services.create_new_user(gae_id, user_email).user_id
user_services.set_username(user_id, username)
user_auth_details = user_models.UserAuthDetailsModel.get_by_id(user_id)
self.assertFalse(user_auth_details.deleted)
user_services.mark_user_for_deletion(user_id)
user_auth_details = user_models.UserAuthDetailsModel.get_by_id(user_id)
self.assertTrue(user_auth_details.deleted)
def test_get_current_date_as_string(self):
custom_datetimes = [
datetime.date(2011, 1, 1),
datetime.date(2012, 2, 28)
]
datetime_strings = [custom_datetime.strftime(
feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
for custom_datetime in custom_datetimes]
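        # Each formatted date should follow the YYYY-MM-DD pattern.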
self.assertEqual(len(datetime_strings[0].split('-')[0]), 4)
self.assertEqual(len(datetime_strings[0].split('-')[1]), 2)
self.assertEqual(len(datetime_strings[0].split('-')[2]), 2)
self.assertEqual(len(datetime_strings[1].split('-')[0]), 4)
self.assertEqual(len(datetime_strings[1].split('-')[1]), 2)
self.assertEqual(len(datetime_strings[1].split('-')[2]), 2)
self.assertEqual(datetime_strings[0], '2011-01-01')
self.assertEqual(datetime_strings[1], '2012-02-28')
def test_parse_date_from_string(self):
test_datetime_strings = [
'2016-06-30',
'2016-07-05',
'2016-13-01',
'2016-03-32'
]
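        # The first two strings are valid dates; the last two contain an
        # invalid month and an invalid day respectively.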
self.assertEqual(
user_services.parse_date_from_string(test_datetime_strings[0]),
{
'year': 2016,
'month': 6,
'day': 30
})
self.assertEqual(
user_services.parse_date_from_string(test_datetime_strings[1]),
{
'year': 2016,
'month': 7,
'day': 5
})
with self.assertRaisesRegexp(
ValueError,
'time data \'2016-13-01\' does not match format \'%Y-%m-%d\''):
user_services.parse_date_from_string(test_datetime_strings[2])
with self.assertRaisesRegexp(ValueError, 'unconverted data remains: 2'):
user_services.parse_date_from_string(test_datetime_strings[3])
def test_record_user_started_state_translation_tutorial(self):
        # Test storage of the time the user first started the state
        # translation tutorial.
gae_id = 'someUser'
username = 'username'
user_id = user_services.create_new_user(
gae_id, 'user@example.com').user_id
user_services.set_username(user_id, username)
user_services.record_user_started_state_translation_tutorial(user_id)
user_settings = user_services.get_user_settings(user_id)
self.assertIsInstance(
user_settings.last_started_state_translation_tutorial,
datetime.datetime)
self.assertTrue(
user_settings.last_started_state_translation_tutorial is not None)
class UpdateContributionMsecTests(test_utils.GenericTestBase):
"""Test whether contribution date changes with publication of
exploration/collection and update of already published
exploration/collection.
"""
EXP_ID = 'test_exp'
COL_ID = 'test_col'
COLLECTION_TITLE = 'title'
COLLECTION_CATEGORY = 'category'
COLLECTION_OBJECTIVE = 'objective'
def setUp(self):
super(UpdateContributionMsecTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.owner = user_services.UserActionsInfo(self.owner_id)
def test_contribution_msec_updates_on_published_explorations(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.admin_id, end_state_name='End')
init_state_name = exploration.init_state_name
exp_services.publish_exploration_and_update_user_profiles(
self.admin, self.EXP_ID)
        # Test that all owners and editors of the exploration have updated
        # first contribution times (in msec) after publication.
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
        # Test that the editor of the published exploration has an updated
        # contribution time.
rights_manager.release_ownership_of_exploration(
self.admin, self.EXP_ID)
exp_services.update_exploration(
self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_id',
'new_value': 'MultipleChoiceInput'
}), exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [{
'content_id': 'ca_choices_0',
'html': '<p>Choice 1</p>'
}]
},
'showChoicesInShuffledOrder': {'value': True}
}
})], 'commit')
self.assertIsNotNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_update_until_exp_is_published(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.admin_id, end_state_name='End')
init_state_name = exploration.init_state_name
# Test that saving an exploration does not update first contribution
# time.
self.assertIsNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
        # Test that a commit to an unpublished exploration does not update
        # the contribution time.
exp_services.update_exploration(
self.admin_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_id',
'new_value': 'MultipleChoiceInput'
}), exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': init_state_name,
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [{
'content_id': 'ca_choices_0',
'html': '<p>Choice 1</p>'
}]
},
'showChoicesInShuffledOrder': {'value': True}
}
})], '')
self.assertIsNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
        # Test that another user who commits to the unpublished exploration
        # does not get an updated first contribution time.
rights_manager.assign_role_for_exploration(
self.admin, self.EXP_ID, self.editor_id, 'editor')
exp_services.update_exploration(
self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': feconf.DEFAULT_INIT_STATE_NAME,
'new_state_name': u'¡Hola! αβγ',
})], '')
self.assertIsNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
        # Test that after the exploration is published, all contributors
        # have updated first contribution times.
exp_services.publish_exploration_and_update_user_profiles(
self.admin, self.EXP_ID)
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
self.assertIsNotNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_change_if_no_contribution_to_exp(self):
self.save_new_valid_exploration(
self.EXP_ID, self.admin_id, end_state_name='End')
rights_manager.assign_role_for_exploration(
self.admin, self.EXP_ID, self.editor_id, 'editor')
exp_services.publish_exploration_and_update_user_profiles(
self.admin, self.EXP_ID)
# Test that contribution time is not given to an editor that has not
# contributed.
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
self.assertIsNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_change_if_exp_unpublished(self):
self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
exp_services.publish_exploration_and_update_user_profiles(
self.owner, self.EXP_ID)
rights_manager.unpublish_exploration(self.admin, self.EXP_ID)
        # Test that the contribution time is not eliminated if the
        # exploration is unpublished.
self.assertIsNotNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
def test_contribution_msec_updates_on_published_collections(self):
self.save_new_valid_collection(
self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE,
category=self.COLLECTION_CATEGORY,
objective=self.COLLECTION_OBJECTIVE,
exploration_id=self.EXP_ID)
collection_services.publish_collection_and_update_user_profiles(
self.admin, self.COL_ID)
exp_services.publish_exploration_and_update_user_profiles(
self.admin, self.EXP_ID)
        # Test that all owners and editors of the collection have updated
        # first contribution times after publication.
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
        # Test that the editor of the published collection has an updated
        # first contribution time.
rights_manager.release_ownership_of_collection(
self.admin, self.COL_ID)
collection_services.update_collection(
self.editor_id, self.COL_ID, [{
'cmd': 'edit_collection_property',
'property_name': 'title',
'new_value': 'Some new title'
}], 'Changed the title')
self.assertIsNotNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_update_until_collection_is_published(
self):
self.save_new_valid_collection(
self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE,
category=self.COLLECTION_CATEGORY,
objective=self.COLLECTION_OBJECTIVE,
exploration_id=self.EXP_ID)
# Test that saving a collection does not update first contribution
# time.
self.assertIsNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
        # Test that a commit to an unpublished collection does not update
        # the contribution time.
collection_services.update_collection(
self.admin_id, self.COL_ID, [{
'cmd': 'edit_collection_property',
'property_name': 'title',
'new_value': 'Some new title'
}], '')
self.assertIsNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
        # Test that another user who commits to the unpublished collection
        # does not get an updated first contribution time.
rights_manager.assign_role_for_collection(
self.admin, self.COL_ID, self.editor_id, 'editor')
collection_services.update_collection(
self.editor_id, self.COL_ID, [{
'cmd': 'edit_collection_property',
'property_name': 'category',
'new_value': 'Some new category'
}], '')
self.assertIsNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
        # Test that after the collection is published, all contributors have
        # updated first contribution times.
collection_services.publish_collection_and_update_user_profiles(
self.admin, self.COL_ID)
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
self.assertIsNotNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_change_if_no_contribution_to_collection(
self):
self.save_new_valid_collection(
self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE,
category=self.COLLECTION_CATEGORY,
objective=self.COLLECTION_OBJECTIVE,
exploration_id=self.EXP_ID)
rights_manager.assign_role_for_collection(
self.admin, self.COL_ID, self.editor_id, 'editor')
collection_services.publish_collection_and_update_user_profiles(
self.admin, self.COL_ID)
# Test that contribution time is not given to an editor that has not
# contributed.
self.assertIsNotNone(user_services.get_user_settings(
self.admin_id).first_contribution_msec)
self.assertIsNone(user_services.get_user_settings(
self.editor_id).first_contribution_msec)
def test_contribution_msec_does_not_change_if_collection_unpublished(self):
self.save_new_valid_collection(
self.COL_ID, self.owner_id, title=self.COLLECTION_TITLE,
category=self.COLLECTION_CATEGORY,
objective=self.COLLECTION_OBJECTIVE,
exploration_id=self.EXP_ID)
collection_services.publish_collection_and_update_user_profiles(
self.owner, self.COL_ID)
rights_manager.unpublish_collection(self.admin, self.COL_ID)
        # Test that the first contribution msec is not eliminated if the
        # collection is unpublished.
self.assertIsNotNone(user_services.get_user_settings(
self.owner_id).first_contribution_msec)
class UserDashboardStatsTests(test_utils.GenericTestBase):
"""Test whether exploration-related statistics of a user change as events
are registered.
"""
OWNER_EMAIL = 'owner@example.com'
OWNER_USERNAME = 'owner'
EXP_ID = 'exp1'
USER_SESSION_ID = 'session1'
CURRENT_DATE_AS_STRING = user_services.get_current_date_as_string()
def setUp(self):
super(UserDashboardStatsTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
def mock_get_current_date_as_string(self):
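        """Returns a fixed date string to keep the tests deterministic."""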
return self.CURRENT_DATE_AS_STRING
def test_get_user_dashboard_stats(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
init_state_name = exploration.init_state_name
event_services.StartExplorationEventHandler.record(
self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {},
feconf.PLAY_TYPE_NORMAL)
event_services.StatsEventsHandler.record(
self.EXP_ID, 1, {
'num_starts': 1,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
self.assertEqual(
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.owner_id),
{
'total_plays': 0,
'num_ratings': 0,
'average_ratings': None
})
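        # Run the user stats aggregation so that the recorded play is
        # reflected in the dashboard stats.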
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_tasks()
self.assertEqual(
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.owner_id),
{
'total_plays': 1,
'num_ratings': 0,
'average_ratings': None
})
def test_get_weekly_dashboard_stats_when_stats_model_is_none(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
init_state_name = exploration.init_state_name
event_services.StartExplorationEventHandler.record(
self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {},
feconf.PLAY_TYPE_NORMAL)
self.assertEqual(
user_services.get_weekly_dashboard_stats(self.owner_id), None)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id), None)
with self.swap(
user_services, 'get_current_date_as_string',
self.mock_get_current_date_as_string):
user_services.update_dashboard_stats_log(self.owner_id)
self.assertEqual(
user_services.get_weekly_dashboard_stats(self.owner_id), [{
self.CURRENT_DATE_AS_STRING: {
'total_plays': 0,
'num_ratings': 0,
'average_ratings': None
}
}])
def test_get_weekly_dashboard_stats(self):
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
init_state_name = exploration.init_state_name
event_services.StartExplorationEventHandler.record(
self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {},
feconf.PLAY_TYPE_NORMAL)
event_services.StatsEventsHandler.record(
self.EXP_ID, 1, {
'num_starts': 1,
'num_actual_starts': 0,
'num_completions': 0,
'state_stats_mapping': {}
})
self.assertEqual(
user_services.get_weekly_dashboard_stats(self.owner_id), None)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id), None)
self.process_and_flush_pending_tasks()
MockUserStatsAggregator.start_computation()
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
user_services.get_weekly_dashboard_stats(self.owner_id), None)
self.assertEqual(
user_services.get_last_week_dashboard_stats(self.owner_id), None)
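        # Updating the dashboard stats log (with a mocked current date)
        # populates the weekly dashboard stats.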
with self.swap(
user_services, 'get_current_date_as_string',
self.mock_get_current_date_as_string):
user_services.update_dashboard_stats_log(self.owner_id)
self.assertEqual(
user_services.get_weekly_dashboard_stats(self.owner_id), [{
self.CURRENT_DATE_AS_STRING: {
'total_plays': 1,
'num_ratings': 0,
'average_ratings': None
}
}])
class SubjectInterestsUnitTests(test_utils.GenericTestBase):
"""Test the update_subject_interests method."""
def setUp(self):
super(SubjectInterestsUnitTests, self).setUp()
self.gae_id = 'someUser'
self.username = 'username'
self.user_email = 'user@example.com'
self.user_id = user_services.create_new_user(
self.gae_id, self.user_email).user_id
user_services.set_username(self.user_id, self.username)
def test_invalid_subject_interests_are_not_accepted(self):
with self.assertRaisesRegexp(utils.ValidationError, 'to be a list'):
user_services.update_subject_interests(self.user_id, 'not a list')
with self.assertRaisesRegexp(utils.ValidationError, 'to be a string'):
user_services.update_subject_interests(self.user_id, [1, 2, 3])
with self.assertRaisesRegexp(utils.ValidationError, 'to be non-empty'):
user_services.update_subject_interests(self.user_id, ['', 'ab'])
with self.assertRaisesRegexp(
utils.ValidationError,
'to consist only of lowercase alphabetic characters and spaces'
):
user_services.update_subject_interests(self.user_id, ['!'])
with self.assertRaisesRegexp(
utils.ValidationError,
'to consist only of lowercase alphabetic characters and spaces'
):
user_services.update_subject_interests(
self.user_id, ['has-hyphens'])
with self.assertRaisesRegexp(
utils.ValidationError,
'to consist only of lowercase alphabetic characters and spaces'
):
user_services.update_subject_interests(
self.user_id, ['HasCapitalLetters'])
with self.assertRaisesRegexp(utils.ValidationError, 'to be distinct'):
user_services.update_subject_interests(self.user_id, ['a', 'a'])
# The following cases are all valid.
user_services.update_subject_interests(self.user_id, [])
user_services.update_subject_interests(
self.user_id, ['singleword', 'has spaces'])
class LastLoginIntegrationTests(test_utils.GenericTestBase):
"""Integration tests for testing that the last login time for a user updates
correctly.
"""
def setUp(self):
"""Create exploration with two versions."""
super(LastLoginIntegrationTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
def test_legacy_user(self):
"""Test the case of a user who existed in the system before the
last-login check was introduced.
"""
previous_last_logged_in_datetime = (
user_services.get_user_settings(self.viewer_id).last_logged_in)
self.assertIsNotNone(previous_last_logged_in_datetime)
current_datetime = datetime.datetime.utcnow()
mocked_datetime_utcnow = current_datetime - datetime.timedelta(days=1)
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
user_services.record_user_logged_in(self.viewer_id)
user_settings = user_services.get_user_settings(self.viewer_id)
last_logged_in = user_settings.last_logged_in
# After logging in and requesting a URL, the last_logged_in property is
# changed.
self.login(self.VIEWER_EMAIL)
self.get_html_response(feconf.LIBRARY_INDEX_URL)
self.assertLess(
last_logged_in,
user_services.get_user_settings(self.viewer_id).last_logged_in)
self.logout()
def test_last_logged_in_only_updated_if_enough_time_has_elapsed(self):
# The last logged-in time has already been set when the user
# registered.
previous_last_logged_in_datetime = (
user_services.get_user_settings(self.viewer_id).last_logged_in)
self.assertIsNotNone(previous_last_logged_in_datetime)
current_datetime = datetime.datetime.utcnow()
mocked_datetime_utcnow = current_datetime + datetime.timedelta(hours=11)
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
self.login(self.VIEWER_EMAIL)
self.get_html_response(feconf.LIBRARY_INDEX_URL)
self.assertEqual(
user_services.get_user_settings(self.viewer_id).last_logged_in,
previous_last_logged_in_datetime)
self.logout()
mocked_datetime_utcnow = current_datetime + datetime.timedelta(hours=13)
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
self.login(self.VIEWER_EMAIL)
self.get_html_response(feconf.LIBRARY_INDEX_URL)
self.assertGreater(
user_services.get_user_settings(self.viewer_id).last_logged_in,
previous_last_logged_in_datetime)
self.logout()
class LastExplorationEditedIntegrationTests(test_utils.GenericTestBase):
"""Integration tests for testing the time the user last edited an
exploration updates correctly.
"""
EXP_ID = 'exp'
def setUp(self):
"""Create users for creating and editing exploration."""
super(LastExplorationEditedIntegrationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
def test_legacy_user(self):
"""Test the case of a user who are editing exploration for first time
after the last edited time check was introduced.
"""
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNone(editor_settings.last_edited_an_exploration)
exp_services.update_exploration(
self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNotNone(editor_settings.last_edited_an_exploration)
def test_last_exp_edit_time_gets_updated(self):
exp_services.update_exploration(
self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
# Decrease last exploration edited time by 13 hours.
user_settings = user_services.get_user_settings(self.editor_id)
mocked_datetime_utcnow = (
user_settings.last_edited_an_exploration -
datetime.timedelta(hours=13))
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
user_services.record_user_edited_an_exploration(self.editor_id)
editor_settings = user_services.get_user_settings(self.editor_id)
previous_last_edited_an_exploration = (
editor_settings.last_edited_an_exploration)
self.assertIsNotNone(previous_last_edited_an_exploration)
# The editor edits the exploration 13 hours after it was created.
exp_services.update_exploration(
self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'new objective'
})], 'Test edit 2')
# Make sure last exploration edited time gets updated.
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertGreater(
(editor_settings.last_edited_an_exploration),
previous_last_edited_an_exploration)
class LastExplorationCreatedIntegrationTests(test_utils.GenericTestBase):
"""Integration tests for the time the user last created an exploration
updates correctly.
"""
EXP_ID_A = 'exp_a'
EXP_ID_B = 'exp_b'
def setUp(self):
"""Create user for creating exploration."""
super(LastExplorationCreatedIntegrationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
def test_legacy_user(self):
"""Test the case of a user who are creating exploration for first time
after the last edited time check was introduced.
"""
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.save_new_valid_exploration(
self.EXP_ID_A, self.owner_id, end_state_name='End')
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNotNone(owner_settings.last_created_an_exploration)
    def test_last_exp_create_time_gets_updated(self):
self.save_new_valid_exploration(
self.EXP_ID_A, self.owner_id, end_state_name='End')
# Decrease last exploration created time by 13 hours.
user_settings = user_services.get_user_settings(self.owner_id)
with self.mock_datetime_utcnow(
user_settings.last_created_an_exploration -
datetime.timedelta(hours=13)):
user_services.record_user_created_an_exploration(self.owner_id)
owner_settings = user_services.get_user_settings(self.owner_id)
previous_last_created_an_exploration = (
owner_settings.last_created_an_exploration)
self.assertIsNotNone(previous_last_created_an_exploration)
# The creator creates another exploration 13 hours later.
self.save_new_valid_exploration(
self.EXP_ID_B, self.owner_id, end_state_name='End')
# Make sure that last exploration created time gets updated.
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertGreater(
(owner_settings.last_created_an_exploration),
previous_last_created_an_exploration)
class UserSettingsTests(test_utils.GenericTestBase):
def setUp(self):
super(UserSettingsTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.user_settings = user_services.get_user_settings(self.owner_id)
self.user_settings.validate()
self.assertEqual(self.owner.role, feconf.ROLE_ID_EXPLORATION_EDITOR)
schema_version = 1
self.modifiable_user_data = user_domain.ModifiableUserData(
'display_alias', '12345', [constants.DEFAULT_LANGUAGE_CODE],
None, None, schema_version, 'user_id'
)
self.modifiable_new_user_data = user_domain.ModifiableUserData(
'display_alias3', '12345', [constants.DEFAULT_LANGUAGE_CODE],
None, None, schema_version
)
def test_validate_non_str_user_id_raises_exception(self):
self.user_settings.user_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected user_id to be a string'
):
self.user_settings.validate()
def test_validate_wrong_format_user_id_raises_exception(self):
self.user_settings.user_id = 'uid_%sA' % ('a' * 31)
with self.assertRaisesRegexp(
utils.ValidationError, 'The user ID is in a wrong format.'
):
self.user_settings.validate()
self.user_settings.user_id = 'uid_%s' % ('a' * 31)
with self.assertRaisesRegexp(
utils.ValidationError, 'The user ID is in a wrong format.'
):
self.user_settings.validate()
self.user_settings.user_id = 'a' * 36
with self.assertRaisesRegexp(
utils.ValidationError, 'The user ID is in a wrong format.'
):
self.user_settings.validate()
def test_validate_non_str_gae_id_raises_exception(self):
self.user_settings.gae_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected gae_id to be a string'
):
self.user_settings.validate()
def test_validate_non_str_pin_id(self):
self.user_settings.pin = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected PIN to be a string'
):
self.user_settings.validate()
def test_validate_invalid_length_pin_raises_error(self):
invalid_pin_values_list = ['1', '12', '1234', '123@#6', 'ABCa', '1!#a']
error_msg = (
'User PIN can only be of length %s or %s' %
(feconf.FULL_USER_PIN_LENGTH, feconf.PROFILE_USER_PIN_LENGTH)
)
for pin in invalid_pin_values_list:
with self.assertRaisesRegexp(
utils.ValidationError, error_msg
):
self.user_settings.pin = pin
self.user_settings.validate()
def test_validate_valid_length_with_numeric_char_pin_works_fine(self):
valid_pin_values_list = ['123', '12345', '764', '42343']
for pin in valid_pin_values_list:
self.user_settings.pin = pin
self.user_settings.validate()
def test_validate_valid_length_pin_with_non_numeric_char_raises_error(self):
valid_pin_values_list = ['AbC', '123A}', '1!2', 'AB!', '[123]']
error_msg = 'Only numeric characters are allowed in PIN'
for pin in valid_pin_values_list:
with self.assertRaisesRegexp(
utils.ValidationError, error_msg
):
self.user_settings.pin = pin
self.user_settings.validate()
def test_validate_empty_user_id_raises_exception(self):
self.user_settings.user_id = ''
with self.assertRaisesRegexp(
utils.ValidationError, 'No user id specified.'
):
self.user_settings.validate()
def test_validate_non_str_role_raises_exception(self):
self.user_settings.role = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected role to be a string'
):
self.user_settings.validate()
def test_validate_invalid_role_name_raises_exception(self):
self.user_settings.role = 'invalid_role'
with self.assertRaisesRegexp(
utils.ValidationError, 'Role invalid_role does not exist.'):
self.user_settings.validate()
def test_validate_non_str_display_alias_raises_error(self):
self.user_settings.display_alias = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected display_alias to be a string,'
' received %s' % self.user_settings.display_alias):
self.user_settings.validate()
def test_validate_non_str_creator_dashboard_display_pref_raises_error(self):
self.user_settings.creator_dashboard_display_pref = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected dashboard display preference to be a string'
):
self.user_settings.validate()
def test_validate_invalid_creator_dashboard_display_pref_raises_error(self):
self.user_settings.creator_dashboard_display_pref = (
'invalid_creator_dashboard_display_pref')
with self.assertRaisesRegexp(
utils.ValidationError,
'invalid_creator_dashboard_display_pref is not a valid '
'value for the dashboard display preferences.'
):
self.user_settings.validate()
def test_validate_empty_display_alias_for_profiles_raises_error(self):
self.modifiable_user_data.user_id = self.owner_id
self.modifiable_user_data.pin = '12345'
self.modifiable_user_data.display_alias = 'temp_name'
user_services.update_multiple_users_data([self.modifiable_user_data])
gae_id = self.get_gae_id_from_email(self.OWNER_EMAIL)
profile_pin = '123'
error_msg = 'Expected display_alias to be a string, received'
with self.assertRaisesRegexp(utils.ValidationError, error_msg):
self.modifiable_new_user_data.display_alias = ''
self.modifiable_new_user_data.pin = profile_pin
user_services.create_new_profiles(
gae_id, self.OWNER_EMAIL, [self.modifiable_new_user_data]
)
def test_has_not_fully_registered_for_guest_user_is_false(self):
self.assertFalse(user_services.has_fully_registered_account(None))
def test_create_new_user_with_existing_gae_id_raises_error(self):
user_id = self.user_settings.user_id
user_gae_id = self.user_settings.gae_id
with self.assertRaisesRegexp(
Exception, 'User %s already exists for gae_id %s.'
% (user_id, user_gae_id)
):
user_services.create_new_user(
user_gae_id, self.OWNER_EMAIL)
def test_cannot_set_existing_username(self):
with self.assertRaisesRegexp(
utils.ValidationError,
'Sorry, the username \"%s\" is already taken! Please pick '
'a different one.' % self.OWNER_USERNAME
):
user_services.set_username(self.owner_id, self.OWNER_USERNAME)
def test_cannot_update_user_role_with_invalid_role(self):
with self.assertRaisesRegexp(
Exception, 'Role invalid_role does not exist.'
):
user_services.update_user_role(self.owner_id, 'invalid_role')
def test_cannot_get_human_readable_user_ids_with_invalid_user_ids(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
logging_swap = self.swap(logging, 'error', _mock_logging_function)
assert_raises_user_not_found = self.assertRaisesRegexp(
Exception, 'User not found.')
with logging_swap, assert_raises_user_not_found:
user_services.get_human_readable_user_ids(['invalid_user_id'])
self.assertEqual(
observed_log_messages,
[
'User id invalid_user_id not known in list of user_ids '
'[u\'invalid_user_id\']'
])
def test_get_human_readable_user_ids(self):
# Create an unregistered user who has no username.
user_models.UserSettingsModel(
id='unregistered_user_id',
gae_id='gae_unregistered_user_id',
email='user@example.com',
username='').put()
user_ids = user_services.get_human_readable_user_ids(
[self.owner_id, feconf.SYSTEM_COMMITTER_ID, 'unregistered_user_id'])
expected_user_ids = [
'owner', 'admin',
'[Awaiting user registration: u..@example.com]']
self.assertEqual(user_ids, expected_user_ids)
def test_created_on_gets_updated_correctly(self):
# created_on should not be updated upon updating other attributes of
# the user settings model.
user_settings = user_services.create_new_user(
'gae_id', 'user@example.com')
user_settings_model = user_models.UserSettingsModel.get_by_id(
user_settings.user_id)
time_of_creation = user_settings_model.created_on
user_services.update_user_bio(user_settings.user_id, 'New bio.')
user_settings_model = user_models.UserSettingsModel.get_by_id(
user_settings.user_id)
self.assertEqual(user_settings_model.created_on, time_of_creation)
class UserAuthDetailsTests(test_utils.GenericTestBase):
def setUp(self):
super(UserAuthDetailsTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_auth_details_model = (
user_models.UserAuthDetailsModel.get_by_id(self.owner_id))
self.user_auth_details = user_services.UserAuthDetails(
self.user_auth_details_model.id,
self.user_auth_details_model.gae_id
)
self.user_auth_details.validate()
def test_validate_non_str_user_id(self):
self.user_auth_details.user_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected user_id to be a string'
):
self.user_auth_details.validate()
def test_validate_user_id(self):
self.user_auth_details.user_id = 'uid_%sA' % ('a' * 31)
with self.assertRaisesRegexp(
utils.ValidationError, 'The user ID is in a wrong format.'
):
self.user_auth_details.validate()
self.user_auth_details.user_id = 'uid_%s' % ('a' * 31)
with self.assertRaisesRegexp(
utils.ValidationError, 'The user ID is in a wrong format.'
):
self.user_auth_details.validate()
self.user_auth_details.user_id = 'a' * 36
with self.assertRaisesRegexp(
utils.ValidationError, 'The user ID is in a wrong format.'
):
self.user_auth_details.validate()
def test_validate_empty_user_id(self):
self.user_auth_details.user_id = ''
with self.assertRaisesRegexp(
utils.ValidationError, 'No user id specified.'
):
self.user_auth_details.validate()
def test_validate_parent_user_id(self):
self.user_auth_details.parent_user_id = 'uid_%sA' % ('a' * 31)
with self.assertRaisesRegexp(
utils.ValidationError, 'The parent user ID is in a wrong format.'
):
self.user_auth_details.validate()
self.user_auth_details.parent_user_id = 'uid_%s' % ('a' * 31)
with self.assertRaisesRegexp(
utils.ValidationError, 'The parent user ID is in a wrong format.'
):
self.user_auth_details.validate()
self.user_auth_details.parent_user_id = 'a' * 36
with self.assertRaisesRegexp(
utils.ValidationError, 'The parent user ID is in a wrong format.'
):
self.user_auth_details.validate()
def test_validate_non_str_gae_id(self):
self.user_auth_details.gae_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected gae_id to be a string'
):
self.user_auth_details.validate()
def test_parent_user_id_gae_id_together_raises_error(self):
self.user_auth_details.parent_user_id = (
user_models.UserSettingsModel.get_new_id(''))
with self.assertRaisesRegexp(
utils.ValidationError, 'The parent user ID and gae_id cannot be '
'present together for a user.'
):
self.user_auth_details.validate()
def test_both_parent_user_id_and_gae_id_none_raises_error(self):
self.user_auth_details.parent_user_id = None
self.user_auth_details.gae_id = None
with self.assertRaisesRegexp(
utils.ValidationError, 'The parent user ID and gae_id cannot be '
'None together for a user.'
):
self.user_auth_details.validate()
class UserContributionsTests(test_utils.GenericTestBase):
def setUp(self):
super(UserContributionsTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_contributions = user_services.get_user_contributions(
self.owner_id)
self.user_contributions.validate()
def test_validate_non_str_user_id(self):
self.user_contributions.user_id = 0
with self.assertRaisesRegexp(
Exception, 'Expected user_id to be a string'):
self.user_contributions.validate()
def test_validate_user_id(self):
self.user_contributions.user_id = ''
with self.assertRaisesRegexp(Exception, 'No user id specified.'):
self.user_contributions.validate()
def test_validate_non_list_created_exploration_ids(self):
self.user_contributions.created_exploration_ids = 0
with self.assertRaisesRegexp(
Exception, 'Expected created_exploration_ids to be a list'):
self.user_contributions.validate()
def test_validate_created_exploration_ids(self):
self.user_contributions.created_exploration_ids = [0]
with self.assertRaisesRegexp(
Exception, 'Expected exploration_id in created_exploration_ids '
'to be a string'):
self.user_contributions.validate()
def test_validate_non_list_edited_exploration_ids(self):
self.user_contributions.edited_exploration_ids = 0
with self.assertRaisesRegexp(
Exception, 'Expected edited_exploration_ids to be a list'):
self.user_contributions.validate()
def test_validate_edited_exploration_ids(self):
self.user_contributions.edited_exploration_ids = [0]
with self.assertRaisesRegexp(
Exception, 'Expected exploration_id in edited_exploration_ids '
'to be a string'):
self.user_contributions.validate()
def test_cannot_create_user_contributions_with_existing_user_id(self):
with self.assertRaisesRegexp(
Exception,
'User contributions model for user %s already exists.'
% self.owner_id):
user_services.create_user_contributions(self.owner_id, [], [])
def test_cannot_update_user_contributions_with_invalid_user_id(self):
with self.assertRaisesRegexp(
Exception,
'User contributions model for user invalid_user_id does not exist'):
user_services.update_user_contributions('invalid_user_id', [], [])
def test_cannot_update_dashboard_stats_log_with_invalid_schema_version(
self):
model = user_models.UserStatsModel.get_or_create(self.owner_id)
model.schema_version = 0
model.put()
self.assertIsNone(user_services.get_user_impact_score(self.owner_id))
with self.assertRaisesRegexp(
Exception,
'Sorry, we can only process v1-v%d dashboard stats schemas at '
'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION):
user_services.update_dashboard_stats_log(self.owner_id)
def test_flush_migration_bot_contributions_model(self):
created_exploration_ids = ['exp_1', 'exp_2']
edited_exploration_ids = ['exp_3', 'exp_4']
user_services.create_user_contributions(
feconf.MIGRATION_BOT_USER_ID, created_exploration_ids,
edited_exploration_ids)
migration_bot_contributions_model = (
user_services.get_user_contributions(feconf.MIGRATION_BOT_USER_ID))
self.assertEqual(
migration_bot_contributions_model.created_exploration_ids,
created_exploration_ids)
self.assertEqual(
migration_bot_contributions_model.edited_exploration_ids,
edited_exploration_ids)
user_services.flush_migration_bot_contributions_model()
migration_bot_contributions_model = (
user_services.get_user_contributions(feconf.MIGRATION_BOT_USER_ID))
self.assertEqual(
migration_bot_contributions_model.created_exploration_ids, [])
self.assertEqual(
migration_bot_contributions_model.edited_exploration_ids, [])
class UserContributionReviewRightsTests(test_utils.GenericTestBase):
TRANSLATOR_EMAIL = 'translator@community.org'
TRANSLATOR_USERNAME = 'translator'
VOICE_ARTIST_EMAIL = 'voiceartist@community.org'
VOICE_ARTIST_USERNAME = 'voiceartist'
QUESTION_REVIEWER_EMAIL = 'question@community.org'
QUESTION_REVIEWER_USERNAME = 'questionreviewer'
def setUp(self):
super(UserContributionReviewRightsTests, self).setUp()
self.signup(self.TRANSLATOR_EMAIL, self.TRANSLATOR_USERNAME)
self.translator_id = self.get_user_id_from_email(self.TRANSLATOR_EMAIL)
self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
self.voice_artist_id = self.get_user_id_from_email(
self.VOICE_ARTIST_EMAIL)
self.signup(
self.QUESTION_REVIEWER_EMAIL, self.QUESTION_REVIEWER_USERNAME)
self.question_reviewer_id = (
self.get_user_id_from_email(self.QUESTION_REVIEWER_EMAIL))
def test_assign_user_review_translation_suggestion_in_language(self):
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translator_id))
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
self.assertTrue(
user_services.can_review_translation_suggestions(
self.translator_id, language_code='hi'))
    def test_translation_review_assignment_adds_language_in_sorted_order(self):
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
user_contribution_rights = user_services.get_user_contribution_rights(
self.translator_id)
self.assertEqual(
user_contribution_rights.can_review_translation_for_language_codes,
['hi'])
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'en')
user_contribution_rights = user_services.get_user_contribution_rights(
self.translator_id)
self.assertEqual(
user_contribution_rights.can_review_translation_for_language_codes,
['en', 'hi'])
def test_assign_user_review_voiceover_application_in_language(self):
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voice_artist_id))
user_services.allow_user_to_review_voiceover_in_language(
self.voice_artist_id, 'hi')
self.assertTrue(
user_services.can_review_voiceover_applications(
self.voice_artist_id, language_code='hi'))
    def test_voiceover_review_assignment_adds_language_in_sorted_order(self):
user_services.allow_user_to_review_voiceover_in_language(
self.voice_artist_id, 'hi')
user_contribution_rights = user_services.get_user_contribution_rights(
self.voice_artist_id)
self.assertEqual(
user_contribution_rights.can_review_voiceover_for_language_codes,
['hi'])
user_services.allow_user_to_review_voiceover_in_language(
self.voice_artist_id, 'en')
user_contribution_rights = user_services.get_user_contribution_rights(
self.voice_artist_id)
self.assertEqual(
user_contribution_rights.can_review_voiceover_for_language_codes,
['en', 'hi'])
def test_assign_user_review_question_suggestion(self):
self.assertFalse(
user_services.can_review_question_suggestions(self.voice_artist_id))
user_services.allow_user_to_review_question(self.voice_artist_id)
self.assertTrue(
user_services.can_review_question_suggestions(self.voice_artist_id))
def test_get_users_contribution_rights_with_multiple_reviewer_user_ids(
self):
user_services.allow_user_to_review_question(self.question_reviewer_id)
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'en')
expected_reviewer_ids = [self.question_reviewer_id, self.translator_id]
users_contribution_rights = (
user_services.get_users_contribution_rights(expected_reviewer_ids)
)
reviewer_ids = [
user_contribution_rights.id for user_contribution_rights in
users_contribution_rights
]
self.assertEqual(len(users_contribution_rights), 2)
self.assertItemsEqual(reviewer_ids, expected_reviewer_ids)
def test_get_users_contribution_rights_with_one_reviewer_user_id(
self):
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'en')
users_contribution_rights = (
user_services.get_users_contribution_rights([self.translator_id])
)
self.assertEqual(len(users_contribution_rights), 1)
self.assertEqual(users_contribution_rights[0].id, self.translator_id)
self.assertEqual(
(
users_contribution_rights[0]
.can_review_translation_for_language_codes
), ['en', 'hi']
)
def test_get_users_contribution_rights_returns_empty_for_no_reviewers_ids(
self):
users_contribution_rights = (
user_services.get_users_contribution_rights([])
)
self.assertEqual(len(users_contribution_rights), 0)
def test_get_all_reviewers_contribution_rights(self):
self.assertEqual(
user_services.get_all_reviewers_contribution_rights(), [])
user_services.allow_user_to_review_voiceover_in_language(
self.voice_artist_id, 'hi')
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
all_reviewers = user_services.get_all_reviewers_contribution_rights()
self.assertItemsEqual(
[reviewer.id for reviewer in all_reviewers],
[self.voice_artist_id, self.translator_id])
def test_get_reviewer_user_ids_to_notify_when_reviewers_want_notifications(
self):
# Assert that there are no reviewers at the start.
self.assertEqual(
user_services.get_all_reviewers_contribution_rights(), [])
# Add a question reviewer and a translation reviewer.
user_services.allow_user_to_review_question(self.question_reviewer_id)
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
# Ensure that these reviewers want email updates.
user_services.update_email_preferences(
self.question_reviewer_id, True,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
user_services.update_email_preferences(
self.translator_id, True,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
reviewer_ids_to_notify = (
user_services.get_reviewer_user_ids_to_notify())
self.assertEqual(len(reviewer_ids_to_notify), 2)
self.assertIn(self.question_reviewer_id, reviewer_ids_to_notify)
self.assertIn(self.translator_id, reviewer_ids_to_notify)
def test_get_reviewer_user_ids_to_notify_when_reviewers_do_not_want_emails(
self):
# Assert that there are no reviewers at the start.
self.assertEqual(
user_services.get_all_reviewers_contribution_rights(), [])
# Add a question reviewer and a translation reviewer.
user_services.allow_user_to_review_question(self.question_reviewer_id)
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
# Ensure that these reviewers do not want email updates.
user_services.update_email_preferences(
self.question_reviewer_id, False,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
user_services.update_email_preferences(
self.translator_id, False,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
reviewer_ids_to_notify = (
user_services.get_reviewer_user_ids_to_notify())
self.assertEqual(len(reviewer_ids_to_notify), 0)
def test_get_reviewer_user_ids_to_notify_returns_empty_for_no_reviewers(
self):
# Assert that there are no reviewers.
self.assertEqual(
user_services.get_all_reviewers_contribution_rights(), [])
reviewer_ids_to_notify = (
user_services.get_reviewer_user_ids_to_notify())
self.assertEqual(len(reviewer_ids_to_notify), 0)
def test_remove_translation_review_rights_in_language(self):
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
self.assertTrue(
user_services.can_review_translation_suggestions(
self.translator_id, language_code='hi'))
user_services.remove_translation_review_rights_in_language(
self.translator_id, 'hi')
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translator_id, language_code='hi'))
def test_remove_voiceover_review_rights_in_language(self):
user_services.allow_user_to_review_voiceover_in_language(
self.voice_artist_id, 'hi')
self.assertTrue(
user_services.can_review_voiceover_applications(
self.voice_artist_id, language_code='hi'))
user_services.remove_voiceover_review_rights_in_language(
self.voice_artist_id, 'hi')
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voice_artist_id, language_code='hi'))
def test_remove_question_review_rights(self):
user_services.allow_user_to_review_question(self.question_reviewer_id)
self.assertTrue(
user_services.can_review_question_suggestions(
self.question_reviewer_id))
user_services.remove_question_review_rights(self.question_reviewer_id)
self.assertFalse(
user_services.can_review_question_suggestions(
self.question_reviewer_id))
def test_remove_contribution_reviewer(self):
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
user_services.allow_user_to_review_voiceover_in_language(
self.translator_id, 'hi')
user_services.allow_user_to_review_question(self.translator_id)
self.assertTrue(
user_services.can_review_translation_suggestions(
self.translator_id, language_code='hi'))
self.assertTrue(
user_services.can_review_voiceover_applications(
self.translator_id, language_code='hi'))
self.assertTrue(
user_services.can_review_question_suggestions(
self.translator_id))
user_services.remove_contribution_reviewer(self.translator_id)
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translator_id, language_code='hi'))
self.assertFalse(
user_services.can_review_voiceover_applications(
self.translator_id, language_code='hi'))
self.assertFalse(
user_services.can_review_question_suggestions(
self.translator_id))
def test_removal_of_all_review_rights_deletes_model(self):
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
user_services.allow_user_to_review_question(self.translator_id)
user_services.remove_question_review_rights(self.translator_id)
right_model = user_models.UserContributionRightsModel.get_by_id(
self.translator_id)
self.assertIsNotNone(right_model)
user_services.remove_translation_review_rights_in_language(
self.translator_id, 'hi')
right_model = user_models.UserContributionRightsModel.get_by_id(
self.translator_id)
self.assertIsNone(right_model)
def test_get_question_reviewer_usernames_with_language_code_raise_error(
self):
with self.assertRaisesRegexp(
Exception, 'Expected language_code to be None'):
user_services.get_contribution_reviewer_usernames(
constants.REVIEW_CATEGORY_QUESTION, language_code='hi')
def test_get_contribution_reviewer_usernames_in_invalid_category_raise_error( # pylint: disable=line-too-long
self):
with self.assertRaisesRegexp(
Exception, 'Invalid review category: invalid_category'):
user_services.get_contribution_reviewer_usernames(
'invalid_category', language_code='hi')
| 42.08734
| 113
| 0.676259
|
7c9bd027e5a58bbe95f56291ecb0bab88985b99e
| 146,605
|
py
|
Python
|
dask/dataframe/tests/test_dataframe.py
|
celsiustx/dask
|
b1e2948a553d60f901a1e475a55dd1566d8eb7e7
|
[
"BSD-3-Clause"
] | null | null | null |
dask/dataframe/tests/test_dataframe.py
|
celsiustx/dask
|
b1e2948a553d60f901a1e475a55dd1566d8eb7e7
|
[
"BSD-3-Clause"
] | 3
|
2020-05-15T23:06:48.000Z
|
2020-07-30T09:37:05.000Z
|
dask/dataframe/tests/test_dataframe.py
|
celsiustx/dask
|
b1e2948a553d60f901a1e475a55dd1566d8eb7e7
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from itertools import product
from operator import add
from unittest import TestCase
import pytest
import numpy as np
import pandas as pd
from pandas.io.formats import format as pandas_format
from pandas.testing import assert_frame_equal, assert_series_equal
import dask
import dask.array as da
from dask.array.numpy_compat import _numpy_118, _numpy_120
import dask.dataframe as dd
from dask.blockwise import fuse_roots
from dask.dataframe import _compat
from dask.dataframe._compat import tm, PANDAS_GT_100, PANDAS_GT_110
from dask.base import compute, compute_as_if_collection
from dask.utils import put_lines, M
from dask.dataframe.core import (
repartition_divisions,
aca,
_concat,
Scalar,
has_parallel_type,
total_mem_usage,
is_broadcastable,
)
from dask.dataframe import methods
from dask.dataframe.utils import assert_eq, make_meta, assert_max_deps, PANDAS_VERSION
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 5, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
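# Usage sketch for CHECK_FREQ (hedged; ddf_dt / df_dt below are hypothetical
# datetime-indexed frames, not defined in this module): splat it into assert_eq
# so that, on pandas >= 1.1, the index freq attribute is not compared.
#     assert_eq(ddf_dt, df_dt, **CHECK_FREQ)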
def test_dataframe_doc():
doc = d.add.__doc__
disclaimer = "Some inconsistencies with the Dask version may exist."
assert disclaimer in doc
def test_dataframe_doc_from_non_pandas():
class Foo:
def foo(self):
"""This is a new docstring that I just made up
Parameters:
----------
None
"""
d._bind_operator_method("foo", Foo.foo, original=Foo)
try:
doc = d.foo.__doc__
disclaimer = "Some inconsistencies with the Dask version may exist."
assert disclaimer in doc
assert "new docstring that I just made up" in doc
finally:
# make sure to clean up this alteration of the dd.DataFrame class
del dd.DataFrame.foo
def test_Dataframe():
expected = pd.Series(
[2, 3, 4, 5, 6, 7, 8, 9, 10], index=[0, 1, 3, 5, 6, 8, 9, 9, 9], name="a"
)
assert_eq(d["a"] + 1, expected)
tm.assert_index_equal(d.columns, pd.Index(["a", "b"]))
assert_eq(d[d["b"] > 2], full[full["b"] > 2])
assert_eq(d[["a", "b"]], full[["a", "b"]])
assert_eq(d.a, full.a)
assert d.b.mean().compute() == full.b.mean()
assert np.allclose(d.b.var().compute(), full.b.var())
assert np.allclose(d.b.std().compute(), full.b.std())
assert d.index._name == d.index._name # this is deterministic
assert repr(d)
def test_head_tail():
assert_eq(d.head(2), full.head(2))
assert_eq(d.head(3), full.head(3))
assert_eq(d.head(2), dsk[("x", 0)].head(2))
assert_eq(d["a"].head(2), full["a"].head(2))
assert_eq(d["a"].head(3), full["a"].head(3))
assert_eq(d["a"].head(2), dsk[("x", 0)]["a"].head(2))
assert sorted(d.head(2, compute=False).dask) == sorted(
d.head(2, compute=False).dask
)
assert sorted(d.head(2, compute=False).dask) != sorted(
d.head(3, compute=False).dask
)
assert_eq(d.tail(2), full.tail(2))
assert_eq(d.tail(3), full.tail(3))
assert_eq(d.tail(2), dsk[("x", 2)].tail(2))
assert_eq(d["a"].tail(2), full["a"].tail(2))
assert_eq(d["a"].tail(3), full["a"].tail(3))
assert_eq(d["a"].tail(2), dsk[("x", 2)]["a"].tail(2))
assert sorted(d.tail(2, compute=False).dask) == sorted(
d.tail(2, compute=False).dask
)
assert sorted(d.tail(2, compute=False).dask) != sorted(
d.tail(3, compute=False).dask
)
@pytest.mark.filterwarnings("ignore:Insufficient:UserWarning")
def test_head_npartitions():
assert_eq(d.head(5, npartitions=2), full.head(5))
assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))
assert_eq(d.head(5, npartitions=-1), full.head(5))
assert_eq(d.head(7, npartitions=-1), full.head(7))
assert_eq(d.head(2, npartitions=-1), full.head(2))
with pytest.raises(ValueError):
d.head(2, npartitions=5)
def test_head_npartitions_warn():
match = "5 elements requested, only 3 elements"
with pytest.warns(UserWarning, match=match):
d.head(5)
with pytest.warns(None):
d.head(100)
with pytest.warns(None):
d.head(7)
with pytest.warns(None):
d.head(7, npartitions=2)
def test_index_head():
assert_eq(d.index.head(2), full.index[:2])
assert_eq(d.index.head(3), full.index[:3])
def test_Series():
assert isinstance(d.a, dd.Series)
assert isinstance(d.a + 1, dd.Series)
assert_eq((d + 1), full + 1)
def test_Index():
for case in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5),
index=pd.date_range("2011-01-01", freq="D", periods=10),
),
]:
ddf = dd.from_pandas(case, 3)
assert_eq(ddf.index, case.index)
pytest.raises(AttributeError, lambda: ddf.index.index)
def test_Scalar():
val = np.int64(1)
s = Scalar({("a", 0): val}, "a", "i8")
assert hasattr(s, "dtype")
assert "dtype" in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, dtype=int64>"
val = pd.Timestamp("2001-01-01")
s = Scalar({("a", 0): val}, "a", val)
assert not hasattr(s, "dtype")
assert "dtype" not in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, type=Timestamp>"
def test_scalar_raises():
val = np.int64(1)
s = Scalar({("a", 0): val}, "a", "i8")
msg = "cannot be converted to a boolean value"
with pytest.raises(TypeError, match=msg):
bool(s)
def test_attributes():
assert "a" in dir(d)
assert "foo" not in dir(d)
pytest.raises(AttributeError, lambda: d.foo)
df = dd.from_pandas(pd.DataFrame({"a b c": [1, 2, 3]}), npartitions=2)
assert "a b c" not in dir(df)
df = dd.from_pandas(pd.DataFrame({"a": [1, 2], 5: [1, 2]}), npartitions=2)
assert "a" in dir(df)
assert 5 not in dir(df)
df = dd.from_pandas(_compat.makeTimeDataFrame(), npartitions=3)
pytest.raises(AttributeError, lambda: df.foo)
def test_partition_sizes():
df = dd.from_pandas(
pd.DataFrame([{"i": f"{i}{i}"} for i in range(100)]), npartitions=3
)
assert df.partition_sizes == (34, 34, 32)
df["len"] = df.i.map(len)
assert df.partition_sizes == (34, 34, 32)
assert df["len"].partition_sizes == (34, 34, 32)
assert df["i"].partition_sizes == (34, 34, 32)
assert len(df.compute()) == 100
assert tuple(len(partition.compute()) for partition in df.partitions) == (
34,
34,
32,
)
for series in [
df.len + 2,
df.len - 2,
df.len * 2,
df.len / 2,
df.len % 2,
df.len % 2 == 0,
]:
assert series.partition_sizes == (34, 34, 32)
evens = df.len % 2 == 0
assert df[evens].partition_sizes is None
assert evens[evens].partition_sizes is None
# Boolean-Series slicing of a dask array (originally drafted as a separate
# test_array_series_slice test); it reuses the evens mask defined above.
from dask.array import from_array
from numpy import array
a = array(range(1100)).reshape((100, 11))
d = from_array(a, chunks=(23, 4))
assert d.chunks == ((23, 23, 23, 23, 8), (4, 4, 3))
from numpy.testing import assert_array_equal
dask_sliced = d[evens]
np_sliced = a[evens.compute()]
assert_array_equal(dask_sliced.compute(), np_sliced)
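# Hedged note on the boolean-mask slice above: the mask's per-partition True
# counts are only known at compute time, so partition_sizes stays None until
# then. A minimal sketch of recovering the realised sizes afterwards (sliced
# is a hypothetical name):
#     sliced = df[evens]
#     sizes = tuple(sliced.map_partitions(len).compute())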
def check_partition_sizes(df, *args, **kwargs):
if args or kwargs:
if args:
assert len(args) == 1
assert not kwargs
partition_sizes = args[0]
else:
assert not args
assert "partition_sizes" in kwargs
partition_sizes = kwargs.pop("partition_sizes")
assert not kwargs
assert df.partition_sizes == partition_sizes
if partition_sizes is not None:
computed = compute(*[partition for partition in df.partitions])
actual_sizes = tuple(len(partition) for partition in computed)
assert actual_sizes == partition_sizes
else:
computed = compute(*[partition for partition in df.partitions])
partition_sizes = tuple(len(partition) for partition in computed)
assert partition_sizes == df.partition_sizes
def test_repartition_sizes():
df = dd.from_pandas(
pd.DataFrame([{"i": f"{i}{i}"} for i in range(100)]), npartitions=3
)
check_partition_sizes(df, (34, 34, 32))
df["len"] = df.i.map(len)
assert df.partition_sizes == (34, 34, 32)
assert df["len"].partition_sizes == (34, 34, 32)
assert df["i"].partition_sizes == (34, 34, 32)
assert len(df.compute()) == 100
evens = df.len % 2 == 0
assert df[evens].partition_sizes is None
assert evens[evens].partition_sizes is None
df2 = df.repartition(partition_sizes=(40, 40, 20))
from pandas.testing import assert_frame_equal
assert_frame_equal(df.compute(), df2.compute())
check_partition_sizes(df2, (40, 40, 20))
df3 = df.repartition(partition_sizes=[10] * 10)
from pandas.testing import assert_frame_equal
assert_frame_equal(df.compute(), df3.compute())
check_partition_sizes(df3, (10,) * 10)
class Checks:
df = pd.DataFrame([{"i": f"{i}{i}"} for i in range(100)])
ddf = dd.from_pandas(df, npartitions=3)
# def __init__(self, dask, pandas, cmp, seed=123):
def __init__(self, seed=123):
# self.dask = dask
# self.pandas = pandas
# self.cmp = cmp
np.random.seed(seed)
def check(self, fn, pandas_fn=None):
l_exc = None
try:
l = fn(self.dask).compute()
except Exception as exc:
l_exc = exc
pandas_fn = pandas_fn or fn
r_exc = None
try:
r = pandas_fn(self.pandas)
except Exception as exc:
r_exc = exc
if l_exc or r_exc:
# TODO: match error msgs
assert type(l_exc) == type(r_exc)
else:
self.cmp(l, r)
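# Usage sketch for check (hedged; mirrors the iloc tests below): it runs the
# same callable against the Dask and pandas objects bound by the concrete
# subclasses and compares results, or compares exception types if both raise.
#     self.check(lambda df: df.iloc[10:34])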
bools = np.random.choice(a=[False, True], size=(100,), p=[0.5, 0.5])
def check_arr_slice(self, elems, chunks=None):
pnds_key = np.array(elems)
if chunks:
dask_key = da.from_array(elems, chunks)
else:
dask_key = pnds_key
self.check(lambda df: df.iloc[dask_key], lambda df: df.iloc[pnds_key])
def test_ranges(self):
idxs = (None, 0, 1, 10, 33, 34, 35, 68, 69, 70, 99, 100)
steps = [None] + [
sgn * i for i in [1, 2, 5, 30, 70, 99, 100] for sgn in [1, -1]
]
def check(idx, args):
self.check(lambda df: df.iloc[range(*args)])
idx = 0
for start in idxs:
for end in idxs:
if end is None:
continue
for step in steps:
if step is not None:
if step < 0:
end -= 1
if start is None:
args = (end,)
else:
args = (start, end)
if step is not None:
args = (*args, step)
try:
check(idx, args)
idx += 1
except AssertionError:
self.fail("iloc range check failed for args={}".format(args))
def test_slices(self):
class Check:
def __getitem__(slf, slc):
self.check(lambda df: df.iloc[slc])
check = Check()
check[:100] # (lambda df: df[:100])
check[:]
check[0:100]
check[:34]
check[0:34]
check[10:34]
check[10:24]
check[:10]
check[0:10]
# 2nd partition
check[34:68]
check[35:68]
check[34:67]
check[35:67]
check[40:50]
# 3rd/last partition:
check[68:100]
check[69:100]
check[68:]
check[69:]
check[68:99]
check[69:99]
check[68:-1]
check[69:-1]
# empty slices
check[0:0]
check[1:1]
check[10:10]
check[33:33]
check[34:34]
check[35:35]
check[99:99]
check[100:100]
check[-1:-1]
check[-100:-100]
check[101:101]
check[200:200]
check[-101:-101]
check[-200:-200]
# across partitions:
check[10:90]
check[10:-10]
check[-90:-10]
check[1:-1]
check[-99:99]
check[:50]
check[1:50]
check[33:50]
check[:68]
check[1:68]
check[33:68]
check[:69]
check[1:69]
check[33:69]
check[34:69]
check[35:69]
check[:200]
check[0:200]
check[10:200]
check[50:200]
check[90:200]
check[::2]
check[99:-1:-1]
def test_arrays(self):
def check_arr_slice(chunks=None):
self.check_arr_slice(self.bools, chunks)
check_arr_slice()
check_arr_slice(
(
34,
34,
32,
)
)
check_arr_slice((100,))
check_arr_slice(
(
50,
50,
)
)
check_arr_slice((10,) * 10)
check_arr_slice((1,) * 100)
# def test_ranges(self):
# def check(range):
# self.check(lambda df: df.iloc[range])
#
# check(range(10))
def test_ints(self):
# assert_frame_equal(self.df.iloc[range(10)], self.df.iloc[:10])
self.check(lambda df: df.iloc[range(1)])
self.check(lambda df: df.iloc[range(10)])
self.check(lambda df: df.iloc[range(99)])
self.check(lambda df: df.iloc[range(100)])
self.check(lambda df: df.iloc[range(99, -1, -1)])
# Pandas squeezes these to Series, but Dask generally doesn't have enough info to make that
# decision at graph-construction time. In principle, DDF.iloc does know whether the row-indexer
# will return exactly one row, so it could mimic Pandas. That would be a departure from other
# parts of Dask, which don't squeeze in situations like this. TODO: it's probably worth adding
# the squeeze, for consistency with Pandas.
# check(lambda df: df.iloc[0])
# check(lambda df: df.iloc[10])
# check(lambda df: df.iloc[34])
# check(lambda df: df.iloc[-1])
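# A hedged illustration of the squeeze difference noted above (ddf / df stand
# for the Dask / pandas pair bound by the subclasses below):
#     df.iloc[0]     # pandas squeezes the single row down to a Series
#     ddf.iloc[[0]]  # the Dask version keeps a one-row DataFrame; calling
#                    # .compute().squeeze() recovers the pandas behaviour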
class DataFrameIloc(TestCase, Checks):
dask = Checks.ddf
pandas = Checks.df
def cmp(self, l, r):
assert_frame_equal(l, r)
class SeriesIloc(TestCase, Checks):
dask = Checks.ddf.i
pandas = Checks.df.i
def cmp(self, l, r):
assert_series_equal(l, r)
def test_column_names():
tm.assert_index_equal(d.columns, pd.Index(["a", "b"]))
tm.assert_index_equal(d[["b", "a"]].columns, pd.Index(["b", "a"]))
assert d["a"].name == "a"
assert (d["a"] + 1).name == "a"
assert (d["a"] + d["b"]).name is None
def test_index_names():
assert d.index.name is None
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = dd.from_pandas(df, 3)
assert ddf.index.name == "x"
assert ddf.index.compute().name == "x"
@pytest.mark.parametrize(
"npartitions",
[
1,
pytest.param(
2,
marks=pytest.mark.xfail(
not dd._compat.PANDAS_GT_110, reason="Fixed upstream."
),
),
],
)
def test_timezone_freq(npartitions):
s_naive = pd.Series(pd.date_range("20130101", periods=10))
s_aware = pd.Series(pd.date_range("20130101", periods=10, tz="US/Eastern"))
pdf = pd.DataFrame({"tz": s_aware, "notz": s_naive})
ddf = dd.from_pandas(pdf, npartitions=npartitions)
assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq
def test_rename_columns():
# GH 819
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
ddf.columns = ["x", "y"]
df.columns = ["x", "y"]
tm.assert_index_equal(ddf.columns, pd.Index(["x", "y"]))
tm.assert_index_equal(ddf._meta.columns, pd.Index(["x", "y"]))
assert_eq(ddf, df)
msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with pytest.raises(ValueError) as err:
ddf.columns = [1, 2, 3, 4]
assert msg in str(err.value)
# Multi-index columns
df = pd.DataFrame({("A", "0"): [1, 2, 2, 3], ("B", 1): [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df.columns = ["x", "y"]
ddf.columns = ["x", "y"]
tm.assert_index_equal(ddf.columns, pd.Index(["x", "y"]))
tm.assert_index_equal(ddf._meta.columns, pd.Index(["x", "y"]))
assert_eq(ddf, df)
def test_rename_series():
# GH 819
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
ds = dd.from_pandas(s, 2)
s.name = "renamed"
ds.name = "renamed"
assert s.name == "renamed"
assert_eq(ds, s)
ind = s.index
dind = ds.index
ind.name = "renamed"
dind.name = "renamed"
assert ind.name == "renamed"
with warnings.catch_warnings():
if _numpy_118:
# Catch DeprecationWarning from numpy from rewrite_blockwise
# where we attempt to do `'str' in ndarray`.
warnings.simplefilter("ignore", DeprecationWarning)
assert_eq(dind, ind)
def test_rename_series_method():
# Series name
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
ds = dd.from_pandas(s, 2)
assert_eq(ds.rename("y"), s.rename("y"))
assert ds.name == "x" # no mutation
assert_eq(ds.rename(), s.rename())
ds.rename("z", inplace=True)
s.rename("z", inplace=True)
assert ds.name == "z"
assert_eq(ds, s)
def test_rename_series_method_2():
# Series index
s = pd.Series(["a", "b", "c", "d", "e", "f", "g"], name="x")
ds = dd.from_pandas(s, 2)
for is_sorted in [True, False]:
res = ds.rename(lambda x: x ** 2, sorted_index=is_sorted)
assert_eq(res, s.rename(lambda x: x ** 2))
assert res.known_divisions == is_sorted
res = ds.rename(s, sorted_index=is_sorted)
assert_eq(res, s.rename(s))
assert res.known_divisions == is_sorted
with pytest.raises(ValueError):
ds.rename(lambda x: -x, sorted_index=True)
assert_eq(ds.rename(lambda x: -x), s.rename(lambda x: -x))
res = ds.rename(ds)
assert_eq(res, s.rename(s))
assert not res.known_divisions
ds2 = ds.clear_divisions()
res = ds2.rename(lambda x: x ** 2, sorted_index=True)
assert_eq(res, s.rename(lambda x: x ** 2))
assert not res.known_divisions
res = ds.rename(lambda x: x ** 2, inplace=True, sorted_index=True)
assert res is ds
s.rename(lambda x: x ** 2, inplace=True)
assert_eq(ds, s)
@pytest.mark.parametrize(
"method,test_values", [("tdigest", (6, 10)), ("dask", (4, 20))]
)
def test_describe_numeric(method, test_values):
if method == "tdigest":
pytest.importorskip("crick")
# prepare test case which approx quantiles will be the same as actuals
s = pd.Series(list(range(test_values[1])) * test_values[0])
df = pd.DataFrame(
{
"a": list(range(test_values[1])) * test_values[0],
"b": list(range(test_values[0])) * test_values[1],
}
)
ds = dd.from_pandas(s, test_values[0])
ddf = dd.from_pandas(df, test_values[0])
test_quantiles = [0.25, 0.75]
assert_eq(df.describe(), ddf.describe(percentiles_method=method))
assert_eq(s.describe(), ds.describe(percentiles_method=method))
assert_eq(
df.describe(percentiles=test_quantiles),
ddf.describe(percentiles=test_quantiles, percentiles_method=method),
)
assert_eq(s.describe(), ds.describe(split_every=2, percentiles_method=method))
assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))
# describe() should exclude the string column from the numeric summary
df = pd.DataFrame(
{
"a": list(range(test_values[1])) * test_values[0],
"b": list(range(test_values[0])) * test_values[1],
"c": list("abcdef"[: test_values[0]]) * test_values[1],
}
)
ddf = dd.from_pandas(df, test_values[0])
assert_eq(df.describe(), ddf.describe(percentiles_method=method))
assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))
@pytest.mark.xfail(
PANDAS_VERSION == "0.24.2",
reason="Known bug in Pandas. See https://github.com/pandas-dev/pandas/issues/24011.",
)
@pytest.mark.parametrize(
"include,exclude,percentiles,subset",
[
(None, None, None, ["c", "d"]), # numeric
(None, None, None, ["c", "d", "f"]), # numeric + timedelta
(None, None, None, ["c", "d", "g"]), # numeric + bool
(None, None, None, ["c", "d", "f", "g"]), # numeric + bool + timedelta
(None, None, None, ["f", "g"]), # bool + timedelta
pytest.param(
"all",
None,
None,
None,
marks=pytest.mark.xfail(PANDAS_GT_110, reason="upstream changes"),
),
pytest.param(
["number"],
None,
[0.25, 0.5],
None,
marks=pytest.mark.xfail(PANDAS_GT_110, reason="upstream changes"),
),
pytest.param(
[np.timedelta64],
None,
None,
None,
marks=pytest.mark.xfail(PANDAS_GT_110, reason="upstream changes"),
),
pytest.param(
["number", "object"],
None,
[0.25, 0.75],
None,
marks=pytest.mark.xfail(PANDAS_GT_110, reason="upstream changes"),
),
pytest.param(
None,
["number", "object"],
None,
None,
marks=pytest.mark.xfail(PANDAS_GT_110, reason="upstream changes"),
),
pytest.param(
["object", "datetime", "bool"],
None,
None,
None,
marks=pytest.mark.xfail(PANDAS_GT_110, reason="upstream changes"),
),
],
)
def test_describe(include, exclude, percentiles, subset):
data = {
"a": ["aaa", "bbb", "bbb", None, None, "zzz"] * 2,
"c": [None, 0, 1, 2, 3, 4] * 2,
"d": [None, 0, 1] * 4,
"e": [
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 07:56:23.858694"),
pd.Timestamp("2017-05-09 05:59:58.938999"),
None,
None,
]
* 2,
"f": [
np.timedelta64(3, "D"),
np.timedelta64(1, "D"),
None,
None,
np.timedelta64(3, "D"),
np.timedelta64(1, "D"),
]
* 2,
"g": [True, False, True] * 4,
}
# Arrange
df = pd.DataFrame(data)
if subset is not None:
df = df.loc[:, subset]
ddf = dd.from_pandas(df, 2)
# Act
desc_ddf = ddf.describe(include=include, exclude=exclude, percentiles=percentiles)
desc_df = df.describe(include=include, exclude=exclude, percentiles=percentiles)
# Assert
assert_eq(desc_ddf, desc_df)
# Check series
if subset is None:
for col in ["a", "c", "e", "g"]:
assert_eq(
df[col].describe(include=include, exclude=exclude),
ddf[col].describe(include=include, exclude=exclude),
)
def test_describe_empty():
df_none = pd.DataFrame({"A": [None, None]})
ddf_none = dd.from_pandas(df_none, 2)
df_len0 = pd.DataFrame({"A": [], "B": []})
ddf_len0 = dd.from_pandas(df_len0, 2)
ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)
# Pandas uses different dtypes for the resulting describe dataframe when there
# are only None values, so pre-compute the dask df to bypass the _meta check.
assert_eq(
df_none.describe(), ddf_none.describe(percentiles_method="dask").compute()
)
with pytest.raises(ValueError):
ddf_len0.describe(percentiles_method="dask").compute()
with pytest.raises(ValueError):
ddf_nocols.describe(percentiles_method="dask").compute()
def test_describe_empty_tdigest():
pytest.importorskip("crick")
df_none = pd.DataFrame({"A": [None, None]})
ddf_none = dd.from_pandas(df_none, 2)
df_len0 = pd.DataFrame({"A": []})
ddf_len0 = dd.from_pandas(df_len0, 2)
ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)
# Pandas uses different dtypes for the resulting describe dataframe when there
# are only None values, so pre-compute the dask df to bypass the _meta check.
assert_eq(
df_none.describe(), ddf_none.describe(percentiles_method="tdigest").compute()
)
with warnings.catch_warnings():
# dask.dataframe should probably filter this, to match pandas, but
# it seems quite difficult.
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(df_len0.describe(), ddf_len0.describe(percentiles_method="tdigest"))
with pytest.raises(ValueError):
ddf_nocols.describe(percentiles_method="tdigest").compute()
def test_describe_for_possibly_unsorted_q():
"""make sure describe is sorting percentiles parameter, q, properly and can
handle lists, tuples and ndarrays.
See https://github.com/dask/dask/issues/4642.
"""
# prepare test case where quantiles should equal values
A = da.arange(0, 101)
ds = dd.from_dask_array(A)
for q in [None, [0.25, 0.50, 0.75], [0.25, 0.50, 0.75, 0.99], [0.75, 0.5, 0.25]]:
for f_convert in [list, tuple, np.array]:
if q is None:
r = ds.describe(percentiles=q).compute()
else:
r = ds.describe(percentiles=f_convert(q)).compute()
assert_eq(r["25%"], 25.0)
assert_eq(r["50%"], 50.0)
assert_eq(r["75%"], 75.0)
def test_cumulative():
index = ["row{:03d}".format(i) for i in range(100)]
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"), index=index)
df_out = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"), index=index)
ddf = dd.from_pandas(df, 5)
ddf_out = dd.from_pandas(df_out, 5)
assert_eq(ddf.cumsum(), df.cumsum())
assert_eq(ddf.cumprod(), df.cumprod())
assert_eq(ddf.cummin(), df.cummin())
assert_eq(ddf.cummax(), df.cummax())
assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))
assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))
assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))
assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))
np.cumsum(ddf, out=ddf_out)
assert_eq(ddf_out, df.cumsum())
np.cumprod(ddf, out=ddf_out)
assert_eq(ddf_out, df.cumprod())
ddf.cummin(out=ddf_out)
assert_eq(ddf_out, df.cummin())
ddf.cummax(out=ddf_out)
assert_eq(ddf_out, df.cummax())
np.cumsum(ddf, out=ddf_out, axis=1)
assert_eq(ddf_out, df.cumsum(axis=1))
np.cumprod(ddf, out=ddf_out, axis=1)
assert_eq(ddf_out, df.cumprod(axis=1))
ddf.cummin(out=ddf_out, axis=1)
assert_eq(ddf_out, df.cummin(axis=1))
ddf.cummax(out=ddf_out, axis=1)
assert_eq(ddf_out, df.cummax(axis=1))
assert_eq(ddf.a.cumsum(), df.a.cumsum())
assert_eq(ddf.a.cumprod(), df.a.cumprod())
assert_eq(ddf.a.cummin(), df.a.cummin())
assert_eq(ddf.a.cummax(), df.a.cummax())
# With NaNs
df = pd.DataFrame(
{
"a": [1, 2, np.nan, 4, 5, 6, 7, 8],
"b": [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],
"c": [np.nan] * 8,
}
)
ddf = dd.from_pandas(df, 3)
assert_eq(df.cumsum(), ddf.cumsum())
assert_eq(df.cummin(), ddf.cummin())
assert_eq(df.cummax(), ddf.cummax())
assert_eq(df.cumprod(), ddf.cumprod())
assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))
assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))
assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))
assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))
assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))
assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))
assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))
assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))
assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))
assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))
assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))
assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))
@pytest.mark.parametrize(
"func",
[
M.cumsum,
M.cumprod,
pytest.param(
M.cummin,
marks=[
pytest.mark.xfail(
reason="ValueError: Can only compare identically-labeled Series objects"
)
],
),
pytest.param(
M.cummax,
marks=[
pytest.mark.xfail(
reason="ValueError: Can only compare identically-labeled Series objects"
)
],
),
],
)
def test_cumulative_empty_partitions(func):
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=4)
assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))
df = pd.DataFrame({"x": [1, 2, 3, 4, None, 5, 6, None, 7, 8]})
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))
def test_dropna():
df = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, 6],
},
index=[10, 20, 30, 40, 50, 60],
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.x.dropna(), df.x.dropna())
assert_eq(ddf.y.dropna(), df.y.dropna())
assert_eq(ddf.z.dropna(), df.z.dropna())
assert_eq(ddf.dropna(), df.dropna())
assert_eq(ddf.dropna(how="all"), df.dropna(how="all"))
assert_eq(ddf.dropna(subset=["x"]), df.dropna(subset=["x"]))
assert_eq(ddf.dropna(subset=["y", "z"]), df.dropna(subset=["y", "z"]))
assert_eq(
ddf.dropna(subset=["y", "z"], how="all"),
df.dropna(subset=["y", "z"], how="all"),
)
# threshold
assert_eq(df.dropna(thresh=None), df.loc[[20, 40]])
assert_eq(ddf.dropna(thresh=None), df.dropna(thresh=None))
assert_eq(df.dropna(thresh=0), df.loc[:])
assert_eq(ddf.dropna(thresh=0), df.dropna(thresh=0))
assert_eq(df.dropna(thresh=1), df.loc[[10, 20, 30, 40, 60]])
assert_eq(ddf.dropna(thresh=1), df.dropna(thresh=1))
assert_eq(df.dropna(thresh=2), df.loc[[10, 20, 30, 40, 60]])
assert_eq(ddf.dropna(thresh=2), df.dropna(thresh=2))
assert_eq(df.dropna(thresh=3), df.loc[[20, 40]])
assert_eq(ddf.dropna(thresh=3), df.dropna(thresh=3))
@pytest.mark.parametrize("lower, upper", [(2, 5), (2.5, 3.5)])
def test_clip(lower, upper):
df = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]}
)
ddf = dd.from_pandas(df, 3)
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
ds = dd.from_pandas(s, 3)
assert_eq(ddf.clip(lower=lower, upper=upper), df.clip(lower=lower, upper=upper))
assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))
assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))
assert_eq(ds.clip(lower=lower, upper=upper), s.clip(lower=lower, upper=upper))
assert_eq(ds.clip(lower=lower), s.clip(lower=lower))
assert_eq(ds.clip(upper=upper), s.clip(upper=upper))
def test_squeeze():
df = pd.DataFrame({"x": [1, 3, 6]})
df2 = pd.DataFrame({"x": [0]})
s = pd.Series({"test": 0, "b": 100})
ddf = dd.from_pandas(df, 3)
ddf2 = dd.from_pandas(df2, 3)
ds = dd.from_pandas(s, 2)
assert_eq(df.squeeze(), ddf.squeeze())
assert_eq(pd.Series([0], name="x"), ddf2.squeeze())
assert_eq(ds.squeeze(), s.squeeze())
with pytest.raises(NotImplementedError) as info:
ddf.squeeze(axis=0)
msg = "{0} does not support squeeze along axis 0".format(type(ddf))
assert msg in str(info.value)
with pytest.raises(ValueError) as info:
ddf.squeeze(axis=2)
msg = "No axis {0} for object type {1}".format(2, type(ddf))
assert msg in str(info.value)
with pytest.raises(ValueError) as info:
ddf.squeeze(axis="test")
msg = "No axis test for object type {0}".format(type(ddf))
assert msg in str(info.value)
def test_where_mask():
pdf1 = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]}
)
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({"a": [True, False, True] * 3, "b": [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
# different index
pdf3 = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8],
)
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame(
{"a": [True, False, True] * 3, "b": [False, False, True] * 3},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13],
)
ddf4 = dd.from_pandas(pdf4, 2)
# different columns
pdf5 = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [9, 4, 2, 6, 2, 3, 1, 6, 2],
"c": [5, 6, 7, 8, 9, 10, 11, 12, 13],
},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8],
)
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame(
{
"a": [True, False, True] * 3,
"b": [False, False, True] * 3,
"c": [False] * 9,
"d": [True] * 9,
},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13],
)
ddf6 = dd.from_pandas(pdf6, 2)
cases = [
(ddf1, ddf2, pdf1, pdf2),
(ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
(ddf1, ddf4, pdf3, pdf4),
(ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]), pdf3, pdf4),
(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
# use pd.DataFrame as cond
(ddf1, pdf2, pdf1, pdf2),
(ddf1, pdf4, pdf3, pdf4),
(ddf5, pdf6, pdf5, pdf6),
]
for idx, (ddf, ddcond, pdf, pdcond) in enumerate(cases):
assert isinstance(ddf, dd.DataFrame)
assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
assert isinstance(pdf, pd.DataFrame)
assert isinstance(pdcond, pd.DataFrame)
assert_eq(ddf.where(ddcond), pdf.where(pdcond))
assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))
assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b), full.a + full.b)
assert_eq(
dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1), full.a + full.b + 1
)
def test_map_partitions():
assert_eq(d.map_partitions(lambda df: df, meta=d), full)
assert_eq(d.map_partitions(lambda df: df), full)
result = d.map_partitions(lambda df: df.sum(axis=1))
assert_eq(result, full.sum(axis=1))
assert_eq(
d.map_partitions(lambda df: 1),
pd.Series([1, 1, 1], dtype=np.int64),
check_divisions=False,
)
x = Scalar({("x", 0): 1}, "x", int)
result = dd.map_partitions(lambda x: 2, x)
assert result.dtype in (np.int32, np.int64) and result.compute() == 2
result = dd.map_partitions(lambda x: 4.0, x)
assert result.dtype == np.float64 and result.compute() == 4.0
def test_map_partitions_type():
result = d.map_partitions(type).compute(scheduler="single-threaded")
assert isinstance(result, pd.Series)
assert all(x == pd.DataFrame for x in result)
def test_map_partitions_names():
func = lambda x: x
assert sorted(dd.map_partitions(func, d, meta=d).dask) == sorted(
dd.map_partitions(func, d, meta=d).dask
)
assert sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) == sorted(
dd.map_partitions(lambda x: x, d, meta=d, token=1).dask
)
func = lambda x, y: x
assert sorted(dd.map_partitions(func, d, d, meta=d).dask) == sorted(
dd.map_partitions(func, d, d, meta=d).dask
)
def test_map_partitions_column_info():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = dd.map_partitions(lambda x: x, a, meta=a)
tm.assert_index_equal(b.columns, a.columns)
assert_eq(df, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda df: df.x + df.y, a)
assert isinstance(b, dd.Series)
assert b.dtype == "i8"
b = dd.map_partitions(lambda df: df.x + 1, a, meta=("x", "i8"))
assert isinstance(b, dd.Series)
assert b.name == "x"
assert b.dtype == "i8"
def test_map_partitions_method_names():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = a.map_partitions(lambda x: x)
assert isinstance(b, dd.DataFrame)
tm.assert_index_equal(b.columns, a.columns)
b = a.map_partitions(lambda df: df.x + 1)
assert isinstance(b, dd.Series)
assert b.dtype == "i8"
b = a.map_partitions(lambda df: df.x + 1, meta=("x", "i8"))
assert isinstance(b, dd.Series)
assert b.name == "x"
assert b.dtype == "i8"
def test_map_partitions_propagates_index_metadata():
index = pd.Series(list("abcde"), name="myindex")
df = pd.DataFrame(
{"A": np.arange(5, dtype=np.int32), "B": np.arange(10, 15, dtype=np.int32)},
index=index,
)
ddf = dd.from_pandas(df, npartitions=2)
res = ddf.map_partitions(
lambda df: df.assign(C=df.A + df.B),
meta=[("A", "i4"), ("B", "i4"), ("C", "i4")],
)
sol = df.assign(C=df.A + df.B)
assert_eq(res, sol)
res = ddf.map_partitions(lambda df: df.rename_axis("newindex"))
sol = df.rename_axis("newindex")
assert_eq(res, sol)
@pytest.mark.xfail(reason="now we use SubgraphCallables")
def test_map_partitions_keeps_kwargs_readable():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
def f(s, x=1):
return s + x
b = a.x.map_partitions(f, x=5)
# NOTE: we'd like to ensure that we keep the keyword arguments readable
# in the dask graph
assert "['x', 5]" in str(dict(b.dask)) or "{'x': 5}" in str(dict(b.dask))
assert_eq(df.x + 5, b)
assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_map_partitions_with_delayed_collection():
# https://github.com/dask/dask/issues/5854
df = pd.DataFrame(columns=list("abcdefghijk"))
ddf = dd.from_pandas(df, 2)
ddf.dropna(subset=list("abcdefghijk")).compute()
# no error!
def test_metadata_inference_single_partition_aligned_args():
# https://github.com/dask/dask/issues/3034
# Previously broadcastable series functionality broke this
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=1)
def check(df, df_x):
assert len(df) == len(df_x)
assert len(df) > 0
return df
res = dd.map_partitions(check, ddf, ddf.x, preserve_partition_sizes=True)
assert_eq(res, ddf)
def test_drop_duplicates():
res = d.drop_duplicates()
res2 = d.drop_duplicates(split_every=2)
sol = full.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.a.drop_duplicates()
res2 = d.a.drop_duplicates(split_every=2)
sol = full.a.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.index.drop_duplicates()
res2 = d.index.drop_duplicates(split_every=2)
sol = full.index.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
with pytest.raises(NotImplementedError):
d.drop_duplicates(keep=False)
def test_drop_duplicates_subset():
df = pd.DataFrame({"x": [1, 2, 3, 1, 2, 3], "y": ["a", "a", "b", "b", "c", "c"]})
ddf = dd.from_pandas(df, npartitions=2)
for kwarg in [{"keep": "first"}, {"keep": "last"}]:
assert_eq(df.x.drop_duplicates(**kwarg), ddf.x.drop_duplicates(**kwarg))
for ss in [["x"], "y", ["x", "y"]]:
assert_eq(
df.drop_duplicates(subset=ss, **kwarg),
ddf.drop_duplicates(subset=ss, **kwarg),
)
assert_eq(df.drop_duplicates(ss, **kwarg), ddf.drop_duplicates(ss, **kwarg))
def test_get_partition():
pdf = pd.DataFrame(np.random.randn(10, 5), columns=list("abcde"))
ddf = dd.from_pandas(pdf, 3)
assert ddf.divisions == (0, 4, 8, 9)
# DataFrame
div1 = ddf.get_partition(0)
assert isinstance(div1, dd.DataFrame)
assert_eq(div1, pdf.loc[0:3])
div2 = ddf.get_partition(1)
assert_eq(div2, pdf.loc[4:7])
div3 = ddf.get_partition(2)
assert_eq(div3, pdf.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf)
# Series
div1 = ddf.a.get_partition(0)
assert isinstance(div1, dd.Series)
assert_eq(div1, pdf.a.loc[0:3])
div2 = ddf.a.get_partition(1)
assert_eq(div2, pdf.a.loc[4:7])
div3 = ddf.a.get_partition(2)
assert_eq(div3, pdf.a.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf.a)
with pytest.raises(ValueError):
ddf.get_partition(-1)
with pytest.raises(ValueError):
ddf.get_partition(3)
def test_ndim():
assert d.ndim == 2
assert d.a.ndim == 1
assert d.index.ndim == 1
def test_dtype():
assert (d.dtypes == full.dtypes).all()
def test_value_counts():
df = pd.DataFrame({"x": [1, 2, 1, 3, 3, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts()
expected = df.x.value_counts()
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2)
assert_eq(result2, expected)
assert result._name != result2._name
def test_value_counts_not_sorted():
df = pd.DataFrame({"x": [1, 2, 1, 3, 3, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts(sort=False)
expected = df.x.value_counts(sort=False)
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2)
assert_eq(result2, expected)
assert result._name != result2._name
def test_value_counts_with_dropna():
df = pd.DataFrame({"x": [1, 2, 1, 3, np.nan, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
if not PANDAS_GT_110:
with pytest.raises(NotImplementedError, match="dropna is not a valid argument"):
ddf.x.value_counts(dropna=False)
return
result = ddf.x.value_counts(dropna=False)
expected = df.x.value_counts(dropna=False)
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2, dropna=False)
assert_eq(result2, expected)
assert result._name != result2._name
def test_unique():
pdf = pd.DataFrame(
{
"x": [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
"y": ["a", "c", "b", np.nan, "c", "b", "a", "d", np.nan, "a"],
}
)
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name="x"))
assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name="y"))
assert_eq(ddf.x.unique(split_every=2), pd.Series(pdf.x.unique(), name="x"))
assert_eq(ddf.y.unique(split_every=2), pd.Series(pdf.y.unique(), name="y"))
assert_eq(ddf.index.unique(), pdf.index.unique())
assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name
def test_isin():
f_list = [1, 2, 3]
f_series = pd.Series(f_list)
f_dict = {"a": [0, 3], "b": [1, 2]}
# Series
assert_eq(d.a.isin(f_list), full.a.isin(f_list))
assert_eq(d.a.isin(f_series), full.a.isin(f_series))
with pytest.raises(NotImplementedError):
d.a.isin(d.a)
# Index
da.utils.assert_eq(d.index.isin(f_list), full.index.isin(f_list))
da.utils.assert_eq(d.index.isin(f_series), full.index.isin(f_series))
with pytest.raises(NotImplementedError):
d.a.isin(d.a)
# DataFrame test
assert_eq(d.isin(f_list), full.isin(f_list))
assert_eq(d.isin(f_dict), full.isin(f_dict))
for obj in [d, f_series, full]:
with pytest.raises(NotImplementedError):
d.isin(obj)
def test_contains_frame():
df = dd.from_pandas(pd.DataFrame({"A": [1, 2], 0: [3, 4]}), 1)
assert "A" in df
assert 0 in df
assert "B" not in df
assert 1 not in df
def test_len():
assert len(d) == len(full)
assert len(d.a) == len(full.a)
assert len(dd.from_pandas(pd.DataFrame(), npartitions=1)) == 0
assert len(dd.from_pandas(pd.DataFrame(columns=[1, 2]), npartitions=1)) == 0
# Regression test for https://github.com/dask/dask/issues/6110
assert len(dd.from_pandas(pd.DataFrame(columns=["foo", "foo"]), npartitions=1)) == 0
def test_size():
assert_eq(d.size, full.size)
assert_eq(d.a.size, full.a.size)
assert_eq(d.index.size, full.index.size)
def test_shape():
result = d.shape
assert_eq((result[0].compute(), result[1]), (len(full), len(full.columns)))
assert_eq(dd.compute(result)[0], (len(full), len(full.columns)))
result = d.a.shape
assert_eq(result[0].compute(), len(full.a))
assert_eq(dd.compute(result)[0], (len(full.a),))
sh = dd.from_pandas(pd.DataFrame(index=[1, 2, 3]), npartitions=2).shape
assert sh == (3, 0)
ddf = dd.from_pandas(pd.DataFrame({"a": [], "b": []}, index=[]), npartitions=1)
sh = ddf.shape
assert sh == (0, 2)
def test_nbytes():
assert_eq(d.a.nbytes, full.a.nbytes)
assert_eq(d.index.nbytes, full.index.nbytes)
@pytest.mark.parametrize(
"method,expected",
[("tdigest", (0.35, 3.80, 2.5, 6.5, 2.0)), ("dask", (0.0, 4.0, 1.2, 6.2, 2.0))],
)
def test_quantile(method, expected):
if method == "tdigest":
pytest.importorskip("crick")
# series / multiple
result = d.b.quantile([0.3, 0.7], method=method)
exp = full.b.quantile([0.3, 0.7])  # result may differ
assert len(result) == 2
assert result.divisions == (0.3, 0.7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == pytest.approx(expected[0])
assert result.iloc[1] == pytest.approx(expected[1])
# index
s = pd.Series(np.arange(10), index=np.arange(10))
ds = dd.from_pandas(s, 2)
result = ds.index.quantile([0.3, 0.7], method=method)
exp = s.quantile([0.3, 0.7])
assert len(result) == 2
assert result.divisions == (0.3, 0.7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == pytest.approx(expected[2])
assert result.iloc[1] == pytest.approx(expected[3])
# series / single
result = d.b.quantile(0.5, method=method)
assert isinstance(result, dd.core.Scalar)
result = result.compute()
assert result == expected[4]
@pytest.mark.parametrize("method", ["tdigest", "dask"])
def test_quantile_missing(method):
if method == "tdigest":
pytest.importorskip("crick")
df = pd.DataFrame({"A": [0, np.nan, 2]})
ddf = dd.from_pandas(df, 2)
expected = df.quantile()
result = ddf.quantile(method=method)
assert_eq(result, expected)
expected = df.A.quantile()
result = ddf.A.quantile(method=method)
assert_eq(result, expected)
@pytest.mark.parametrize("method", ["tdigest", "dask"])
def test_empty_quantile(method):
if method == "tdigest":
pytest.importorskip("crick")
result = d.b.quantile([], method=method)
exp = full.b.quantile([])
assert result.divisions == (None, None)
assert result.name == "b"
assert result.compute().name == "b"
assert_eq(result, exp)
@pytest.mark.parametrize(
"method,expected",
[
(
"tdigest",
(
pd.Series([9.5, 29.5, 19.5], index=["A", "X", "B"]),
pd.DataFrame(
[[4.5, 24.5, 14.5], [14.5, 34.5, 24.5]],
index=[0.25, 0.75],
columns=["A", "X", "B"],
),
),
),
(
"dask",
(
pd.Series([7.0, 27.0, 17.0], index=["A", "X", "B"]),
pd.DataFrame(
[[1.50, 21.50, 11.50], [14.0, 34.0, 24.0]],
index=[0.25, 0.75],
columns=["A", "X", "B"],
),
),
),
],
)
def test_dataframe_quantile(method, expected):
if method == "tdigest":
pytest.importorskip("crick")
# column X is used to test column order and the resulting divisions
df = pd.DataFrame(
{
"A": np.arange(20),
"X": np.arange(20, 40),
"B": np.arange(10, 30),
"C": ["a", "b", "c", "d"] * 5,
},
columns=["A", "X", "B", "C"],
)
ddf = dd.from_pandas(df, 3)
result = ddf.quantile(method=method)
assert result.npartitions == 1
assert result.divisions == ("A", "X")
result = result.compute()
assert isinstance(result, pd.Series)
assert result.name == 0.5
tm.assert_index_equal(result.index, pd.Index(["A", "X", "B"]))
assert (result == expected[0]).all()
result = ddf.quantile([0.25, 0.75], method=method)
assert result.npartitions == 1
assert result.divisions == (0.25, 0.75)
result = result.compute()
assert isinstance(result, pd.DataFrame)
tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
tm.assert_index_equal(result.columns, pd.Index(["A", "X", "B"]))
assert (result == expected[1]).all().all()
assert_eq(ddf.quantile(axis=1, method=method), df.quantile(axis=1))
pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1, method=method))
def test_quantile_for_possibly_unsorted_q():
"""check that quantile is giving correct answers even when quantile parameter, q, may be unsorted.
See https://github.com/dask/dask/issues/4642.
"""
# prepare test case where percentiles should equal values
A = da.arange(0, 101)
ds = dd.from_dask_array(A)
for q in [
[0.25, 0.50, 0.75],
[0.25, 0.50, 0.75, 0.99],
[0.75, 0.5, 0.25],
[0.25, 0.99, 0.75, 0.50],
]:
r = ds.quantile(q).compute()
assert_eq(r.loc[0.25], 25.0)
assert_eq(r.loc[0.50], 50.0)
assert_eq(r.loc[0.75], 75.0)
r = ds.quantile([0.25]).compute()
assert_eq(r.loc[0.25], 25.0)
r = ds.quantile(0.25).compute()
assert_eq(r, 25.0)
def test_quantile_tiny_partitions():
""" See https://github.com/dask/dask/issues/6551 """
df = pd.DataFrame({"a": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=3)
r = ddf["a"].quantile(0.5).compute()
assert r == 2
def test_index():
assert_eq(d.index, full.index)
def test_assign():
df = pd.DataFrame(
{"a": range(8), "b": [float(i) for i in range(10, 18)]},
index=pd.Index(list("abcdefgh")),
)
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.known_divisions
ddf_unknown = dd.from_pandas(df, npartitions=3, sort=False)
assert not ddf_unknown.known_divisions
h = np.array(range(len(df)))
i = da.from_array(h)
res = ddf.assign(
c=1,
d="string",
e=ddf.a.sum(),
f=ddf.a + ddf.b,
g=lambda x: x.a + x.b,
h=h,
i=i,
dt=pd.Timestamp(2018, 2, 13),
)
res_unknown = ddf_unknown.assign(
c=1,
d="string",
e=ddf_unknown.a.sum(),
f=ddf_unknown.a + ddf_unknown.b,
g=lambda x: x.a + x.b,
h=h,
i=i,
dt=pd.Timestamp(2018, 2, 13),
)
sol = df.assign(
c=1,
d="string",
e=df.a.sum(),
f=df.a + df.b,
g=lambda x: x.a + x.b,
h=h,
i=i.compute(),
dt=pd.Timestamp(2018, 2, 13),
)
assert_eq(res, sol)
assert_eq(res_unknown, sol)
res = ddf.assign(c=df.a + 1)
assert_eq(res, df.assign(c=df.a + 1))
res = ddf.assign(c=ddf.index)
assert_eq(res, df.assign(c=df.index))
# assigning a pandas Series won't work when divisions are unknown
with pytest.raises(ValueError):
ddf_unknown.assign(c=df.a + 1)
# unsupported type
with pytest.raises(TypeError):
ddf.assign(c=list(range(9)))
# Fails when assigning known divisions to unknown divisions
with pytest.raises(ValueError):
ddf_unknown.assign(foo=ddf.a)
# Fails when assigning unknown divisions to known divisions
with pytest.raises(ValueError):
ddf.assign(foo=ddf_unknown.a)
def test_assign_callable():
df = dd.from_pandas(pd.DataFrame({"A": range(10)}), npartitions=2)
a = df.assign(B=df.A.shift())
b = df.assign(B=lambda x: x.A.shift())
assert_eq(a, b)
def test_assign_dtypes():
ddf = dd.from_pandas(
pd.DataFrame(
data={"col1": ["a", "b"], "col2": [1, 2]}, columns=["col1", "col2"]
),
npartitions=2,
)
new_col = {"col3": pd.Series(["0", "1"])}
res = ddf.assign(**new_col)
assert_eq(
res.dtypes,
pd.Series(data=["object", "int64", "object"], index=["col1", "col2", "col3"]),
)
def test_map():
df = pd.DataFrame(
{"a": range(9), "b": [4, 5, 6, 1, 2, 3, 0, 0, 0]},
index=pd.Index([0, 1, 3, 5, 6, 8, 9, 9, 9], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.a.map(lambda x: x + 1), df.a.map(lambda x: x + 1))
lk = dict((v, v + 1) for v in df.a.values)
assert_eq(ddf.a.map(lk), df.a.map(lk))
assert_eq(ddf.b.map(lk), df.b.map(lk))
lk = pd.Series(lk)
assert_eq(ddf.a.map(lk), df.a.map(lk))
assert_eq(ddf.b.map(lk), df.b.map(lk))
assert_eq(ddf.b.map(lk, meta=ddf.b), df.b.map(lk))
assert_eq(ddf.b.map(lk, meta=("b", "i8")), df.b.map(lk))
def test_concat():
x = _concat([pd.DataFrame(columns=["a", "b"]), pd.DataFrame(columns=["a", "b"])])
assert list(x.columns) == ["a", "b"]
assert len(x) == 0
def test_args():
e = d.assign(c=d.a + 1)
f = type(e)(*e._args)
assert_eq(e, f)
assert_eq(d.a, type(d.a)(*d.a._args))
assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
assert d.known_divisions
df = dd.DataFrame(dsk, "x", meta, divisions=[None, None, None])
assert not df.known_divisions
def test_unknown_divisions():
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}),
}
meta = make_meta({"a": "i8", "b": "i8"})
d = dd.DataFrame(dsk, "x", meta, [None, None, None, None])
full = d.compute(scheduler="sync")
assert_eq(d.a.sum(), full.a.sum())
assert_eq(d.a + d.b + 1, full.a + full.b + 1)
@pytest.mark.skipif(
PANDAS_VERSION < "0.22.0",
reason="Parameter min_count not implemented in "
"DataFrame.sum() and DataFrame.prod()",
)
def test_with_min_count():
dfs = [
pd.DataFrame([[None, 2, 3], [None, 5, 6], [5, 4, 9]]),
pd.DataFrame([[2, None, None], [None, 5, 6], [5, 4, 9]]),
]
ddfs = [dd.from_pandas(df, npartitions=4) for df in dfs]
axes = [0, 1]
for df, ddf in zip(dfs, ddfs):
for axis in axes:
for min_count in [0, 1, 2, 3]:
assert_eq(
df.sum(min_count=min_count, axis=axis),
ddf.sum(min_count=min_count, axis=axis),
)
assert_eq(
df.prod(min_count=min_count, axis=axis),
ddf.prod(min_count=min_count, axis=axis),
)
@pytest.mark.parametrize("join", ["inner", "outer", "left", "right"])
def test_align(join):
df1a = pd.DataFrame(
{"A": np.random.randn(10), "B": np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],
)
df1b = pd.DataFrame(
{"A": np.random.randn(10), "B": np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],
)
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
# DataFrame
res1, res2 = ddf1a.align(ddf1b, join=join)
exp1, exp2 = df1a.align(df1b, join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a["A"].align(ddf1b["B"], join=join)
exp1, exp2 = df1a["A"].align(df1b["B"], join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# DataFrame with fill_value
res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)
exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a["A"].align(ddf1b["B"], join=join, fill_value=1)
exp1, exp2 = df1a["A"].align(df1b["B"], join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
@pytest.mark.parametrize("join", ["inner", "outer", "left", "right"])
def test_align_axis(join):
df1a = pd.DataFrame(
{"A": np.random.randn(10), "B": np.random.randn(10), "C": np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],
)
df1b = pd.DataFrame(
{"B": np.random.randn(10), "C": np.random.randn(10), "D": np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],
)
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)
exp1, exp2 = df1a.align(df1b, join=join, axis=0)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)
exp1, exp2 = df1a.align(df1b, join=join, axis=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis="index")
exp1, exp2 = df1a.align(df1b, join=join, axis="index")
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis="columns")
exp1, exp2 = df1a.align(df1b, join=join, axis="columns")
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# invalid
with pytest.raises(ValueError):
ddf1a.align(ddf1b, join=join, axis="XXX")
with pytest.raises(ValueError):
ddf1a["A"].align(ddf1b["B"], join=join, axis=1)
def test_combine():
df1 = pd.DataFrame(
{
"A": np.random.choice([1, 2, np.nan], 100),
"B": np.random.choice(["a", "b", np.nan], 100),
}
)
df2 = pd.DataFrame(
{
"A": np.random.choice([1, 2, 3], 100),
"B": np.random.choice(["a", "b", "c"], 100),
}
)
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
first = lambda a, b: a
# DataFrame
for dda, ddb, a, b in [
(ddf1, ddf2, df1, df2),
(ddf1.A, ddf2.A, df1.A, df2.A),
(ddf1.B, ddf2.B, df1.B, df2.B),
]:
for func, fill_value in [(add, None), (add, 100), (first, None)]:
sol = a.combine(b, func, fill_value=fill_value)
assert_eq(dda.combine(ddb, func, fill_value=fill_value), sol)
assert_eq(dda.combine(b, func, fill_value=fill_value), sol)
assert_eq(
ddf1.combine(ddf2, add, overwrite=False), df1.combine(df2, add, overwrite=False)
)
assert dda.combine(ddb, add)._name == dda.combine(ddb, add)._name
def test_combine_first():
df1 = pd.DataFrame(
{
"A": np.random.choice([1, 2, np.nan], 100),
"B": np.random.choice(["a", "b", np.nan], 100),
}
)
df2 = pd.DataFrame(
{
"A": np.random.choice([1, 2, 3], 100),
"B": np.random.choice(["a", "b", "c"], 100),
}
)
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
# DataFrame
assert_eq(ddf1.combine_first(ddf2), df1.combine_first(df2))
assert_eq(ddf1.combine_first(df2), df1.combine_first(df2))
# Series
assert_eq(ddf1.A.combine_first(ddf2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.A.combine_first(df2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.B.combine_first(ddf2.B), df1.B.combine_first(df2.B))
assert_eq(ddf1.B.combine_first(df2.B), df1.B.combine_first(df2.B))
def test_dataframe_picklable():
from pickle import loads, dumps
cloudpickle = pytest.importorskip("cloudpickle")
cp_dumps = cloudpickle.dumps
cp_loads = cloudpickle.loads
d = _compat.makeTimeDataFrame()
df = dd.from_pandas(d, npartitions=3)
df = df + 2
# dataframe
df2 = loads(dumps(df))
assert_eq(df, df2)
df2 = cp_loads(cp_dumps(df))
assert_eq(df, df2)
# series
a2 = loads(dumps(df.A))
assert_eq(df.A, a2)
a2 = cp_loads(cp_dumps(df.A))
assert_eq(df.A, a2)
# index
i2 = loads(dumps(df.index))
assert_eq(df.index, i2)
i2 = cp_loads(cp_dumps(df.index))
assert_eq(df.index, i2)
# scalar
# lambdas are present, so only test cloudpickle
s = df.A.sum()
s2 = cp_loads(cp_dumps(s))
assert_eq(s, s2)
def test_random_partitions():
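# random_split is deterministic for a fixed seed: the same fractions and seed
# yield identically named partitions, and shuffle=True additionally permutes
# the row order within each output.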
a, b = d.random_split([0.5, 0.5], 42)
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert a._name != b._name
np.testing.assert_array_equal(a.index, sorted(a.index))
assert len(a.compute()) + len(b.compute()) == len(full)
a2, b2 = d.random_split([0.5, 0.5], 42)
assert a2._name == a._name
assert b2._name == b._name
a, b = d.random_split([0.5, 0.5], 42, True)
a2, b2 = d.random_split([0.5, 0.5], 42, True)
assert_eq(a, a2)
assert_eq(b, b2)
with pytest.raises(AssertionError):
np.testing.assert_array_equal(a.index, sorted(a.index))
parts = d.random_split([0.4, 0.5, 0.1], 42)
names = set([p._name for p in parts])
names.update([a._name, b._name])
assert len(names) == 5
with pytest.raises(ValueError):
d.random_split([0.4, 0.5], 42)
def test_series_round():
ps = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name="a")
s = dd.from_pandas(ps, npartitions=3)
assert_eq(s.round(), ps.round())
@pytest.mark.slow
def test_repartition():
def _check_split_data(orig, d):
"""Check data is split properly"""
keys = [k for k in d.dask if k[0].startswith("repartition-split")]
keys = sorted(keys)
sp = pd.concat(
[compute_as_if_collection(dd.DataFrame, d.dask, k) for k in keys]
)
assert_eq(orig, sp)
assert_eq(orig, d, check_partition_sizes=False)
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")}, index=[10, 20, 30, 40, 50, 60]
)
a = dd.from_pandas(df, 2)
b = a.repartition(divisions=[10, 20, 50, 60])
assert b.divisions == (10, 20, 50, 60)
assert_eq(a, b, check_partition_sizes=False)
assert_eq(compute_as_if_collection(dd.DataFrame, b.dask, (b._name, 0)), df.iloc[:1])
for div in [
[20, 60],
[10, 50],
[1], # first / last element mismatch
[0, 60],
[10, 70], # do not allow expanding divisions by default
[10, 50, 20, 60], # not sorted
[10, 10, 20, 60], # not unique (last element can be duplicated)
]:
pytest.raises(ValueError, lambda: a.repartition(divisions=div))
pdf = pd.DataFrame(np.random.randn(7, 5), columns=list("abxyz"))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [
[0, 6],
[0, 6, 6],
[0, 5, 6],
[0, 4, 6, 6],
[0, 2, 6],
[0, 2, 6, 6],
[0, 2, 3, 6, 6],
[0, 1, 2, 3, 4, 5, 6, 6],
]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
pdf = pd.DataFrame(
{"x": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], "y": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
index=list("abcdefghij"),
)
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [
list("aj"),
list("ajj"),
list("adj"),
list("abfj"),
list("ahjj"),
list("acdj"),
list("adfij"),
list("abdefgij"),
list("abcdefghij"),
]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [list("Yadijm"), list("acmrxz"), list("Yajz")]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
def test_repartition_divisions():
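# repartition_divisions emits boundary_slice tasks under the intermediate name
# ("b") and then aliases/concatenates them into the output partitions ("c").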
result = repartition_divisions([0, 6], [0, 6, 6], "a", "b", "c")
assert result == {
("b", 0): (methods.boundary_slice, ("a", 0), 0, 6, False),
("b", 1): (methods.boundary_slice, ("a", 0), 6, 6, True),
("c", 0): ("b", 0),
("c", 1): ("b", 1),
}
result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], "a", "b", "c")
assert result == {
("b", 0): (methods.boundary_slice, ("a", 0), 1, 3, False),
("b", 1): (methods.boundary_slice, ("a", 1), 3, 4, False),
("b", 2): (methods.boundary_slice, ("a", 1), 4, 6, False),
("b", 3): (methods.boundary_slice, ("a", 1), 6, 7, True),
("c", 0): (methods.concat, [("b", 0), ("b", 1)]),
("c", 1): ("b", 2),
("c", 2): ("b", 3),
}
def test_repartition_on_pandas_dataframe():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")}, index=[10, 20, 30, 40, 50, 60]
)
ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.DataFrame)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df)
ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.Series)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df.y)
@pytest.mark.parametrize("use_index", [True, False])
@pytest.mark.parametrize("n", [1, 2, 4, 5])
@pytest.mark.parametrize("k", [1, 2, 4, 5])
@pytest.mark.parametrize("dtype", [float, "M8[ns]"])
@pytest.mark.parametrize("transform", [lambda df: df, lambda df: df.x])
def test_repartition_npartitions(use_index, n, k, dtype, transform):
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6] * 10, "y": list("abdabd") * 10},
index=pd.Series([1, 2, 3, 4, 5, 6] * 10, dtype=dtype),
)
df = transform(df)
a = dd.from_pandas(df, npartitions=n, sort=use_index)
check_partition_sizes(a)
b = a.repartition(npartitions=k)
if k > n:
check_partition_sizes(b, None)
else:
check_partition_sizes(b)
assert_eq(a, b, check_partition_sizes=False)
assert b.npartitions == k
parts = dask.get(b.dask, b.__dask_keys__())
assert all(map(len, parts))
@pytest.mark.parametrize("use_index", [True, False])
@pytest.mark.parametrize("n", [2, 5])
@pytest.mark.parametrize("partition_size", ["1kiB", 379])
@pytest.mark.parametrize("transform", [lambda df: df, lambda df: df.x])
def test_repartition_partition_size(use_index, n, partition_size, transform):
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6] * 10, "y": list("abdabd") * 10},
index=pd.Series([10, 20, 30, 40, 50, 60] * 10),
)
df = transform(df)
a = dd.from_pandas(df, npartitions=n, sort=use_index)
b = a.repartition(partition_size=partition_size)
assert_eq(a, b, check_divisions=False, check_partition_sizes=False)
assert np.alltrue(b.map_partitions(total_mem_usage, deep=True).compute() <= 1024)
parts = dask.get(b.dask, b.__dask_keys__())
assert all(map(len, parts))
def test_repartition_partition_size_arg():
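# A single string/number positional argument is treated as a target partition
# size rather than as divisions.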
df = pd.DataFrame({"x": range(10)})
a = dd.from_pandas(df, npartitions=2)
b = a.repartition("1 MiB")
assert b.npartitions == 1
def test_repartition_npartitions_same_limits():
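# Smoke test: asking for more partitions than there are rows should not raise.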
df = pd.DataFrame(
{"x": [1, 2, 3]},
index=[
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 02:45:00.017999"),
pd.Timestamp("2017-05-09 05:59:58.938999"),
],
)
ddf = dd.from_pandas(df, npartitions=2)
ddf.repartition(npartitions=10)
def test_repartition_npartitions_numeric_edge_case():
"""
Test that we cover numeric edge cases when
int(ddf.npartitions / npartitions) * npartitions != ddf.npartitions
"""
df = pd.DataFrame({"x": range(100)})
a = dd.from_pandas(df, npartitions=15)
assert a.npartitions == 15
b = a.repartition(npartitions=11)
assert_eq(a, b, check_partition_sizes=False)
def test_repartition_object_index():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6] * 10}, index=list("abdabd") * 10)
a = dd.from_pandas(df, npartitions=5)
b = a.repartition(npartitions=2)
assert b.npartitions == 2
assert_eq(b, df)
b = a.repartition(npartitions=10)
assert b.npartitions == 10
assert_eq(b, df)
assert not b.known_divisions
@pytest.mark.slow
@pytest.mark.parametrize("npartitions", [1, 20, 243])
@pytest.mark.parametrize("freq", ["1D", "7D", "28h", "1h"])
@pytest.mark.parametrize(
"end", ["2000-04-15", "2000-04-15 12:37:01", "2000-01-01 12:37:00"]
)
@pytest.mark.parametrize(
"start", ["2000-01-01", "2000-01-01 12:30:00", "2000-01-01 12:30:00"]
)
def test_repartition_freq(npartitions, freq, start, end):
start = pd.Timestamp(start)
end = pd.Timestamp(end)
ind = pd.date_range(start=start, end=end, freq="60s")
df = pd.DataFrame({"x": np.arange(len(ind))}, index=ind)
ddf = dd.from_pandas(df, npartitions=npartitions, name="x")
ddf2 = ddf.repartition(freq=freq)
assert_eq(ddf2, df)
def test_repartition_freq_divisions():
df = pd.DataFrame(
{"x": np.random.random(10)},
index=pd.DatetimeIndex(np.random.random(10) * 100e9),
)
ddf = dd.from_pandas(df, npartitions=3)
ddf2 = ddf.repartition(freq="15s")
for div in ddf2.divisions[1:-1]:
assert div == div.round("15s")
assert ddf2.divisions[0] == df.index.min()
assert ddf2.divisions[-1] == df.index.max()
assert_eq(ddf2, ddf2)
def test_repartition_freq_errors():
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(TypeError) as info:
ddf.repartition(freq="1s")
assert "only" in str(info.value)
assert "timeseries" in str(info.value)
def test_repartition_freq_month():
ts = pd.date_range("2015-01-01 00:00", "2015-05-01 23:50", freq="10min")
df = pd.DataFrame(
np.random.randint(0, 100, size=(len(ts), 4)), columns=list("ABCD"), index=ts
)
ddf = dd.from_pandas(df, npartitions=1).repartition(freq="1M")
assert_eq(df, ddf)
assert 2 < ddf.npartitions <= 6
def test_repartition_input_errors():
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(ValueError):
ddf.repartition(npartitions=5, divisions=[None, None])
with pytest.raises(ValueError):
ddf.repartition(npartitions=5, partition_size="5MiB")
def test_embarrassingly_parallel_operations():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
assert_eq(a.x.astype("float32"), df.x.astype("float32"))
assert a.x.astype("float32").compute().dtype == "float32"
assert_eq(a.x.dropna(), df.x.dropna())
assert_eq(a.x.between(2, 4), df.x.between(2, 4))
assert_eq(a.x.clip(2, 4), df.x.clip(2, 4))
assert_eq(a.x.notnull(), df.x.notnull())
assert_eq(a.x.isnull(), df.x.isnull())
assert_eq(a.notnull(), df.notnull())
assert_eq(a.isnull(), df.isnull())
assert len(a.sample(frac=0.5).compute()) < len(df)
def test_fillna():
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.fillna(100), df.fillna(100))
assert_eq(ddf.A.fillna(100), df.A.fillna(100))
assert_eq(ddf.A.fillna(ddf["A"].mean()), df.A.fillna(df["A"].mean()))
assert_eq(ddf.fillna(method="pad"), df.fillna(method="pad"))
assert_eq(ddf.A.fillna(method="pad"), df.A.fillna(method="pad"))
assert_eq(ddf.fillna(method="bfill"), df.fillna(method="bfill"))
assert_eq(ddf.A.fillna(method="bfill"), df.A.fillna(method="bfill"))
assert_eq(ddf.fillna(method="pad", limit=2), df.fillna(method="pad", limit=2))
assert_eq(ddf.A.fillna(method="pad", limit=2), df.A.fillna(method="pad", limit=2))
assert_eq(ddf.fillna(method="bfill", limit=2), df.fillna(method="bfill", limit=2))
assert_eq(
ddf.A.fillna(method="bfill", limit=2), df.A.fillna(method="bfill", limit=2)
)
assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))
assert_eq(ddf.fillna(method="pad", axis=1), df.fillna(method="pad", axis=1))
assert_eq(
ddf.fillna(method="pad", limit=2, axis=1),
df.fillna(method="pad", limit=2, axis=1),
)
pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))
df = _compat.makeMissingDataframe()
df.iloc[:15, 0] = np.nan # all NaN partition
ddf = dd.from_pandas(df, npartitions=5, sort=False)
pytest.raises(ValueError, lambda: ddf.fillna(method="pad").compute())
assert_eq(df.fillna(method="pad", limit=3), ddf.fillna(method="pad", limit=3))
def test_fillna_duplicate_index():
@dask.delayed
def f():
return pd.DataFrame(dict(a=[1.0], b=[np.NaN]))
ddf = dd.from_delayed([f(), f()], meta=dict(a=float, b=float))
ddf.b = ddf.b.fillna(ddf.a)
ddf.compute()
def test_fillna_multi_dataframe():
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.A.fillna(ddf.B), df.A.fillna(df.B))
assert_eq(ddf.B.fillna(ddf.A), df.B.fillna(df.A))
def test_ffill_bfill():
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.ffill(), df.ffill())
assert_eq(ddf.bfill(), df.bfill())
assert_eq(ddf.ffill(axis=1), df.ffill(axis=1))
assert_eq(ddf.bfill(axis=1), df.bfill(axis=1))
def test_fillna_series_types():
# https://github.com/dask/dask/issues/2809
df = pd.DataFrame({"A": [1, np.nan, 3], "B": [1, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=2)
fill_value = pd.Series([1, 10], index=["A", "C"])
assert_eq(ddf.fillna(fill_value), df.fillna(fill_value))
def test_sample():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
b = a.sample(frac=0.5)
assert_eq(b, b)
c = a.sample(frac=0.5, random_state=1234)
d = a.sample(frac=0.5, random_state=1234)
assert_eq(c, d)
assert a.sample(frac=0.5)._name != a.sample(frac=0.5)._name
def test_sample_without_replacement():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
b = a.sample(frac=0.7, replace=False)
bb = b.index.compute()
assert len(bb) == len(set(bb))
def test_sample_raises():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
# Make sure frac is replaced with n when 0 <= n <= 1
# This is so existing code (i.e. ddf.sample(0.5)) won't break
with pytest.warns(UserWarning):
b = a.sample(0.5, random_state=1234)
c = a.sample(frac=0.5, random_state=1234)
assert_eq(b, c)
with pytest.raises(ValueError):
a.sample(n=10)
# Make sure frac is provided
with pytest.raises(ValueError):
a.sample(frac=None)
def test_empty_max():
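# Reductions should still give the right answer when some partitions are empty.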
meta = make_meta({"x": "i8"})
a = dd.DataFrame(
{("x", 0): pd.DataFrame({"x": [1]}), ("x", 1): pd.DataFrame({"x": []})},
"x",
meta,
[None, None, None],
)
assert_eq(a.x.max(), 1)
def test_query():
pytest.importorskip("numexpr")
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.query("x**2 > y"), df.query("x**2 > y"))
assert_eq(
ddf.query("x**2 > @value", local_dict={"value": 4}),
df.query("x**2 > @value", local_dict={"value": 4}),
)
def test_eval():
pytest.importorskip("numexpr")
p = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
d = dd.from_pandas(p, npartitions=2)
assert_eq(p.eval("x + y"), d.eval("x + y"))
assert_eq(p.eval("z = x + y", inplace=False), d.eval("z = x + y", inplace=False))
with pytest.raises(NotImplementedError):
d.eval("z = x + y", inplace=True)
@pytest.mark.parametrize(
"include, exclude",
[
([int], None),
(None, [int]),
([np.number, object], [float]),
(["datetime"], None),
],
)
def test_select_dtypes(include, exclude):
n = 10
df = pd.DataFrame(
{
"cint": [1] * n,
"cstr": ["a"] * n,
"clfoat": [1.0] * n,
"cdt": pd.date_range("2016-01-01", periods=n),
}
)
a = dd.from_pandas(df, npartitions=2)
result = a.select_dtypes(include=include, exclude=exclude)
expected = df.select_dtypes(include=include, exclude=exclude)
assert_eq(result, expected)
# count dtypes
tm.assert_series_equal(a.dtypes.value_counts(), df.dtypes.value_counts())
tm.assert_series_equal(result.dtypes.value_counts(), expected.dtypes.value_counts())
if not PANDAS_GT_100:
# removed in pandas 1.0
ctx = pytest.warns(FutureWarning)
with ctx:
tm.assert_series_equal(a.get_ftype_counts(), df.get_ftype_counts())
tm.assert_series_equal(
result.get_ftype_counts(), expected.get_ftype_counts()
)
def test_deterministic_apply_concat_apply_names():
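# Equivalent apply-concat-apply (aca) calls must tokenize to identical graph
# keys, while any change in arguments must change the keys.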
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
assert sorted(a.x.drop_duplicates().dask) == sorted(a.x.drop_duplicates().dask)
assert sorted(a.groupby("x").y.mean().dask) == sorted(a.groupby("x").y.mean().dask)
# Test aca without passing in token string
f = lambda a: a.nlargest(5)
f2 = lambda a: a.nlargest(3)
assert sorted(aca(a.x, f, f, a.x._meta).dask) != sorted(
aca(a.x, f2, f2, a.x._meta).dask
)
assert sorted(aca(a.x, f, f, a.x._meta).dask) == sorted(
aca(a.x, f, f, a.x._meta).dask
)
# Test aca with keywords
def chunk(x, c_key=0, both_key=0):
return x.sum() + c_key + both_key
def agg(x, a_key=0, both_key=0):
return pd.Series(x).sum() + a_key + both_key
c_key = 2
a_key = 3
both_key = 4
res = aca(
a.x,
chunk=chunk,
aggregate=agg,
chunk_kwargs={"c_key": c_key},
aggregate_kwargs={"a_key": a_key},
both_key=both_key,
)
assert sorted(res.dask) == sorted(
aca(
a.x,
chunk=chunk,
aggregate=agg,
chunk_kwargs={"c_key": c_key},
aggregate_kwargs={"a_key": a_key},
both_key=both_key,
).dask
)
assert sorted(res.dask) != sorted(
aca(
a.x,
chunk=chunk,
aggregate=agg,
chunk_kwargs={"c_key": c_key},
aggregate_kwargs={"a_key": a_key},
both_key=0,
).dask
)
assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)
def test_aca_meta_infer():
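# With no explicit meta, aca should infer the output metadata from the chunk
# and aggregate functions (a DataFrame here, a Scalar below).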
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
def chunk(x, y, constant=1.0):
return (x + y + constant).head()
def agg(x):
return x.head()
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, chunk_kwargs=dict(constant=2.0))
sol = (df + 2.0 + 2.0).head()
assert_eq(res, sol)
# Should infer as a scalar
res = aca(
[ddf.x], chunk=lambda x: pd.Series([x.sum()]), aggregate=lambda x: x.sum()
)
assert isinstance(res, Scalar)
assert res.compute() == df.x.sum()
def test_aca_split_every():
df = pd.DataFrame({"x": [1] * 60})
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, y, constant=0):
return x.sum() + y + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: aca(
[ddf, 2.0],
chunk=chunk,
aggregate=agg,
combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n,
)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())
# Keywords are different for each step
assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = aca(
[ddf, 2.0],
chunk=chunk,
aggregate=agg,
combine=combine,
constant=3.0,
split_every=3,
)
assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)
assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
aca(
[ddf, 2.0],
chunk=chunk,
aggregate=agg,
split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
)
def test_reduction_method():
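# `reduction` applies `chunk` to each partition and `aggregate` to the
# concatenated per-partition results.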
df = pd.DataFrame({"x": range(50), "y": range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
chunk = lambda x, val=0: (x >= val).sum()
agg = lambda x: x.sum()
# Output of chunk is a scalar
res = ddf.x.reduction(chunk, aggregate=agg)
assert_eq(res, df.x.count())
# Output of chunk is a series
res = ddf.reduction(chunk, aggregate=agg)
assert res._name == ddf.reduction(chunk, aggregate=agg)._name
assert_eq(res, df.count())
# Test with keywords
res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={"val": 25})
assert res2._name == ddf.reduction(chunk, aggregate=agg, chunk_kwargs={"val": 25})._name
assert res2._name != res._name
assert_eq(res2, (df >= 25).sum())
# Output of chunk is a dataframe
def sum_and_count(x):
return pd.DataFrame({"sum": x.sum(), "count": x.count()})
res = ddf.reduction(sum_and_count, aggregate=lambda x: x.groupby(level=0).sum())
assert_eq(res, pd.DataFrame({"sum": df.sum(), "count": df.count()}))
def test_reduction_method_split_every():
df = pd.Series([1] * 60)
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, constant=0):
return x.sum() + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: ddf.reduction(
chunk,
aggregate=agg,
combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n,
)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())
# Keywords are different for each step
assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = ddf.reduction(
chunk, aggregate=agg, combine=combine, constant=3.0, split_every=3
)
assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)
assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
ddf.reduction(
chunk,
aggregate=agg,
split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
)
def test_pipe():
df = pd.DataFrame({"x": range(50), "y": range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
def f(x, y, z=0):
return x + y + z
assert_eq(ddf.pipe(f, 1, z=2), f(ddf, 1, z=2))
assert_eq(ddf.x.pipe(f, 1, z=2), f(ddf.x, 1, z=2))
def test_gh_517():
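# Regression test for dask GH-517: nunique on the index must deduplicate
# across partitions, including when index values repeat.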
arr = np.random.randn(100, 2)
df = pd.DataFrame(arr, columns=["a", "b"])
ddf = dd.from_pandas(df, 2)
assert ddf.index.nunique().compute() == 100
ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
assert ddf2.index.nunique().compute() == 100
def test_drop_axis_1():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8], "z": [9, 10, 11, 12]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.drop("y", axis=1), df.drop("y", axis=1))
assert_eq(ddf.drop(["y", "z"], axis=1), df.drop(["y", "z"], axis=1))
with pytest.raises(ValueError):
ddf.drop(["a", "x"], axis=1)
assert_eq(
ddf.drop(["a", "x"], axis=1, errors="ignore"),
df.drop(["a", "x"], axis=1, errors="ignore"),
)
assert_eq(ddf.drop(columns=["y", "z"]), df.drop(columns=["y", "z"]))
def test_gh580():
df = pd.DataFrame({"x": np.arange(10, dtype=float)})
ddf = dd.from_pandas(df, 2)
assert_eq(np.cos(df["x"]), np.cos(ddf["x"]))
assert_eq(np.cos(df["x"]), np.cos(ddf["x"]))
def test_gh6305():
df = pd.DataFrame({"x": np.arange(3, dtype=float)})
ddf = dd.from_pandas(df, 1)
ddf_index_only = ddf.set_index("x")
ds = ddf["x"]
is_broadcastable([ddf_index_only], ds)
def test_rename_dict():
renamer = {"a": "A", "b": "B"}
assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))
def test_rename_function():
renamer = lambda x: x.upper()
assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))
def test_rename_index():
renamer = {0: 1}
pytest.raises(ValueError, lambda: d.rename(index=renamer))
def test_to_timestamp():
index = pd.period_range(freq="A", start="1/1/2001", end="12/1/2004")
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]}, index=index)
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.to_timestamp(), df.to_timestamp(), **CHECK_FREQ)
assert_eq(
ddf.to_timestamp(freq="M", how="s").compute(),
df.to_timestamp(freq="M", how="s"),
**CHECK_FREQ,
)
assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())
assert_eq(
ddf.x.to_timestamp(freq="M", how="s").compute(),
df.x.to_timestamp(freq="M", how="s"),
**CHECK_FREQ,
)
def test_to_frame():
s = pd.Series([1, 2, 3], name="foo")
a = dd.from_pandas(s, npartitions=2)
assert_eq(s.to_frame(), a.to_frame())
assert_eq(s.to_frame("bar"), a.to_frame("bar"))
@pytest.mark.parametrize("as_frame", [False, False])
def test_to_dask_array_raises(as_frame):
s = pd.Series([1, 2, 3, 4, 5, 6], name="foo")
a = dd.from_pandas(s, npartitions=2)
if as_frame:
a = a.to_frame()
with pytest.raises(ValueError, match="6 != 10"):
a.to_dask_array((1, 2, 3, 4))
with pytest.raises(ValueError, match="Unexpected value"):
a.to_dask_array(5)
@pytest.mark.parametrize("as_frame", [False, True])
def test_to_dask_array_unknown(as_frame):
s = pd.Series([1, 2, 3, 4, 5], name="foo")
a = dd.from_pandas(s, chunksize=2)
if as_frame:
a = a.to_frame()
result = a.to_dask_array()
assert isinstance(result, da.Array)
chunks = result.chunks
if as_frame:
assert chunks == ((2, 3), (1,))
else:
assert chunks == ((2, 3),)
@pytest.mark.parametrize(
"lengths,as_frame,meta",
[
([2, 3], False, None),
(True, False, None),
(True, False, np.array([], dtype="f4")),
],
)
def test_to_dask_array(meta, as_frame, lengths):
s = pd.Series([1, 2, 3, 4, 5], name="foo", dtype="i4")
a = dd.from_pandas(s, chunksize=2)
if as_frame:
a = a.to_frame()
result = a.to_dask_array(lengths=lengths, meta=meta)
assert isinstance(result, da.Array)
expected_chunks = ((2, 3),)
if as_frame:
expected_chunks = expected_chunks + ((1,),)
assert result.chunks == expected_chunks
def test_apply():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row["x"] + row["y"]
assert_eq(
ddf.x.apply(lambda x: x + 1, meta=("x", int)), df.x.apply(lambda x: x + 1)
)
# specify meta
assert_eq(
ddf.apply(lambda xy: xy[0] + xy[1], axis=1, meta=(None, int)),
df.apply(lambda xy: xy[0] + xy[1], axis=1),
)
assert_eq(
ddf.apply(lambda xy: xy[0] + xy[1], axis="columns", meta=(None, int)),
df.apply(lambda xy: xy[0] + xy[1], axis="columns"),
)
# inference
with pytest.warns(None):
assert_eq(
ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
df.apply(lambda xy: xy[0] + xy[1], axis=1),
)
with pytest.warns(None):
assert_eq(ddf.apply(lambda xy: xy, axis=1), df.apply(lambda xy: xy, axis=1))
# specify meta
func = lambda x: pd.Series([x, x])
assert_eq(ddf.x.apply(func, meta=[(0, int), (1, int)]), df.x.apply(func))
# inference
with pytest.warns(None):
assert_eq(ddf.x.apply(func), df.x.apply(func))
# axis=0
with pytest.raises(NotImplementedError):
ddf.apply(lambda xy: xy, axis=0)
with pytest.raises(NotImplementedError):
ddf.apply(lambda xy: xy, axis="index")
def test_apply_warns():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row["x"] + row["y"]
with pytest.warns(UserWarning) as w:
ddf.apply(func, axis=1)
assert len(w) == 1
with pytest.warns(None) as w:
ddf.apply(func, axis=1, meta=(None, int))
assert len(w) == 0
with pytest.warns(UserWarning) as w:
ddf.apply(lambda x: x, axis=1)
assert len(w) == 1
assert "'x'" in str(w[0].message)
assert "int64" in str(w[0].message)
def test_applymap():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.applymap(lambda x: x + 1), df.applymap(lambda x: x + 1))
assert_eq(ddf.applymap(lambda x: (x, x)), df.applymap(lambda x: (x, x)))
def test_abs():
df = pd.DataFrame(
{
"A": [1, -2, 3, -4, 5],
"B": [-6.0, -7, -8, -9, 10],
"C": ["a", "b", "c", "d", "e"],
}
)
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.A.abs(), df.A.abs())
assert_eq(ddf[["A", "B"]].abs(), df[["A", "B"]].abs())
pytest.raises(ValueError, lambda: ddf.C.abs())
pytest.raises(TypeError, lambda: ddf.abs())
def test_round():
df = pd.DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.round(), df.round())
assert_eq(ddf.round(2), df.round(2))
def test_cov():
# DataFrame
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.cov()
res2 = ddf.cov(split_every=2)
res3 = ddf.cov(10)
res4 = ddf.cov(10, split_every=2)
sol = df.cov()
sol2 = df.cov(10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.cov()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.cov(db)
res2 = da.cov(db, split_every=2)
res3 = da.cov(db, 10)
res4 = da.cov(db, 10, split_every=2)
sol = a.cov(b)
sol2 = a.cov(b, 10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.cov(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
def test_corr():
# DataFrame
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.corr()
res2 = ddf.corr(split_every=2)
res3 = ddf.corr(min_periods=10)
res4 = ddf.corr(min_periods=10, split_every=2)
sol = df.corr()
sol2 = df.corr(min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.corr()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: ddf.corr(method="spearman"))
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.corr(db)
res2 = da.corr(db, split_every=2)
res3 = da.corr(db, min_periods=10)
res4 = da.corr(db, min_periods=10, split_every=2)
sol = a.corr(b)
sol2 = a.corr(b, min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.corr(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: da.corr(db, method="spearman"))
pytest.raises(TypeError, lambda: da.corr(ddf))
def test_corr_same_name():
# Series with same names (see https://github.com/dask/dask/issues/4906)
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=6)
result = ddf.A.corr(ddf.B.rename("A"))
expected = ddf.A.corr(ddf.B)
assert_eq(result, expected)
# test with split_every
result2 = ddf.A.corr(ddf.B.rename("A"), split_every=2)
assert_eq(result2, expected)
def test_cov_corr_meta():
df = pd.DataFrame(
{
"a": np.array([1, 2, 3]),
"b": np.array([1.0, 2.0, 3.0], dtype="f4"),
"c": np.array([1.0, 2.0, 3.0]),
},
index=pd.Index([1, 2, 3], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.corr(), df.corr())
assert_eq(ddf.cov(), df.cov())
assert ddf.a.cov(ddf.b)._meta.dtype == "f8"
assert ddf.a.corr(ddf.b)._meta.dtype == "f8"
@pytest.mark.slow
def test_cov_corr_stable():
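# Large, near-zero-mean data exercises the numerical stability of the chunked
# covariance/correlation merge.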
df = pd.DataFrame(np.random.uniform(-1, 1, (20000000, 2)), columns=["a", "b"])
ddf = dd.from_pandas(df, npartitions=50)
assert_eq(ddf.cov(split_every=8), df.cov())
assert_eq(ddf.corr(split_every=8), df.corr())
def test_cov_corr_mixed():
size = 1000
d = {
"dates": pd.date_range("2015-01-01", periods=size, freq="1T"),
"unique_id": np.arange(0, size),
"ints": np.random.randint(0, size, size=size),
"floats": np.random.randn(size),
"bools": np.random.choice([0, 1], size=size),
"int_nans": np.random.choice([0, 1, np.nan], size=size),
"float_nans": np.random.choice([0.0, 1.0, np.nan], size=size),
"constant": 1,
"int_categorical": np.random.choice([10, 20, 30, 40, 50], size=size),
"categorical_binary": np.random.choice(["a", "b"], size=size),
"categorical_nans": np.random.choice(["a", "b", "c"], size=size),
}
df = pd.DataFrame(d)
df["hardbools"] = df["bools"] == 1
df["categorical_nans"] = df["categorical_nans"].replace("c", np.nan)
df["categorical_binary"] = df["categorical_binary"].astype("category")
df["unique_id"] = df["unique_id"].astype(str)
ddf = dd.from_pandas(df, npartitions=20)
assert_eq(ddf.corr(split_every=4), df.corr(), check_divisions=False)
assert_eq(ddf.cov(split_every=4), df.cov(), check_divisions=False)
def test_autocorr():
x = pd.Series(np.random.random(100))
dx = dd.from_pandas(x, npartitions=10)
assert_eq(dx.autocorr(2), x.autocorr(2))
assert_eq(dx.autocorr(0), x.autocorr(0))
assert_eq(dx.autocorr(-2), x.autocorr(-2))
assert_eq(dx.autocorr(2, split_every=3), x.autocorr(2))
pytest.raises(TypeError, lambda: dx.autocorr(1.5))
def test_apply_infer_columns():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
def return_df(x):
# returning a Series indexed by ['sum', 'mean'] makes apply produce a DataFrame with those columns
return pd.Series([x.sum(), x.mean()], index=["sum", "mean"])
# DataFrame to completely different DataFrame
with pytest.warns(None):
result = ddf.apply(return_df, axis=1)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(["sum", "mean"]))
assert_eq(result, df.apply(return_df, axis=1))
# DataFrame to Series
with pytest.warns(None):
result = ddf.apply(lambda x: 1, axis=1)
assert isinstance(result, dd.Series)
assert result.name is None
assert_eq(result, df.apply(lambda x: 1, axis=1))
def return_df2(x):
return pd.Series([x * 2, x * 3], index=["x2", "x3"])
# Series to completely different DataFrame
with pytest.warns(None):
result = ddf.x.apply(return_df2)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(["x2", "x3"]))
assert_eq(result, df.x.apply(return_df2))
# Series to Series
with pytest.warns(None):
result = ddf.x.apply(lambda x: 1)
assert isinstance(result, dd.Series)
assert result.name == "x"
assert_eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
i = _compat.makeTimeSeries()
a = dd.from_pandas(i, npartitions=3)
assert "day" in dir(a.index)
# pandas returns a numpy array here, but dask returns an Index
assert_eq(a.index.day, pd.Index(i.index.day))
assert_eq(a.index.month, pd.Index(i.index.month))
def test_nlargest_nsmallest():
from string import ascii_lowercase
df = pd.DataFrame(
{
"a": np.random.permutation(20),
"b": list(ascii_lowercase[:20]),
"c": np.random.permutation(20).astype("float64"),
}
)
ddf = dd.from_pandas(df, npartitions=3)
for m in ["nlargest", "nsmallest"]:
f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)
res = f(ddf, 5, "a")
res2 = f(ddf, 5, "a", split_every=2)
sol = f(df, 5, "a")
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf, 5, ["a", "c"])
res2 = f(ddf, 5, ["a", "c"], split_every=2)
sol = f(df, 5, ["a", "c"])
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf.a, 5)
res2 = f(ddf.a, 5, split_every=2)
sol = f(df.a, 5)
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
def test_reset_index():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
sol = df.reset_index()
res = ddf.reset_index()
assert res.divisions == (0, 2, 3)
assert_eq(res, sol)
sol = df.reset_index(drop=True)
res = ddf.reset_index(drop=True)
assert res.divisions == (0, 2, 3)
assert_eq(res, sol)
sol = df.x.reset_index()
res = ddf.x.reset_index()
assert res.divisions == (0, 2, 3)
assert_eq(res, sol)
sol = df.x.reset_index(drop=True)
res = ddf.x.reset_index(drop=True)
assert res.divisions == (0, 2, 3)
assert_eq(res, sol)
def test_dataframe_compute_forward_kwargs():
x = dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=2).a.sum()
x.compute(bogus_keyword=10)
def test_series_iteritems():
df = pd.DataFrame({"x": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df["x"].iteritems(), ddf["x"].iteritems()):
assert a == b
def test_series_iter():
s = pd.DataFrame({"x": [1, 2, 3, 4]})
ds = dd.from_pandas(s, npartitions=2)
for (a, b) in zip(s["x"], ds["x"]):
assert a == b
def test_dataframe_iterrows():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.iterrows(), ddf.iterrows()):
tm.assert_series_equal(a[1], b[1])
def test_dataframe_itertuples():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(), ddf.itertuples()):
assert a == b
@pytest.mark.parametrize(
"columns",
[
("x", "y"),
("x", "x"),
pd.MultiIndex.from_tuples([("x", 1), ("x", 2)], names=("letter", "number")),
],
)
def test_dataframe_items(columns):
df = pd.DataFrame([[1, 10], [2, 20], [3, 30], [4, 40]], columns=columns)
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.items(), ddf.items()):
assert a[0] == b[0] # column name
assert_eq(a[1], b[1].compute()) # column values
def test_dataframe_itertuples_with_index_false():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(index=False), ddf.itertuples(index=False)):
assert a == b
def test_dataframe_itertuples_with_name_none():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(name=None), ddf.itertuples(name=None)):
assert a == b
assert type(a) is type(b)
def test_astype():
df = pd.DataFrame(
{"x": [1, 2, 3, None], "y": [10, 20, 30, 40]}, index=[10, 20, 30, 40]
)
a = dd.from_pandas(df, 2)
assert_eq(a.astype(float), df.astype(float))
assert_eq(a.x.astype(float), df.x.astype(float))
def test_astype_categoricals():
df = pd.DataFrame(
{
"x": ["a", "b", "c", "b", "c"],
"y": ["x", "y", "z", "x", "y"],
"z": [1, 2, 3, 4, 5],
}
)
df = df.astype({"y": "category"})
ddf = dd.from_pandas(df, 2)
assert ddf.y.cat.known
ddf2 = ddf.astype({"x": "category"})
assert not ddf2.x.cat.known
assert ddf2.y.cat.known
assert ddf2.x.dtype == "category"
assert ddf2.compute().x.dtype == "category"
dx = ddf.x.astype("category")
assert not dx.cat.known
assert dx.dtype == "category"
assert dx.compute().dtype == "category"
def test_astype_categoricals_known():
df = pd.DataFrame(
{
"x": ["a", "b", "c", "b", "c"],
"y": ["x", "y", "z", "y", "z"],
"z": ["b", "b", "b", "c", "b"],
"other": [1, 2, 3, 4, 5],
}
)
ddf = dd.from_pandas(df, 2)
abc = pd.api.types.CategoricalDtype(["a", "b", "c"], ordered=False)
category = pd.api.types.CategoricalDtype(ordered=False)
# DataFrame
ddf2 = ddf.astype({"x": abc, "y": category, "z": "category", "other": "f8"})
for col, known in [("x", True), ("y", False), ("z", False)]:
x = getattr(ddf2, col)
assert pd.api.types.is_categorical_dtype(x.dtype)
assert x.cat.known == known
# Series
for dtype, known in [("category", False), (category, False), (abc, True)]:
dx2 = ddf.x.astype(dtype)
assert pd.api.types.is_categorical_dtype(dx2.dtype)
assert dx2.cat.known == known
def test_groupby_callable():
a = pd.DataFrame({"x": [1, 2, 3, None], "y": [10, 20, 30, 40]}, index=[1, 2, 3, 4])
b = dd.from_pandas(a, 2)
def iseven(x):
return x % 2 == 0
assert_eq(a.groupby(iseven).y.sum(), b.groupby(iseven).y.sum())
assert_eq(a.y.groupby(iseven).sum(), b.y.groupby(iseven).sum())
def test_methods_tokenize_differently():
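# Different functions passed to map_partitions must produce different task names.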
df = pd.DataFrame({"x": [1, 2, 3, 4]})
df = dd.from_pandas(df, npartitions=1)
assert (
df.x.map_partitions(lambda x: pd.Series(x.min()))._name
!= df.x.map_partitions(lambda x: pd.Series(x.max()))._name
)
def _assert_info(df, ddf, memory_usage=True):
from io import StringIO
assert isinstance(df, pd.DataFrame)
assert isinstance(ddf, dd.DataFrame)
buf_pd, buf_da = StringIO(), StringIO()
df.info(buf=buf_pd, memory_usage=memory_usage)
ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)
stdout_pd = buf_pd.getvalue()
stdout_da = buf_da.getvalue()
stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))
# TODO
assert stdout_pd == stdout_da
@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason="Changed info repr")
def test_info():
from io import StringIO
pandas_format._put_lines = put_lines
test_frames = [
pd.DataFrame(
{"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]}, index=pd.Int64Index(range(4))
), # No RangeIndex in dask
pd.DataFrame(),
]
for df in test_frames:
ddf = dd.from_pandas(df, npartitions=4)
_assert_info(df, ddf)
buf = StringIO()
ddf = dd.from_pandas(
pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]}, index=range(4)),
npartitions=4,
)
# Verbose=False
ddf.info(buf=buf, verbose=False)
assert buf.getvalue() == (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 2 entries, x to y\n"
"dtypes: int64(2)"
)
# buf=None
assert ddf.info(buf=None) is None
@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason="Changed info repr")
def test_groupby_multilevel_info():
# GH 1844
from io import StringIO
pandas_format._put_lines = put_lines
df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
g = ddf.groupby(["A", "B"]).sum()
# the memory usage line differs slightly (a single extra space), so skip it
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
assert buf.getvalue() == (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 1 entries, C to C\n"
"dtypes: int64(1)"
)
# multilevel
g = ddf.groupby(["A", "B"]).agg(["count", "sum"])
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
expected = (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 2 entries, ('C', 'count') to ('C', 'sum')\n"
"dtypes: int64(2)"
)
assert buf.getvalue() == expected
@pytest.mark.skipif(not dd._compat.PANDAS_GT_100, reason="Changed info repr")
def test_categorize_info():
# assert that we can call info after categorize
# workaround for: https://github.com/pydata/pandas/issues/14368
from io import StringIO
pandas_format._put_lines = put_lines
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": pd.Series(list("aabc")), "z": pd.Series(list("aabc"))},
index=pd.Int64Index(range(4)),
) # No RangeIndex in dask
ddf = dd.from_pandas(df, npartitions=4).categorize(["y"])
# verbose=True
buf = StringIO()
ddf.info(buf=buf, verbose=True)
expected = (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Int64Index: 4 entries, 0 to 3\n"
"Data columns (total 3 columns):\n"
" # Column Non-Null Count Dtype\n"
"--- ------ -------------- -----\n"
" 0 x 4 non-null int64\n"
" 1 y 4 non-null category\n"
" 2 z 4 non-null object\n"
"dtypes: category(1), object(1), int64(1)"
)
assert buf.getvalue() == expected
def test_gh_1301():
df = pd.DataFrame([["1", "2"], ["3", "4"]])
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = ddf.assign(y=ddf[1].astype(int))
assert_eq(ddf2, df.assign(y=df[1].astype(int)))
assert ddf2.dtypes["y"] == np.dtype(int)
def test_timeseries_sorted():
df = _compat.makeTimeDataFrame()
ddf = dd.from_pandas(df.reset_index(), npartitions=2)
df.index.name = "index"
assert_eq(ddf.set_index("index", sorted=True, drop=True), df)
def test_column_assignment():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
orig = ddf.copy()
ddf["z"] = ddf.x + ddf.y
df["z"] = df.x + df.y
assert_eq(df, ddf)
assert "z" not in orig.columns
def test_array_assignment():
df = pd.DataFrame({"x": np.random.normal(size=50), "y": np.random.normal(size=50)})
ddf = dd.from_pandas(df, npartitions=2)
orig = ddf.copy()
arr = np.array(np.random.normal(size=50))
darr = da.from_array(arr, chunks=25)
df["z"] = arr
ddf["z"] = darr
assert_eq(df, ddf)
assert "z" not in orig.columns
arr = np.array(np.random.normal(size=(50, 50)))
darr = da.from_array(arr, chunks=25)
msg = "Array assignment only supports 1-D arrays"
with pytest.raises(ValueError, match=msg):
ddf["z"] = darr
arr = np.array(np.random.normal(size=50))
darr = da.from_array(arr, chunks=10)
ddf["z"] = darr
np.testing.assert_array_equal(ddf.z.values.compute(), darr.compute())
# If we don't know the partition_sizes, assigning an Array with a different number of partitions raises
ddf.partition_sizes = None
msg = "Number of partitions do not match"
with pytest.raises(ValueError, match=msg):
ddf["z"] = darr
def test_columns_assignment():
df = pd.DataFrame({"x": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df2 = df.assign(y=df.x + 1, z=df.x - 1)
df[["a", "b"]] = df2[["y", "z"]]
ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)
ddf[["a", "b"]] = ddf2[["y", "z"]]
assert_eq(df, ddf)
def test_attribute_assignment():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.0, 2.0, 3.0, 4.0, 5.0]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y = ddf.x + ddf.y
assert_eq(ddf, df.assign(y=df.x + df.y))
def test_setitem_triggering_realign():
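# Assigning a series with a different partitioning forces a realignment; the
# result should still have one row per original index value.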
a = dd.from_pandas(pd.DataFrame({"A": range(12)}), npartitions=3)
b = dd.from_pandas(pd.Series(range(12), name="B"), npartitions=4)
a["C"] = b
assert len(a) == 12
def test_inplace_operators():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.0, 2.0, 3.0, 4.0, 5.0]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y **= 0.5
assert_eq(ddf.y, df.y ** 0.5)
assert_eq(ddf, df.assign(y=df.y ** 0.5))
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"idx",
[
np.arange(100),
sorted(np.random.random(size=100)),
pd.date_range("20150101", periods=100),
],
)
def test_idxmaxmin(idx, skipna):
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"), index=idx)
df.b.iloc[31] = np.nan
df.d.iloc[78] = np.nan
ddf = dd.from_pandas(df, npartitions=3)
with warnings.catch_warnings(record=True):
assert_eq(df.idxmax(axis=1, skipna=skipna), ddf.idxmax(axis=1, skipna=skipna))
assert_eq(df.idxmin(axis=1, skipna=skipna), ddf.idxmin(axis=1, skipna=skipna))
assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna))
assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna, split_every=2))
assert (
ddf.idxmax(skipna=skipna)._name
!= ddf.idxmax(skipna=skipna, split_every=2)._name
)
assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna))
assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna, split_every=2))
assert (
ddf.idxmin(skipna=skipna)._name
!= ddf.idxmin(skipna=skipna, split_every=2)._name
)
assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))
assert_eq(
df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna, split_every=2)
)
assert (
ddf.a.idxmax(skipna=skipna)._name
!= ddf.a.idxmax(skipna=skipna, split_every=2)._name
)
assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))
assert_eq(
df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna, split_every=2)
)
assert (
ddf.a.idxmin(skipna=skipna)._name
!= ddf.a.idxmin(skipna=skipna, split_every=2)._name
)
def test_idxmaxmin_empty_partitions():
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [1.5, 2, 3], "c": [np.NaN] * 3, "d": [1, 2, np.NaN]}
)
empty = df.iloc[:0]
ddf = dd.concat(
[dd.from_pandas(df, npartitions=1)]
+ [dd.from_pandas(empty, npartitions=1)] * 10
)
for skipna in [True, False]:
assert_eq(ddf.idxmin(skipna=skipna, split_every=3), df.idxmin(skipna=skipna))
assert_eq(
ddf[["a", "b", "d"]].idxmin(skipna=skipna, split_every=3),
df[["a", "b", "d"]].idxmin(skipna=skipna),
)
assert_eq(ddf.b.idxmax(split_every=3), df.b.idxmax())
# Completely empty raises
ddf = dd.concat([dd.from_pandas(empty, npartitions=1)] * 10)
with pytest.raises(ValueError):
ddf.idxmax().compute()
with pytest.raises(ValueError):
ddf.b.idxmax().compute()
def test_getitem_meta():
data = {"col1": ["a", "a", "b"], "col2": [0, 1, 0]}
df = pd.DataFrame(data=data, columns=["col1", "col2"])
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(df.col2[df.col1 == "a"], ddf.col2[ddf.col1 == "a"])
def test_getitem_multilevel():
pdf = pd.DataFrame({("A", "0"): [1, 2, 2], ("B", "1"): [1, 2, 3]})
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(pdf["A", "0"], ddf["A", "0"])
assert_eq(pdf[[("A", "0"), ("B", "1")]], ddf[[("A", "0"), ("B", "1")]])
def test_getitem_string_subclass():
df = pd.DataFrame({"column_1": list(range(10))})
ddf = dd.from_pandas(df, npartitions=3)
class string_subclass(str):
pass
column_1 = string_subclass("column_1")
assert_eq(df[column_1], ddf[column_1])
@pytest.mark.parametrize("col_type", [list, np.array, pd.Series, pd.Index])
def test_getitem_column_types(col_type):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
cols = col_type(["C", "A", "B"])
assert_eq(df[cols], ddf[cols])
def test_ipython_completion():
df = pd.DataFrame({"a": [1], "b": [2]})
ddf = dd.from_pandas(df, npartitions=1)
completions = ddf._ipython_key_completions_()
assert "a" in completions
assert "b" in completions
assert "c" not in completions
def test_diff():
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"))
ddf = dd.from_pandas(df, 5)
assert_eq(ddf.diff(), df.diff())
assert_eq(ddf.diff(0), df.diff(0))
assert_eq(ddf.diff(2), df.diff(2))
assert_eq(ddf.diff(-2), df.diff(-2))
assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))
assert_eq(ddf.a.diff(), df.a.diff())
assert_eq(ddf.a.diff(0), df.a.diff(0))
assert_eq(ddf.a.diff(2), df.a.diff(2))
assert_eq(ddf.a.diff(-2), df.a.diff(-2))
assert ddf.diff(2)._name == ddf.diff(2)._name
assert ddf.diff(2)._name != ddf.diff(3)._name
pytest.raises(TypeError, lambda: ddf.diff(1.5))
def test_shift():
df = _compat.makeTimeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
# DataFrame
assert_eq(ddf.shift(), df.shift())
assert_eq(ddf.shift(0), df.shift(0))
assert_eq(ddf.shift(2), df.shift(2))
assert_eq(ddf.shift(-2), df.shift(-2))
assert_eq(ddf.shift(2, axis=1), df.shift(2, axis=1))
# Series
assert_eq(ddf.A.shift(), df.A.shift())
assert_eq(ddf.A.shift(0), df.A.shift(0))
assert_eq(ddf.A.shift(2), df.A.shift(2))
assert_eq(ddf.A.shift(-2), df.A.shift(-2))
with pytest.raises(TypeError):
ddf.shift(1.5)
@pytest.mark.parametrize("data_freq,divs1", [("B", False), ("D", True), ("H", True)])
def test_shift_with_freq_DatetimeIndex(data_freq, divs1):
df = _compat.makeTimeDataFrame()
df = df.set_index(_compat.makeDateIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq, divs2 in [("S", True), ("W", False), (pd.Timedelta(10, unit="h"), True)]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions == divs2
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs1
@pytest.mark.parametrize("data_freq,divs", [("B", False), ("D", True), ("H", True)])
def test_shift_with_freq_PeriodIndex(data_freq, divs):
df = _compat.makeTimeDataFrame()
# PeriodIndex
df = df.set_index(pd.period_range("2000-01-01", periods=30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for d, p in [(ddf, df), (ddf.A, df.A)]:
res = d.shift(2, freq=data_freq)
assert_eq(res, p.shift(2, freq=data_freq))
assert res.known_divisions == divs
# PeriodIndex.shift doesn't have `freq` parameter
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs
df = _compat.makeTimeDataFrame()
with pytest.raises(ValueError):
ddf.index.shift(2, freq="D") # freq keyword not supported
def test_shift_with_freq_TimedeltaIndex():
df = _compat.makeTimeDataFrame()
# TimedeltaIndex
for data_freq in ["T", "D", "H"]:
df = df.set_index(_compat.makeTimedeltaIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq in ["S", pd.Timedelta(10, unit="h")]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions
def test_shift_with_freq_errors():
# Other index types error
df = _compat.makeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
pytest.raises(NotImplementedError, lambda: ddf.shift(2, freq="S"))
pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq="S"))
pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_and_last(method):
f = lambda x, offset: getattr(x, method)(offset)
freqs = ["12h", "D"]
offsets = ["0d", "100h", "20d", "20B", "3W", "3M", "400d", "13M"]
for freq in freqs:
index = pd.date_range("1/1/2000", "1/1/2001", freq=freq)[::4]
df = pd.DataFrame(
np.random.random((len(index), 4)), index=index, columns=["A", "B", "C", "D"]
)
ddf = dd.from_pandas(df, npartitions=10)
for offset in offsets:
assert_eq(f(ddf, offset), f(df, offset))
assert_eq(f(ddf.A, offset), f(df.A, offset))
@pytest.mark.parametrize("npartitions", [1, 4, 20])
@pytest.mark.parametrize("split_every", [2, 5])
@pytest.mark.parametrize("split_out", [None, 1, 5, 20])
def test_hash_split_unique(npartitions, split_every, split_out):
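# After optimization the graph should read each input partition exactly once,
# and unique() should produce split_out output partitions.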
from string import ascii_lowercase
s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))
ds = dd.from_pandas(s, npartitions=npartitions)
dropped = ds.unique(split_every=split_every, split_out=split_out)
dsk = dropped.__dask_optimize__(dropped.dask, dropped.__dask_keys__())
from dask.core import get_deps
dependencies, dependents = get_deps(dsk)
assert len([k for k, v in dependencies.items() if not v]) == npartitions
assert dropped.npartitions == (split_out or 1)
assert sorted(dropped.compute(scheduler="sync")) == sorted(s.unique())
@pytest.mark.parametrize("split_every", [None, 2])
def test_split_out_drop_duplicates(split_every):
x = np.concatenate([np.arange(10)] * 100)[:, None]
y = x.copy()
z = np.concatenate([np.arange(20)] * 50)[:, None]
rs = np.random.RandomState(1)
rs.shuffle(x)
rs.shuffle(y)
rs.shuffle(z)
df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=["x", "y", "z"])
ddf = dd.from_pandas(df, npartitions=20)
for subset, keep in product([None, ["x", "z"]], ["first", "last"]):
sol = df.drop_duplicates(subset=subset, keep=keep)
res = ddf.drop_duplicates(
subset=subset, keep=keep, split_every=split_every, split_out=10
)
assert res.npartitions == 10
assert_eq(sol, res)
@pytest.mark.parametrize("split_every", [None, 2])
def test_split_out_value_counts(split_every):
df = pd.DataFrame({"x": [1, 2, 3] * 100})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.x.value_counts(split_out=10, split_every=split_every).npartitions == 10
assert_eq(
ddf.x.value_counts(split_out=10, split_every=split_every), df.x.value_counts()
)
def test_values():
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(df.values, ddf.values)
assert_eq(df.x.values, ddf.x.values)
assert_eq(df.y.values, ddf.y.values)
assert_eq(df.index.values, ddf.index.values)
def test_copy():
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, npartitions=2)
b = a.copy()
a["y"] = a.x * 2
assert_eq(b, df)
df["y"] = df.x * 2
def test_del():
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
a = dd.from_pandas(df, 2)
b = a.copy()
del a["x"]
assert_eq(b, df)
del df["x"]
assert_eq(a, df)
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(index, deep):
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(
df.memory_usage(index=index, deep=deep),
ddf.memory_usage(index=index, deep=deep),
)
assert (
df.x.memory_usage(index=index, deep=deep)
== ddf.x.memory_usage(index=index, deep=deep).compute()
)
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage_per_partition(index, deep):
df = pd.DataFrame(
{
"x": [1, 2, 3, 4, 5],
"y": [1.0, 2.0, 3.0, 4.0, 5.0],
"z": ["a", "b", "c", "d", "e"],
}
)
ddf = dd.from_pandas(df, npartitions=2)
# DataFrame.memory_usage_per_partition
expected = pd.Series(
part.compute().memory_usage(index=index, deep=deep).sum()
for part in ddf.partitions
)
result = ddf.memory_usage_per_partition(index=index, deep=deep)
assert_eq(expected, result)
# Series.memory_usage_per_partition
expected = pd.Series(
part.x.compute().memory_usage(index=index, deep=deep) for part in ddf.partitions
)
result = ddf.x.memory_usage_per_partition(index=index, deep=deep)
assert_eq(expected, result)
@pytest.mark.parametrize(
"reduction",
[
"sum",
"mean",
"std",
"var",
"count",
"min",
"max",
"idxmin",
"idxmax",
"prod",
"all",
"sem",
],
)
def test_dataframe_reductions_arithmetic(reduction):
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.1, 2.2, 3.3, 4.4, 5.5]})
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(
ddf - (getattr(ddf, reduction)() + 1), df - (getattr(df, reduction)() + 1)
)
def test_dataframe_mode():
data = [["Tom", 10, 7], ["Farahn", 14, 7], ["Julie", 14, 5], ["Nick", 10, 10]]
df = pd.DataFrame(data, columns=["Name", "Num", "Num"])
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.mode(), df.mode())
assert_eq(ddf.Name.mode(), df.Name.mode())
# test empty
df = pd.DataFrame(columns=["a", "b"])
ddf = dd.from_pandas(df, npartitions=1)
# check_index=False should be removed once https://github.com/pandas-dev/pandas/issues/33321 is resolved.
assert_eq(ddf.mode(), df.mode(), check_index=False)
def test_datetime_loc_open_slicing():
dtRange = pd.date_range("01.01.2015", "05.05.2015")
df = pd.DataFrame(np.random.random((len(dtRange), 2)), index=dtRange)
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(df.loc[:"02.02.2015"], ddf.loc[:"02.02.2015"])
assert_eq(df.loc["02.02.2015":], ddf.loc["02.02.2015":])
assert_eq(df[0].loc[:"02.02.2015"], ddf[0].loc[:"02.02.2015"])
assert_eq(df[0].loc["02.02.2015":], ddf[0].loc["02.02.2015":])
def test_to_datetime():
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df.index.name = "ix"
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(pd.to_datetime(df), dd.to_datetime(ddf))
s = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100)
s.index = s.values
ds = dd.from_pandas(s, npartitions=10, sort=False)
assert_eq(
pd.to_datetime(s, infer_datetime_format=True),
dd.to_datetime(ds, infer_datetime_format=True),
)
assert_eq(
pd.to_datetime(s.index, infer_datetime_format=True),
dd.to_datetime(ds.index, infer_datetime_format=True),
check_divisions=False,
)
def test_to_timedelta():
s = pd.Series(range(10))
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s), dd.to_timedelta(ds))
assert_eq(pd.to_timedelta(s, unit="h"), dd.to_timedelta(ds, unit="h"))
s = pd.Series([1, 2, "this will error"])
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s, errors="coerce"), dd.to_timedelta(ds, errors="coerce"))
@pytest.mark.skipif(PANDAS_VERSION < "0.22.0", reason="No isna method")
@pytest.mark.parametrize("values", [[np.NaN, 0], [1, 1]])
def test_isna(values):
s = pd.Series(values)
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.isna(s), dd.isna(ds))
@pytest.mark.parametrize("drop", [0, 9])
def test_slice_on_filtered_boundary(drop):
# https://github.com/dask/dask/issues/2211
x = np.arange(10)
x[[5, 6]] -= 2
df = pd.DataFrame({"A": x, "B": np.arange(len(x))})
pdf = df.set_index("A").query("B != {}".format(drop))
ddf = dd.from_pandas(df, 1).set_index("A").query("B != {}".format(drop))
result = dd.concat([ddf, ddf.rename(columns={"B": "C"})], axis=1)
expected = pd.concat([pdf, pdf.rename(columns={"B": "C"})], axis=1)
assert_eq(result, expected)
def test_boundary_slice_nonmonotonic():
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, 0, 4)
expected = df.iloc[2:]
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -1, 4)
expected = df.drop(-2)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3.5)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 4)
expected = df
tm.assert_frame_equal(result, expected)
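# Added note (illustrative, inferred from the cases above): methods.boundary_slice(df, lo, hi)
# keeps rows whose index label lies inside [lo, hi] (inclusive by default), even when the index
# is not monotonic. For the frame built above, for example:
#
#     methods.boundary_slice(df, -2, 3)   # drops only the row labelled 4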
def test_boundary_slice_empty():
df = pd.DataFrame()
result = methods.boundary_slice(df, 1, 4)
expected = pd.DataFrame()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, right_boundary, left_boundary, drop",
[
(-1, None, False, False, [-1, -2]),
(-1, None, False, True, [-2]),
(None, 3, False, False, [3, 4]),
(None, 3, True, False, [4]),
# Missing keys
(-0.5, None, False, False, [-1, -2]),
(-0.5, None, False, True, [-1, -2]),
(-1.5, None, False, True, [-2]),
(None, 3.5, False, False, [4]),
(None, 3.5, True, False, [4]),
(None, 2.5, False, False, [3, 4]),
],
)
def test_with_boundary(start, stop, right_boundary, left_boundary, drop):
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, start, stop, right_boundary, left_boundary)
expected = df.drop(drop)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index, left, right",
[
(range(10), 0, 9),
(range(10), -1, None),
(range(10), None, 10),
([-1, 0, 2, 1], None, None),
([-1, 0, 2, 1], -1, None),
([-1, 0, 2, 1], None, 2),
([-1, 0, 2, 1], -2, 3),
(pd.date_range("2017", periods=10), None, None),
(pd.date_range("2017", periods=10), pd.Timestamp("2017"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2017-01-10")),
(pd.date_range("2017", periods=10), pd.Timestamp("2016"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2018")),
],
)
def test_boundary_slice_same(index, left, right):
df = pd.DataFrame({"A": range(len(index))}, index=index)
result = methods.boundary_slice(df, left, right)
tm.assert_frame_equal(result, df)
def test_better_errors_object_reductions():
# GH2452
s = pd.Series(["a", "b", "c", "d"])
ds = dd.from_pandas(s, npartitions=2)
with pytest.raises(ValueError) as err:
ds.mean()
assert str(err.value) == "`mean` not supported with object series"
def test_sample_empty_partitions():
@dask.delayed
def make_df(n):
return pd.DataFrame(np.zeros((n, 4)), columns=list("abcd"))
ddf = dd.from_delayed([make_df(0), make_df(100), make_df(0)])
ddf2 = ddf.sample(frac=0.2)
# smoke test sample on empty partitions
res = ddf2.compute()
assert res.dtypes.equals(ddf2.dtypes)
def test_coerce():
df = pd.DataFrame(np.arange(100).reshape((10, 10)))
ddf = dd.from_pandas(df, npartitions=2)
funcs = (int, float, complex)
for d, t in product(funcs, (ddf, ddf[0])):
pytest.raises(TypeError, lambda: t(d))
def test_bool():
df = pd.DataFrame(np.arange(100).reshape((10, 10)))
ddf = dd.from_pandas(df, npartitions=2)
conditions = [ddf, ddf[0], ddf == ddf, ddf[0] == ddf[0]]
for cond in conditions:
with pytest.raises(ValueError):
bool(cond)
def test_cumulative_multiple_columns():
# GH 3037
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"))
ddf = dd.from_pandas(df, 5)
for d in [ddf, df]:
for c in df.columns:
d[c + "cs"] = d[c].cumsum()
d[c + "cmin"] = d[c].cummin()
d[c + "cmax"] = d[c].cummax()
d[c + "cp"] = d[c].cumprod()
assert_eq(ddf, df)
@pytest.mark.parametrize("func", [np.asarray, M.to_records])
def test_map_partition_array(func):
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5], "y": [6.0, 7.0, 8.0, 9.0, 10.0]},
index=["a", "b", "c", "d", "e"],
)
ddf = dd.from_pandas(df, npartitions=2)
for pre in [lambda a: a, lambda a: a.x, lambda a: a.y, lambda a: a.index]:
try:
expected = func(pre(df))
except Exception:
continue
x = pre(ddf).map_partitions(func)
assert_eq(x, expected)
assert isinstance(x, da.Array)
assert x.chunks[0] == (np.nan, np.nan)
@pytest.mark.xfail(_numpy_120, reason="sparse-383")
def test_map_partition_sparse():
sparse = pytest.importorskip("sparse")
    # Avoid searchsorted failure.
pytest.importorskip("numba", minversion="0.40.0")
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5], "y": [6.0, 7.0, 8.0, 9.0, 10.0]},
index=["a", "b", "c", "d", "e"],
)
ddf = dd.from_pandas(df, npartitions=2)
def f(d):
return sparse.COO(np.array(d))
for pre in [lambda a: a, lambda a: a.x]:
expected = f(pre(df))
result = pre(ddf).map_partitions(f)
assert isinstance(result, da.Array)
computed = result.compute()
assert (computed.data == expected.data).all()
assert (computed.coords == expected.coords).all()
def test_mixed_dask_array_operations():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4, 5, 6])
ddf = dd.from_pandas(df, npartitions=2)
x = ddf.x
v = x.values
chunks = v.chunks
assert chunks == ((3,),)
l = df.x + df.x.values
r = x + v
assert_eq(l, r)
l = df.x.values + df.x
r = v + x
assert_eq(l, r)
assert_eq(df.x + df.index.values, ddf.x + ddf.index.values)
l = df.index.values + df.x
r = ddf.index.values + ddf.x
assert_eq(l, r)
assert_eq(df.x + df.x.values.sum(), ddf.x + ddf.x.values.sum())
def test_mixed_dask_array_operations_errors():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]}, index=[4, 5, 6, 7, 8])
ddf = dd.from_pandas(df, npartitions=2)
x = da.arange(5, chunks=((1, 4),))
x._chunks = ((np.nan, np.nan),)
with pytest.raises(ValueError):
(ddf.x + x).compute()
x = da.arange(5, chunks=((2, 2, 1),))
with pytest.raises(ValueError) as info:
ddf.x + x
assert "add" in str(info.value)
def test_mixed_dask_array_multi_dimensional():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5], "y": [5.0, 6.0, 7.0, 8.0, 9.0]}, columns=["x", "y"]
)
ddf = dd.from_pandas(df, npartitions=2)
x = (df.values + 1).astype(float)
dx = (ddf.values + 1).astype(float)
assert_eq(ddf + dx + 1, df + x + 1)
assert_eq(ddf + dx.rechunk((None, 1)) + 1, df + x + 1)
assert_eq(ddf[["y", "x"]] + dx + 1, df[["y", "x"]] + x + 1)
def test_meta_raises():
# Raise when we use a user defined function
s = pd.Series(["abcd", "abcd"])
ds = dd.from_pandas(s, npartitions=2)
try:
ds.map(lambda x: x[3])
except ValueError as e:
assert "meta=" in str(e)
# But not otherwise
df = pd.DataFrame({"a": ["x", "y", "y"], "b": ["x", "y", "z"], "c": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(Exception) as info:
ddf.a + ddf.c
assert "meta=" not in str(info.value)
def test_dask_dataframe_holds_scipy_sparse_containers():
sparse = pytest.importorskip("scipy.sparse")
da = pytest.importorskip("dask.array")
x = da.random.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
df = dd.from_dask_array(x)
y = df.map_partitions(sparse.csr_matrix)
assert isinstance(y, da.Array)
vs = y.to_delayed().flatten().tolist()
values = dask.compute(*vs, scheduler="single-threaded")
assert all(isinstance(v, sparse.csr_matrix) for v in values)
def test_map_partitions_delays_large_inputs():
df = pd.DataFrame({"x": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
big = np.ones(1000000)
b = ddf.map_partitions(lambda x, y: x, y=big)
assert any(big is v for v in b.dask.values())
a = ddf.map_partitions(lambda x, y: x, big)
assert any(big is v for v in a.dask.values())
def test_partitions_indexer():
df = pd.DataFrame({"x": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(ddf.partitions[0], ddf.get_partition(0))
assert_eq(ddf.partitions[3], ddf.get_partition(3))
assert_eq(ddf.partitions[-1], ddf.get_partition(4))
assert ddf.partitions[:3].npartitions == 3
assert ddf.x.partitions[:3].npartitions == 3
assert ddf.x.partitions[::2].compute().tolist() == [0, 1, 4, 5, 8, 9]
def test_mod_eq():
df = pd.DataFrame({"a": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(df, ddf)
assert_eq(df.a, ddf.a)
assert_eq(df.a + 2, ddf.a + 2)
assert_eq(df.a + 2 == 0, ddf.a + 2 == 0)
def test_setitem():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
ddf = dd.from_pandas(df.copy(), 2)
df[df.columns] = 1
ddf[ddf.columns] = 1
assert_eq(df, ddf)
def test_broadcast():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf - (ddf.sum() + 1), df - (df.sum() + 1))
def test_scalar_with_array():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
da.utils.assert_eq(df.x.values + df.x.mean(), ddf.x.values + ddf.x.mean())
def test_has_parallel_type():
assert has_parallel_type(pd.DataFrame())
assert has_parallel_type(pd.Series(dtype=float))
assert not has_parallel_type(123)
def test_meta_error_message():
with pytest.raises(TypeError) as info:
dd.DataFrame({("x", 1): 123}, "x", pd.Series(dtype=float), [None, None])
assert "Series" in str(info.value)
assert "DataFrame" in str(info.value)
assert "pandas" in str(info.value)
def test_map_index():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert ddf.known_divisions is True
cleared = ddf.index.map(lambda x: x * 10)
assert cleared.known_divisions is False
applied = ddf.index.map(lambda x: x * 10, is_monotonic=True)
assert applied.known_divisions is True
assert applied.divisions == tuple(x * 10 for x in ddf.divisions)
def test_assign_index():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
ddf_copy = ddf.copy()
ddf.index = ddf.index * 10
expected = df.copy()
expected.index = expected.index * 10
assert_eq(ddf, expected)
assert_eq(ddf_copy, df)
def test_index_divisions():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.index + 1, df.index + 1)
assert_eq(10 * ddf.index, 10 * df.index)
assert_eq(-ddf.index, -df.index)
def test_replace():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.replace(1, 10), ddf.replace(1, 10))
assert_eq(df.replace({1: 10, 2: 20}), ddf.replace({1: 10, 2: 20}))
assert_eq(df.x.replace(1, 10), ddf.x.replace(1, 10))
assert_eq(df.x.replace({1: 10, 2: 20}), ddf.x.replace({1: 10, 2: 20}))
def test_map_partitions_delays_lists():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
L = list(range(100))
out = ddf.map_partitions(lambda x, y: x + sum(y), y=L)
assert any(str(L) == str(v) for v in out.__dask_graph__().values())
out = ddf.map_partitions(lambda x, y: x + sum(y), L)
assert any(str(L) == str(v) for v in out.__dask_graph__().values())
def test_dtype_cast():
df = pd.DataFrame(
{
"A": np.arange(10, dtype=np.int32),
"B": np.arange(10, dtype=np.int64),
"C": np.arange(10, dtype=np.float32),
}
)
ddf = dd.from_pandas(df, npartitions=2)
assert ddf.A.dtype == np.int32
assert ddf.B.dtype == np.int64
assert ddf.C.dtype == np.float32
col = pd.Series(np.arange(10, dtype=np.float32)) / 2
assert col.dtype == np.float32
ddf = ddf.assign(D=col)
assert ddf.D.dtype == np.float32
assert ddf.C.dtype == np.float32
# fails
assert ddf.B.dtype == np.int64
# fails
assert ddf.A.dtype == np.int32
@pytest.mark.parametrize("base_npart", [1, 4])
@pytest.mark.parametrize("map_npart", [1, 3])
@pytest.mark.parametrize("sorted_index", [False, True])
@pytest.mark.parametrize("sorted_map_index", [False, True])
def test_series_map(base_npart, map_npart, sorted_index, sorted_map_index):
base = pd.Series(
["".join(np.random.choice(["a", "b", "c"], size=3)) for x in range(100)]
)
if not sorted_index:
index = np.arange(100)
np.random.shuffle(index)
base.index = index
map_index = ["".join(x) for x in product("abc", repeat=3)]
mapper = pd.Series(np.random.randint(50, size=len(map_index)), index=map_index)
if not sorted_map_index:
map_index = np.array(map_index)
np.random.shuffle(map_index)
mapper.index = map_index
expected = base.map(mapper)
dask_base = dd.from_pandas(base, npartitions=base_npart, sort=False)
dask_map = dd.from_pandas(mapper, npartitions=map_npart, sort=False)
result = dask_base.map(dask_map)
dd.utils.assert_eq(expected, result)
@pytest.mark.skipif(
PANDAS_VERSION < "0.25.0", reason="Explode not implemented in pandas < 0.25.0"
)
def test_dataframe_explode():
df = pd.DataFrame({"A": [[1, 2, 3], "foo", [3, 4]], "B": 1})
exploded_df = df.explode("A")
ddf = dd.from_pandas(df, npartitions=2)
exploded_ddf = ddf.explode("A")
assert ddf.divisions == exploded_ddf.divisions
assert_eq(exploded_ddf.compute(), exploded_df)
@pytest.mark.skipif(
PANDAS_VERSION < "0.25.0", reason="Explode not implemented in pandas < 0.25.0"
)
def test_series_explode():
s = pd.Series([[1, 2, 3], "foo", [3, 4]])
exploded_s = s.explode()
ds = dd.from_pandas(s, npartitions=2)
exploded_ds = ds.explode()
assert_eq(exploded_ds, exploded_s)
assert ds.divisions == exploded_ds.divisions
def test_pop():
df = pd.DataFrame({"x": range(10), "y": range(10)})
ddf = dd.from_pandas(df, npartitions=2)
s = ddf.pop("y")
assert s.name == "y"
assert ddf.columns == ["x"]
assert_eq(ddf, df[["x"]])
def test_simple_map_partitions():
data = {"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]}
df = pd.DataFrame(data)
ddf = dd.from_pandas(df, npartitions=2)
ddf = ddf.clip(-4, 6)
task = ddf.__dask_graph__()[ddf.__dask_keys__()[0]]
[v] = task[0].dsk.values()
assert v[0] == M.clip or v[1] == M.clip
def test_iter():
df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, 2)
assert list(df) == list(ddf)
for col, expected in zip(ddf, ["A", "B"]):
assert col == expected
def test_dataframe_groupby_agg_empty_partitions():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=4)
assert_eq(ddf[ddf.x < 5].x.cumsum(), df[df.x < 5].x.cumsum())
def test_fuse_roots():
pdf1 = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]}
)
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({"a": [True, False, True] * 3, "b": [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
res = ddf1.where(ddf2)
hlg = fuse_roots(res.__dask_graph__(), keys=res.__dask_keys__())
hlg.validate()
| 30.87071 | 109 | 0.586733 |
e93c7b86a135e517911b5fdc162ee4d382e9ebb0 | 377 | py | Python | tools/Polygraphy/tests/tools/test_polygraphy.py | hwkyai/TensorRT | d04182cd0086c70db4a8ad30e0d7675c4eb33782 | ["Apache-2.0"] | null | null | null | tools/Polygraphy/tests/tools/test_polygraphy.py | hwkyai/TensorRT | d04182cd0086c70db4a8ad30e0d7675c4eb33782 | ["Apache-2.0"] | null | null | null | tools/Polygraphy/tests/tools/test_polygraphy.py | hwkyai/TensorRT | d04182cd0086c70db4a8ad30e0d7675c4eb33782 | ["Apache-2.0"] | null | null | null |
import os
import polygraphy
from tests.tools.common import run_polygraphy
class TestPolygraphyBin(object):
def test_version(self):
status = run_polygraphy(["-v"])
assert status.stdout.strip().replace("\n", " ").replace(" ", " ") == "Polygraphy | Version: {:} | Path: {:}".format(polygraphy.__version__, list(map(os.path.realpath, polygraphy.__path__)))
| 34.272727 | 198 | 0.681698 |
25b20eb9aec333f80811658e262aa707a14b5d58 | 887 | py | Python | userbot/plugins/alive.py | Azharjacx/NandiOXbot | c1ee9162123d4ecda1da79124ae1219d79e148a0 | ["MIT"] | null | null | null | userbot/plugins/alive.py | Azharjacx/NandiOXbot | c1ee9162123d4ecda1da79124ae1219d79e148a0 | ["MIT"] | null | null | null | userbot/plugins/alive.py | Azharjacx/NandiOXbot | c1ee9162123d4ecda1da79124ae1219d79e148a0 | ["MIT"] | null | null | null |
"""Check if userbot alive."""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from platform import uname
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "No name set yet nibba, check pinned in @XtraTgBot"
@command(outgoing=True, pattern="^.alive$")
async def amireallyalive(alive):
""" For .alive command, check if the bot is running. """
await alive.edit("`Apun marta nahi jinda hu bainchod` **ψ(`∇´)ψ**\n\n"
"`Telethon version: 6.9.0\nPython: 3.7.3\n`"
"`Bot created by:` [OPen Camera](tg://user?id=772507084), @opencamera\n"
f"`My NOOB Son`: {DEFAULTUSER}\n\n"
"https://github.com/rishabh-lohiya16/NandiOXbot"
"\nAyushman-Bhav✋")
| 44.35 | 101 | 0.632469 |
01d646e8d30dabc9d7cbb9dff0286a25f2d2903e | 901 | py | Python | django/helpdesk_deploy_old/helpdesk/base/managers/user.py | Netromnik/python | 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | ["Apache-2.0"] | null | null | null | django/helpdesk_deploy_old/helpdesk/base/managers/user.py | Netromnik/python | 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | ["Apache-2.0"] | null | null | null | django/helpdesk_deploy_old/helpdesk/base/managers/user.py | Netromnik/python | 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | ["Apache-2.0"] | null | null | null |
from django.shortcuts import reverse
from base.models_i.file import CollectMedia
class UserModelCustomManeger():
def get_view_type_url(self,q,s):
## table
return reverse("view:router:router_table",kwargs={"slug_query_pk":q,"slug_stream_pk":s})
def get_view_list(self,queue,stream):
obj = {}
obj['title'] = "Виды отображения"
obj["dep"] = [
{
"name":"Таблица",
"url" :reverse("view:router:router_table", args=[queue['id'], stream['id']])
},{
"name":"Доска",
"url" :reverse("view:router:router_kanban", args=[queue['id'], stream['id']])
},
]
return obj
def get_collect(self,user):
obj = CollectMedia.obj.get(name="wiki")
return obj
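# Usage sketch added for illustration (queue_pk/stream_pk are placeholder primary keys):
#
#     mgr = UserModelCustomManeger()
#     table_url = mgr.get_view_type_url(queue_pk, stream_pk)   # URL of the table view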
| 29.064516 | 96 | 0.580466 |
621becfe68f4065655f72f0008f7e6bdd5340f3d | 754 | py | Python | var/spack/repos/builtin/packages/py-snowballstemmer/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/py-snowballstemmer/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/py-snowballstemmer/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySnowballstemmer(PythonPackage):
"""This package provides 16 stemmer algorithms (15 + Poerter
English stemmer) generated from Snowball algorithms."""
homepage = "https://github.com/shibukawa/snowball_py"
pypi = "snowballstemmer/snowballstemmer-2.0.0.tar.gz"
version('2.0.0', sha256='df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52')
version('1.2.1', sha256='919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128')
depends_on('py-setuptools', when='@2:', type='build')
| 37.7 | 95 | 0.763926 |
f20e92770dc13448e8302823145eb6169bac2627 | 2,546 | py | Python | pipe-cli/src/model/cluster_node_model.py | msleprosy/cloud-pipeline | bccc2b196fad982380efc37a1c3785098bec6c85 | ["Apache-2.0"] | 126 | 2019-03-22T19:40:38.000Z | 2022-02-16T13:01:44.000Z | pipe-cli/src/model/cluster_node_model.py | msleprosy/cloud-pipeline | bccc2b196fad982380efc37a1c3785098bec6c85 | ["Apache-2.0"] | 1,189 | 2019-03-25T10:39:27.000Z | 2022-03-31T12:50:33.000Z | pipe-cli/src/model/cluster_node_model.py | msleprosy/cloud-pipeline | bccc2b196fad982380efc37a1c3785098bec6c85 | ["Apache-2.0"] | 62 | 2019-03-22T22:09:49.000Z | 2022-03-08T12:05:56.000Z |
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from .pipeline_run_model import PipelineRunModel
from .pod_model import PodModel
class ClusterNodeModel(object):
def __init__(self):
self.identifier = None
self.name = None
self.created = None
self.run = None
self.addresses = []
self.system_info = None
self.labels = None
self.allocatable = None
self.capacity = None
self.is_master = False
self.pods = []
@classmethod
def load(cls, json):
instance = cls()
instance.identifier = json['uid']
instance.name = json['name']
instance.created = datetime.datetime.strptime(json['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')
if 'addresses' in json:
for address in json['addresses']:
instance.addresses.append('{} ({})'.format(address['address'], address['type']))
if 'pipelineRun' in json:
instance.run = PipelineRunModel.load(json['pipelineRun'])
if 'systemInfo' in json:
instance.system_info = json['systemInfo'].items()
if 'labels' in json:
instance.labels = json['labels'].items()
for label in instance.labels:
if label[0].lower() == 'node-role.kubernetes.io/master':
instance.is_master = True
elif label[0].lower() == 'kubeadm.alpha.kubernetes.io/role':
instance.is_master = label[1].lower() == 'master'
elif label[0].lower() == 'cloud-pipeline/role':
instance.is_master = label[1].lower() == 'edge'
if 'allocatable' in json:
instance.allocatable = json['allocatable']
if 'capacity' in json:
instance.capacity = json['capacity']
if 'pods' in json:
for pod_json in json['pods']:
instance.pods.append(PodModel.load(pod_json))
return instance
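# Illustrative usage (added note; the payload shape mirrors the keys parsed above, all other
# fields are optional):
#
#     node = ClusterNodeModel.load({'uid': 'abc-123', 'name': 'node-1',
#                                   'creationTimestamp': '2019-01-01T00:00:00Z'})
#     # node.name == 'node-1'; addresses/labels/pods stay empty unless present in the JSON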
| 39.78125 | 102 | 0.615868 |
ba3280c74e142ee18b6ef041b3aff4f88df5c35b | 5,081 | py | Python | try_to_import_package_and_modules.py | eda-ricercatore/scienza_dei_dati | 999972b2cbfb52e802143b13651438723898b53d | ["MIT"] | null | null | null | try_to_import_package_and_modules.py | eda-ricercatore/scienza_dei_dati | 999972b2cbfb52e802143b13651438723898b53d | ["MIT"] | null | null | null | try_to_import_package_and_modules.py | eda-ricercatore/scienza_dei_dati | 999972b2cbfb52e802143b13651438723898b53d | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""try_to_import_package_and_modules.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Q4ig5YLEjnEWee61SazViyGvkqpPgaAZ
"""
#!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3
### /usr/bin/python
### /Library/Frameworks/Python.framework/Versions/3.6/bin/python3
### #!/usr/bin/python -mtimeit
"""
This Python script is written by Zhiyang Ong to learn how to
import a Python package and module in Google Colab, via
a Jupyter notebook.
References:
https://colab.research.google.com/notebooks/snippets/importing_libraries.ipynb
https://colab.research.google.com/notebooks/welcome.ipynb
Revision History:
November 1, 2019 Version 0.1, initial build.
"""
__author__ = 'Zhiyang Ong'
__version__ = '1.0'
__date__ = 'November 1, 2019'
# The MIT License (MIT)
# Copyright (c) <2019> <Zhiyang Ong>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n" Che cosa significa?
###############################################################
"""
Import modules from The Python Standard Library.
sys Get access to any command-line arguments.
os Use any operating system dependent functionality.
os.path For pathname manipulations.
subprocess -> call
To make system calls.
time To measure elapsed time.
warnings Raise warnings.
re Use regular expressions.
pathlib->Path
For mapping a string to a path.
"""
"""
import sys
import os
import os.path
#from pathlib import Path
from subprocess import call
import time
import warnings
import re
"""
"""
import importlib.util
from google.colab import drive
drive.mount('/content/drive')
import sys
sys.path.append('/content/drive/My\ Drive/Colab\ Notebooks/utilities')
"""
"""
https://stackoverflow.com/questions/48905127/importing-py-files-in-google-colab/48919022#48919022
from google.colab import files
src = list(files.upload().values())[0]
#open('utilities/simple_module.py','wb').write(src)
open('./utilities/simple_module.py','wb').write(src)
import mylib
"""
"""
using PyDrive
https://stackoverflow.com/questions/37913458/manage-files-from-public-google-drive-url-using-pydrive
"""
"""
Other resources:
https://www.marktechpost.com/2019/06/07/how-to-connect-google-colab-with-google-drive/
"""
from google.colab import drive
drive.mount('/content/gdrive')
import sys
sys.path.append('/content/drive/My\ Drive/Colab\ Notebooks/utilities')
###############################################################
# Import Custom Python Modules
import utilities
# Module to test if I can import a Python package and module.
from utilities.simple_module import simple
###############################################################
# Class used to test whether a custom Python package and module can be imported.
class Try_to_Import_Package:
"""
Number of times that the "Hello World" print function
has been called.
"""
number_times_executed = 0
# =========================================================
# Accessor and Mutator method.
# =========================================================
# Method to print the number of times that the
# "Hello World" print function has been called, and
# increment this number by one.
# @return - Nothing.
# O(1) method.
@staticmethod
def get_number_times_executed():
Try_to_Import_Package.number_times_executed = Try_to_Import_Package.number_times_executed + 1
print(" Try_to_Import_Package 'Hello World' function called:",Try_to_Import_Package.number_times_executed,"times.")
###############################################################
# Main method for the program.
# If this is executed as a Python script,
if __name__ == "__main__":
for x in range(10):
simple.get_number_times_executed()
Try_to_Import_Package.get_number_times_executed()
| 31.75625 | 462 | 0.696516 |
f8c25b89604291903dc921710140b14dee87c4f3 | 7,646 | py | Python | scripts/opengl/src_util.py | karolherbst/VK-GL-CTS | bb088fddd8673dc4f37e5956c42890645ab31577 | ["Apache-2.0"] | null | null | null | scripts/opengl/src_util.py | karolherbst/VK-GL-CTS | bb088fddd8673dc4f37e5956c42890645ab31577 | ["Apache-2.0"] | null | null | null | scripts/opengl/src_util.py | karolherbst/VK-GL-CTS | bb088fddd8673dc4f37e5956c42890645ab31577 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015-2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import re
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import khr_util.format
import khr_util.registry
import khr_util.registry_cache
SCRIPTS_DIR = os.path.dirname(__file__)
OPENGL_DIR = os.path.normpath(os.path.join(SCRIPTS_DIR, "..", "..", "framework", "opengl"))
EGL_DIR = os.path.normpath(os.path.join(SCRIPTS_DIR, "..", "..", "framework", "egl"))
OPENGL_INC_DIR = os.path.join(OPENGL_DIR, "wrapper")
GL_SOURCE = khr_util.registry_cache.RegistrySource(
"https://raw.githubusercontent.com/KhronosGroup/OpenGL-Registry",
"xml/gl.xml",
"97ab881f0ab9e03a59388214f9c36dfe4c206c76",
"2bd7209131ca5a5381dfe3fd346568abbb20eda5907f555212e365c141bbce6c")
EXTENSIONS = [
'GL_KHR_texture_compression_astc_ldr',
'GL_KHR_blend_equation_advanced',
'GL_KHR_blend_equation_advanced_coherent',
'GL_KHR_debug',
'GL_EXT_robustness',
'GL_KHR_robustness',
'GL_KHR_no_error',
'GL_KHR_parallel_shader_compile',
'GL_EXT_bgra',
'GL_EXT_geometry_point_size',
'GL_EXT_tessellation_shader',
'GL_EXT_geometry_shader',
'GL_EXT_texture_buffer',
'GL_EXT_texture_filter_anisotropic',
'GL_EXT_texture_cube_map_array',
'GL_EXT_texture_snorm',
'GL_EXT_primitive_bounding_box',
'GL_EXT_texture_compression_s3tc',
'GL_EXT_texture_type_2_10_10_10_REV',
'GL_EXT_copy_image',
'GL_EXT_depth_bounds_test',
'GL_EXT_direct_state_access',
'GL_EXT_draw_buffers_indexed',
'GL_EXT_draw_elements_base_vertex',
'GL_EXT_direct_state_access',
'GL_EXT_read_format_bgra',
'GL_EXT_texture_storage',
'GL_EXT_texture_sRGB_decode',
'GL_EXT_texture_border_clamp',
'GL_EXT_texture_sRGB_R8',
'GL_EXT_texture_sRGB_RG8',
'GL_EXT_debug_marker',
'GL_EXT_polygon_offset_clamp',
'GL_IMG_texture_compression_pvrtc',
'GL_OES_EGL_image',
'GL_OES_EGL_image_external',
'GL_OES_compressed_ETC1_RGB8_texture',
'GL_OES_compressed_paletted_texture',
'GL_OES_required_internalformat',
'GL_OES_packed_depth_stencil',
'GL_OES_texture_3D',
'GL_OES_texture_half_float',
'GL_OES_texture_storage_multisample_2d_array',
'GL_OES_sample_shading',
'GL_OES_standard_derivatives',
'GL_OES_stencil1',
'GL_OES_stencil4',
'GL_OES_surfaceless_context',
'GL_OES_mapbuffer',
'GL_OES_vertex_array_object',
'GL_OES_viewport_array',
'GL_ARB_clip_control',
'GL_ARB_buffer_storage',
'GL_ARB_compute_shader',
'GL_ARB_draw_indirect',
'GL_ARB_draw_instanced',
'GL_ARB_draw_elements_base_vertex',
'GL_ARB_direct_state_access',
'GL_ARB_get_program_binary',
'GL_ARB_gl_spirv',
'GL_ARB_indirect_parameters',
'GL_ARB_internalformat_query',
'GL_ARB_instanced_arrays',
'GL_ARB_multi_draw_indirect',
'GL_ARB_parallel_shader_compile',
'GL_ARB_program_interface_query',
'GL_ARB_separate_shader_objects',
'GL_ARB_shader_ballot',
'GL_ARB_shader_image_load_store',
'GL_ARB_shader_viewport_layer_array',
'GL_ARB_sparse_buffer',
'GL_ARB_sparse_texture',
'GL_ARB_spirv_extensions',
'GL_ARB_tessellation_shader',
'GL_ARB_texture_barrier',
'GL_ARB_texture_filter_minmax',
'GL_ARB_texture_gather',
'GL_ARB_texture_storage',
'GL_ARB_texture_storage_multisample',
'GL_ARB_texture_multisample',
'GL_ARB_texture_view',
'GL_ARB_transform_feedback2',
'GL_ARB_transform_feedback3',
'GL_ARB_transform_feedback_instanced',
'GL_ARB_transform_feedback_overflow_query',
'GL_ARB_vertex_array_bgra',
'GL_ARB_vertex_attrib_64bit',
'GL_ARB_vertex_attrib_binding',
'GL_NV_deep_texture3D',
'GL_NV_gpu_multicast',
'GL_NV_internalformat_sample_query',
'GL_NVX_cross_process_interop',
'GL_OES_draw_elements_base_vertex',
'GL_OVR_multiview',
'GL_OVR_multiview_multisampled_render_to_texture',
]
def getGLRegistry ():
return khr_util.registry_cache.getRegistry(GL_SOURCE)
def getHybridInterface (stripAliasedExtCommands = True):
# This is a bit awkward, since we have to create a strange hybrid
# interface that includes both GL and ES features and extensions.
registry = getGLRegistry()
glFeatures = registry.getFeatures('gl')
esFeatures = registry.getFeatures('gles2')
spec = khr_util.registry.InterfaceSpec()
for feature in registry.getFeatures('gl'):
spec.addFeature(feature, 'gl', 'core')
for feature in registry.getFeatures('gles2'):
spec.addFeature(feature, 'gles2')
for extName in EXTENSIONS:
extension = registry.extensions[extName]
# Add all extensions using the ES2 api, but force even non-ES2
# extensions to be included.
spec.addExtension(extension, 'gles2', 'core', force=True)
iface = khr_util.registry.createInterface(registry, spec, 'gles2')
if stripAliasedExtCommands:
# Remove redundant extension commands that are already provided by core.
strippedCmds = []
for command in iface.commands:
if command.alias == None:
strippedCmds.append(command)
iface.commands = strippedCmds
return iface
def getInterface (registry, api, version=None, profile=None, **kwargs):
spec = khr_util.registry.spec(registry, api, version, profile, **kwargs)
if api == 'gl' and profile == 'core' and version < "3.2":
gl32 = registry.features['GL_VERSION_3_2']
for eRemove in gl32.xpath('remove'):
spec.addComponent(eRemove)
return khr_util.registry.createInterface(registry, spec, api)
def getVersionToken (api, version):
prefixes = { 'gles2': "ES", 'gl': "GL" }
return prefixes[api] + version.replace(".", "")
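# For example (added note): getVersionToken('gles2', '3.2') -> 'ES32' and
# getVersionToken('gl', '4.5') -> 'GL45'; these tokens fill the %-style file name
# patterns used by genCommandLists below.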
def genCommandList(iface, renderCommand, directory, filename, align=False):
lines = map(renderCommand, iface.commands)
lines = filter(lambda l: l != None, lines)
if align:
lines = indentLines(lines)
writeInlFile(os.path.join(directory, filename), lines)
def genCommandLists(registry, renderCommand, check, directory, filePattern, align=False):
for eFeature in registry.features:
api = eFeature.get('api')
version = eFeature.get('number')
profile = check(api, version)
if profile is True:
profile = None
elif profile is False:
continue
iface = getInterface(registry, api, version=version, profile=profile)
filename = filePattern % getVersionToken(api, version)
genCommandList(iface, renderCommand, directory, filename, align)
def getFunctionTypeName (funcName):
return "%sFunc" % funcName
def getFunctionMemberName (funcName):
assert funcName[:2] == "gl"
if funcName[:5] == "glEGL":
# Otherwise we end up with gl.eGLImage...
return "egl%s" % funcName[5:]
else:
return "%c%s" % (funcName[2].lower(), funcName[3:])
INL_HEADER = khr_util.format.genInlHeader("Khronos GL API description (gl.xml)", GL_SOURCE.getRevision())
def writeInlFile (filename, source):
khr_util.format.writeInlFile(filename, INL_HEADER, source)
# Aliases from khr_util.common
indentLines = khr_util.format.indentLines
normalizeConstant = khr_util.format.normalizeConstant
commandParams = khr_util.format.commandParams
commandArgs = khr_util.format.commandArgs
| 32.956897 | 105 | 0.760005 |
c926c99a999bb88388c77d859e54c8973417b9db | 22,087 | py | Python | youtube_dl/extractor/twitch.py | aalvarito68/https-github.com-rg3-youtube-dl | dfc80bdd2e4ef3d30f161a93f99f3050537944ab | ["Unlicense"] | null | null | null | youtube_dl/extractor/twitch.py | aalvarito68/https-github.com-rg3-youtube-dl | dfc80bdd2e4ef3d30f161a93f99f3050537944ab | ["Unlicense"] | null | null | null | youtube_dl/extractor/twitch.py | aalvarito68/https-github.com-rg3-youtube-dl | dfc80bdd2e4ef3d30f161a93f99f3050537944ab | ["Unlicense"] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import random
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
js_to_json,
orderedSet,
parse_duration,
parse_iso8601,
update_url_query,
urlencode_postdata,
urljoin,
)
class TwitchBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:(?:www|go)\.)?twitch\.tv'
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'https://usher.ttvnw.net'
_LOGIN_URL = 'https://www.twitch.tv/login'
_CLIENT_ID = 'jzkbprff40iqj646a697cyrvl0zt2m6'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
def _call_api(self, path, item_id, note):
response = self._download_json(
'%s/%s' % (self._API_BASE, path), item_id, note,
headers={'Client-ID': self._CLIENT_ID})
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
def fail(message):
raise ExtractorError(
'Unable to login. Twitch said: %s' % message, expected=True)
def login_step(page, urlh, note, data):
form = self._hidden_inputs(page)
form.update(data)
page_url = urlh.geturl()
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page,
'post url', default=page_url, group='url')
post_url = urljoin(page_url, post_url)
headers = {'Referer': page_url}
try:
response = self._download_json(
post_url, None, note,
data=urlencode_postdata(form),
headers=headers)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
response = self._parse_json(
e.cause.read().decode('utf-8'), None)
fail(response['message'])
raise
redirect_url = urljoin(post_url, response['redirect'])
return self._download_webpage_handle(
redirect_url, None, 'Downloading login redirect page',
headers=headers)
login_page, handle = self._download_webpage_handle(
self._LOGIN_URL, None, 'Downloading login page')
# Some TOR nodes and public proxies are blocked completely
if 'blacklist_message' in login_page:
fail(clean_html(login_page))
redirect_page, handle = login_step(
login_page, handle, 'Logging in as %s' % username, {
'username': username,
'password': password,
})
if re.search(r'(?i)<form[^>]+id="two-factor-submit"', redirect_page) is not None:
# TODO: Add mechanism to request an SMS or phone call
tfa_token = self._get_tfa_info('two-factor authentication token')
login_step(redirect_page, handle, 'Submitting TFA token', {
'authy_token': tfa_token,
'remember_2fa': 'true',
})
def _prefer_source(self, formats):
try:
source = next(f for f in formats if f['format_id'] == 'Source')
source['preference'] = 10
except StopIteration:
pass # No Source stream present
self._sort_formats(formats)
class TwitchItemBaseIE(TwitchBaseIE):
def _download_info(self, item, item_id):
return self._extract_info(self._call_api(
'kraken/videos/%s%s' % (item, item_id), item_id,
'Downloading %s info JSON' % self._ITEM_TYPE))
def _extract_media(self, item_id):
info = self._download_info(self._ITEM_SHORTCUT, item_id)
response = self._call_api(
'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id,
'Downloading %s playlist JSON' % self._ITEM_TYPE)
entries = []
chunks = response['chunks']
qualities = list(chunks.keys())
for num, fragment in enumerate(zip(*chunks.values()), start=1):
formats = []
for fmt_num, fragment_fmt in enumerate(fragment):
format_id = qualities[fmt_num]
fmt = {
'url': fragment_fmt['url'],
'format_id': format_id,
'quality': 1 if format_id == 'live' else 0,
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
entry = dict(info)
entry['id'] = '%s_%d' % (entry['id'], num)
entry['title'] = '%s part %d' % (entry['title'], num)
entry['formats'] = formats
entries.append(entry)
return self.playlist_result(entries, info['id'], info['title'])
def _extract_info(self, info):
return {
'id': info['_id'],
'title': info.get('title') or 'Untitled Broadcast',
'description': info.get('description'),
'duration': int_or_none(info.get('length')),
'thumbnail': info.get('preview'),
'uploader': info.get('channel', {}).get('display_name'),
'uploader_id': info.get('channel', {}).get('name'),
'timestamp': parse_iso8601(info.get('recorded_at')),
'view_count': int_or_none(info.get('views')),
}
def _real_extract(self, url):
return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
IE_NAME = 'twitch:video'
_VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'video'
_ITEM_SHORTCUT = 'a'
_TEST = {
'url': 'http://www.twitch.tv/riotgames/b/577357806',
'info_dict': {
'id': 'a577357806',
'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
},
'playlist_mincount': 12,
'skip': 'HTTP Error 404: Not Found',
}
class TwitchChapterIE(TwitchItemBaseIE):
IE_NAME = 'twitch:chapter'
_VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'chapter'
_ITEM_SHORTCUT = 'c'
_TESTS = [{
'url': 'http://www.twitch.tv/acracingleague/c/5285812',
'info_dict': {
'id': 'c5285812',
'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
},
'playlist_mincount': 3,
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
'only_matching': True,
}]
class TwitchVodIE(TwitchItemBaseIE):
IE_NAME = 'twitch:vod'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:www|go)\.)?twitch\.tv/(?:[^/]+/v|videos)/|
player\.twitch\.tv/\?.*?\bvideo=v
)
(?P<id>\d+)
'''
_ITEM_TYPE = 'vod'
_ITEM_SHORTCUT = 'v'
_TESTS = [{
'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
'info_dict': {
'id': 'v6528877',
'ext': 'mp4',
'title': 'LCK Summer Split - Week 6 Day 1',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 17208,
'timestamp': 1435131709,
'upload_date': '20150624',
'uploader': 'Riot Games',
'uploader_id': 'riotgames',
'view_count': int,
'start_time': 310,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# Untitled broadcast (title is None)
'url': 'http://www.twitch.tv/belkao_o/v/11230755',
'info_dict': {
'id': 'v11230755',
'ext': 'mp4',
'title': 'Untitled Broadcast',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1638,
'timestamp': 1439746708,
'upload_date': '20150816',
'uploader': 'BelkAO_o',
'uploader_id': 'belkao_o',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://player.twitch.tv/?t=5m10s&video=v6528877',
'only_matching': True,
}, {
'url': 'https://www.twitch.tv/videos/6528877',
'only_matching': True,
}]
def _real_extract(self, url):
item_id = self._match_id(url)
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._call_api(
'api/vods/%s/access_token' % item_id, item_id,
'Downloading %s access token' % self._ITEM_TYPE)
formats = self._extract_m3u8_formats(
'%s/vod/%s?%s' % (
self._USHER_BASE, item_id,
compat_urllib_parse_urlencode({
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'player': 'twitchweb',
'nauth': access_token['token'],
'nauthsig': access_token['sig'],
})),
item_id, 'mp4', entry_protocol='m3u8_native')
self._prefer_source(formats)
info['formats'] = formats
parsed_url = compat_urllib_parse_urlparse(url)
query = compat_parse_qs(parsed_url.query)
if 't' in query:
info['start_time'] = parse_duration(query['t'][0])
if info.get('timestamp') is not None:
info['subtitles'] = {
'rechat': [{
'url': update_url_query(
'https://rechat.twitch.tv/rechat-messages', {
'video_id': 'v%s' % item_id,
'start': info['timestamp'],
}),
'ext': 'json',
}],
}
return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
_PLAYLIST_PATH = 'kraken/channels/%s/videos/?offset=%d&limit=%d'
_PAGE_LIMIT = 100
def _extract_playlist(self, channel_id):
info = self._call_api(
'kraken/channels/%s' % channel_id,
channel_id, 'Downloading channel info JSON')
channel_name = info.get('display_name') or info.get('name')
entries = []
offset = 0
limit = self._PAGE_LIMIT
broken_paging_detected = False
counter_override = None
for counter in itertools.count(1):
response = self._call_api(
self._PLAYLIST_PATH % (channel_id, offset, limit),
channel_id,
'Downloading %s JSON page %s'
% (self._PLAYLIST_TYPE, counter_override or counter))
page_entries = self._extract_playlist_page(response)
if not page_entries:
break
total = int_or_none(response.get('_total'))
# Since the beginning of March 2016 twitch's paging mechanism
# is completely broken on the twitch side. It simply ignores
# a limit and returns the whole offset number of videos.
# Working around by just requesting all videos at once.
# Upd: pagination bug was fixed by twitch on 15.03.2016.
if not broken_paging_detected and total and len(page_entries) > limit:
self.report_warning(
'Twitch pagination is broken on twitch side, requesting all videos at once',
channel_id)
broken_paging_detected = True
offset = total
counter_override = '(all at once)'
continue
entries.extend(page_entries)
if broken_paging_detected or total and len(page_entries) >= total:
break
offset += limit
return self.playlist_result(
[self.url_result(entry) for entry in orderedSet(entries)],
channel_id, channel_name)
def _extract_playlist_page(self, response):
videos = response.get('videos')
return [video['url'] for video in videos] if videos else []
def _real_extract(self, url):
return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:profile'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_TYPE = 'profile'
_TEST = {
'url': 'http://www.twitch.tv/vanillatv/profile',
'info_dict': {
'id': 'vanillatv',
'title': 'VanillaTV',
},
'playlist_mincount': 412,
}
class TwitchVideosBaseIE(TwitchPlaylistBaseIE):
_VALID_URL_VIDEOS_BASE = r'%s/(?P<id>[^/]+)/videos' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_PATH = TwitchPlaylistBaseIE._PLAYLIST_PATH + '&broadcast_type='
class TwitchAllVideosIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:all'
_VALID_URL = r'%s/all' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive,upload,highlight'
_PLAYLIST_TYPE = 'all videos'
_TEST = {
'url': 'https://www.twitch.tv/spamfish/videos/all',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 869,
}
class TwitchUploadsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:uploads'
_VALID_URL = r'%s/uploads' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'upload'
_PLAYLIST_TYPE = 'uploads'
_TEST = {
'url': 'https://www.twitch.tv/spamfish/videos/uploads',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 0,
}
class TwitchPastBroadcastsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:past-broadcasts'
_VALID_URL = r'%s/past-broadcasts' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive'
_PLAYLIST_TYPE = 'past broadcasts'
_TEST = {
'url': 'https://www.twitch.tv/spamfish/videos/past-broadcasts',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 0,
}
class TwitchHighlightsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:highlights'
_VALID_URL = r'%s/highlights' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'highlight'
_PLAYLIST_TYPE = 'highlights'
_TEST = {
'url': 'https://www.twitch.tv/spamfish/videos/highlights',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 805,
}
class TwitchStreamIE(TwitchBaseIE):
IE_NAME = 'twitch:stream'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:www|go)\.)?twitch\.tv/|
player\.twitch\.tv/\?.*?\bchannel=
)
(?P<id>[^/#?]+)
'''
_TESTS = [{
'url': 'http://www.twitch.tv/shroomztv',
'info_dict': {
'id': '12772022048',
'display_id': 'shroomztv',
'ext': 'mp4',
'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
'is_live': True,
'timestamp': 1421928037,
'upload_date': '20150122',
'uploader': 'ShroomzTV',
'uploader_id': 'shroomztv',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.twitch.tv/miracle_doto#profile-0',
'only_matching': True,
}, {
'url': 'https://player.twitch.tv/?channel=lotsofs',
'only_matching': True,
}, {
'url': 'https://go.twitch.tv/food',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False
if any(ie.suitable(url) for ie in (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchAllVideosIE,
TwitchUploadsIE,
TwitchPastBroadcastsIE,
TwitchHighlightsIE))
else super(TwitchStreamIE, cls).suitable(url))
def _real_extract(self, url):
channel_id = self._match_id(url)
stream = self._call_api(
'kraken/streams/%s?stream_type=all' % channel_id, channel_id,
'Downloading stream JSON').get('stream')
if not stream:
raise ExtractorError('%s is offline' % channel_id, expected=True)
        # The channel name in the URL may be typed with different casing than the original
        # channel name (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON), which would lead to
        # constructing an invalid m3u8 URL. Work around this by using the original channel
        # name from the stream JSON, falling back to lowercase if it is not available.
channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
access_token = self._call_api(
'api/channels/%s/access_token' % channel_id, channel_id,
'Downloading channel access token')
query = {
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'p': random.randint(1000000, 10000000),
'player': 'twitchweb',
'segment_preference': '4',
'sig': access_token['sig'].encode('utf-8'),
'token': access_token['token'].encode('utf-8'),
}
formats = self._extract_m3u8_formats(
'%s/api/channel/hls/%s.m3u8?%s'
% (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
channel_id, 'mp4')
self._prefer_source(formats)
view_count = stream.get('viewers')
timestamp = parse_iso8601(stream.get('created_at'))
channel = stream['channel']
title = self._live_title(channel.get('display_name') or channel.get('name'))
description = channel.get('status')
thumbnails = []
for thumbnail_key, thumbnail_url in stream['preview'].items():
m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
if not m:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return {
'id': compat_str(stream['_id']),
'display_id': channel_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'uploader': channel.get('display_name'),
'uploader_id': channel.get('name'),
'timestamp': timestamp,
'view_count': view_count,
'formats': formats,
'is_live': True,
}
class TwitchClipsIE(InfoExtractor):
IE_NAME = 'twitch:clips'
_VALID_URL = r'https?://clips\.twitch\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://clips.twitch.tv/ea/AggressiveCobraPoooound',
'md5': '761769e1eafce0ffebfb4089cb3847cd',
'info_dict': {
'id': 'AggressiveCobraPoooound',
'ext': 'mp4',
'title': 'EA Play 2016 Live from the Novo Theatre',
'thumbnail': r're:^https?://.*\.jpg',
'creator': 'EA',
'uploader': 'stereotype_',
'uploader_id': 'stereotype_',
},
}, {
# multiple formats
'url': 'https://clips.twitch.tv/rflegendary/UninterestedBeeDAESuppy',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
clip = self._parse_json(
self._search_regex(
r'(?s)clipInfo\s*=\s*({.+?});', webpage, 'clip info'),
video_id, transform_source=js_to_json)
title = clip.get('channel_title') or self._og_search_title(webpage)
formats = [{
'url': option['source'],
'format_id': option.get('quality'),
'height': int_or_none(option.get('quality')),
} for option in clip.get('quality_options', []) if option.get('source')]
if not formats:
formats = [{
'url': clip['clip_video_url'],
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'creator': clip.get('broadcaster_display_name') or clip.get('broadcaster_login'),
'uploader': clip.get('curator_login'),
'uploader_id': clip.get('curator_display_name'),
'formats': formats,
}
| 34.727987 | 101 | 0.543759 |
97e21dd1bccdee7f36bbd780487aa19ea7819d01 | 4,710 | py | Python | AWS/check_reserved_instances_ec2.py | borkit/scriptdump | 86c622e7a5ce48c75efa9eceeac58b8c69e24973 | ["MIT"] | 4 | 2019-02-01T22:50:24.000Z | 2020-10-22T15:50:04.000Z | AWS/check_reserved_instances_ec2.py | borkit/scriptdump | 86c622e7a5ce48c75efa9eceeac58b8c69e24973 | ["MIT"] | null | null | null | AWS/check_reserved_instances_ec2.py | borkit/scriptdump | 86c622e7a5ce48c75efa9eceeac58b8c69e24973 | ["MIT"] | 2 | 2019-01-27T09:30:56.000Z | 2020-10-22T15:50:28.000Z |
#!/usr/bin/python
import boto3
import logging
import argparse
from pprint import pformat,pprint
parser = argparse.ArgumentParser(description='Cross reference existing ec2 reservations to current instances.')
parser.add_argument('--log', default="WARN", help='Change log level (default: WARN)')
parser.add_argument('--region', default='us-east-1', help='AWS Region to connect to')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log))
logger = logging.getLogger('ec2-check')
# Dump some environment details
logger.debug("boto version = %s", boto3.__version__)
ec2_client = boto3.client('ec2',
region_name=args.region)
ec2_instances = ec2_client.describe_instances()
running_ec2_instances = {}
for reservation in ec2_instances['Reservations']:
for instance in reservation['Instances']:
if instance['State']['Name'] != "running":
logger.debug("Disqualifying instance %s: not running\n" % ( instance['InstanceId'] ) )
elif "InstanceLifecycle" in instance:
if instance['InstanceLifecycle'] == "spot":
logger.debug("Disqualifying instance %s: spot\n" % ( instance['InstanceId'] ) )
else:
az = instance['Placement']['AvailabilityZone']
instance_type = instance['InstanceType']
logger.debug("Running instance: %s"% (instance))
if "VpcId" in instance:
location = 'vpc'
else:
location = 'ec2'
if "Platform" in instance:
platform = instance['Platform']
else:
platform = 'linux'
running_ec2_instances[ (instance_type, az, platform, location ) ] = running_ec2_instances.get( (instance_type, az, platform, location ) , 0 ) + 1
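# Illustrative (hypothetical) shape of the tally built above, keyed by
# (instance_type, availability_zone, platform, location):
#   {('m4.large', 'us-east-1a', 'linux', 'vpc'): 3, ('t2.micro', 'us-east-1b', 'windows', 'ec2'): 1}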
logger.debug("FOO -- Running instances: %s"% pformat(running_ec2_instances))
ec2_reservations = ec2_client.describe_reserved_instances()
ec2_reserved_instances = {}
ec2_reserved_instances_ids = {}
for ri in ec2_reservations['ReservedInstances']:
if ri['State'] != "active":
logger.debug("Excluding reserved instances %s: no longer active\n" % (ri['ReservedInstancesId']))
else:
if ri['Scope'] != "Region":
az = ri['AvailabilityZone']
else:
az = 'Region'
instance_type = ri['InstanceType']
logger.debug("Reserved instance: %s" % (ri))
description = ri['ProductDescription']
if "Windows" in description:
platform = 'windows'
else:
platform = 'linux'
if "VPC" in description:
location = 'vpc'
else:
location = 'ec2'
instance_signature = (instance_type, az, platform, location)
ec2_reserved_instances[instance_signature] = ec2_reserved_instances.get(instance_signature,
0) + ri['InstanceCount']
if instance_signature not in ec2_reserved_instances_ids:
# print "Resetting instance_signature: (%s)" % (instance_signature)
ec2_reserved_instances_ids[instance_signature] = []
logger.debug("inserting reserved_instance_id (%s) into list (%s)" % (
instance_signature, ec2_reserved_instances_ids[instance_signature]))
ec2_reserved_instances_ids[instance_signature].append(ri['ReservedInstancesId'])
logger.debug("Reserved instances: %s" % pformat(ec2_reserved_instances))
for running_instance in running_ec2_instances:
for _ in range(running_ec2_instances[running_instance]):
if running_instance in ec2_reserved_instances:
if ec2_reserved_instances[running_instance] >= 2:
ec2_reserved_instances[running_instance] -= 1
else:
ec2_reserved_instances.pop(running_instance)
logger.debug("Instance is not reserved")
else:
regional_running_reservation = list(running_instance)
regional_running_reservation[1] = 'Region'
regional_running_reservation_tuple = tuple(regional_running_reservation)
if regional_running_reservation_tuple in ec2_reserved_instances:
if ec2_reserved_instances[regional_running_reservation_tuple] >= 2:
ec2_reserved_instances[regional_running_reservation_tuple] -= 1
else:
ec2_reserved_instances.pop(regional_running_reservation_tuple)
logger.debug("Instance is not reserved")
print ("Unused reserved instances: %s" % pformat(ec2_reserved_instances))
| 45.728155
| 158
| 0.635881
|
406d75c1bbdd0a3a8947d69ef9f39f580ed500b2
| 2,666
|
py
|
Python
|
Pimoroni/sgp30_simpletest.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | 1
|
2021-03-22T18:38:43.000Z
|
2021-03-22T18:38:43.000Z
|
Pimoroni/sgp30_simpletest.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | null | null | null |
Pimoroni/sgp30_simpletest.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | 1
|
2021-02-06T10:07:36.000Z
|
2021-02-06T10:07:36.000Z
|
# sgp30_simpletest.py
# https://github.com/alexmrqt/micropython-sgp30
# adaptation to the I2C bus of Raspberry Pi Pico by
# 2021-03-11 Claus Kühnel info@ckuehnel.ch
"""
Example for using the SGP30 with MicroPython and the Adafruit library.
Uses instructions from "SGP30 Driver Integration (for Software I²C)" to handle
self-calibration of the sensor:
- if no baseline found, wait 12h before storing baseline,
- if baseline found, store baseline every hour.
Baseline is written in co2eq_baseline.txt and tvoc_baseline.txt.
Note that if the sensor is shut down for more than one week, then baselines
must be manually deleted.
"""
import time
from machine import Pin, I2C
import adafruit_sgp30
# Initialize I2C bus
sda=Pin(0)
scl=Pin(1)
i2c=I2C(0,sda=sda, scl=scl, freq=100000)
# Create library object on our I2C port
sgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)
print("SGP30 serial #", [hex(i) for i in sgp30.serial])
# Initialize SGP-30 internal drift compensation algorithm.
sgp30.iaq_init()
# Wait 15 seconds for the SGP30 to properly initialize
print("Waiting 15 seconds for SGP30 initialization.")
time.sleep(15)
# Retrieve previously stored baselines, if any (helps the compensation algorithm).
has_baseline = False
try:
f_co2 = open('co2eq_baseline.txt', 'r')
f_tvoc = open('tvoc_baseline.txt', 'r')
co2_baseline = int(f_co2.read())
tvoc_baseline = int(f_tvoc.read())
#Use them to calibrate the sensor
sgp30.set_iaq_baseline(co2_baseline, tvoc_baseline)
f_co2.close()
f_tvoc.close()
has_baseline = True
except:
print('Impossible to read SGP30 baselines!')
#Store the time at which last baseline has been saved
baseline_time = time.time()
while True:
co2eq, tvoc = sgp30.iaq_measure()
print('co2eq = ' + str(co2eq) + ' ppm \t tvoc = ' + str(tvoc) + ' ppb')
    # Baselines should be saved after 12 hours the first time, then every hour,
# according to the doc.
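    # (In the condition below: 3600 s = 1 hour, 43200 s = 12 hours.)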
if (has_baseline and (time.time() - baseline_time >= 3600)) \
or ((not has_baseline) and (time.time() - baseline_time >= 43200)):
print('Saving baseline!')
baseline_time = time.time()
try:
f_co2 = open('co2eq_baseline.txt', 'w')
f_tvoc = open('tvoc_baseline.txt', 'w')
bl_co2, bl_tvoc = sgp30.get_iaq_baseline()
f_co2.write(str(bl_co2))
f_tvoc.write(str(bl_tvoc))
f_co2.close()
f_tvoc.close()
has_baseline = True
except:
print('Impossible to write SGP30 baselines!')
    # A measurement should be taken every second (1 Hz), according to the doc.
time.sleep(1)
| 28.666667
| 82
| 0.685296
|
3a3bc6214834229cbe6bae6f721f8789dbc17f85
| 6,866
|
py
|
Python
|
neutron/db/extraroute_db.py
|
iiscsahoo/Neutron
|
2bf91b2f1036e62a6d76bb0f7a82304f15dce1f3
|
[
"Apache-2.0"
] | null | null | null |
neutron/db/extraroute_db.py
|
iiscsahoo/Neutron
|
2bf91b2f1036e62a6d76bb0f7a82304f15dce1f3
|
[
"Apache-2.0"
] | null | null | null |
neutron/db/extraroute_db.py
|
iiscsahoo/Neutron
|
2bf91b2f1036e62a6d76bb0f7a82304f15dce1f3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import utils
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.objects import router as l3_obj
LOG = logging.getLogger(__name__)
extra_route_opts = [
    # TODO(nati): use the quota framework when it supports quotas for attributes
cfg.IntOpt('max_routes', default=30,
help=_("Maximum number of routes per router")),
]
cfg.CONF.register_opts(extra_route_opts)
class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
"""Mixin class to support extra route configuration on router."""
def _extend_router_dict_extraroute(self, router_res, router_db):
router_res['routes'] = (ExtraRoute_dbonly_mixin.
_make_extra_route_list(
router_db['route_list']
))
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, ['_extend_router_dict_extraroute'])
def update_router(self, context, id, router):
r = router['router']
with context.session.begin(subtransactions=True):
            # check that the router exists and that we have permission to access it
router_db = self._get_router(context, id)
if 'routes' in r:
self._update_extra_routes(context, router_db, r['routes'])
routes = self._get_extra_routes_by_router_id(context, id)
router_updated = super(ExtraRoute_dbonly_mixin, self).update_router(
context, id, router)
router_updated['routes'] = routes
return router_updated
def _get_subnets_by_cidr(self, context, cidr):
query_subnets = context.session.query(models_v2.Subnet)
return query_subnets.filter_by(cidr=cidr).all()
def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop):
        # Note(nati): the nexthop should be connected, so check that it belongs
        # to one of the CIDRs of the router's ports.
if not netaddr.all_matching_cidrs(nexthop, cidrs):
raise extraroute.InvalidRoutes(
routes=routes,
reason=_('the nexthop is not connected with router'))
        # Note(nati): the nexthop should not be the same as any of the fixed_ips
if nexthop in ips:
raise extraroute.InvalidRoutes(
routes=routes,
reason=_('the nexthop is used by router'))
def _validate_routes(self, context,
router_id, routes):
if len(routes) > cfg.CONF.max_routes:
raise extraroute.RoutesExhausted(
router_id=router_id,
quota=cfg.CONF.max_routes)
context = context.elevated()
filters = {'device_id': [router_id]}
ports = self._core_plugin.get_ports(context, filters)
cidrs = []
ips = []
for port in ports:
for ip in port['fixed_ips']:
cidrs.append(self._core_plugin.get_subnet(
context, ip['subnet_id'])['cidr'])
ips.append(ip['ip_address'])
for route in routes:
self._validate_routes_nexthop(
cidrs, ips, routes, route['nexthop'])
def _update_extra_routes(self, context, router, routes):
self._validate_routes(context, router['id'],
routes)
old_routes, routes_dict = self._get_extra_routes_dict_by_router_id(
context, router['id'])
added, removed = helpers.diff_list_of_dict(old_routes,
routes)
LOG.debug('Added routes are %s', added)
for route in added:
l3_obj.RouterRoute(
context,
router_id=router['id'],
destination=utils.AuthenticIPNetwork(route['destination']),
nexthop=netaddr.IPAddress(route['nexthop'])).create()
LOG.debug('Removed routes are %s', removed)
for route in removed:
l3_obj.RouterRoute.get_object(
context,
router_id=router['id'],
destination=route['destination'],
nexthop=route['nexthop']).delete()
@staticmethod
def _make_extra_route_list(extra_routes):
# NOTE(yamamoto): the extra_routes argument is either object or db row
return [{'destination': str(route['destination']),
'nexthop': str(route['nexthop'])}
for route in extra_routes]
def _get_extra_routes_by_router_id(self, context, id):
router_objs = l3_obj.RouterRoute.get_objects(context, router_id=id)
return self._make_extra_route_list(router_objs)
def _get_extra_routes_dict_by_router_id(self, context, id):
router_objs = l3_obj.RouterRoute.get_objects(context, router_id=id)
routes = []
routes_dict = {}
for route in router_objs:
routes.append({'destination': route.destination,
'nexthop': route.nexthop})
routes_dict[(route.destination, route.nexthop)] = route
return routes, routes_dict
def _confirm_router_interface_not_in_use(self, context, router_id,
subnet_id):
super(ExtraRoute_dbonly_mixin,
self)._confirm_router_interface_not_in_use(
context, router_id, subnet_id)
subnet = self._core_plugin.get_subnet(context, subnet_id)
subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
extra_routes = self._get_extra_routes_by_router_id(context, router_id)
for route in extra_routes:
if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]):
raise extraroute.RouterInterfaceInUseByRoute(
router_id=router_id, subnet_id=subnet_id)
class ExtraRoute_db_mixin(ExtraRoute_dbonly_mixin, l3_db.L3_NAT_db_mixin):
"""Mixin class to support extra route configuration on router with rpc."""
pass
| 40.869048
| 78
| 0.641421
|
8091c0a59396a0e5bdc8ee46c4541e81f14adcf9
| 61
|
py
|
Python
|
speedtest_servers/extensions.py
|
Nearata/speedtest-servers
|
21f835be724afffc47b7f3eb1c5bda909063aa15
|
[
"MIT"
] | null | null | null |
speedtest_servers/extensions.py
|
Nearata/speedtest-servers
|
21f835be724afffc47b7f3eb1c5bda909063aa15
|
[
"MIT"
] | null | null | null |
speedtest_servers/extensions.py
|
Nearata/speedtest-servers
|
21f835be724afffc47b7f3eb1c5bda909063aa15
|
[
"MIT"
] | null | null | null |
from flask_assets import Environment
assets = Environment()
| 15.25
| 36
| 0.819672
|
70e99c893b0158fde73adf1d68be485ac035ac1a
| 25,407
|
gyp
|
Python
|
gpu/gpu.gyp
|
hefen1/chromium
|
52f0b6830e000ca7c5e9aa19488af85be792cc88
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
gpu/gpu.gyp
|
hefen1/chromium
|
52f0b6830e000ca7c5e9aa19488af85be792cc88
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
gpu/gpu.gyp
|
hefen1/chromium
|
52f0b6830e000ca7c5e9aa19488af85be792cc88
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2020-04-04T13:34:56.000Z
|
2020-11-04T07:17:52.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'nacl_win64_target': 0,
},
'includes': [
'gpu_common.gypi',
],
'targets': [
{
# Library emulates GLES2 using command_buffers.
# GN version: //gpu/command_buffer/client:gles2_implementation
'target_name': 'gles2_implementation',
'type': '<(component)',
'dependencies': [
'../base/base.gyp:base',
'../third_party/khronos/khronos.gyp:khronos_headers',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gl/gl.gyp:gl',
'command_buffer/command_buffer.gyp:gles2_utils',
'gles2_cmd_helper',
],
'defines': [
'GLES2_IMPL_IMPLEMENTATION',
],
'sources': [
'<@(gles2_implementation_source_files)',
],
'includes': [
# Disable LTO due to ELF section name out of range
# crbug.com/422251
'../build/android/disable_lto.gypi',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# GN version: //gpu/command_buffer/client:gl_in_process_context
'target_name': 'gl_in_process_context',
'type': '<(component)',
'dependencies': [
'command_buffer/command_buffer.gyp:gles2_utils',
'gles2_implementation',
'gpu',
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gl/gl.gyp:gl',
],
'defines': [
'GL_IN_PROCESS_CONTEXT_IMPLEMENTATION',
],
'sources': [
'command_buffer/client/gl_in_process_context.cc',
'command_buffer/client/gl_in_process_context.h',
'command_buffer/client/gl_in_process_context_export.h',
],
},
{
# Library emulates GLES2 using command_buffers.
'target_name': 'gles2_implementation_no_check',
'type': '<(component)',
'defines': [
'GLES2_IMPL_IMPLEMENTATION',
'GLES2_CONFORMANCE_TESTS=1',
],
'dependencies': [
'../base/base.gyp:base',
'../third_party/khronos/khronos.gyp:khronos_headers',
'../ui/gfx/gfx.gyp:gfx',
'../ui/gfx/gfx.gyp:gfx_geometry',
'command_buffer/command_buffer.gyp:gles2_utils',
'gles2_cmd_helper',
],
'sources': [
'<@(gles2_implementation_source_files)',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
# Stub to expose gles2_implemenation in C instead of C++.
# so GLES2 C programs can work with no changes.
# GN version: //gpu/command_buffer/client:gles2_c_lib
'target_name': 'gles2_c_lib',
'type': '<(component)',
'dependencies': [
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'command_buffer/command_buffer.gyp:gles2_utils',
'command_buffer_client',
],
'defines': [
'GLES2_C_LIB_IMPLEMENTATION',
],
'sources': [
'<@(gles2_c_lib_source_files)',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# Same as gles2_c_lib except with no parameter checking. Required for
# OpenGL ES 2.0 conformance tests.
'target_name': 'gles2_c_lib_nocheck',
'type': '<(component)',
'defines': [
'GLES2_C_LIB_IMPLEMENTATION',
'GLES2_CONFORMANCE_TESTS=1',
],
'dependencies': [
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'command_buffer/command_buffer.gyp:gles2_utils',
'command_buffer_client',
'gles2_implementation_no_check',
],
'sources': [
'<@(gles2_c_lib_source_files)',
],
},
{
# GN version: //gpu:angle_unittests
# TODO(kbr): port this refactoring to the GN build.
'target_name': 'angle_unittests',
'type': '<(gtest_target_type)',
'includes': [
'../third_party/angle/build/common_defines.gypi',
'../third_party/angle/tests/angle_unittests.gypi',
],
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
],
'include_dirs': [
'..',
'../third_party/angle/include',
],
'sources':
[
'angle_unittest_main.cc',
],
},
{
# GN version: //gpu:gpu_unittests
'target_name': 'gpu_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'<(angle_path)/src/angle.gyp:translator',
'../ui/gl/gl.gyp:gl',
'../ui/gfx/gfx.gyp:gfx',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gfx/gfx.gyp:gfx_test_support',
'command_buffer/command_buffer.gyp:gles2_utils',
'command_buffer_client',
'command_buffer_common',
'command_buffer_service',
'gpu',
'gpu_unittest_utils',
'gles2_implementation',
'gles2_cmd_helper',
'gles2_c_lib',
],
'sources': [
# Note: sources list duplicated in GN build.
'command_buffer/client/buffer_tracker_unittest.cc',
'command_buffer/client/client_test_helper.cc',
'command_buffer/client/client_test_helper.h',
'command_buffer/client/cmd_buffer_helper_test.cc',
'command_buffer/client/fenced_allocator_test.cc',
'command_buffer/client/gles2_implementation_unittest.cc',
'command_buffer/client/mapped_memory_unittest.cc',
'command_buffer/client/program_info_manager_unittest.cc',
'command_buffer/client/query_tracker_unittest.cc',
'command_buffer/client/ring_buffer_test.cc',
'command_buffer/client/transfer_buffer_unittest.cc',
'command_buffer/client/vertex_array_object_manager_unittest.cc',
'command_buffer/common/bitfield_helpers_test.cc',
'command_buffer/common/command_buffer_mock.cc',
'command_buffer/common/command_buffer_mock.h',
'command_buffer/common/command_buffer_shared_test.cc',
'command_buffer/common/debug_marker_manager_unittest.cc',
'command_buffer/common/gles2_cmd_format_test.cc',
'command_buffer/common/gles2_cmd_format_test_autogen.h',
'command_buffer/common/gles2_cmd_utils_unittest.cc',
'command_buffer/common/id_allocator_test.cc',
'command_buffer/common/trace_event.h',
'command_buffer/common/unittest_main.cc',
'command_buffer/service/async_pixel_transfer_delegate_mock.cc',
'command_buffer/service/async_pixel_transfer_delegate_mock.h',
'command_buffer/service/async_pixel_transfer_manager_mock.cc',
'command_buffer/service/async_pixel_transfer_manager_mock.h',
'command_buffer/service/buffer_manager_unittest.cc',
'command_buffer/service/cmd_parser_test.cc',
'command_buffer/service/command_buffer_service_unittest.cc',
'command_buffer/service/common_decoder_unittest.cc',
'command_buffer/service/context_group_unittest.cc',
'command_buffer/service/feature_info_unittest.cc',
'command_buffer/service/framebuffer_manager_unittest.cc',
'command_buffer/service/gl_surface_mock.cc',
'command_buffer/service/gl_surface_mock.h',
'command_buffer/service/gles2_cmd_decoder_unittest.cc',
'command_buffer/service/gles2_cmd_decoder_unittest.h',
'command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h',
'command_buffer/service/gles2_cmd_decoder_unittest_1.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h',
'command_buffer/service/gles2_cmd_decoder_unittest_2.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h',
'command_buffer/service/gles2_cmd_decoder_unittest_3.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h',
'command_buffer/service/gles2_cmd_decoder_unittest_async_pixel.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_base.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_base.h',
'command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h',
'command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_programs.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_textures.cc',
'command_buffer/service/gles2_cmd_decoder_unittest_valuebuffer.cc',
'command_buffer/service/gpu_scheduler_unittest.cc',
'command_buffer/service/gpu_service_test.cc',
'command_buffer/service/gpu_service_test.h',
'command_buffer/service/gpu_tracer_unittest.cc',
'command_buffer/service/id_manager_unittest.cc',
'command_buffer/service/mailbox_manager_unittest.cc',
'command_buffer/service/memory_program_cache_unittest.cc',
'command_buffer/service/mocks.cc',
'command_buffer/service/mocks.h',
'command_buffer/service/program_cache_unittest.cc',
'command_buffer/service/program_manager_unittest.cc',
'command_buffer/service/query_manager_unittest.cc',
'command_buffer/service/renderbuffer_manager_unittest.cc',
'command_buffer/service/shader_manager_unittest.cc',
'command_buffer/service/shader_translator_cache_unittest.cc',
'command_buffer/service/shader_translator_unittest.cc',
'command_buffer/service/test_helper.cc',
'command_buffer/service/test_helper.h',
'command_buffer/service/texture_manager_unittest.cc',
'command_buffer/service/transfer_buffer_manager_unittest.cc',
'command_buffer/service/valuebuffer_manager_unittest.cc',
'command_buffer/service/vertex_array_manager_unittest.cc',
'command_buffer/service/vertex_attrib_manager_unittest.cc',
'config/gpu_blacklist_unittest.cc',
'config/gpu_control_list_entry_unittest.cc',
'config/gpu_control_list_number_info_unittest.cc',
'config/gpu_control_list_os_info_unittest.cc',
'config/gpu_control_list_unittest.cc',
'config/gpu_control_list_version_info_unittest.cc',
'config/gpu_driver_bug_list_unittest.cc',
'config/gpu_info_collector_unittest.cc',
'config/gpu_info_unittest.cc',
'config/gpu_test_config_unittest.cc',
'config/gpu_test_expectations_parser_unittest.cc',
'config/gpu_util_unittest.cc',
],
'conditions': [
['OS == "android"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
# See http://crbug.com/162998#c4 for why this is needed.
['OS=="linux" and use_allocator!="none"', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
# GN version: //gpu/gpu_perftests
'target_name': 'gpu_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
'../testing/perf/perf_test.gyp:perf_test',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gl/gl.gyp:gl',
'command_buffer_service',
],
'sources': [
'perftests/measurements.cc',
'perftests/run_all_tests.cc',
'perftests/texture_upload_perftest.cc',
],
},
{
# GN version: //gpu:gl_tests
'target_name': 'gl_tests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'<(angle_path)/src/angle.gyp:translator',
'../ui/gfx/gfx.gyp:gfx',
'../ui/gfx/gfx.gyp:gfx_test_support',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gl/gl.gyp:gl',
'command_buffer/command_buffer.gyp:gles2_utils',
'command_buffer_client',
'command_buffer_common',
'command_buffer_service',
'gpu',
'gpu_unittest_utils',
'gles2_implementation',
'gles2_cmd_helper',
'gles2_c_lib',
#'gl_unittests',
],
'defines': [
'GL_GLEXT_PROTOTYPES',
],
'sources': [
# Note: sources list duplicated in GN build.
'command_buffer/tests/compressed_texture_test.cc',
'command_buffer/tests/gl_bind_uniform_location_unittest.cc',
'command_buffer/tests/gl_chromium_framebuffer_multisample_unittest.cc',
'command_buffer/tests/gl_chromium_path_rendering_unittest.cc',
'command_buffer/tests/gl_clear_framebuffer_unittest.cc',
'command_buffer/tests/gl_copy_texture_CHROMIUM_unittest.cc',
'command_buffer/tests/gl_depth_texture_unittest.cc',
'command_buffer/tests/gl_gpu_memory_buffer_unittest.cc',
'command_buffer/tests/gl_lose_context_chromium_unittest.cc',
'command_buffer/tests/gl_manager.cc',
'command_buffer/tests/gl_manager.h',
'command_buffer/tests/gl_pointcoord_unittest.cc',
'command_buffer/tests/gl_program_unittest.cc',
'command_buffer/tests/gl_query_unittest.cc',
'command_buffer/tests/gl_readback_unittest.cc',
'command_buffer/tests/gl_shared_resources_unittest.cc',
'command_buffer/tests/gl_stream_draw_unittest.cc',
'command_buffer/tests/gl_test_utils.cc',
'command_buffer/tests/gl_test_utils.h',
'command_buffer/tests/gl_tests_main.cc',
'command_buffer/tests/gl_texture_mailbox_unittest.cc',
'command_buffer/tests/gl_texture_storage_unittest.cc',
'command_buffer/tests/gl_unittest.cc',
'command_buffer/tests/gl_unittests_android.cc',
'command_buffer/tests/gl_virtual_contexts_unittest.cc',
'command_buffer/tests/occlusion_query_unittest.cc',
],
'conditions': [
['OS == "android"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
['OS == "win"', {
'dependencies': [
'../third_party/angle/src/angle.gyp:libEGL',
'../third_party/angle/src/angle.gyp:libGLESv2',
],
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
# GN version: //gpu:test_support
'target_name': 'gpu_unittest_utils',
'type': 'static_library',
'dependencies': [
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/khronos/khronos.gyp:khronos_headers',
'../ui/gl/gl.gyp:gl_unittest_utils',
'gpu',
],
'include_dirs': [
'..',
],
'sources': [
'command_buffer/client/gles2_interface_stub.cc',
'command_buffer/client/gles2_interface_stub.h',
'command_buffer/service/error_state_mock.cc',
'command_buffer/service/gles2_cmd_decoder_mock.cc',
],
},
],
'conditions': [
['component=="static_library"', {
'targets': [
{
# GN version: //gpu/command_buffer/service:disk_cache_proto
'target_name': 'disk_cache_proto',
'type': 'static_library',
'sources': [ 'command_buffer/service/disk_cache_proto.proto' ],
'variables': {
'proto_in_dir': 'command_buffer/service',
'proto_out_dir': 'gpu/command_buffer/service',
},
'includes': [ '../build/protoc.gypi' ],
},
{
# GN version: //gpu
'target_name': 'gpu',
'type': 'none',
'dependencies': [
'command_buffer_client',
'command_buffer_common',
'command_buffer_service',
'gles2_cmd_helper',
'gpu_config',
'gpu_ipc',
],
'sources': [
'gpu_export.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# GN version: //gpu/command_buffer/common
'target_name': 'command_buffer_common',
'type': 'static_library',
'includes': [
'command_buffer_common.gypi',
],
'dependencies': [
'../base/base.gyp:base',
'command_buffer/command_buffer.gyp:gles2_utils',
],
'export_dependent_settings': [
'../base/base.gyp:base',
],
},
{
# Library helps make GLES2 command buffers.
# GN version: //gpu/command_buffer/client:gles2_cmd_helper
'target_name': 'gles2_cmd_helper',
'type': 'static_library',
'includes': [
'gles2_cmd_helper.gypi',
],
'dependencies': [
'command_buffer_client',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# GN version: //gpu/command_buffer/client
'target_name': 'command_buffer_client',
'type': 'static_library',
'includes': [
'command_buffer_client.gypi',
],
'dependencies': [
'command_buffer_common',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# GN version: //gpu/command_buffer/service
'target_name': 'command_buffer_service',
'type': 'static_library',
'includes': [
'command_buffer_service.gypi',
'../build/android/increase_size_for_speed.gypi',
# Disable LTO due to ELF section name out of range
# crbug.com/422251
'../build/android/disable_lto.gypi',
],
'dependencies': [
'command_buffer_common',
'disk_cache_proto',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# GN version: //gpu/ipc
'target_name': 'gpu_ipc',
'type': 'static_library',
'includes': [
'gpu_ipc.gypi',
],
'dependencies': [
'command_buffer_common',
],
},
{
'target_name': 'gpu_config',
'type': 'static_library',
'includes': [
'gpu_config.gypi',
],
},
],
},
{ # component != static_library
'targets': [
{
# GN version: //gpu/command_buffer/service:disk_cache_proto
'target_name': 'disk_cache_proto',
'type': 'static_library',
'sources': [ 'command_buffer/service/disk_cache_proto.proto' ],
'variables': {
'proto_in_dir': 'command_buffer/service',
'proto_out_dir': 'gpu/command_buffer/service',
},
'includes': [ '../build/protoc.gypi' ],
},
{
# GN version: //gpu
'target_name': 'gpu',
'type': 'shared_library',
'includes': [
'command_buffer_client.gypi',
'command_buffer_common.gypi',
'command_buffer_service.gypi',
'gles2_cmd_helper.gypi',
'gpu_config.gypi',
'gpu_ipc.gypi',
'../build/android/increase_size_for_speed.gypi',
],
'defines': [
'GPU_IMPLEMENTATION',
],
'sources': [
'gpu_export.h',
],
'dependencies': [
'../base/base.gyp:base',
'command_buffer/command_buffer.gyp:gles2_utils',
'disk_cache_proto',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# GN version: //gpu/command_buffer/common
'target_name': 'command_buffer_common',
'type': 'none',
'dependencies': [
'gpu',
],
},
{
# Library helps make GLES2 command buffers.
# GN version: //gpu/command_buffer/client:gles2_cmd_helper
'target_name': 'gles2_cmd_helper',
'type': 'none',
'dependencies': [
'gpu',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
# GN version: //gpu/command_buffer/client
'target_name': 'command_buffer_client',
'type': 'none',
'dependencies': [
'gpu',
],
},
{
# GN version: //gpu/command_buffer/service
'target_name': 'command_buffer_service',
'type': 'none',
'dependencies': [
'gpu',
],
},
{
# GN version: //gpu/ipc
'target_name': 'gpu_ipc',
'type': 'none',
'dependencies': [
'gpu',
],
},
],
}],
['disable_nacl!=1 and OS=="win" and target_arch=="ia32"', {
'targets': [
{
'target_name': 'command_buffer_common_win64',
'type': 'static_library',
'variables': {
'nacl_win64_target': 1,
},
'includes': [
'command_buffer_common.gypi',
],
'dependencies': [
'../base/base.gyp:base_win64',
],
'defines': [
'<@(nacl_win64_defines)',
'GPU_IMPLEMENTATION',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
},
{
'target_name': 'gpu_ipc_win64',
'type': 'static_library',
'variables': {
'nacl_win64_target': 1,
},
'includes': [
'gpu_ipc.gypi',
],
'dependencies': [
'../base/base.gyp:base_win64',
'../ipc/ipc.gyp:ipc_win64',
'command_buffer_common_win64',
],
'defines': [
'<@(nacl_win64_defines)',
'GPU_IMPLEMENTATION',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
},
],
}],
['OS == "android"', {
'targets': [
{
'target_name': 'gl_tests_apk',
'type': 'none',
'dependencies': [
'gl_tests',
],
'variables': {
'test_suite_name': 'gl_tests',
},
'includes': [
'../build/apk_test.gypi',
],
},
{
'target_name': 'gpu_unittests_apk',
'type': 'none',
'dependencies': [
'gpu_unittests',
],
'variables': {
'test_suite_name': 'gpu_unittests',
},
'includes': [ '../build/apk_test.gypi' ],
},
],
}],
['OS == "win"', {
'targets': [
{
# TODO(kbr): port this target to the GN build.
'target_name': 'angle_end2end_tests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
],
'includes':
[
'../third_party/angle/build/common_defines.gypi',
'../third_party/angle/tests/angle_end2end_tests.gypi',
],
'sources':
[
'angle_end2end_tests_main.cc',
],
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'gpu_unittests_run',
'type': 'none',
'dependencies': [
'gpu_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'gpu_unittests.isolate',
],
},
],
}],
],
}
| 35.584034
| 94
| 0.582399
|
535a378abb76d7d35bfbce9262745ab62b54ac5c
| 3,637
|
py
|
Python
|
openpeerpower/components/speedtestdotnet/config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
openpeerpower/components/speedtestdotnet/config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
openpeerpower/components/speedtestdotnet/config_flow.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Config flow for Speedtest.net."""
import voluptuous as vol
from openpeerpower import config_entries
from openpeerpower.const import CONF_MONITORED_CONDITIONS, CONF_SCAN_INTERVAL
from openpeerpower.core import callback
from . import server_id_valid
from .const import (
CONF_MANUAL,
CONF_SERVER_ID,
CONF_SERVER_NAME,
DEFAULT_NAME,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SERVER,
DOMAIN,
)
class SpeedTestFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Speedtest.net config flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return SpeedTestOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is None:
return self.async_show_form(step_id="user")
return self.async_create_entry(title=DEFAULT_NAME, data=user_input)
async def async_step_import(self, import_config):
"""Import from config."""
if (
CONF_SERVER_ID in import_config
and not await self.opp.async_add_executor_job(
server_id_valid, import_config[CONF_SERVER_ID]
)
):
return self.async_abort(reason="wrong_server_id")
import_config[CONF_SCAN_INTERVAL] = int(
import_config[CONF_SCAN_INTERVAL].total_seconds() / 60
)
import_config.pop(CONF_MONITORED_CONDITIONS)
return await self.async_step_user(user_input=import_config)
class SpeedTestOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle SpeedTest options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
self._servers = {}
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if user_input is not None:
server_name = user_input[CONF_SERVER_NAME]
if server_name != "*Auto Detect":
server_id = self._servers[server_name]["id"]
user_input[CONF_SERVER_ID] = server_id
else:
user_input[CONF_SERVER_ID] = None
return self.async_create_entry(title="", data=user_input)
self._servers = self.opp.data[DOMAIN].servers
server = []
if self.config_entry.options.get(
CONF_SERVER_ID
) and not self.config_entry.options.get(CONF_SERVER_NAME):
server = [
key
for (key, value) in self._servers.items()
if value.get("id") == self.config_entry.options[CONF_SERVER_ID]
]
server_name = server[0] if server else DEFAULT_SERVER
options = {
vol.Optional(
CONF_SERVER_NAME,
default=self.config_entry.options.get(CONF_SERVER_NAME, server_name),
): vol.In(self._servers.keys()),
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): int,
vol.Optional(
CONF_MANUAL, default=self.config_entry.options.get(CONF_MANUAL, False)
): bool,
}
return self.async_show_form(
step_id="init", data_schema=vol.Schema(options), errors=errors
)
| 32.185841
| 86
| 0.630465
|
78f1c3ca6a0e1b43b42bb7df860cb0277a492f62
| 1,334
|
py
|
Python
|
src/pykeen/datasets/db100k.py
|
nhutnamhcmus/pykeen
|
62d4f075fbd39135d6a5c8677d95e1ac46f8318f
|
[
"MIT"
] | 750
|
2020-06-23T08:36:34.000Z
|
2022-03-30T22:53:18.000Z
|
src/pykeen/datasets/db100k.py
|
nhutnamhcmus/pykeen
|
62d4f075fbd39135d6a5c8677d95e1ac46f8318f
|
[
"MIT"
] | 691
|
2020-06-22T11:56:28.000Z
|
2022-03-31T16:07:32.000Z
|
src/pykeen/datasets/db100k.py
|
nhutnamhcmus/pykeen
|
62d4f075fbd39135d6a5c8677d95e1ac46f8318f
|
[
"MIT"
] | 122
|
2020-06-26T14:37:56.000Z
|
2022-03-23T08:25:22.000Z
|
# -*- coding: utf-8 -*-
"""The DB100K dataset."""
from docdata import parse_docdata
from .base import UnpackedRemoteDataset
BASE_URL = "https://raw.githubusercontent.com/iieir-km/ComplEx-NNE_AER/master/datasets/DB100K"
__all__ = [
"DB100K",
]
@parse_docdata
class DB100K(UnpackedRemoteDataset):
"""The DB100K dataset from [ding2018]_.
---
name: DB100K
citation:
author: Ding
year: 2018
link: https://arxiv.org/abs/1805.02408
github: iieir-km/ComplEx-NNE_AER
statistics:
entities: 99604
relations: 470
training: 597482
testing: 50000
validation: 49997
triples: 697479
"""
def __init__(self, create_inverse_triples: bool = False, **kwargs):
"""Initialize the DB100K small dataset.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.UnpackedRemoteDataset`.
"""
super().__init__(
training_url=f"{BASE_URL}/_train.txt",
testing_url=f"{BASE_URL}/_test.txt",
validation_url=f"{BASE_URL}/_valid.txt",
create_inverse_triples=create_inverse_triples,
**kwargs,
)
if __name__ == "__main__":
DB100K.cli()
| 25.169811
| 103
| 0.636432
|
d751fb661305e11553f163240ea881d3254a5beb
| 4,422
|
py
|
Python
|
test/test_container_authenticator.py
|
IBM/python-sdk-core
|
8bbeca9480a022c9f2f752a2a3ca4e22779aebe9
|
[
"Apache-2.0"
] | 14
|
2019-03-13T19:13:48.000Z
|
2021-11-16T11:05:30.000Z
|
test/test_container_authenticator.py
|
IBM/python-sdk-core
|
8bbeca9480a022c9f2f752a2a3ca4e22779aebe9
|
[
"Apache-2.0"
] | 140
|
2019-03-14T19:49:40.000Z
|
2022-03-21T11:14:03.000Z
|
test/test_container_authenticator.py
|
IBM/python-sdk-core
|
8bbeca9480a022c9f2f752a2a3ca4e22779aebe9
|
[
"Apache-2.0"
] | 22
|
2019-05-08T14:50:08.000Z
|
2022-01-13T10:32:06.000Z
|
# pylint: disable=missing-docstring
import pytest
from ibm_cloud_sdk_core.authenticators import ContainerAuthenticator, Authenticator
def test_container_authenticator():
authenticator = ContainerAuthenticator(iam_profile_name='iam-user-123')
assert authenticator is not None
assert authenticator.authentication_type() == Authenticator.AUTHTYPE_CONTAINER
assert authenticator.token_manager.cr_token_filename is None
assert authenticator.token_manager.iam_profile_name == 'iam-user-123'
assert authenticator.token_manager.iam_profile_id is None
assert authenticator.token_manager.client_id is None
assert authenticator.token_manager.client_secret is None
assert authenticator.token_manager.disable_ssl_verification is False
assert authenticator.token_manager.headers is None
assert authenticator.token_manager.proxies is None
assert authenticator.token_manager.scope is None
authenticator.set_cr_token_filename('path/to/token')
assert authenticator.token_manager.cr_token_filename == 'path/to/token'
# Set the IAM profile to None to trigger a validation which will fail,
# because both of the profile and ID are None.
with pytest.raises(ValueError) as err:
authenticator.set_iam_profile_name(None)
assert str(
err.value) == 'At least one of iam_profile_name or iam_profile_id must be specified.'
authenticator.set_iam_profile_id('iam-id-123')
assert authenticator.token_manager.iam_profile_id == 'iam-id-123'
authenticator.set_iam_profile_name('iam-user-123')
assert authenticator.token_manager.iam_profile_name == 'iam-user-123'
authenticator.set_client_id_and_secret('tom', 'jerry')
assert authenticator.token_manager.client_id == 'tom'
assert authenticator.token_manager.client_secret == 'jerry'
authenticator.set_scope('scope1 scope2 scope3')
assert authenticator.token_manager.scope == 'scope1 scope2 scope3'
with pytest.raises(TypeError) as err:
authenticator.set_headers('dummy')
assert str(err.value) == 'headers must be a dictionary'
authenticator.set_headers({'dummy': 'headers'})
assert authenticator.token_manager.headers == {'dummy': 'headers'}
with pytest.raises(TypeError) as err:
authenticator.set_proxies('dummy')
assert str(err.value) == 'proxies must be a dictionary'
authenticator.set_proxies({'dummy': 'proxies'})
assert authenticator.token_manager.proxies == {'dummy': 'proxies'}
def test_disable_ssl_verification():
authenticator = ContainerAuthenticator(
iam_profile_name='iam-user-123', disable_ssl_verification=True)
assert authenticator.token_manager.disable_ssl_verification is True
authenticator.set_disable_ssl_verification(False)
assert authenticator.token_manager.disable_ssl_verification is False
def test_invalid_disable_ssl_verification_type():
with pytest.raises(TypeError) as err:
authenticator = ContainerAuthenticator(
iam_profile_name='iam-user-123', disable_ssl_verification='True')
assert str(err.value) == 'disable_ssl_verification must be a bool'
authenticator = ContainerAuthenticator(iam_profile_name='iam-user-123')
assert authenticator.token_manager.disable_ssl_verification is False
with pytest.raises(TypeError) as err:
authenticator.set_disable_ssl_verification('True')
assert str(err.value) == 'status must be a bool'
def test_container_authenticator_with_scope():
authenticator = ContainerAuthenticator(
iam_profile_name='iam-user-123', scope='scope1 scope2')
assert authenticator is not None
assert authenticator.token_manager.scope == 'scope1 scope2'
def test_authenticator_validate_failed():
with pytest.raises(ValueError) as err:
ContainerAuthenticator(None)
assert str(
err.value) == 'At least one of iam_profile_name or iam_profile_id must be specified.'
with pytest.raises(ValueError) as err:
ContainerAuthenticator(
iam_profile_name='iam-user-123', client_id='my_client_id')
assert str(
err.value) == 'Both client_id and client_secret should be initialized.'
with pytest.raises(ValueError) as err:
ContainerAuthenticator(
iam_profile_name='iam-user-123', client_secret='my_client_secret')
assert str(
err.value) == 'Both client_id and client_secret should be initialized.'
| 41.716981
| 93
| 0.758254
|
e823bd50d6fc17b28e60615fc95c33360669b6a8
| 16,805
|
py
|
Python
|
corehq/apps/reports/filters/users.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/reports/filters/users.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/reports/filters/users.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy, ugettext_noop
from memoized import memoized
from corehq.apps.commtrack.models import SQLLocation
from corehq.apps.domain.models import Domain
from corehq.apps.es import filters
from corehq.apps.es import users as user_es
from corehq.apps.groups.models import Group
from corehq.apps.locations.permissions import user_can_access_other_user
from corehq.apps.users.cases import get_wrapped_owner
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.toggles import FILTER_ON_GROUPS_AND_LOCATIONS
from .. import util
from ..analytics.esaccessors import get_group_stubs, get_user_stubs
from ..models import HQUserType
from .base import (
BaseMultipleOptionFilter,
BaseReportFilter,
BaseSingleOptionFilter,
)
class UserOrGroupFilter(BaseSingleOptionFilter):
slug = "view_by"
label = ugettext_lazy("View by Users or Groups")
default_text = ugettext_lazy("Users")
options = [('groups', ugettext_lazy('Groups'))]
class UserTypeFilter(BaseReportFilter):
# note, don't use this as a guideline for anything.
slug = "ufilter"
label = ugettext_lazy("User Type")
template = "reports/filters/filter_users.html"
@property
def filter_context(self):
toggle, show_filter = self.get_user_filter(self.request)
return {
'show_user_filter': show_filter,
'toggle_users': toggle,
}
@classmethod
def get_user_filter(cls, request):
return get_user_toggle(request)
class SelectMobileWorkerFilter(BaseSingleOptionFilter):
slug = 'individual'
label = ugettext_lazy("Select Mobile Worker")
default_text = ugettext_lazy("All Mobile Workers")
@property
def filter_context(self):
user_filter, _ = UserTypeFilter.get_user_filter(self.request)
context = super(SelectMobileWorkerFilter, self).filter_context
context['select'].update({
'default_text': self.get_default_text(user_filter),
})
return context
@property
def options(self):
users = util.user_list(self.domain)
return [(user.user_id,
"%s%s" % (user.username_in_report, "" if user.is_active else " (Inactive)"))
for user in users]
@classmethod
def get_default_text(cls, user_filter):
default = cls.default_text
if user_filter[HQUserType.ADMIN].show or \
user_filter[HQUserType.DEMO_USER].show or user_filter[HQUserType.UNKNOWN].show:
default = _('%s & Others') % _(default)
return default
class SelectCaseOwnerFilter(SelectMobileWorkerFilter):
label = ugettext_noop("Select Case Owner")
default_text = ugettext_noop("All Case Owners")
@property
def options(self):
options = [(group._id, "%s (Group)" % group.name) for group in Group.get_case_sharing_groups(self.domain)]
user_options = super(SelectCaseOwnerFilter, self).options
options.extend(user_options)
return options
class BaseGroupedMobileWorkerFilter(BaseSingleOptionFilter):
"""
This is a little field for use when a client really wants to filter by
individuals from a specific group. Since by default we still want to
show all the data, no filtering is done unless the special group filter
is selected.
"""
group_names = []
@property
def options(self):
options = []
for group_name in self.group_names:
group = Group.by_name(self.domain, group_name)
if group:
users = group.get_users(is_active=True, only_commcare=True)
options.extend([(u.user_id, u.username_in_report) for u in users])
return options
class EmwfUtils(object):
def __init__(self, domain):
self.domain = domain
def user_tuple(self, u):
user = util._report_user_dict(u)
uid = "u__%s" % user['user_id']
is_active = False
if u['doc_type'] == 'WebUser':
name = "%s [Web User]" % user['username_in_report']
elif user['is_active']:
is_active = True
name = "%s [Active Mobile Worker]" % user['username_in_report']
else:
name = "%s [Deactivated Mobile Worker]" % user['username_in_report']
return uid, name, is_active
def reporting_group_tuple(self, g):
return "g__%s" % g['_id'], '%s [group]' % g['name']
def user_type_tuple(self, t):
return (
"t__%s" % t,
"[%s]" % HQUserType.human_readable[t]
)
def location_tuple(self, location):
return ("l__%s" % location.location_id,
'%s [location]' % location.get_path_display())
@property
@memoized
def static_options(self):
static_options = [("t__0", _("[Active Mobile Workers]"))]
types = ['DEACTIVATED', 'DEMO_USER', 'ADMIN', 'WEB', 'UNKNOWN']
if Domain.get_by_name(self.domain).commtrack_enabled:
types.append('COMMTRACK')
for t in types:
user_type = getattr(HQUserType, t)
static_options.append(self.user_type_tuple(user_type))
return static_options
def _group_to_choice_tuple(self, group):
return self.reporting_group_tuple(group)
def id_to_choice_tuple(self, id_):
for static_id, text in self.static_options:
if (id_ == static_id[3:] and static_id[:3] == "t__") or id_ == static_id:
return (static_id, text)
owner = get_wrapped_owner(id_, support_deleted=True)
if isinstance(owner, Group):
ret = self._group_to_choice_tuple(owner)
elif isinstance(owner, SQLLocation):
ret = self.location_tuple(owner)
elif isinstance(owner, (CommCareUser, WebUser)):
ret = self.user_tuple(owner)
elif owner is None:
return None
else:
raise Exception("Unexpcted id: {}".format(id_))
if hasattr(owner, 'is_deleted'):
if (callable(owner.is_deleted) and owner.is_deleted()) or owner.is_deleted == True:
# is_deleted may be an attr or callable depending on owner type
ret = (ret[0], 'Deleted - ' + ret[1])
return ret
class UsersUtils(EmwfUtils):
def user_tuple(self, u):
user = util._report_user_dict(u)
uid = "%s" % user['user_id']
name = "%s" % user['username_in_report']
return (uid, name)
class ExpandedMobileWorkerFilter(BaseMultipleOptionFilter):
"""
To get raw filter results:
mobile_user_and_group_slugs = request.GET.getlist(ExpandedMobileWorkerFilter.slug)
user_ids = emwf.selected_user_ids(mobile_user_and_group_slugs)
user_types = emwf.selected_user_types(mobile_user_and_group_slugs)
group_ids = emwf.selected_group_ids(mobile_user_and_group_slugs)
"""
slug = "emw"
label = ugettext_lazy("User(s)")
default_options = None
placeholder = ugettext_lazy("Add users and groups to filter this report.")
is_cacheable = False
options_url = 'emwf_options_all_users'
filter_help_inline = ugettext_lazy(mark_safe(
'See <a href="https://confluence.dimagi.com/display/commcarepublic/Report+and+Export+Filters"'
' target="_blank"> Filter Definitions</a>.'
))
search_help_inline = ugettext_lazy(mark_safe(
'To quick search for a '
'<a href="https://confluence.dimagi.com/display/commcarepublic/Exact+Search+for+Locations" '
'target="_blank">location</a>, write your query as "parent"/descendant.'
))
@property
@memoized
def utils(self):
return EmwfUtils(self.domain)
@staticmethod
def selected_user_ids(mobile_user_and_group_slugs):
return [u[3:] for u in mobile_user_and_group_slugs if u.startswith("u__")]
@staticmethod
def selected_user_types(mobile_user_and_group_slugs):
"""
usage: ``HQUserType.DEMO_USER in selected_user_types``
"""
return [int(t[3:]) for t in mobile_user_and_group_slugs
if t.startswith("t__") and t[3:].isdigit()]
@classmethod
def selected_group_ids(cls, mobile_user_and_group_slugs):
return cls.selected_reporting_group_ids(mobile_user_and_group_slugs)
@staticmethod
def selected_reporting_group_ids(mobile_user_and_group_slugs):
return [g[3:] for g in mobile_user_and_group_slugs if g.startswith("g__")]
@staticmethod
def selected_location_ids(mobile_user_and_group_slugs):
return [l[3:] for l in mobile_user_and_group_slugs if l.startswith("l__")]
@staticmethod
def show_all_mobile_workers(mobile_user_and_group_slugs):
return 't__0' in mobile_user_and_group_slugs
@staticmethod
def no_filters_selected(mobile_user_and_group_slugs):
return not any(mobile_user_and_group_slugs)
def _get_assigned_locations_default(self):
user_locations = self.request.couch_user.get_sql_locations(self.domain)
return list(map(self.utils.location_tuple, user_locations))
def get_default_selections(self):
if not self.request.can_access_all_locations:
return self._get_assigned_locations_default()
defaults = [('t__0', _("[Active Mobile Workers]")), ('t__5', _("[Deactivated Mobile Workers]"))]
if self.request.project.commtrack_enabled:
defaults.append(self.utils.user_type_tuple(HQUserType.COMMTRACK))
return defaults
@property
@memoized
def selected(self):
selected_ids = self.request.GET.getlist(self.slug)
if not selected_ids:
return [{'id': url_id, 'text': text}
for url_id, text in self.get_default_selections()]
selected = (self.selected_static_options(selected_ids) +
self._selected_user_entries(selected_ids) +
self._selected_group_entries(selected_ids) +
self._selected_location_entries(selected_ids))
return [
{'id': entry[0], 'text': entry[1]} if len(entry) == 2 else
{'id': entry[0], 'text': entry[1], 'is_active': entry[2]} for entry in selected
]
def selected_static_options(self, mobile_user_and_group_slugs):
return [option for option in self.utils.static_options
if option[0] in mobile_user_and_group_slugs]
def _selected_user_entries(self, mobile_user_and_group_slugs):
user_ids = self.selected_user_ids(mobile_user_and_group_slugs)
if not user_ids:
return []
results = get_user_stubs(user_ids)
return [self.utils.user_tuple(hit) for hit in results]
def _selected_groups_query(self, mobile_user_and_group_slugs):
group_ids = self.selected_group_ids(mobile_user_and_group_slugs)
if not group_ids:
return []
return get_group_stubs(group_ids)
def _selected_group_entries(self, mobile_user_and_group_slugs):
groups = self._selected_groups_query(mobile_user_and_group_slugs)
return [self.utils.reporting_group_tuple(group)
for group in groups
if group.get("reporting", False)]
def _selected_location_entries(self, mobile_user_and_group_slugs):
location_ids = self.selected_location_ids(mobile_user_and_group_slugs)
if not location_ids:
return []
return list(map(self.utils.location_tuple,
SQLLocation.objects.filter(location_id__in=location_ids)))
@property
def filter_context(self):
context = super(ExpandedMobileWorkerFilter, self).filter_context
url = reverse(self.options_url, args=[self.domain])
context.update({'endpoint': url})
context.update({'filter_help_inline': self.filter_help_inline})
if self.request.project.uses_locations:
context.update({'search_help_inline': self.search_help_inline})
return context
@classmethod
def user_es_query(cls, domain, mobile_user_and_group_slugs, request_user):
# The queryset returned by this method is location-safe
q = user_es.UserES().domain(domain)
if ExpandedMobileWorkerFilter.no_filters_selected(mobile_user_and_group_slugs):
return q.show_inactive()
user_ids = cls.selected_user_ids(mobile_user_and_group_slugs)
user_types = cls.selected_user_types(mobile_user_and_group_slugs)
group_ids = cls.selected_group_ids(mobile_user_and_group_slugs)
location_ids = cls.selected_location_ids(mobile_user_and_group_slugs)
user_type_filters = []
if HQUserType.ADMIN in user_types:
user_type_filters.append(user_es.admin_users())
if HQUserType.UNKNOWN in user_types:
user_type_filters.append(user_es.unknown_users())
if HQUserType.WEB in user_types:
user_type_filters.append(user_es.web_users())
if HQUserType.DEMO_USER in user_types:
user_type_filters.append(user_es.demo_users())
if HQUserType.ACTIVE in user_types and HQUserType.DEACTIVATED in user_types:
q = q.show_inactive()
elif HQUserType.DEACTIVATED in user_types:
q = q.show_only_inactive()
if not request_user.has_permission(domain, 'access_all_locations'):
cls._verify_users_are_accessible(domain, request_user, user_ids)
return q.OR(
filters.term("_id", user_ids),
user_es.location(list(SQLLocation.active_objects
.get_locations_and_children(location_ids)
.accessible_to_user(domain, request_user)
.location_ids())),
)
if HQUserType.ACTIVE in user_types or HQUserType.DEACTIVATED in user_types:
# return all users with selected user_types
user_type_filters.append(user_es.mobile_users())
return q.OR(*user_type_filters)
# return matching user types and exact matches
location_ids = list(SQLLocation.active_objects
.get_locations_and_children(location_ids)
.location_ids())
group_id_filter = filters.term("__group_ids", group_ids)
if FILTER_ON_GROUPS_AND_LOCATIONS.enabled(domain) and group_ids and location_ids:
group_and_location_filter = filters.AND(
group_id_filter,
user_es.location(location_ids),
)
else:
group_and_location_filter = filters.OR(
group_id_filter,
user_es.location(location_ids),
)
id_filter = filters.OR(
filters.term("_id", user_ids),
group_and_location_filter,
)
if user_type_filters:
return q.OR(
id_filter,
group_and_location_filter,
filters.OR(*user_type_filters),
)
return q.filter(id_filter)
@staticmethod
def _verify_users_are_accessible(domain, request_user, user_ids):
# This function would be very slow if called with many user ids
for user_id in user_ids:
other_user = CommCareUser.get(user_id)
if not user_can_access_other_user(domain, request_user, other_user):
raise PermissionDenied("One or more users are not accessible")
@property
def options(self):
return [('t__0', _("[Active Mobile Workers]"))]
@classmethod
def for_user(cls, user_id):
return {
cls.slug: 'u__%s' % user_id
}
@classmethod
def for_reporting_group(cls, group_id):
return {
cls.slug: 'g__%s' % group_id
}
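    # For illustration (hypothetical ids): the filter receives slugs such as
    # ['u__a1b2c3', 'g__d4e5f6', 't__0'], where the prefix marks the kind of
    # selection (u = user, g = group, t = user type) that the selected_*_ids
    # helpers used above unpack.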
def get_user_toggle(request):
ufilter = group = individual = show_commtrack = None
try:
request_obj = request.POST if request.method == 'POST' else request.GET
if request_obj.get('ufilter', ''):
ufilter = request_obj.getlist('ufilter')
group = request_obj.get('group', '')
individual = request_obj.get('individual', '')
show_commtrack = request.project.commtrack_enabled
except (KeyError, AttributeError):
pass
show_filter = True
toggle = HQUserType.commtrack_defaults() if show_commtrack else HQUserType.use_defaults()
if ufilter and not (group or individual):
toggle = HQUserType.use_filter(ufilter)
elif group or individual:
show_filter = False
return toggle, show_filter
| 37.595078
| 114
| 0.660458
|
4d68a97367bfa24b2b32f42773eb76752ac2bc68
| 2,700
|
py
|
Python
|
tests/storage/sqlite/merge_reader.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
tests/storage/sqlite/merge_reader.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
tests/storage/sqlite/merge_reader.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the merge reader for SQLite storage files."""
from __future__ import unicode_literals
import os
import unittest
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.lib import definitions
from plaso.storage.sqlite import merge_reader
from plaso.storage.sqlite import writer
from tests import test_lib as shared_test_lib
from tests.storage import test_lib
class SQLiteStorageMergeReaderTest(test_lib.StorageTestCase):
"""Tests for the SQLite-based storage file reader for merging."""
# pylint: disable=protected-access
def _CreateTaskStorageFile(self, session, path):
"""Creates a task storage file for testing.
Args:
session (Session): session the task storage is part of.
path (str): path to the task storage file that should be merged.
"""
task = tasks.Task(session_identifier=session.identifier)
test_events = self._CreateTestEvents()
storage_file = writer.SQLiteStorageFileWriter(
session, path, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
storage_file.Open()
for event in test_events:
storage_file.AddEvent(event)
storage_file.Close()
def testReadStorageMetadata(self):
"""Tests the _ReadStorageMetadata function."""
session = sessions.Session()
with shared_test_lib.TempDirectory() as temp_directory:
task_storage_path = os.path.join(temp_directory, 'task.sqlite')
self._CreateTaskStorageFile(session, task_storage_path)
session_storage_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = writer.SQLiteStorageFileWriter(
session, session_storage_path)
test_reader = merge_reader.SQLiteStorageMergeReader(
storage_writer, task_storage_path)
test_reader._Open()
test_reader._ReadStorageMetadata()
test_reader._Close()
def testMergeAttributeContainers(self):
"""Tests the MergeAttributeContainers function."""
session = sessions.Session()
with shared_test_lib.TempDirectory() as temp_directory:
task_storage_path = os.path.join(temp_directory, 'task.sqlite')
self._CreateTaskStorageFile(session, task_storage_path)
session_storage_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = writer.SQLiteStorageFileWriter(
session, session_storage_path)
test_reader = merge_reader.SQLiteStorageMergeReader(
storage_writer, task_storage_path)
storage_writer.Open()
result = test_reader.MergeAttributeContainers()
self.assertTrue(result)
storage_writer.Close()
if __name__ == '__main__':
unittest.main()
| 30
| 77
| 0.742222
|
b003ed4bdca432ceee1b619212cc3d99b62bf468
| 3,707
|
py
|
Python
|
software/multifluids_icferst/tests/prescribed_normal_flow_landslide/slide_movement.py
|
msc-acse/acse-9-independent-research-project-Wade003
|
cfcba990d52ccf535171cf54c0a91b184db6f276
|
[
"MIT"
] | 2
|
2020-05-11T02:39:46.000Z
|
2020-05-11T03:08:38.000Z
|
software/multifluids_icferst/tests/prescribed_normal_flow_landslide/slide_movement.py
|
msc-acse/acse-9-independent-research-project-Wade003
|
cfcba990d52ccf535171cf54c0a91b184db6f276
|
[
"MIT"
] | null | null | null |
software/multifluids_icferst/tests/prescribed_normal_flow_landslide/slide_movement.py
|
msc-acse/acse-9-independent-research-project-Wade003
|
cfcba990d52ccf535171cf54c0a91b184db6f276
|
[
"MIT"
] | 2
|
2020-05-21T22:50:19.000Z
|
2020-10-28T17:16:31.000Z
|
#!/usr/bin/env python
import numpy as np
import math
import argparse
import pylab
import fileinput
slide_start_x = 112500
R = 150000 # slide total runout
U_max = 35.0
T = (math.pi / 2.0) * ( R / U_max)
max_h = 144.0
L = 223000
S = 1000
Ra = 75000
Rc = 0
Rd = 75000
Ta = math.pi*Ra / (2.0 * U_max)
Tc = Rc / U_max
Td = math.pi*Rd / (2.0 * U_max)
cd = 0.0019
def main():
parser = argparse.ArgumentParser(
prog="test slide function",
description="""Test the prescribed slide function"""
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose output: mainly progress reports.",
default=False
)
parser.add_argument(
'-t',
'--time',
type=float,
default=0.1,
help="Which time to use"
)
parser.add_argument(
'-a',
'--animation',
type=float,
default=0,
help="Do animation of slide dynamics from 0 to time specified"
)
parser.add_argument(
'node_file',
metavar='node_file',
        help='The node file which provides a list of coords on which to check the function'
)
args = parser.parse_args()
verbose = args.verbose
node_file = args.node_file
time = args.time
anim = args.animation
dt = 100
x_coords = []
slide_height = []
vel = []
i = 0
# parse node file and get list of vertex coordinates
for line in fileinput.input([node_file]):
if (line[0] == '#'):
break
data = line.split()
if (i > 0):
X = [float(data[1])]
x_coords.append(X[0])
        i += 1
params = {
'legend.fontsize': 18,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'font.size' : 18,
'axes.labelsize' : 18,
'text.fontsize' : 18,
'figure.subplot.hspace' : 0.5
}
pylab.rcParams.update(params)
fig = pylab.figure(figsize=(15,8),dpi=90)
ax = fig.add_subplot(111)
if (anim == 0):
anim = time+1
pylab.ioff()
else:
time = 0
pylab.ion()
for t in np.arange(time,anim,dt):
i = 0
vel = []
h = []
shear = []
slide_front = set_slide_front(t)
old_slide_front = set_slide_front(t-dt)
for c in x_coords:
h.append(set_slide_height([x_coords[i]],t,slide_front))
vel.append((h[-1] - set_slide_height([x_coords[i]],t-dt,old_slide_front)) / dt)
shear.append(set_slide_shear([x_coords[i]],t,((slide_front - old_slide_front) / dt),0.0,slide_front))
i = i+1
pylab.plot(x_coords,shear)
pylab.draw()
if (anim == time + 1):
pylab.show()
def set_slide_shear(X,t,u_s,u_w,x_loc):
form = slide_form(X,x_loc)
tau = 0.5 * 1000 * cd * (u_s - u_w)*form
return tau
def set_slide_front(t):
import math
if t > T:
t = T
if t < 0:
t = 0
if t < Ta:
s = Ra*(1-math.cos(U_max/Ra*t))
elif Ta < t < Tc+Ta:
s = Ra + U_max*(t - Ta)
elif Ta+Tc < t < Ta+Tc+Td:
s = Ra+Rc + Rd*math.sin(U_max/Rd * (t - Ta - Tc))
else:
s = R
x_loc = slide_start_x + s
return x_loc
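# In other words, with the module constants above, the slide front follows a
# smooth three-phase run-out: it accelerates from rest as
# s(t) = Ra*(1 - cos(U_max*t/Ra)) until t = Ta (reaching speed U_max), would move
# at constant speed over Rc (skipped here since Rc = 0), then decelerates as
# s(t) = Ra + Rc + Rd*sin(U_max*(t - Ta - Tc)/Rd), stopping at the total runout
# R = Ra + Rc + Rd when t = T.  The returned position is slide_start_x + s(t).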
def set_slide_height(X,t,x_loc):
import math
form = slide_form(X,x_loc)
return max_h * form
def slide_form(X,x_loc):
x_dash = (X[0] - x_loc)
if (-(L+2*S)<x_dash<-(L+S)):
h = math.exp(-(2*(x_dash+S+L)/S)**4)
elif ( -(L+S)<=x_dash<-S):
h = 1.0
elif (-S<=x_dash<0):
h = math.exp(-(2*(x_dash+S)/S)**4)
else:
h = 0
return h
if __name__ == "__main__":
main()
| 21.427746
| 113
| 0.530078
|
4b3a790c44b092ec973ac73c9831061f6994d14a
| 55
|
py
|
Python
|
adapters/__init__.py
|
JVGC/MyFinancesPython
|
5e4ac02ea00c4ddab688dd0093eed3f3fb2ad826
|
[
"MIT"
] | null | null | null |
adapters/__init__.py
|
JVGC/MyFinancesPython
|
5e4ac02ea00c4ddab688dd0093eed3f3fb2ad826
|
[
"MIT"
] | null | null | null |
adapters/__init__.py
|
JVGC/MyFinancesPython
|
5e4ac02ea00c4ddab688dd0093eed3f3fb2ad826
|
[
"MIT"
] | null | null | null |
from .DebtAdapter import *
from .FlaskAdapter import *
| 18.333333
| 27
| 0.781818
|
ec144731dd4bd53b05a17764dcbe7bbde11a77d7
| 376
|
py
|
Python
|
profiles/mono-mac-release/mono-mac-release.py
|
mhutch/bockbuild
|
0d989e2d0259d17d41a195f8d28b3844a4652e7b
|
[
"MIT"
] | null | null | null |
profiles/mono-mac-release/mono-mac-release.py
|
mhutch/bockbuild
|
0d989e2d0259d17d41a195f8d28b3844a4652e7b
|
[
"MIT"
] | null | null | null |
profiles/mono-mac-release/mono-mac-release.py
|
mhutch/bockbuild
|
0d989e2d0259d17d41a195f8d28b3844a4652e7b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python -B -u
import sys
import traceback
sys.path.append('../..')
from MonoReleaseProfile import MonoReleaseProfile
from bockbuild.util.util import *
try:
MonoReleaseProfile().build()
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
error ('%s\n%s' % (str(e), "\n".join (repr(t) for t in traceback.extract_tb(exc_traceback)[-5:])))
| 25.066667
| 99
| 0.718085
|
03e84e5f15b4a1324c2f55c0fa68d22fb569e076
| 5,202
|
py
|
Python
|
onsets_frames_transcription_create_dataset_maps.py
|
treeson-li/onsets_frames_transcription
|
2cebfe738ea23258b3223094ab25d4e130ac2caf
|
[
"Apache-2.0"
] | null | null | null |
onsets_frames_transcription_create_dataset_maps.py
|
treeson-li/onsets_frames_transcription
|
2cebfe738ea23258b3223094ab25d4e130ac2caf
|
[
"Apache-2.0"
] | null | null | null |
onsets_frames_transcription_create_dataset_maps.py
|
treeson-li/onsets_frames_transcription
|
2cebfe738ea23258b3223094ab25d4e130ac2caf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create the tfrecord files necessary for training onsets and frames.
The training files are split in ~20 second chunks by default, the test files
are not split.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import re
import split_audio_and_label_data
from magenta.music import audio_io
from magenta.music import midi_io
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('input_dir', None,
'Directory where the un-zipped MAPS files are.')
tf.app.flags.DEFINE_string('output_dir', './',
'Directory where the two output TFRecord files '
'(train and test) will be placed.')
tf.app.flags.DEFINE_integer('min_length', 5, 'minimum segment length')
tf.app.flags.DEFINE_integer('max_length', 20, 'maximum segment length')
tf.app.flags.DEFINE_integer('sample_rate', 16000, 'desired sample rate')
test_dirs = ['ENSTDkCl/MUS', 'ENSTDkAm/MUS']
train_dirs = [
'AkPnBcht/MUS', 'AkPnBsdf/MUS', 'AkPnCGdD/MUS', 'AkPnStgb/MUS',
'SptkBGAm/MUS', 'SptkBGCl/MUS', 'StbgTGd2/MUS'
]
def filename_to_id(filename):
"""Translate a .wav or .mid path to a MAPS sequence id."""
return re.match(r'.*MUS-(.*)_[^_]+\.\w{3}',
os.path.basename(filename)).group(1)
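# For illustration, a hypothetical MAPS file name such as
# 'MAPS_MUS-chpn_op25_e2_AkPnBcht.wav' loses the prefix up to 'MUS-' and the
# trailing '_<instrument>.<ext>' piece, yielding the sequence id 'chpn_op25_e2'.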
def generate_train_set(exclude_ids):
"""Generate the train TFRecord."""
train_file_pairs = []
for directory in train_dirs:
path = os.path.join(FLAGS.input_dir, directory)
path = os.path.join(path, '*.wav')
wav_files = glob.glob(path)
# find matching mid files
for wav_file in wav_files:
base_name_root, _ = os.path.splitext(wav_file)
mid_file = base_name_root + '.mid'
if filename_to_id(wav_file) not in exclude_ids:
train_file_pairs.append((wav_file, mid_file))
train_output_name = os.path.join(FLAGS.output_dir,
'maps_config2_train.tfrecord')
with tf.python_io.TFRecordWriter(train_output_name) as writer:
for idx, pair in enumerate(train_file_pairs):
print('{} of {}: {}'.format(idx, len(train_file_pairs), pair[0]))
# load the wav data
wav_data = tf.gfile.Open(pair[0], 'rb').read()
# load the midi data and convert to a notesequence
ns = midi_io.midi_file_to_note_sequence(pair[1])
for example in split_audio_and_label_data.process_record(
wav_data, ns, pair[0], FLAGS.min_length, FLAGS.max_length,
FLAGS.sample_rate):
writer.write(example.SerializeToString())
def generate_test_set():
"""Generate the test TFRecord."""
test_file_pairs = []
for directory in test_dirs:
path = os.path.join(FLAGS.input_dir, directory)
path = os.path.join(path, '*.wav')
wav_files = glob.glob(path)
# find matching mid files
for wav_file in wav_files:
base_name_root, _ = os.path.splitext(wav_file)
mid_file = base_name_root + '.mid'
test_file_pairs.append((wav_file, mid_file))
test_output_name = os.path.join(FLAGS.output_dir,
'maps_config2_test.tfrecord')
with tf.python_io.TFRecordWriter(test_output_name) as writer:
for idx, pair in enumerate(test_file_pairs):
print('{} of {}: {}'.format(idx, len(test_file_pairs), pair[0]))
# load the wav data and resample it.
samples = audio_io.load_audio(pair[0], FLAGS.sample_rate)
wav_data = audio_io.samples_to_wav_data(samples, FLAGS.sample_rate)
# load the midi data and convert to a notesequence
ns = midi_io.midi_file_to_note_sequence(pair[1])
example = split_audio_and_label_data.create_example(pair[0], ns, wav_data)
writer.write(example.SerializeToString())
return [filename_to_id(wav) for wav, _ in test_file_pairs]
def main(unused_argv):
test_ids = generate_test_set()
generate_train_set(test_ids)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| 35.630137
| 80
| 0.705498
|
0a3a7e94805d530b5c984c150106a393926e112f
| 841
|
py
|
Python
|
test/functional/rpc_uptime.py
|
bitcoinlimitededition/bitcoinlimitededition
|
befc25e21abf8e02f260474210bd074a4f0b07b2
|
[
"MIT"
] | null | null | null |
test/functional/rpc_uptime.py
|
bitcoinlimitededition/bitcoinlimitededition
|
befc25e21abf8e02f260474210bd074a4f0b07b2
|
[
"MIT"
] | null | null | null |
test/functional/rpc_uptime.py
|
bitcoinlimitededition/bitcoinlimitededition
|
befc25e21abf8e02f260474210bd074a4f0b07b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoinlimitededition Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import BitcoinlimitededitionTestFramework
class UptimeTest(BitcoinlimitededitionTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self._test_uptime()
def _test_uptime(self):
wait_time = 10
self.nodes[0].setmocktime(int(time.time() + wait_time))
assert(self.nodes[0].uptime() >= wait_time)
if __name__ == '__main__':
UptimeTest().main()
| 27.129032
| 76
| 0.722949
|
d1b00503efb287ca5464c6ea31ada1b429e7d66f
| 4,628
|
py
|
Python
|
nets/classifier.py
|
LuckyXie0427/myFasterRCNN
|
0603a6dfdde32be7cec44cf314c3d5b2c5f97add
|
[
"MIT"
] | null | null | null |
nets/classifier.py
|
LuckyXie0427/myFasterRCNN
|
0603a6dfdde32be7cec44cf314c3d5b2c5f97add
|
[
"MIT"
] | null | null | null |
nets/classifier.py
|
LuckyXie0427/myFasterRCNN
|
0603a6dfdde32be7cec44cf314c3d5b2c5f97add
|
[
"MIT"
] | 1
|
2022-03-14T05:29:42.000Z
|
2022-03-14T05:29:42.000Z
|
import warnings
import torch
from torch import nn
from torchvision.ops import RoIPool
warnings.filterwarnings("ignore")
class VGG16RoIHead(nn.Module):
def __init__(self, n_class, roi_size, spatial_scale, classifier):
super(VGG16RoIHead, self).__init__()
self.classifier = classifier
#--------------------------------------#
        # Regression prediction on the RoI-pooled features
#--------------------------------------#
self.cls_loc = nn.Linear(4096, n_class * 4)
#-----------------------------------#
        # Classification on the RoI-pooled features
#-----------------------------------#
self.score = nn.Linear(4096, n_class)
#-----------------------------------#
        # Weight initialization
#-----------------------------------#
normal_init(self.cls_loc, 0, 0.001)
normal_init(self.score, 0, 0.01)
self.roi = RoIPool((roi_size, roi_size), spatial_scale)
def forward(self, x, rois, roi_indices, img_size):
n, _, _, _ = x.shape
if x.is_cuda:
roi_indices = roi_indices.cuda()
rois = rois.cuda()
rois_feature_map = torch.zeros_like(rois)
rois_feature_map[:, [0,2]] = rois[:, [0,2]] / img_size[1] * x.size()[3]
rois_feature_map[:, [1,3]] = rois[:, [1,3]] / img_size[0] * x.size()[2]
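        # For example (hypothetical sizes): with img_size = (600, 800) and a
        # feature map x of shape [1, C, 38, 50], an RoI x-coordinate of 400 maps
        # to 400 / 800 * 50 = 25 on the feature map grid.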
indices_and_rois = torch.cat([roi_indices[:, None], rois_feature_map], dim=1)
#-----------------------------------#
        # Crop the shared feature map using the proposal boxes
#-----------------------------------#
pool = self.roi(x, indices_and_rois)
#-----------------------------------#
        # Extract features using the classifier network
#-----------------------------------#
pool = pool.view(pool.size(0), -1)
#--------------------------------------------------------------#
        # When the input is a single image, the fc7 obtained here has shape [300, 4096]
#--------------------------------------------------------------#
fc7 = self.classifier(pool)
roi_cls_locs = self.cls_loc(fc7)
roi_scores = self.score(fc7)
roi_cls_locs = roi_cls_locs.view(n, -1, roi_cls_locs.size(1))
roi_scores = roi_scores.view(n, -1, roi_scores.size(1))
return roi_cls_locs, roi_scores
class Resnet50RoIHead(nn.Module):
def __init__(self, n_class, roi_size, spatial_scale, classifier):
super(Resnet50RoIHead, self).__init__()
self.classifier = classifier
#--------------------------------------#
        # Regression prediction on the RoI-pooled features
#--------------------------------------#
self.cls_loc = nn.Linear(2048, n_class * 4)
#-----------------------------------#
        # Classification on the RoI-pooled features
#-----------------------------------#
self.score = nn.Linear(2048, n_class)
#-----------------------------------#
        # Weight initialization
#-----------------------------------#
normal_init(self.cls_loc, 0, 0.001)
normal_init(self.score, 0, 0.01)
self.roi = RoIPool((roi_size, roi_size), spatial_scale)
def forward(self, x, rois, roi_indices, img_size):
n, _, _, _ = x.shape
if x.is_cuda:
roi_indices = roi_indices.cuda()
rois = rois.cuda()
rois_feature_map = torch.zeros_like(rois)
rois_feature_map[:, [0,2]] = rois[:, [0,2]] / img_size[1] * x.size()[3]
rois_feature_map[:, [1,3]] = rois[:, [1,3]] / img_size[0] * x.size()[2]
indices_and_rois = torch.cat([roi_indices[:, None], rois_feature_map], dim=1)
#-----------------------------------#
        # Crop the shared feature map using the proposal boxes
#-----------------------------------#
pool = self.roi(x, indices_and_rois)
#-----------------------------------#
        # Extract features using the classifier network
#-----------------------------------#
fc7 = self.classifier(pool)
#--------------------------------------------------------------#
        # When the input is a single image, the fc7 obtained here has shape [300, 2048]
#--------------------------------------------------------------#
fc7 = fc7.view(fc7.size(0), -1)
roi_cls_locs = self.cls_loc(fc7)
roi_scores = self.score(fc7)
roi_cls_locs = roi_cls_locs.view(n, -1, roi_cls_locs.size(1))
roi_scores = roi_scores.view(n, -1, roi_scores.size(1))
return roi_cls_locs, roi_scores
def normal_init(m, mean, stddev, truncated=False):
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
| 39.896552
| 95
| 0.446845
|
e8f87aecc5c5b1b3aa0ee070ecf039e0478dc25e
| 2,694
|
py
|
Python
|
scitbx/math/tests/tst_uniform_rotation_matrix.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
scitbx/math/tests/tst_uniform_rotation_matrix.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
scitbx/math/tests/tst_uniform_rotation_matrix.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
import math, random
from scitbx import matrix
from scitbx.array_family import flex
# -----------------------------------------------------------------------------
# simplified form of surface integral for spherical harmonic (l = m)
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
def ylm(lm,c,t,p):
y = c * math.pow(math.sin(t),lm) * complex(math.cos(lm*p),math.sin(lm*p))
return y * y.conjugate() * math.sin(t)
# -----------------------------------------------------------------------------
def add_point(lm,c,R):
x = matrix.col( [0,0,1] )
new_x = R * x
theta = math.acos(new_x[2]) # theta = [0, pi]
phi = math.atan2(new_x[1],new_x[0]) + math.pi # phi = [0, 2pi)
return ylm(lm,c,theta,phi)
# -----------------------------------------------------------------------------
def test_uniform_rotation_matrix(N=10000,choice=2,verbose=False):
"""
The surface integral of a spherical harmonic function with its conjugate
should be 1. (http://mathworld.wolfram.com/SphericalHarmonic.html, Eq 7)
From Mathematica,
l = 10;
m = 10;
y = SphericalHarmonicY[l, m, \[Theta], \[Phi]];
Integrate[y*Conjugate[y]*Sin[\[Theta]], {\[Theta], 0, Pi}, {\[Phi], 0, 2*Pi}]
should yield 1.
By picking uniformly random points on a sphere, the surface integral can be
numerically approximated.
The results in the comments below are for N = 1 000 000.
"""
if (choice == 0):
# l=1, m=1
# result = (0.883199394206+0j) (0.883824001444+0j)
lm = 1
c = -0.5 * math.sqrt(1.5/math.pi)
elif (choice == 1):
# l = 5, m = 5
# result = (0.959557841214+0j) (0.959331535539+0j)
lm = 5
c = -(3/32) * math.sqrt(77/math.pi)
else:
# l = 10, m = 10
# result = (0.977753926603+0j) (0.97686871766+0j)
lm = 10
c = (1/1024) * math.sqrt(969969/math.pi)
result = [ 0.0, 0.0 ]
for i in range(N):
R = [ matrix.sqr(flex.random_double_r3_rotation_matrix()),
matrix.sqr(flex.random_double_r3_rotation_matrix_arvo_1992()) ]
for j in xrange(len(result)):
result[j] += add_point(lm,c,R[j])
  # multiply by area at the end, each point has an area of 4pi/N
point_area = 4.0*math.pi/N # surface area of unit sphere / number of points
for i in xrange(len(result)):
result[i] = point_area * result[i]
if (verbose):
print result[i],
if (verbose):
print
assert(result[0].real > 0.85)
assert(result[0].real < 1.15)
assert(result[1].real > 0.85)
assert(result[1].real < 1.15)
if (__name__ == '__main__'):
flex.set_random_seed(0)
for i in xrange(3):
test_uniform_rotation_matrix(N=1000, choice=i, verbose=False)
print 'OK'
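# A minimal, numpy-only sketch (independent of scitbx) of the Monte Carlo surface
# integral described in the docstring above, for the l = m = 1 case where
# |Y_1^1|^2 = (3/(8*pi)) * sin(theta)^2.  Points are drawn uniformly on the unit
# sphere (cos(theta) uniform in [-1, 1]); each point carries an area weight of
# 4*pi/N, so the estimate converges to the exact surface integral, which is 1.
import numpy as np

def mc_spherical_harmonic_norm(n=100000, seed=0):
  rng = np.random.RandomState(seed)
  cos_theta = rng.uniform(-1.0, 1.0, n)                        # uniform points on the sphere
  y_squared = (3.0/(8.0*np.pi)) * (1.0 - cos_theta*cos_theta)  # |Y_1^1|^2; phi drops out
  return (4.0*np.pi/n) * y_squared.sum()                       # ~1.0 for large n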
| 31.325581
| 79
| 0.580921
|
c23269dd2045c4aba08f46fd0e27d20bb62e97f7
| 3,517
|
py
|
Python
|
visual/assets/racko.py
|
lhr-solar/DataAcquisition
|
ff257396e92eb76d1b4050d41c3c0dc8e7afbf7b
|
[
"MIT"
] | 1
|
2021-09-04T15:26:43.000Z
|
2021-09-04T15:26:43.000Z
|
visual/assets/racko.py
|
lhr-solar/DataAcquisition
|
ff257396e92eb76d1b4050d41c3c0dc8e7afbf7b
|
[
"MIT"
] | 5
|
2020-07-24T21:19:59.000Z
|
2021-05-31T17:16:48.000Z
|
visual/assets/racko.py
|
lhr-solar/DataAcquisition
|
ff257396e92eb76d1b4050d41c3c0dc8e7afbf7b
|
[
"MIT"
] | 1
|
2020-07-16T19:46:56.000Z
|
2020-07-16T19:46:56.000Z
|
# File: racko.py
# Description: A program that simulates the card and number game
# Rack-O. Players use the keyboard and take turns.
# Assignment Number: 10
#
# Name: <YOUR NAME>
# EID: <YOUR EID>
# Email: <YOUR EMAIL>
# Grader: <YOUR GRADER'S NAME Irena OR Noah OR Skyler OR Tyler>
#
# On my honor, <YOUR NAME>, this programming assignment is my own work
# and I have not provided this code to any other student.
import random
# Play one game of Rack-O.
def main():
# Get the rack size, create the deck, and deal the initial racks.
rack_size = prep_game()
deck = list(range(1, 61))
random.shuffle(deck)
player = 1
player_1_rack = get_rack(deck, rack_size)
player_2_rack = get_rack(deck, rack_size)
discard = [deck.pop(0)]
# CS303e students. Complete the main method to play
# one complete game of Rack-O using the specified functions.
# Get ready to play 1 game.
# Show the instructions if the user wants to see them.
# Set the seed for the random number generator.
# Return the size of the rack to use.
def prep_game():
print('----- Welcome to Rack - O! -----')
if input('Enter y to display instructions: ') == 'y':
instructions()
print()
random.seed(eval(input('Enter number for initial seed: ')))
rack_size = eval(input('Enter the size of the rack to use. '
+ 'Must be between 5 and 10: '))
while not 5 <= rack_size <= 10:
print(rack_size, 'is not a valid rack size.')
rack_size = eval(input('Enter the size of the rack to use. '
+ 'Must be between 5 and 10: '))
print()
return rack_size
# Print the instructions of the game.
def instructions():
print()
print('The goal of the game is to get the cards in your rack of cards')
print('into ascending order. Your rack has ten slots numbered 1 to 10.')
print('During your turn you can draw the top card of the deck or take')
print('the top card of the discard pile.')
print('If you draw the top card of the deck, you can use that card to')
print('replace a card in one slot of your rack. The replaced card goes to')
print('the discard pile.')
print('Alternatively you can simply choose to discard the drawn card.')
print('If you take the top card of the discard pile you must use it to')
print('replace a card in one slot of your rack. The replaced card goes')
print('to the top of the discard pile.')
# Take the player's turn. Give them the choice of drawing or taking
# the top card of the discard pile. If they draw they can replace
# a card or discard the draw. If they take the top card of the discard
# pile they must replace a card in their rack.
def take_turn(deck, discard, player_rack):
    pass  # placeholder body; to be implemented per the comment above
# Ask the player which card to replace in their rack.
# Replace it with the given new card. Place the card removed
# from the player's rack at the top of the discard pile.
# Error checks until player enters a card that is currently
# in their rack.
def place_card(player_rack, new_card, discard):
    pass  # placeholder body; to be implemented per the comment above
# Return True if this rack is sorted in ascending order, False otherwise.
# Do not create any new lists in this function.
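# For illustration (hypothetical racks): is_sorted([3, 10, 22, 41, 59]) should
# return True, while is_sorted([10, 3, 22, 41, 59]) should return False.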
def is_sorted(rack):
    pass  # placeholder body; to be implemented per the comment above
# Deal the top rack_size cards of the deck into a new rack. The first
# card goes in the first slot, the second card goes in the second
# slot, and so forth. We assume len(deck) >= rack_size. Return the
# list of ints representing the rack.
def get_rack(deck, rack_size):
    pass  # placeholder body; to be implemented per the comment above
main()
| 35.525253
| 79
| 0.680409
|
c1ebf0ce0e354c1328f3d8804daa4155b0856218
| 3,799
|
py
|
Python
|
docugen/changelog.py
|
adsfgg/LazarusMod
|
47525094eaf175fe1f11c624c1536581ded8cfa8
|
[
"MIT"
] | 3
|
2019-10-27T14:10:30.000Z
|
2021-01-13T18:29:51.000Z
|
docugen/changelog.py
|
adsfgg/LazarusMod
|
47525094eaf175fe1f11c624c1536581ded8cfa8
|
[
"MIT"
] | 2
|
2019-10-07T22:11:47.000Z
|
2019-10-10T00:14:33.000Z
|
docugen/changelog.py
|
adsfgg/LazarusMod
|
47525094eaf175fe1f11c624c1536581ded8cfa8
|
[
"MIT"
] | 9
|
2019-10-03T17:44:59.000Z
|
2021-12-20T18:01:38.000Z
|
class ChangeLogTree:
def __init__(self, raw_changelog):
self.root_node = ChangeLogNode("root")
for key,value in raw_changelog:
self.injestkey(self.root_node, key, value)
def injestkey(self, root, key, value):
if "." in key:
subkeys = key.split(".")
root_subkey = subkeys[0]
node = None
if root.haschild(root_subkey):
node = root.getchild(root_subkey)
else:
node = root.addchild(root_subkey, root)
if not node:
                raise ValueError("Failed to ingest key")
self.injestkey(node, ".".join(subkeys[1:]), value)
else:
if root.haschild(key):
node = root.getchild(key)
else:
node = root.addchild(key, root)
node.addvalue(value)
class ChangeLogNode:
def __init__(self, key, parent=None):
self.key = key
self.parent = parent
self.children = []
self.values = []
def getchild(self, key):
for child in self.children:
if child.key == key:
return child
return None
def haschild(self, key):
return self.getchild(key) != None
def addchild(self, key, value):
child = ChangeLogNode(key, value)
self.children.append(child)
return child
def addvalue(self, value):
self.values.append(value)
def diff(curr, old):
diff = []
last_neutral_indent_key = ""
last_neutral_indent_value = ""
# Iterate through all key/value pairs in current_changelog
for key,value in curr:
found_key = False
found_value = False
if not value.startswith(">"):
last_neutral_indent_key = key
last_neutral_indent_value = value
# With a single key/value pair in curr, look for a matching one
# in the old changelog
for key2,value2 in old:
if key == key2:
found_key = True
if value2 == value:
found_value = True
elif found_key:
break
# If we didn't find a match for the key it means that the key/value was added
# If we did find a key but didn't find a value, it means that a key/value pair was modified
if not found_key or not found_value:
# Append the last neutral key/value pair if our value is manually indented.
# This is to preserve semantic information that would be lost without the header
if value.startswith(">") and last_neutral_indent_key != "" and last_neutral_indent_value != "":
diff.append((last_neutral_indent_key, last_neutral_indent_value))
last_neutral_indent_key = ""
last_neutral_indent_value = ""
diff.append((key, value))
if key == last_neutral_indent_key and value == last_neutral_indent_value:
last_neutral_indent_key = ""
last_neutral_indent_value = ""
# Check for any deletions
for key,value in old:
found_key = False
found_value = False
# Find matching key in curr
for key2,value2 in curr:
if key == key2:
found_key = True
if value2 == value:
found_value = True
elif found_key:
break
# If a key exists in the old changelog but not in the current one, it's been removed
# If we did find a key in the old changelog but didn't find the value in the new changelog
# it's been removed
if not found_key or not found_value:
diff.append((key, "== REMOVED == " + value))
return diff
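# A minimal usage sketch with hypothetical changelog data.  Each changelog is an
# iterable of (key, value) pairs, which is what the loops in diff() above assume.
if __name__ == "__main__":
    old_log = [("1.0.Fixes", "Fixed crash on load"), ("1.0.Tweaks", "Rebalanced rifle")]
    new_log = [("1.0.Fixes", "Fixed crash on load"), ("1.0.Tweaks", "Rebalanced shotgun")]
    # Prints the modified pair from new_log plus the removed pair from old_log:
    # [('1.0.Tweaks', 'Rebalanced shotgun'), ('1.0.Tweaks', '== REMOVED == Rebalanced rifle')]
    print(diff(new_log, old_log))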
| 33.619469
| 107
| 0.564622
|
dfe1334669248a003a4ef2d23f0f2effa5b9313a
| 3,247
|
py
|
Python
|
seniorProjectApp/views.py
|
mpreyes/SeniorProject
|
cbcfb132dc24c63c0bb4c43cc2817309c9ea0d2b
|
[
"MIT"
] | 1
|
2019-02-08T21:15:15.000Z
|
2019-02-08T21:15:15.000Z
|
seniorProjectApp/views.py
|
mpreyes/SeniorProject
|
cbcfb132dc24c63c0bb4c43cc2817309c9ea0d2b
|
[
"MIT"
] | null | null | null |
seniorProjectApp/views.py
|
mpreyes/SeniorProject
|
cbcfb132dc24c63c0bb4c43cc2817309c9ea0d2b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponse,HttpResponseRedirect
from django.core.cache import caches
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views import generic
from .forms import CustomUserCreationForm, ProgressForm
from seniorProjectApp.models import *
def index(request):
return render(request, 'seniorProjectApp/index.html')
# def signup(request):
# return render(request,'seniorProjectApp/signup.html')
# #Signup MUST request a degree: Computer Science = 1
class SignUp(generic.CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'seniorProjectApp/signup.html'
def redirectme(request):
degreeID = 1 #defaults
userID = 1
if request.user.is_authenticated:
degreeID = request.user.degreeID
userID = request.user.id
return redirect("/seniorProjectApp/" + str(userID) + "/" + str(degreeID)+ "/")
def dashboard(request,userID_id,degreeID):
#if changing degree, change degreeID for both
degree = Degree.objects.get(degreeID = degreeID)
courses = Courses.objects.filter(degreeID = degreeID)
context = { "degree_list": degree, "courses_list": courses}
return render(request,'seniorProjectApp/dashboard.html',context)
def links(request,userID_id,degreeID_id,courseID):
#testUser = 1
courses = Courses.objects.get(courseID = courseID)
topics = Topics.objects.filter(courseID = courseID)
links = Links.objects.all()
progress = Progress.objects.filter(userID = userID_id)
link_progress = list(zip(links,progress))
# for i in links:
# print(i.linksID)
# for j in progress:
# print(j.notes)
# for i,j in link_progress:
# print(i.linksID,j.progressID, j.isCompleted)
context = {"degreeID": degreeID_id, "course": courses, "topics": topics, "links": links, "link_progress": link_progress}
print(progress)
return render(request,'seniorProjectApp/links.html',context)
def progress(request,userID_id,degreeID_id,courseID,linksID, progressID):
#testUser = 1
courses = Courses.objects.get(courseID = courseID)
topics = Topics.objects.filter(courseID = courseID)
user = CustomUser.objects.get(id = userID_id)
progress = Progress.objects.get(userID_id = userID_id,linkID_id = linksID,progressID = progressID)
link = Links.objects.get( linksID = linksID)
context = {"user" : user, "degreeID": degreeID_id,"course": courses, "topics": topics, "link": link, "progress": progress}
if request.method == 'POST':
print("got a post request")
form = ProgressForm(request.POST,instance = progress)
print(form.is_valid())
if form.is_valid():
form.save()
print("form got saved")
context["form"] = form
return HttpResponseRedirect(request.META.get('HTTP_REFERER'),'seniorProjectApp/progress.html',context)
else:
print("Form did not get saved")
form = ProgressForm(instance = progress)
context["form"] = form
return render(request,'seniorProjectApp/progress.html',context)
| 32.148515
| 127
| 0.695103
|
896d29075ce3ce1505523f55764103afa8355b2c
| 15,637
|
py
|
Python
|
REMARKs/GanongNoelUI/code/estimate_models.py
|
ngkratts/REMARK
|
92c057a93a7d10a890696db55f874d5fde394b91
|
[
"Apache-2.0"
] | 18
|
2019-01-28T13:17:39.000Z
|
2021-09-10T16:29:55.000Z
|
REMARKs/GanongNoelUI/code/estimate_models.py
|
ngkratts/REMARK
|
92c057a93a7d10a890696db55f874d5fde394b91
|
[
"Apache-2.0"
] | 89
|
2019-01-06T19:32:34.000Z
|
2021-08-30T13:30:48.000Z
|
REMARKs/GanongNoelUI/code/estimate_models.py
|
ngkratts/REMARK
|
92c057a93a7d10a890696db55f874d5fde394b91
|
[
"Apache-2.0"
] | 50
|
2018-08-01T16:33:06.000Z
|
2021-10-05T20:20:26.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 08:48:22 2018
@author: Xian_Work
"""
import scipy
from scipy.optimize import fmin, brute
from model_plotting import norm , compute_dist,gen_evolve_share_series, mk_mix_agent
#Import parameters and other UI modules
param_path="../Parameters/params_ui.json"
execfile("prelim.py")
####################
###### Setup ######
####################
opt_process = "serial" #Runs only the minimum number of estimations required to replicate models in the paper. Other option is "parallel"
in_path = './est_models_in/'
out_path = './est_models_out/'
##############################
### Input arguments for optimization ###
init_conditions_list = []
final_conditions_dict = {}
if opt_process == "serial":
#Creates a list of dicts; each dict corresponds to an optimization to perform
#Later pipes all the optimization results to one json
master_infile = 'initial_conditions_master.json'
master_outfile = '../../Parameters/model_params_main.json'
with open(in_path+master_infile, "r") as read_file:
init_conditions_master = json.load(read_file)
for i in init_conditions_master:
init_conditions_list.append(i)
elif opt_process == "parallel":
#Creates a length-1 list from a json, containing an optimization dict
#Then deletes the input json
#Later pipes output of optimization to a json in the output directory
#If running parallel, this python script must be run once for each optimization;
    #grid_sims_helper.py contains func to convert csvs of initial conditions
#to json input for this script, and func to convert json outputs of this
#script into csvs
in_path = './est_models_in/'
out_path = './est_models_out/'
def get_files(start_str = "sim_"):
"""Return files that start with start_str"""
n = len(start_str)
file_list = [f for f in os.listdir(in_path) if f[0:n] == start_str]
return file_list
#Get input dict and delete from input directory
filename = get_files()[0]
with open(in_path + filename, 'r') as f:
opt_input = json.load(f)
init_conditions_list.append(opt_input)
os.remove(in_path + filename)
####################################
#Model Target and Base Plots
####################################
#Data Series to plot against
data_tminus5 = norm(param.JPMC_cons_moments, param.plt_norm_index)
data_tminus5_search = param.JPMC_search_moments
###Use vcv weights###
cons_se_vcv_tminus5 =param.JPMC_cons_SE
search_se_vcv = param.JPMC_search_SE
#Targets
opt_target_cons = data_tminus5
cons_wmat = np.linalg.inv(np.asmatrix(np.diag(np.square(cons_se_vcv_tminus5))))
opt_target_search = data_tminus5_search[param.moments_len_diff: param.moments_len_diff + param.s_moments_len]
search_wmat = np.linalg.inv(np.asmatrix(np.diag(np.square(search_se_vcv))))
################################################
#Function to generate agent
################################################
def gen_agent(vals_dict):
    #Build the agent (possibly a mixture of types) for the requested model specification
#Common parameters to be updated in all models
agent_pd = copy.deepcopy(pd_base)
if 'phi' in vals_dict.keys():
agent_pd.update({'phi':vals_dict['phi']})
#Representative Agent
if vals_dict['opt_type'] == '1b1k':
agent_pd.update({'beta_var':vals_dict['beta_var'],
'beta_hyp':vals_dict['beta_hyp'],
'L_':vals_dict['L_'],
'k':vals_dict['k']})
agent = [(1, agent_pd),]
return(agent)
#Heterogeneity in Search Costs
if vals_dict['opt_type'] == '1b2k':
agent_pd.update({'beta_var':vals_dict['beta_var'],
'beta_hyp':vals_dict['beta_hyp'],
'L_':vals_dict['L_']})
k0 = vals_dict['k0']
k1 = vals_dict['k1']
agent_weights = (vals_dict['w_lo_k'], 1- vals_dict['w_lo_k'])
agent_params = ('k', )
agent_vals = ((k0, ),
(k1, ),)
#Heterogeneity in Consumption Preferences and in Search Costs
if vals_dict['opt_type'] == '2b2k' or vals_dict['opt_type'] == '2b2k_fix_xi' or vals_dict['opt_type'] == '2b2k_fix_b1':
agent_pd.update({'beta_var':vals_dict['beta_var'],
'L_':vals_dict['L_']})
#Heterogeneous k types
k0 = vals_dict['k0']
k1 = vals_dict['k1']
#Heterogeneous beta types
b0 = vals_dict['b0']
b1 = vals_dict['b1']
#Weights
w_lo_k = vals_dict['w_lo_k']
w_hi_k = 1 - w_lo_k
w_lo_beta = vals_dict['w_lo_beta']
w_hi_beta = 1 - w_lo_beta
w_b0_k0 = w_lo_k * w_lo_beta
w_b1_k0 = w_lo_k * w_hi_beta
w_b0_k1 = w_hi_k * w_lo_beta
w_b1_k1 = w_hi_k * w_hi_beta
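        # For illustration (hypothetical weights): with w_lo_k = 0.6 and
        # w_lo_beta = 0.5, the four type weights just computed are 0.30, 0.30,
        # 0.20 and 0.20, and they sum to one as a mixture requires.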
#Make agent
agent_weights = (w_b0_k0, w_b1_k0, w_b0_k1, w_b1_k1)
agent_params = ('beta_hyp', 'k' )
agent_vals = ((b0, k0),
(b1, k0),
(b0, k1),
(b1, k1))
### Additional models - robustness checks to different types of heterogeneity
if vals_dict['opt_type'] == '2d2k':
agent_pd.update({'L_':vals_dict['L_']})
agent_pd.update({'beta_hyp':1})
#Heterogeneous k types
k0 = vals_dict['k0']
k1 = vals_dict['k1']
#Heterogeneous beta types
d0 = vals_dict['d0']
d1 = vals_dict['d1']
#Weights
w_lo_k = vals_dict['w_lo_k']
w_hi_k = 1 - w_lo_k
w_lo_delta = vals_dict['w_lo_delta']
w_hi_delta = 1 - w_lo_delta
w_d0_k0 = w_lo_k * w_lo_delta
w_d1_k0 = w_lo_k * w_hi_delta
w_d0_k1 = w_hi_k * w_lo_delta
w_d1_k1 = w_hi_k * w_hi_delta
#Make agent
agent_weights = (w_d0_k0, w_d1_k0, w_d0_k1, w_d1_k1)
agent_params = ('beta_var', 'k' )
agent_vals = ((d0, k0),
(d1, k0),
(d0, k1),
(d1, k1))
agent = mk_mix_agent(agent_pd, agent_params, agent_vals, agent_weights)
return agent
################################################
#Function to generate Consumption and Search behaviour
################################################
def gen_agent_bhvr(agent):
c_start = param.c_plt_start_index
s_start = param.s_plt_start_index
series_dict = gen_evolve_share_series(pd_base['e'],
c_start, s_start,
len(opt_target_cons),
param.plt_norm_index,
*agent,
verbose = True,
normalize = True)
cons_out = series_dict['w_cons_out']
search_out = series_dict['w_search_out'][s_start-c_start : s_start-c_start+len(opt_target_search)]
return {'cons_out':cons_out, 'search_out':search_out}
########################################
#Main estimation execution starts here
#######################################
for opt_input in init_conditions_list:
####################################
#Baseline parameters
####################################
pd_base = {"a0": param.a0_data, "T_series":T_series, "T_solve":param.TT,
"e":param.e_extend,
"beta_var":param.beta, "beta_hyp": param.beta_hyp, "a_size": param.a_size,
"rho":param.rho, "verbose":False, "L_":param.L,
"constrained":param.constrained, "Pi_":np.zeros((param.TT+1,param.a_size+1,9,9)),
"z_vals" : param.z_vals, "R" : param.R, "Rbor" : param.R,
"phi": param.phi, "k":param.k, "spline_k":param.spline_k, "solve_V": True,
"solve_search": True}
for t in range(param.TT+1):
for a_index in range(param.a_size+1):
pd_base['Pi_'][t][a_index] = param.Pi
pd_base['T_series']=len(pd_base['e'])-1
if 'fix_gamma' in opt_input:
pd_base.update({'rho':opt_input['fix_gamma']})
if opt_input['opt_type'] == '2b2k_fix_xi':
pd_base.update({'phi':opt_input['phi']})
#######################################################
#### Indexing of arguments that will be optimized over
#######################################################
#Representative Agent
if opt_input['opt_type'] == '1b1k':
args_index = {'beta_var':0,
'beta_hyp':1,
'k':2,
'L_':3,
'phi':4}
args_bounds = [(0.9, 1.0),
(0.2, 1.0),
(1.0, 300.0),
(0.0, 12.0),
(0.5, 2.0)]
#Heterogeneity in Search Costs
elif opt_input['opt_type'] == '1b2k':
args_index = {'beta_var':0,
'beta_hyp':1,
'k0':2,
'k1':3,
'L_':4,
'w_lo_k':5,
'phi':6}
args_bounds = [(0.9, 1.0),
(0.2, 1.0),
(1.0, 300.0),
(1.0, 300.0),
(0.0, 12.0),
(0.001, 0.999),
(0.5, 2.0)]
#Heterogeneity in Consumption Preferences and in Search Costs
elif opt_input['opt_type'] == '2b2k':
args_index = {'beta_var':0,
'L_':1,
'b0':2,
'b1':3,
'w_lo_beta':4,
'k0':5,
'k1':6,
'w_lo_k':7,
'phi':8}
args_bounds = [(0.9, 1.0),
(0.0, 12.0),
(0.2, 1.0),
(0.2, 1.0),
(0.001, 0.999),
(1.0, 300.0),
(1.0, 300.0),
(0.001, 0.999),
(0.5, 2.0)]
elif opt_input['opt_type'] == '2b2k_fix_xi':
args_index = {'beta_var':0,
'L_':1,
'b0':2,
'b1':3,
'w_lo_beta':4,
'k0':5,
'k1':6,
'w_lo_k':7}
args_bounds = [(0.8, 1.0),
(0.0, 12.0),
(0.1, 1.0),
(0.1, 1.0),
(0.001, 0.999),
(1.0, 300.0),
(1.0, 300.0),
(0.001, 0.999)]
elif opt_input['opt_type'] == '2b2k_fix_b1':
args_index = {'beta_var':0,
'L_':1,
'b0':2,
'w_lo_beta':3,
'k0':4,
'k1':5,
'w_lo_k':6,
'phi':7}
args_bounds = [(0.9, 1.0),
(0.0, 12.0),
(0.2, 1.0),
(0.001, 0.999),
(1.0, 300.0),
(1.0, 300.0),
(0.001, 0.999),
(1.001, 2.0)]
#Heterogeneity in Delta
elif opt_input['opt_type'] == '2d2k':
args_index = {'L_':0,
'd0':1,
'd1':2,
'w_lo_delta':3,
'k0':4,
'k1':5,
'w_lo_k':6,
'phi':7}
args_bounds = [(0.0, 12.0),
(0.2, 1.0),
(0.2, 1.0),
(0.001, 0.999),
(1.0, 300.0),
(1.0, 300.0),
(0.001, 0.999),
(0.5, 2.0)]
args_index_rv = {v: k for k, v in args_index.iteritems()} #For processing opt_args_in
####################################################
#Objective Function
####################################################
def obj_func(opt_args_in = [], opt_type = None, verbose = False):
###Generate agent ###
vals_dict = {'opt_type':opt_type}
for key, value in args_index.iteritems():
vals_dict.update({key:opt_args_in[value]})
if opt_input['opt_type'] == '2b2k_fix_b1'.decode('utf8'):
print('adding fixed b1 value')
vals_dict.update({'b1':opt_input['b1']})
agent = gen_agent(vals_dict)
#Generate consumption and search behaviour
series_out = gen_agent_bhvr(agent)
cons_out = series_out['cons_out']
search_out = series_out['search_out']
#Calculate distance from targets
cons_dist = compute_dist(cons_out, opt_target_cons, cons_wmat)
search_dist = compute_dist(search_out, opt_target_search, search_wmat)
if verbose==True:
return (cons_dist, search_dist, cons_dist + search_dist)
return cons_dist + search_dist
###########################
#Optimization
###########################
opt_args_in = []
for i in range(len(args_index_rv)):
opt_args_in.append(opt_input[args_index_rv[i]])
opt_out = scipy.optimize.minimize(obj_func, opt_args_in,
args = (opt_input['opt_type'], False),
bounds=args_bounds,
options = {'maxiter':13, 'ftol':0.001})
distances = obj_func(opt_out['x'], opt_type = opt_input['opt_type'], verbose=True)
###########################
#Write results
###########################
opt_args_out = copy.deepcopy(opt_input)
#Initial Params
for key, val in opt_input.iteritems():
init_key = "init_" + key
opt_args_out.update({init_key: val})
#Optimized Params
for key, val in args_index.iteritems():
opt_args_out.update({key:opt_out['x'][val]})
###For robustness checks where we estimate with different risk aversion
if 'fix_gamma' in opt_input:
opt_args_out.update({'rho':opt_input['fix_gamma']})
#Optimization Statistics
opt_args_out.update({'GOF': distances[2],
'GOF_cons': distances[0],
'GOF_search': distances[1],
'term_stat': opt_out['message'],
'num_iters': opt_out['nit']})
###Write output
if opt_process == "parallel":
with open(out_path+filename, 'w') as f:
json.dump(opt_args_out, f)
elif opt_process == "serial":
key = "est_params_" + opt_args_out['opt_type']
if 'fix_gamma' in opt_input:
key = key + '_fix_gamma_' + str(int(np.round(opt_input['fix_gamma'],0)))
        if opt_args_out['opt_type'] == '2b2k_fix_xi':
key = key + '_' + str(opt_input['phi'])
        if opt_args_out['opt_type'] == '2b2k_fix_b1':
key = key + '_' + str(opt_input['b1'])
final_conditions_dict.update({key: opt_args_out})
###Final dump when optimizing in serial mode
if opt_process == "serial":
with open(out_path + master_outfile, 'w') as f:
json.dump(final_conditions_dict, f, indent=0)
| 37.498801
| 137
| 0.483724
|
278a311c09b050885b75e32e99d072cafb3b59a1
| 1,708
|
py
|
Python
|
python/tvm/relay/op/contrib/register.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
python/tvm/relay/op/contrib/register.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 3,022
|
2020-11-24T14:02:31.000Z
|
2022-03-31T23:55:31.000Z
|
python/tvm/relay/op/contrib/register.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Register utilities for external codegen."""
_PATTERN_TABLES = {}
def register_pattern_table(compiler, table=None):
"""Register a pattern table for an external compiler.
Pattern tables are used to create composite functions.
See the MergeComposite pass.
Parameters
----------
compiler : str
The name of compiler
table : function, optional
A function that returns the pattern table
Returns
-------
fregister : function
Register function if value is not specified.
"""
def _register(t):
"""internal register function"""
_PATTERN_TABLES[compiler] = t()
return t
return _register(table) if table is not None else _register
def get_pattern_table(compiler):
"""Get the pattern table associated with a compiler (if it's registered)."""
return _PATTERN_TABLES[compiler] if compiler in _PATTERN_TABLES else None
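# A minimal usage sketch; the compiler name 'my_codegen' below is a hypothetical
# placeholder, and a real table would return the (pattern name, pattern) entries
# consumed by the MergeComposite pass mentioned in the docstring above.
@register_pattern_table("my_codegen")
def _my_codegen_pattern_table():
    """Return the (hypothetical) pattern table for the 'my_codegen' compiler."""
    return []

# After the decorator runs, get_pattern_table("my_codegen") returns the empty list
# above, while get_pattern_table() with an unregistered name returns None.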
| 32.846154
| 80
| 0.720726
|
384d3180a9ec40eb66beba3c5322e55df3ca0d35
| 9,154
|
py
|
Python
|
workflows/pipe-templates/__SYSTEM/data_loader/src/sys-data-upload.py
|
msleprosy/cloud-pipeline
|
bccc2b196fad982380efc37a1c3785098bec6c85
|
[
"Apache-2.0"
] | 126
|
2019-03-22T19:40:38.000Z
|
2022-02-16T13:01:44.000Z
|
workflows/pipe-templates/__SYSTEM/data_loader/src/sys-data-upload.py
|
msleprosy/cloud-pipeline
|
bccc2b196fad982380efc37a1c3785098bec6c85
|
[
"Apache-2.0"
] | 1,189
|
2019-03-25T10:39:27.000Z
|
2022-03-31T12:50:33.000Z
|
workflows/pipe-templates/__SYSTEM/data_loader/src/sys-data-upload.py
|
msleprosy/cloud-pipeline
|
bccc2b196fad982380efc37a1c3785098bec6c85
|
[
"Apache-2.0"
] | 62
|
2019-03-22T22:09:49.000Z
|
2022-03-08T12:05:56.000Z
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pipeline import Logger, TaskStatus
from model.entities_api import EntitiesAPI
import os
import time
import multiprocessing
from multiprocessing.pool import ThreadPool
import subprocess
import shlex
UPLOAD_TASK_NAME = 'Upload'
INPUT_CHECK_TASK_NAME = 'InputParametersCheck'
METADATA_TASK_NAME = 'MetadataValuesExtraction'
# TODO: Move these settings to the GUI
UPLOAD_RETRY_COUNT = 5
UPLOAD_RETRY_TIMEOUT_SEC = 5
def upload_data(src, dst, f_name_format, c_name, c_type, create_folders, entity_id, m_id, ent_api, upd_paths):
if not dst.endswith('/'):
dst = dst + '/'
if f_name_format is not None and c_name is not None:
if create_folders:
dst = dst + c_name + '/' + f_name_format
else:
dst = dst + f_name_format.format(c_name)
elif f_name_format is None and c_name is not None and create_folders:
dst = dst + c_name + '/' + src.split('/')[-1:][0]
else:
dst = dst + src.split('/')[-1:][0]
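    # For example (hypothetical values), with dst 'cp://bucket/run/' and column c_name 'R1':
    #   f_name_format 'Sample1_{}' with create_folders False -> 'cp://bucket/run/Sample1_R1'
    #   f_name_format 'Sample1'    with create_folders True  -> 'cp://bucket/run/R1/Sample1'
    #   no f_name_format and no folder creation              -> 'cp://bucket/run/' + basename of src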
code = 1
for upload_try_num in range(1, UPLOAD_RETRY_COUNT+1):
Logger.info("Attempt #{}. Uploading {} to {}...".format(upload_try_num, src, dst), task_name=UPLOAD_TASK_NAME)
Logger.info('Executing command \'pipe storage cp "{}" "{}" -f > /dev/null\''.format(src, dst), task_name=UPLOAD_TASK_NAME)
code = os.system('pipe storage cp "{}" "{}" -f > /dev/null'.format(src, dst))
if code != 0:
Logger.fail("Attempt #{}. Error uploading {} to {}".format(upload_try_num, src, dst), task_name=UPLOAD_TASK_NAME)
if upload_try_num < UPLOAD_RETRY_COUNT:
time.sleep(UPLOAD_RETRY_TIMEOUT_SEC)
else:
Logger.fail("All {} attempts failed for {}. Source is not uploaded".format(UPLOAD_RETRY_COUNT, src), task_name=UPLOAD_TASK_NAME)
else:
Logger.info("Uploading {} to {} done".format(src, dst), task_name=UPLOAD_TASK_NAME)
if upd_paths:
ent_api.update_key(m_id, entity_id, c_name, c_type, dst)
break
return code
if __name__ == '__main__':
Logger.info("Checking input parameters", task_name=INPUT_CHECK_TASK_NAME)
scripts_dir = os.environ['SCRIPTS_DIR']
if 'DESTINATION_DIRECTORY' not in os.environ:
Logger.fail("DESTINATION_DIRECTORY parameter is missing", task_name=INPUT_CHECK_TASK_NAME)
exit(1)
if 'METADATA_ID' not in os.environ:
Logger.fail("METADATA_ID parameter is missing", task_name=INPUT_CHECK_TASK_NAME)
exit(1)
if 'METADATA_CLASS' not in os.environ:
Logger.fail("METADATA_CLASS parameter is missing", task_name=INPUT_CHECK_TASK_NAME)
exit(1)
if 'METADATA_COLUMNS' not in os.environ:
Logger.fail("METADATA_COLUMNS parameter is missing or invalid", task_name=INPUT_CHECK_TASK_NAME)
exit(1)
destination = os.environ['DESTINATION_DIRECTORY']
api_path = os.environ['API']
api_token = os.environ['API_TOKEN']
metadata_id = os.environ['METADATA_ID']
metadata_class = os.environ['METADATA_CLASS']
metadata_columns_str = os.environ['METADATA_COLUMNS']
metadata_entities = []
file_name_format_column = None
if 'METADATA_ENTITIES' in os.environ:
metadata_entities = map(lambda e: e.strip(), os.environ['METADATA_ENTITIES'].split(','))
if metadata_columns_str is None:
metadata_columns_str = ''
if 'FILE_NAME_FORMAT_COLUMN' in os.environ:
file_name_format_column = os.environ['FILE_NAME_FORMAT_COLUMN']
create_folders_for_columns = \
os.environ['CREATE_FOLDERS_FOR_COLUMNS'].lower() == 'true' if 'CREATE_FOLDERS_FOR_COLUMNS' in os.environ else False
update_paths = os.environ['UPDATE_PATH_VALUES'].lower() == 'true' if 'UPDATE_PATH_VALUES' in os.environ else False
metadata_column_names = metadata_columns_str.split(',')
metadata_columns_values = {}
metadata_columns = []
for column in metadata_column_names:
column_name = column.strip()
if len(column_name) > 0:
metadata_columns.append(column_name)
metadata_columns_values[column_name] = []
Logger.info('Input parameters checked', task_name=INPUT_CHECK_TASK_NAME)
Logger.info('Destination: {}'.format(destination), task_name=INPUT_CHECK_TASK_NAME)
Logger.info('Metadata ID: {}'.format(metadata_id), task_name=INPUT_CHECK_TASK_NAME)
Logger.info('Metadata Class: {}'.format(metadata_class), task_name=INPUT_CHECK_TASK_NAME)
Logger.info('Metadata columns: {}'.format(', '.join(metadata_columns)), task_name=INPUT_CHECK_TASK_NAME)
Logger.success("Done", task_name=INPUT_CHECK_TASK_NAME)
Logger.info('Extracting metadata values (#{}, {}) for columns {}...'.format(
metadata_id,
metadata_class,
', '.join(metadata_columns)
), task_name=METADATA_TASK_NAME)
api = EntitiesAPI(api_path, api_token)
for el in api.load_all(metadata_id, metadata_class):
if len(metadata_entities) > 0 and str(el.id) not in metadata_entities:
continue
if el.data is not None:
for column in metadata_columns:
if column in el.data and 'value' in el.data[column]:
value = el.data[column]['value'].encode("utf-8")
if not value.lower().startswith('http://') and not value.lower().startswith('https://') and not value.lower().startswith('ftp://'):
Logger.info('Skipping {} ({}, #{}) - not http, https or ftp source'.format(
el.data[column]['value'].encode("utf-8"),column,el.external_id),
task_name=METADATA_TASK_NAME)
continue
column_type = el.data[column]['type']
file_name_format = None
if file_name_format_column is not None and file_name_format_column in el.data and 'value' in el.data[file_name_format_column]:
file_name_format = el.data[file_name_format_column]['value'].encode("utf-8") + '_{}' if not create_folders_for_columns else el.data[file_name_format_column]['value'].encode("utf-8")
metadata_columns_values[column].append((el.external_id, el.id, value, column_type, file_name_format))
Logger.info('{} ({}, #{})'.format(
el.data[column]['value'].encode("utf-8"),
column,
el.external_id
), task_name=METADATA_TASK_NAME)
Logger.success("Done", task_name=METADATA_TASK_NAME)
Logger.info("Starting uploading task", task_name=UPLOAD_TASK_NAME)
cpu_count = multiprocessing.cpu_count()
if 'MAX_THREADS_COUNT' in os.environ:
max_threads_count = int(os.environ['MAX_THREADS_COUNT'])
cpu_count = max_threads_count if max_threads_count < cpu_count else cpu_count
pool = ThreadPool(cpu_count)
pool_results = []
for column in metadata_columns:
for (external_id, internal_id, url, column_type, file_name_format) in metadata_columns_values[column]:
upload_result = pool.apply_async(
upload_data,
(
url,
destination,
file_name_format,
column,
column_type,
create_folders_for_columns,
internal_id,
metadata_id,
api,
update_paths
)
)
pool_results.append(upload_result)
pool.close()
pool.join()
successes_count = sum([1 for x in pool_results if x.get() == 0])
if successes_count == len(pool_results):
Logger.success("Upload done. All transfers completed successfully", task_name=UPLOAD_TASK_NAME)
exit(0)
elif successes_count == 0:
Logger.fail("Upload completed with errors. ALL transfers FAILED\nPlease review errors above", task_name=UPLOAD_TASK_NAME)
exit(1)
else:
Logger.warn("Upload completed with errors. SOME of the transfers failed to complete\nPlease review errors above", task_name=UPLOAD_TASK_NAME)
exit(0)
| 48.951872 | 205 | 0.629342 |
5793e9e40749d896a72fc834c79370f6c8a5390d | 5,076 | py | Python | pychron/hardware/gauges/mks/base_mks_gauge.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | ["Apache-2.0"] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/hardware/gauges/mks/base_mks_gauge.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | ["Apache-2.0"] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/hardware/gauges/mks/base_mks_gauge.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | ["Apache-2.0"] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z |
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
# ============= views ===================================
from __future__ import absolute_import
from pychron.hardware.gauges.base_gauge import BaseGauge
class BaseMKSGauge(BaseGauge):
def set_transducer_identify(self, value):
"""
sends command to transducer to toggle LED pulse
@type value: C{str}
@param value: ON or OFF
@see: L{MKSComs}
"""
m = "Setting %s, %s identify to %s" % (self.name, self.address, value)
q = self._build_command(self.address, "pulse", value)
self.info(m)
self.ask(q)
def _build_query(self, addr, typetag, setpointindex=1):
"""
build a query
        @type addr: C{str}
        @param addr: RS-485 address
        @type typetag: C{str}
        @param typetag: query type
        @rtype: C{str}
@return: a valid HPS serial command
"""
if typetag == "pressure":
s = "PR1"
elif typetag == "filament":
s = "FS"
elif typetag == "setpoint_value":
s = "SP%i" % setpointindex
elif typetag == "setpoint_state":
s = "SS%i" % setpointindex
elif typetag == "setpoint_enable":
s = "EN%i" % setpointindex
rs = "@%s%s?;FF" % (addr, s)
return rs
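    # Illustrative only (the RS-485 address "253" is made up) - a few queries
    # this helper would produce:
    #   _build_query("253", "pressure")                        -> "@253PR1?;FF"
    #   _build_query("253", "setpoint_state")                  -> "@253SS1?;FF"
    #   _build_query("253", "setpoint_value", setpointindex=2) -> "@253SP2?;FF"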
def _build_command(self, addr, typetag, value, setpointindex=1):
"""
build a command
@type addr: C{str}
@param addr: RS-485 address
@type typetag: C{str}
@param typetag: query type
@type value: C{str}
@param value: command value
@rtype: C{str}
@return: a valid HPS serial command
"""
base = "@%s%s!%s;FF"
if typetag == "power":
tag = "wfile"
s = base % (addr, tag, ("ON" if value else "OFF"))
# s='@%s%s!%s;FF' % (addr, tag, value)
elif typetag == "address":
tag = "AD"
s = base % (addr, tag, value)
elif typetag == "pulse":
tag = "TST"
s = base % (addr, tag, value)
# s='@%s%s!%s;FF' % (addr, tag, value)
elif typetag == "setpoint_enable":
tag = "EN%i" % setpointindex
s = base % (addr, tag, ("ON" if value else "OFF"))
elif typetag == "setpoint":
tag = "SP%i" % setpointindex
# for some reason mks gauges 925 do not like x.xxe-xx as sci notation
# likes x.xxe-x
# convert value
scivalue = "%0.2e" % value
a, b = scivalue.split("e")
sign = b[:1]
ex = b[-1:]
v = "%sE%s%s" % (a, sign, ex)
s = "@%s%s!%s;FF" % (addr, tag, v)
# s='@%s%s!%0.1e;FF' % (addr, tag, value)
elif typetag == "hysteresis":
tag = "SH%i" % setpointindex
s = "@%s%s!%s;FF" % (addr, tag, value)
elif typetag == "degas":
tag = "DG"
s = "@%s%s!%s;FF" % (addr, tag, ("ON" if value else "OFF"))
return s
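    # Illustrative only (address "253" and the setpoint value are made up):
    # tracing the "setpoint" branch above shows how the exponent is collapsed
    # to a single digit, the notation the comment says the 925 gauges expect:
    #   "%0.2e" % 0.0015  -> "1.50e-03"
    #   a, b = "1.50", "-03";  sign = "-";  ex = "3"
    #   v = "1.50E-3"
    #   _build_command("253", "setpoint", 0.0015) -> "@253SP1!1.50E-3;FF"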
def _parse_response(self, type_, raw):
"""
parse a serial response
@type_ type_: C{str}
@param type_: the response type_
@type_ raw: C{str}
@param raw: the raw response C{str}
@rtype: C{str or boolean}
@return: a float for pressure, boolean otherwise
"""
if self.simulation:
return float(self.get_random_value(0, 10))
        if raw is None:
return
data = raw.split(";")
i = 0 if len(data) <= 2 else len(data) - 2
value = data[i]
si = value.find("ACK")
if si == -1:
self.warning("%s" % raw)
return
else:
si += 3
if type_ in ["pressure", "setpoint_value"]:
v = value[si:]
try:
return float(v)
except ValueError as e:
self.warning(e)
return
elif type_ in ["filament", "setpoint_enable"]:
return True if value[si:] == "ON" else False
# ============= EOF ====================================
| 30.214286 | 81 | 0.48227 |
36585b8e103ed8c7adbe70e9f48f8dc616fbaead | 22,344 | py | Python | homeassistant/components/sensor/buienradar.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | ["Apache-2.0"] | 4 | 2019-01-10T14:47:54.000Z | 2021-04-22T02:06:27.000Z | homeassistant/components/sensor/buienradar.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | ["Apache-2.0"] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | homeassistant/components/sensor/buienradar.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | ["Apache-2.0"] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z |
"""
Support for Buienradar.nl weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.buienradar/
"""
import asyncio
from datetime import datetime, timedelta
import logging
import async_timeout
import aiohttp
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE,
CONF_MONITORED_CONDITIONS, CONF_NAME, TEMP_CELSIUS)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
async_track_point_in_utc_time)
from homeassistant.util import dt as dt_util
REQUIREMENTS = ['buienradar==0.91']
_LOGGER = logging.getLogger(__name__)
MEASURED_LABEL = 'Measured'
TIMEFRAME_LABEL = 'Timeframe'
SYMBOL = 'symbol'
# Schedule next call after (minutes):
SCHEDULE_OK = 10
# When an error occurred, new call after (minutes):
SCHEDULE_NOK = 2
# Supported sensor types:
# Key: ['label', unit, icon]
SENSOR_TYPES = {
'stationname': ['Stationname', None, None],
'condition': ['Condition', None, None],
'conditioncode': ['Condition code', None, None],
'conditiondetailed': ['Detailed condition', None, None],
'conditionexact': ['Full condition', None, None],
'symbol': ['Symbol', None, None],
'humidity': ['Humidity', '%', 'mdi:water-percent'],
'temperature': ['Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'groundtemperature': ['Ground temperature', TEMP_CELSIUS,
'mdi:thermometer'],
'windspeed': ['Wind speed', 'm/s', 'mdi:weather-windy'],
'windforce': ['Wind force', 'Bft', 'mdi:weather-windy'],
'winddirection': ['Wind direction', None, 'mdi:compass-outline'],
'windazimuth': ['Wind direction azimuth', '°', 'mdi:compass-outline'],
'pressure': ['Pressure', 'hPa', 'mdi:gauge'],
'visibility': ['Visibility', 'm', None],
'windgust': ['Wind gust', 'm/s', 'mdi:weather-windy'],
'precipitation': ['Precipitation', 'mm/h', 'mdi:weather-pouring'],
'irradiance': ['Irradiance', 'W/m2', 'mdi:sunglasses'],
'precipitation_forecast_average': ['Precipitation forecast average',
'mm/h', 'mdi:weather-pouring'],
'precipitation_forecast_total': ['Precipitation forecast total',
'mm', 'mdi:weather-pouring'],
'temperature_1d': ['Temperature 1d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_2d': ['Temperature 2d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_3d': ['Temperature 3d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_4d': ['Temperature 4d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_5d': ['Temperature 5d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_1d': ['Minimum temperature 1d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_2d': ['Minimum temperature 2d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_3d': ['Minimum temperature 3d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_4d': ['Minimum temperature 4d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_5d': ['Minimum temperature 5d', TEMP_CELSIUS, 'mdi:thermometer'],
'rain_1d': ['Rain 1d', 'mm', 'mdi:weather-pouring'],
'rain_2d': ['Rain 2d', 'mm', 'mdi:weather-pouring'],
'rain_3d': ['Rain 3d', 'mm', 'mdi:weather-pouring'],
'rain_4d': ['Rain 4d', 'mm', 'mdi:weather-pouring'],
'rain_5d': ['Rain 5d', 'mm', 'mdi:weather-pouring'],
'snow_1d': ['Snow 1d', 'cm', 'mdi:snowflake'],
'snow_2d': ['Snow 2d', 'cm', 'mdi:snowflake'],
'snow_3d': ['Snow 3d', 'cm', 'mdi:snowflake'],
'snow_4d': ['Snow 4d', 'cm', 'mdi:snowflake'],
'snow_5d': ['Snow 5d', 'cm', 'mdi:snowflake'],
'rainchance_1d': ['Rainchance 1d', '%', 'mdi:weather-pouring'],
'rainchance_2d': ['Rainchance 2d', '%', 'mdi:weather-pouring'],
'rainchance_3d': ['Rainchance 3d', '%', 'mdi:weather-pouring'],
'rainchance_4d': ['Rainchance 4d', '%', 'mdi:weather-pouring'],
'rainchance_5d': ['Rainchance 5d', '%', 'mdi:weather-pouring'],
'sunchance_1d': ['Sunchance 1d', '%', 'mdi:weather-partlycloudy'],
'sunchance_2d': ['Sunchance 2d', '%', 'mdi:weather-partlycloudy'],
'sunchance_3d': ['Sunchance 3d', '%', 'mdi:weather-partlycloudy'],
'sunchance_4d': ['Sunchance 4d', '%', 'mdi:weather-partlycloudy'],
'sunchance_5d': ['Sunchance 5d', '%', 'mdi:weather-partlycloudy'],
'windforce_1d': ['Wind force 1d', 'Bft', 'mdi:weather-windy'],
'windforce_2d': ['Wind force 2d', 'Bft', 'mdi:weather-windy'],
'windforce_3d': ['Wind force 3d', 'Bft', 'mdi:weather-windy'],
'windforce_4d': ['Wind force 4d', 'Bft', 'mdi:weather-windy'],
'windforce_5d': ['Wind force 5d', 'Bft', 'mdi:weather-windy'],
'condition_1d': ['Condition 1d', None, None],
'condition_2d': ['Condition 2d', None, None],
'condition_3d': ['Condition 3d', None, None],
'condition_4d': ['Condition 4d', None, None],
'condition_5d': ['Condition 5d', None, None],
'conditioncode_1d': ['Condition code 1d', None, None],
'conditioncode_2d': ['Condition code 2d', None, None],
'conditioncode_3d': ['Condition code 3d', None, None],
'conditioncode_4d': ['Condition code 4d', None, None],
'conditioncode_5d': ['Condition code 5d', None, None],
'conditiondetailed_1d': ['Detailed condition 1d', None, None],
'conditiondetailed_2d': ['Detailed condition 2d', None, None],
'conditiondetailed_3d': ['Detailed condition 3d', None, None],
'conditiondetailed_4d': ['Detailed condition 4d', None, None],
'conditiondetailed_5d': ['Detailed condition 5d', None, None],
'conditionexact_1d': ['Full condition 1d', None, None],
'conditionexact_2d': ['Full condition 2d', None, None],
'conditionexact_3d': ['Full condition 3d', None, None],
'conditionexact_4d': ['Full condition 4d', None, None],
'conditionexact_5d': ['Full condition 5d', None, None],
'symbol_1d': ['Symbol 1d', None, None],
'symbol_2d': ['Symbol 2d', None, None],
'symbol_3d': ['Symbol 3d', None, None],
'symbol_4d': ['Symbol 4d', None, None],
'symbol_5d': ['Symbol 5d', None, None],
}
CONF_TIMEFRAME = 'timeframe'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS,
default=['symbol', 'temperature']): vol.All(
cv.ensure_list, vol.Length(min=1),
[vol.In(SENSOR_TYPES.keys())]),
vol.Inclusive(CONF_LATITUDE, 'coordinates',
'Latitude and longitude must exist together'): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, 'coordinates',
'Latitude and longitude must exist together'): cv.longitude,
vol.Optional(CONF_TIMEFRAME, default=60):
vol.All(vol.Coerce(int), vol.Range(min=5, max=120)),
})
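# Illustrative configuration.yaml snippet (values and conditions are made up)
# that the PLATFORM_SCHEMA above would accept:
#
#   sensor:
#     - platform: buienradar
#       timeframe: 45
#       monitored_conditions:
#         - temperature
#         - precipitation_forecast_total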
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Create the buienradar sensor."""
from homeassistant.components.weather.buienradar import DEFAULT_TIMEFRAME
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
timeframe = config.get(CONF_TIMEFRAME, DEFAULT_TIMEFRAME)
if None in (latitude, longitude):
_LOGGER.error("Latitude or longitude not set in HomeAssistant config")
return False
coordinates = {CONF_LATITUDE: float(latitude),
CONF_LONGITUDE: float(longitude)}
_LOGGER.debug("Initializing buienradar sensor coordinate %s, timeframe %s",
coordinates, timeframe)
dev = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
dev.append(BrSensor(sensor_type, config.get(CONF_NAME, 'br'),
coordinates))
async_add_entities(dev)
data = BrData(hass, coordinates, timeframe, dev)
# schedule the first update in 1 minute from now:
await data.schedule_update(1)
class BrSensor(Entity):
"""Representation of an Buienradar sensor."""
def __init__(self, sensor_type, client_name, coordinates):
"""Initialize the sensor."""
from buienradar.buienradar import (PRECIPITATION_FORECAST, CONDITION)
self.client_name = client_name
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
self._entity_picture = None
self._attribution = None
self._measured = None
self._stationname = None
self._unique_id = self.uid(coordinates)
# All continuous sensors should be forced to be updated
self._force_update = self.type != SYMBOL and \
not self.type.startswith(CONDITION)
if self.type.startswith(PRECIPITATION_FORECAST):
self._timeframe = None
def uid(self, coordinates):
"""Generate a unique id using coordinates and sensor type."""
# The combination of the location, name and sensor type is unique
return "%2.6f%2.6f%s" % (coordinates[CONF_LATITUDE],
coordinates[CONF_LONGITUDE],
self.type)
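    # Illustrative only (made-up coordinates): a sensor of type 'temperature'
    # at latitude 52.1 and longitude 5.18 gets the unique id
    # "52.1000005.180000temperature".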
def load_data(self, data):
"""Load the sensor with relevant data."""
# Find sensor
from buienradar.buienradar import (ATTRIBUTION, CONDITION, CONDCODE,
DETAILED, EXACT, EXACTNL, FORECAST,
IMAGE, MEASURED,
PRECIPITATION_FORECAST, STATIONNAME,
TIMEFRAME)
# Check if we have a new measurement,
# otherwise we do not have to update the sensor
if self._measured == data.get(MEASURED):
return False
self._attribution = data.get(ATTRIBUTION)
self._stationname = data.get(STATIONNAME)
self._measured = data.get(MEASURED)
if self.type.endswith('_1d') or \
self.type.endswith('_2d') or \
self.type.endswith('_3d') or \
self.type.endswith('_4d') or \
self.type.endswith('_5d'):
fcday = 0
if self.type.endswith('_2d'):
fcday = 1
if self.type.endswith('_3d'):
fcday = 2
if self.type.endswith('_4d'):
fcday = 3
if self.type.endswith('_5d'):
fcday = 4
# update all other sensors
if self.type.startswith(SYMBOL) or self.type.startswith(CONDITION):
try:
condition = data.get(FORECAST)[fcday].get(CONDITION)
except IndexError:
_LOGGER.warning("No forecast for fcday=%s...", fcday)
return False
if condition:
new_state = condition.get(CONDITION, None)
if self.type.startswith(SYMBOL):
new_state = condition.get(EXACTNL, None)
if self.type.startswith('conditioncode'):
new_state = condition.get(CONDCODE, None)
if self.type.startswith('conditiondetailed'):
new_state = condition.get(DETAILED, None)
if self.type.startswith('conditionexact'):
new_state = condition.get(EXACT, None)
img = condition.get(IMAGE, None)
if new_state != self._state or img != self._entity_picture:
self._state = new_state
self._entity_picture = img
return True
return False
try:
self._state = data.get(FORECAST)[fcday].get(self.type[:-3])
return True
except IndexError:
_LOGGER.warning("No forecast for fcday=%s...", fcday)
return False
if self.type == SYMBOL or self.type.startswith(CONDITION):
# update weather symbol & status text
condition = data.get(CONDITION, None)
if condition:
if self.type == SYMBOL:
new_state = condition.get(EXACTNL, None)
if self.type == CONDITION:
new_state = condition.get(CONDITION, None)
if self.type == 'conditioncode':
new_state = condition.get(CONDCODE, None)
if self.type == 'conditiondetailed':
new_state = condition.get(DETAILED, None)
if self.type == 'conditionexact':
new_state = condition.get(EXACT, None)
img = condition.get(IMAGE, None)
if new_state != self._state or img != self._entity_picture:
self._state = new_state
self._entity_picture = img
return True
return False
if self.type.startswith(PRECIPITATION_FORECAST):
# update nested precipitation forecast sensors
nested = data.get(PRECIPITATION_FORECAST)
self._timeframe = nested.get(TIMEFRAME)
self._state = nested.get(self.type[len(PRECIPITATION_FORECAST)+1:])
return True
# update all other sensors
self._state = data.get(self.type)
return True
@property
def attribution(self):
"""Return the attribution."""
return self._attribution
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def entity_picture(self):
"""Weather symbol if type is symbol."""
return self._entity_picture
@property
def device_state_attributes(self):
"""Return the state attributes."""
from buienradar.buienradar import (PRECIPITATION_FORECAST)
if self.type.startswith(PRECIPITATION_FORECAST):
result = {ATTR_ATTRIBUTION: self._attribution}
if self._timeframe is not None:
result[TIMEFRAME_LABEL] = "%d min" % (self._timeframe)
return result
result = {
ATTR_ATTRIBUTION: self._attribution,
SENSOR_TYPES['stationname'][0]: self._stationname,
}
if self._measured is not None:
# convert datetime (Europe/Amsterdam) into local datetime
local_dt = dt_util.as_local(self._measured)
result[MEASURED_LABEL] = local_dt.strftime("%c")
return result
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Return possible sensor specific icon."""
return SENSOR_TYPES[self.type][2]
@property
def force_update(self):
"""Return true for continuous sensors, false for discrete sensors."""
return self._force_update
class BrData:
"""Get the latest data and updates the states."""
def __init__(self, hass, coordinates, timeframe, devices):
"""Initialize the data object."""
self.devices = devices
self.data = {}
self.hass = hass
self.coordinates = coordinates
self.timeframe = timeframe
async def update_devices(self):
"""Update all devices/sensors."""
if self.devices:
tasks = []
# Update all devices
for dev in self.devices:
if dev.load_data(self.data):
tasks.append(dev.async_update_ha_state())
if tasks:
await asyncio.wait(tasks, loop=self.hass.loop)
async def schedule_update(self, minute=1):
"""Schedule an update after minute minutes."""
_LOGGER.debug("Scheduling next update in %s minutes.", minute)
nxt = dt_util.utcnow() + timedelta(minutes=minute)
async_track_point_in_utc_time(self.hass, self.async_update,
nxt)
async def get_data(self, url):
"""Load data from specified url."""
from buienradar.buienradar import (CONTENT,
MESSAGE, STATUS_CODE, SUCCESS)
_LOGGER.debug("Calling url: %s...", url)
result = {SUCCESS: False, MESSAGE: None}
resp = None
try:
websession = async_get_clientsession(self.hass)
with async_timeout.timeout(10, loop=self.hass.loop):
resp = await websession.get(url)
result[STATUS_CODE] = resp.status
result[CONTENT] = await resp.text()
if resp.status == 200:
result[SUCCESS] = True
else:
result[MESSAGE] = "Got http statuscode: %d" % (resp.status)
return result
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
result[MESSAGE] = "%s" % err
return result
finally:
if resp is not None:
await resp.release()
async def async_update(self, *_):
"""Update the data from buienradar."""
from buienradar.buienradar import (parse_data, CONTENT,
DATA, MESSAGE, STATUS_CODE, SUCCESS)
content = await self.get_data('http://xml.buienradar.nl')
if not content.get(SUCCESS, False):
content = await self.get_data('http://api.buienradar.nl')
if content.get(SUCCESS) is not True:
# unable to get the data
_LOGGER.warning("Unable to retrieve xml data from Buienradar."
"(Msg: %s, status: %s,)",
content.get(MESSAGE),
content.get(STATUS_CODE),)
# schedule new call
await self.schedule_update(SCHEDULE_NOK)
return
# rounding coordinates prevents unnecessary redirects/calls
rainurl = 'http://gadgets.buienradar.nl/data/raintext/?lat={}&lon={}'
rainurl = rainurl.format(
round(self.coordinates[CONF_LATITUDE], 2),
round(self.coordinates[CONF_LONGITUDE], 2)
)
raincontent = await self.get_data(rainurl)
if raincontent.get(SUCCESS) is not True:
# unable to get the data
_LOGGER.warning("Unable to retrieve raindata from Buienradar."
"(Msg: %s, status: %s,)",
raincontent.get(MESSAGE),
raincontent.get(STATUS_CODE),)
# schedule new call
await self.schedule_update(SCHEDULE_NOK)
return
result = parse_data(content.get(CONTENT),
raincontent.get(CONTENT),
self.coordinates[CONF_LATITUDE],
self.coordinates[CONF_LONGITUDE],
self.timeframe)
_LOGGER.debug("Buienradar parsed data: %s", result)
if result.get(SUCCESS) is not True:
if int(datetime.now().strftime('%H')) > 0:
_LOGGER.warning("Unable to parse data from Buienradar."
"(Msg: %s)",
result.get(MESSAGE),)
await self.schedule_update(SCHEDULE_NOK)
return
self.data = result.get(DATA)
await self.update_devices()
await self.schedule_update(SCHEDULE_OK)
@property
def attribution(self):
"""Return the attribution."""
from buienradar.buienradar import ATTRIBUTION
return self.data.get(ATTRIBUTION)
@property
def stationname(self):
"""Return the name of the selected weatherstation."""
from buienradar.buienradar import STATIONNAME
return self.data.get(STATIONNAME)
@property
def condition(self):
"""Return the condition."""
from buienradar.buienradar import CONDITION
return self.data.get(CONDITION)
@property
def temperature(self):
"""Return the temperature, or None."""
from buienradar.buienradar import TEMPERATURE
try:
return float(self.data.get(TEMPERATURE))
except (ValueError, TypeError):
return None
@property
def pressure(self):
"""Return the pressure, or None."""
from buienradar.buienradar import PRESSURE
try:
return float(self.data.get(PRESSURE))
except (ValueError, TypeError):
return None
@property
def humidity(self):
"""Return the humidity, or None."""
from buienradar.buienradar import HUMIDITY
try:
return int(self.data.get(HUMIDITY))
except (ValueError, TypeError):
return None
@property
def visibility(self):
"""Return the visibility, or None."""
from buienradar.buienradar import VISIBILITY
try:
return int(self.data.get(VISIBILITY))
except (ValueError, TypeError):
return None
@property
def wind_speed(self):
"""Return the windspeed, or None."""
from buienradar.buienradar import WINDSPEED
try:
return float(self.data.get(WINDSPEED))
except (ValueError, TypeError):
return None
@property
def wind_bearing(self):
"""Return the wind bearing, or None."""
from buienradar.buienradar import WINDAZIMUTH
try:
return int(self.data.get(WINDAZIMUTH))
except (ValueError, TypeError):
return None
@property
def forecast(self):
"""Return the forecast data."""
from buienradar.buienradar import FORECAST
return self.data.get(FORECAST)
| 39.338028 | 79 | 0.595641 |
e5800524522c3c4476faf2637092e0bc17a3cc04 | 81 | py | Python | quad_ws/src/quad_simulation/quad_simulation/urdf/__init__.py | reubenstr/zuko | 0d6733c1dce6ec16236323b0a2fdbd2a3b488b29 | ["MIT"] | 1 | 2022-03-09T00:37:09.000Z | 2022-03-09T00:37:09.000Z | quad_ws/src/quad_simulation/quad_simulation/urdf/__init__.py | reubenstr/zuko | 0d6733c1dce6ec16236323b0a2fdbd2a3b488b29 | ["MIT"] | null | null | null | quad_ws/src/quad_simulation/quad_simulation/urdf/__init__.py | reubenstr/zuko | 0d6733c1dce6ec16236323b0a2fdbd2a3b488b29 | ["MIT"] | null | null | null |
import os
def getDataPath():
return os.path.join(os.path.dirname(__file__))
| 16.2 | 50 | 0.728395 |
d016d8895445bc84489daa6747daa419c2b1b433 | 2,501 | py | Python | tests/api-rate-limit-test.py | caltechlibrary/pindu | 61a8407a706cfed425d797b9533ec703c5040c09 | ["BSD-3-Clause"] | 16 | 2019-03-15T15:45:00.000Z | 2022-01-17T18:40:02.000Z | tests/api-rate-limit-test.py | caltechlibrary/pindu | 61a8407a706cfed425d797b9533ec703c5040c09 | ["BSD-3-Clause"] | 5 | 2019-03-16T20:39:15.000Z | 2019-03-17T03:17:27.000Z | tests/api-rate-limit-test.py | caltechlibrary/pindu | 61a8407a706cfed425d797b9533ec703c5040c09 | ["BSD-3-Clause"] | 1 | 2019-04-08T15:56:15.000Z | 2019-04-08T15:56:15.000Z |
#!/usr/bin/env python3
# =============================================================================
# @file test-api-rate-limit.py
# @brief Test the raw rate limit, without Sidewall
# @author Michael Hucka <mhucka@caltech.edu>
# @license Please see the file named LICENSE in the project directory
# @website https://github.com/caltechlibrary/sidewall
# =============================================================================
import getpass
import os
import requests
import sys
from time import sleep
# Global variables.
# .............................................................................
# Counter.
count = 0
# Utility functions
# .............................................................................
def post_request(url, data = None, headers = None, json = None):
'''Call requests.post(...) with arguments, and update count.'''
global count
count += 1
print('About to make post #{}'.format(count))
if json:
return requests.post(url, json = json)
else:
return requests.post(url, data = data, headers = headers)
def test():
'''Loop until we get a code 429, pause for 90 seconds, and do it again.'''
global count
while True:
resp = post_request('https://app.dimensions.ai/api/dsl.json',
data = 'search publications for "SBML" return publications limit 1',
headers = headers)
if resp.status_code != 200:
print('Status code {} -- pausing for 90 sec ...'.format(resp.status_code))
sleep(90)
print('Done sleeping -- resetting counter and resuming loop')
count = 0
# Main code
# .............................................................................
# Get the login info from the command line.
try:
user = input('Login name: ')
# If it's a tty, use the version that doesn't echo the password.
if sys.stdin.isatty():
password = getpass.getpass('Password: ')
else:
sys.stdout.write('Password: ')
sys.stdout.flush()
password = sys.stdin.readline().rstrip()
except:
print('Quitting')
sys.exit(1)
login = {
'username': user,
'password': password,
}
# Send credentials to Dimensions and get the session token.
resp = post_request('https://app.dimensions.ai/api/auth.json', json = login)
resp.raise_for_status()
headers = {'Authorization': "JWT " + resp.json()['token']}
# Loop repeatedly until the user hits ^C.
test()
| 31.2625 | 96 | 0.534986 |
f8e3c880e21c01baa2276781b6728c345478e137 | 4,843 | py | Python | brewerslab-orig-master/fakeheat.py | allena29/brewerslabng | f47e671971436b7af806b54f6019c5b185d7d194 | ["Apache-2.0"] | 1 | 2020-04-12T10:08:10.000Z | 2020-04-12T10:08:10.000Z | brewerslab-orig-master/fakeheat.py | allena29/brewerslabng | f47e671971436b7af806b54f6019c5b185d7d194 | ["Apache-2.0"] | 2 | 2021-12-13T20:09:45.000Z | 2022-03-08T21:09:57.000Z | brewerslab-orig-master/fakeheat.py | allena29/brewerslabng | f47e671971436b7af806b54f6019c5b185d7d194 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
import os
import sys
import time
print "starting fake heater"
tempc={'hltSetTemp': False,'mashSetTemp':False,'boilSetTemp':False,'fermSetTemp':False}
tempx={'hltSetTemp': 0,'mashSetTemp':0,'boilSetTemp':0,'fermSetTemp':19.2}
def handleTemp(probeId):
global tempx
if os.path.exists("ipc/fakeelement_%s" %(probeId)):
o=open("ipc/fakeelement_%s" %(probeId))
settemp=float(o.read())
o.close()
if not tempx[ probeId ] == settemp:
tempc[probeId]=True
tempx[probeId]=settemp
try:
a=1
os.unlink("ipc/fakeelement_%s" %(probeId))
except:
pass
print "setting %s to %s " %(probeId,settemp)
def convertTemp(probe,probeid,settemp):
global tempc
settemp="%s" %(settemp)
tmp="%s" %( settemp )
print probeid,tmp
if tmp.count(".") < 1:
tmp2 = "000"
else:
tmp2 = "%s0000 " %(tmp.split(".")[1])
tmp="000%s" %( settemp.split(".")[0])
if settemp < 100:
temp="%s%s" %(tmp[-1:],tmp2[0:3])
else:
temp="%s%s" %(tmp[-2:],tmp2[0:3])
print tempc[probe]
if tempc[probe]:
try:
os.mkdir("ipc/fake1wire/%s/" %(probeid))
except:
pass
o=open("ipc/fake1wire/%s/w1_slave" %(probeid),"w")
o.write("ff ff ff ff ff ff ff ff ff : crc=25 YES\n")
print " writing probe id %s --> %s\n" %(probeid,temp)
o.write("ff ff ff ff ff ff ff ff ff t=%s\n" %(temp))
o.close()
return temp
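# Illustrative walk-through (not part of the original script), tracing
# convertTemp for settemp = 65.3:
#   tmp  -> "00065",  tmp2 -> "30000 " (note the trailing space)
#   `settemp < 100` compares a string with an int, which CPython 2 evaluates
#   as False, so temp = tmp[-2:] + tmp2[0:3] = "65300"
# The fake w1_slave file therefore ends with "t=65300", which a reader of the
# standard millidegree w1_slave format would take as 65.3 degrees C.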
while 1:
# handle temp reads in ipc files to see if we have been given an override from the gui
handleTemp("hltSetTemp")
handleTemp("hltSetTemp")
handleTemp("mashSetTemp")
handleTemp("fermSetTemp")
handleTemp("boilSetTemp")
# if our elements are on then we increase temps
if not os.path.exists("ipc/relayZoneUseA") and os.path.exists("ipc/relayZoneA") and os.path.exists("ipc/gpioSsrA"):
if tempx['hltSetTemp'] < 95:
if tempx['hltSetTemp'] > 80:
tempx['hltSetTemp']=tempx['hltSetTemp']+0.381
else:
tempx['hltSetTemp']=tempx['hltSetTemp']+3.81
tempc['hltSetTemp']=True
elif not os.path.exists("ipc/relayZoneUseA") and os.path.exists("ipc/relayZoneA") and not os.path.exists("ipc/gpioSsrA"):
if tempx['hltSetTemp'] > 10:
tempx['hltSetTemp']=tempx['hltSetTemp']- 0.2
tempc['hltSetTemp']=True
if not os.path.exists("ipc/relayZoneUseB") and os.path.exists("ipc/relayZoneB") and os.path.exists("ipc/gpioSsrB"):
if tempx['hltSetTemp'] < 95:
if tempx['hltSetTemp'] > 80:
tempx['hltSetTemp']=tempx['hltSetTemp']+0.381
else:
tempx['hltSetTemp']=tempx['hltSetTemp']+3.81
tempc['hltSetTemp']=True
elif not os.path.exists("ipc/relayZoneUseB") and os.path.exists("ipc/relayZoneB") and not os.path.exists("ipc/gpioSsrB"):
if tempx['hltSetTemp'] > 10:
tempx['hltSetTemp']=tempx['hltSetTemp']- 0.2
tempc['hltSetTemp']=True
# Boil Element A on
if os.path.exists("ipc/relayZoneUseA") and os.path.exists("ipc/relayZoneA") and os.path.exists("ipc/gpioSsrA"):
if tempx['boilSetTemp'] < 97:
if tempx['boilSetTemp'] > 80:
tempx['boilSetTemp']=tempx['boilSetTemp']+0.181
else:
tempx['boilSetTemp']=tempx['boilSetTemp']+1.91
tempc['boilSetTemp']=True
if os.path.exists("ipc/relayZoneUseA") and os.path.exists("ipc/relayZoneA") and not os.path.exists("ipc/gpioSsrA"):
if tempx['boilSetTemp'] > 10:
tempx['boilSetTemp']=tempx['boilSetTemp']-0.2
tempc['boilSetTemp']=True
if os.path.exists("ipc/relayZoneUseB") and os.path.exists("ipc/relayZoneB") and os.path.exists("ipc/gpioSsrB"):
if tempx['boilSetTemp'] < 97:
if tempx['boilSetTemp'] > 80:
tempx['boilSetTemp']=tempx['boilSetTemp']+0.181
else:
tempx['boilSetTemp']=tempx['boilSetTemp']+1.91
tempc['boilSetTemp']=True
if os.path.exists("ipc/relayZoneUseB") and os.path.exists("ipc/relayZoneB") and not os.path.exists("ipc/gpioSsrB"):
if tempx['boilSetTemp'] > 10:
tempx['boilSetTemp']=tempx['boilSetTemp']-0.2
tempc['boilSetTemp']=True
if os.path.exists("ipc/swFerm"):
if os.path.exists('ipc/pinfermCool'):
tempx['fermSetTemp']=tempx['fermSetTemp']-0.1
tempc['fermSetTemp']=True
elif os.path.exists('ipc/pinfermHeat'):
tempx['fermSetTemp']=tempx['fermSetTemp']+0.3
tempc['fermSetTemp']=True
elif os.path.exists("ipc/fermdone"):
tempx['fermSetTemp']=tempx['fermSetTemp']-0.02
tempc['fermSetTemp']=True
else:
tempx['fermSetTemp']=tempx['fermSetTemp']+0.02
tempc['fermSetTemp']=True
# now writes out the files
from pitmCfg import *
cfg=pitmCfg()
probeid=cfg.hltProbe
temp=convertTemp( 'hltSetTemp',probeid,tempx['hltSetTemp'] )
probeid=cfg.mashAProbe
temp=convertTemp( 'mashSetTemp',probeid,tempx['mashSetTemp'] )
probeid=cfg.mashBProbe
temp=convertTemp( 'mashSetTemp',probeid, tempx['mashSetTemp'] )
probeid=cfg.boilProbe
temp=convertTemp( 'boilSetTemp',probeid, tempx['boilSetTemp'] )
probeid=cfg.fermProbe
temp=convertTemp( 'fermSetTemp',probeid, tempx['fermSetTemp'] )
time.sleep(2)
| 31.044872 | 122 | 0.683874 |
70d0e39ee30bfbcf2a75c9146ab02618069b136b | 1,914 | py | Python | pyNastran/dev/h5/h5_nastran_io.py | luzpaz/pyNastran | 939e9eefdc87a3bf67939a23dc09f155b93969a0 | ["BSD-3-Clause"] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | pyNastran/dev/h5/h5_nastran_io.py | luzpaz/pyNastran | 939e9eefdc87a3bf67939a23dc09f155b93969a0 | ["BSD-3-Clause"] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | pyNastran/dev/h5/h5_nastran_io.py | luzpaz/pyNastran | 939e9eefdc87a3bf67939a23dc09f155b93969a0 | ["BSD-3-Clause"] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z |
from collections import OrderedDict
from pyNastran.dev.h5.h5_nastran2 import get_gui_nastran_ugrid
from pyNastran.gui.gui_objects.gui_result import GuiResult
from pyNastran.gui.qt_files.colors import (
RED_FLOAT, BLUE_FLOAT, GREEN_FLOAT, LIGHT_GREEN_FLOAT, PINK_FLOAT, PURPLE_FLOAT,
YELLOW_FLOAT, ORANGE_FLOAT)
class H5NastranIO():
def __init__(self, gui):
self.gui = gui
def get_h5nastran_wildcard_geometry_results_functions(self):
data = ('VSPAero',
'H5Nastran (*.h5)', self.load_h5nastran_geometry,
None, None
)
return data
def load_h5nastran_geometry(self, hdf5_filename, name='main', plot=True, **kwargs):
out = get_gui_nastran_ugrid(
hdf5_filename,
self.gui.grid,
add_property_info=True,
add_material_info=True,
subcases=None, # default=None -> all
modes=None, # default=None -> all
results=None, # default=None -> all,
)
model, ugrid, root, alt_grids, node_ids, element_ids, form, cases = out
self.node_ids = node_ids
ugrid = alt_grids['main']
del alt_grids['main']
for name, ugrid in alt_grids.items():
self.gui.create_alternate_vtk_grid(
name, color=ORANGE_FLOAT, line_width=5, opacity=1., point_size=4,
representation='point', follower_function=None)
if self.gui.alt_grids:
self.gui._add_alt_actors(self.gui.alt_grids)
model_name = name
self.gui.isubcase_name_map = {1: ['OpenVSP', '']}
ID = 1
self.gui.node_ids = node_ids
self.gui.element_ids = element_ids
#form, cases = self._fill_cases(cases, ID, model)
self.gui._finish_results_io2(model_name, form, cases)
#self.gui.grid = ugrid
def _fill_cases(cases, ID, model):
pass
| 36.113208 | 87 | 0.631139 |
a05b0f7b06f2b6826d9ee1964f5fd42da3edd2e4 | 11,917 | py | Python | main.py | FightingFranky/PKUAutoSubmit | 012712f299efeac8d65f60ec51e288da0d2cce99 | ["Apache-2.0"] | null | null | null | main.py | FightingFranky/PKUAutoSubmit | 012712f299efeac8d65f60ec51e288da0d2cce99 | ["Apache-2.0"] | null | null | null | main.py | FightingFranky/PKUAutoSubmit | 012712f299efeac8d65f60ec51e288da0d2cce99 | ["Apache-2.0"] | null | null | null |
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import Firefox, Chrome
from selenium import webdriver
from argparse import ArgumentParser
from urllib.parse import quote
import time
import copy
import sys
import os
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
TIMEOUT = 20
TIMESLP = 3
my_sender = '1692484707@qq.com'  # sender email account
my_pass = 'fujkixpkjiyhcaji'  # sender email password (SMTP authorization code)
my_user = '1071273936@qq.com'  # recipient email account
def mail():
ret = True
try:
cur_time = time.strftime("%d/%m/%Y")
msgRoot = MIMEMultipart('related')
msgRoot['From'] = Header('PKU-AutoSubmit', 'utf-8')
msgRoot['To'] = Header('student', 'utf-8')
subject = cur_time + ' 报备成功!'
msgRoot['Subject'] = Header(subject, 'utf-8')
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
mail_msg = """
<p>自动报备成功!</p>
<p>截图:</p>
<p><img src="cid:image1"></p>
"""
msgAlternative.attach(MIMEText(mail_msg, 'html', 'utf-8'))
        # the screenshot image is read from the current directory
fp = open('result.png', 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
        # give the image a Content-ID that is referenced from the HTML body
msgImage.add_header('Content-ID', '<image1>')
msgRoot.attach(msgImage)
server = smtplib.SMTP_SSL("smtp.qq.com", 465) # 发件人邮箱中的SMTP服务器,端口是25
server.login(my_sender, my_pass) # 括号中对应的是发件人邮箱账号、邮箱密码
server.sendmail(my_sender, [my_user, ], msgRoot.as_string()) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件
server.quit() # 关闭连接
except Exception: # 如果 try 中的语句没有执行,则会执行下面的 ret=False
ret = False
return ret
def login(driver, username, password, failed=0):
if failed == 3:
raise Exception('门户登录失败')
iaaaUrl = 'https://iaaa.pku.edu.cn/iaaa/oauth.jsp'
appName = quote('北京大学校内信息门户新版')
redirectUrl = 'https://portal.pku.edu.cn/portal2017/ssoLogin.do'
driver.get('https://portal.pku.edu.cn/portal2017/')
driver.get(
f'{iaaaUrl}?appID=portal2017&appName={appName}&redirectUrl={redirectUrl}'
)
print('门户登陆中...')
driver.find_element_by_id('user_name').send_keys(username)
time.sleep(TIMESLP)
driver.find_element_by_id('password').send_keys(password)
time.sleep(TIMESLP)
driver.find_element_by_id('logon_button').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.LINK_TEXT, '我知道了')))
except:
pass
else:
driver.find_element_by_link_text('我知道了').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
except:
login(driver, username, password, failed + 1)
else:
print('门户登录成功!')
def go_to_application_out(driver):
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-input__inner')))
def go_to_application_in(driver):
driver.get('https://portal.pku.edu.cn/portal2017/#/bizCenter')
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-select')))
def select_past_out(driver):
driver.find_element_by_xpath(
'//button/span[contains(text(),"出校")]').click()
time.sleep(TIMESLP)
def select_past_in(driver):
driver.find_element_by_xpath(
'//button/span[contains(text(),"入校")]').click()
time.sleep(TIMESLP)
def select_in_out(driver, way):
driver.find_element_by_class_name('el-select').click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{way}"]').click()
def select_reason(driver, choice):
driver.find_element_by_class_name('el-select').click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{choice}"]').click()
def select_campus(driver, campus):
driver.find_elements_by_class_name('el-select')[1].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{campus}"]').click()
def select_destination(driver, destination):
driver.find_elements_by_class_name('el-select')[2].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{destination}"]').click()
def select_district(driver, district):
driver.find_elements_by_class_name('el-select')[3].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{district}"]').click()
def write_reason(driver, reason):
driver.find_element_by_class_name('el-textarea__inner').send_keys(
f'{reason}')
time.sleep(TIMESLP)
def write_track(driver, track):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{track}')
time.sleep(TIMESLP)
def write_street(driver, street):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{street}')
time.sleep(TIMESLP)
def click_check(driver):
driver.find_element_by_class_name('el-checkbox__label').click()
time.sleep(TIMESLP)
def click_inPeking(driver):
driver.find_element_by_class_name('el-radio__inner').click()
time.sleep(TIMESLP)
def submit(driver):
driver.find_element_by_xpath(
'//button/span[contains(text(),"保存")]').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located(
(By.XPATH, '(//button/span[contains(text(),"提交")])[3]')))
driver.find_element_by_xpath(
'(//button/span[contains(text(),"提交")])[3]').click()
time.sleep(TIMESLP)
def screen_capture(driver):
driver.back()
driver.back()
WebDriverWait(driver, 5).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
driver.find_elements_by_class_name('el-card__body')[1].click()
WebDriverWait(driver, 5).until(
EC.visibility_of_element_located(
(By.XPATH, '//button/span[contains(text(),"加载更多")]')))
driver.maximize_window()
time.sleep(0.1)
driver.save_screenshot('result.png')
print('备案历史截图已保存')
def fill_out(driver, campus, reason, destination, track):
print('开始填报出校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '出校')
print('Done')
print('选择校区 ', end='')
select_campus(driver, campus)
print('Done')
print('选择出入校理由 ', end='')
select_reason(driver, '学业')
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
print('选择出校目的地 ', end='')
select_destination(driver, destination)
print('Done')
print('填写出校行动轨迹 ', end='')
write_track(driver, track)
print('Done')
click_check(driver)
submit(driver)
print('出校备案填报完毕!')
def fill_in(driver, campus, reason, habitation, district, street):
print('开始填报入校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '入校')
print('Done')
print('选择出入校事由 ', end='')
select_reason(driver, '学业')
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
if habitation != '北京':
raise Exception('暂不支持京外入校备案,请手动填写')
print('选择居住地所在区 ', end='')
select_district(driver, district)
print('Done')
print('填写居住地所在街道 ', end='')
write_street(driver, street)
print('Done')
click_inPeking(driver)
click_check(driver)
submit(driver)
print('入校备案填报完毕!')
def new_run(driver, username, password):
login(driver, username, password)
print('=================================')
go_to_application_out(driver)
select_past_out(driver)
click_check(driver)
submit(driver)
print('出校备案完成')
print('=================================')
go_to_application_in(driver)
select_past_in(driver)
click_inPeking(driver)
click_check(driver)
submit(driver)
print('入校备案完成')
print('=================================')
screen_capture(driver)
print('=================================')
ret = mail()
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
print('可以愉快的玩耍啦!')
def run(driver, username, password, campus, reason, destination, track,
habitation, district, street):
login(driver, username, password)
print('=================================')
go_to_application_out(driver)
fill_out(driver, campus, reason, destination, track)
print('=================================')
go_to_application_in(driver)
fill_in(driver, campus, reason, habitation, district, street)
print('=================================')
screen_capture(driver)
print('=================================')
ret = mail()
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
print('可以愉快的玩耍啦!')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--username', '-u', type=str, help='用户名')
parser.add_argument('--password', '-p', type=str, help='密码')
parser.add_argument('--campus', type=str, help='所在校区, 燕园、万柳、畅春园、圆明园、中关新园', default='燕园')
parser.add_argument('--reason', type=str, help='出校原因, eg. 吃饭', default='回宿舍')
parser.add_argument('--destination', type=str, help='出校目的地, eg. 北京', default='北京')
parser.add_argument('--track', type=str, help='出校轨迹, eg. 畅春园食堂', default='东南门-理教-家园-东门')
parser.add_argument('--habitation', type=str, help='入校前居住地, eg. 北京', default='北京')
parser.add_argument('--district', type=str, help='入校前居住所在区, eg. 海淀区', default='海淀区')
parser.add_argument('--street', type=str, help='入校前居住所在街道, eg. 燕园街道', default='燕园街道')
args = parser.parse_args()
args_public = copy.deepcopy(args)
args_public.password = 'xxxxxxxx'
print('Arguments: {}'.format(args_public))
print('Driver Launching...')
# driver = Firefox()
# driver = Chrome()
if sys.platform == 'darwin': # macOS
phantomjs_path = os.path.join('phantomjs', 'phantomjs-darwin')
elif sys.platform == 'linux': # linux
phantomjs_path = os.path.join('phantomjs', 'phantomjs-linux-x86_64')
else: # windows
phantomjs_path = os.path.join('phantomjs', 'phantomjs-windows.exe')
driver = webdriver.PhantomJS(executable_path=phantomjs_path)
# run(driver, args.username, args.password, args.campus, args.reason,
# args.destination, args.track, args.habitation, args.district,
# args.street)
# new_run(driver, args.username, args.password)
driver.close()
| 30.634961 | 99 | 0.652178 |
88180399d81cc7a74c19afedd76112c4a9f87cfb | 17,456 | py | Python | venv/lib/python3.6/site-packages/alembic/testing/plugin/plugin_base.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | ["MIT"] | null | null | null | venv/lib/python3.6/site-packages/alembic/testing/plugin/plugin_base.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | ["MIT"] | 1 | 2021-06-01T23:32:38.000Z | 2021-06-01T23:32:38.000Z | venv/lib/python3.6/site-packages/alembic/testing/plugin/plugin_base.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | ["MIT"] | null | null | null |
# plugin/plugin_base.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Testing extensions.
this module is designed to work as a testing-framework-agnostic library,
so that we can continue to support nose and also begin adding new
functionality via py.test.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0
"""
from __future__ import absolute_import
try:
# unitttest has a SkipTest also but pytest doesn't
# honor it unless nose is imported too...
from nose import SkipTest
except ImportError:
from pytest import skip
SkipTest = skip.Exception
import sys
import re
py3k = sys.version_info.major >= 3
if py3k:
import configparser
else:
import ConfigParser as configparser
# late imports
fixtures = None
engines = None
provision = None
exclusions = None
warnings = None
assertions = None
requirements = None
config = None
util = None
file_config = None
logging = None
include_tags = set()
exclude_tags = set()
options = None
def setup_options(make_option):
make_option(
"--log-info",
action="callback",
type="string",
callback=_log,
help="turn on info logging for <LOG> (multiple OK)",
)
make_option(
"--log-debug",
action="callback",
type="string",
callback=_log,
help="turn on debug logging for <LOG> (multiple OK)",
)
make_option(
"--db",
action="append",
type="string",
dest="db",
help="Use prefab database uri. Multiple OK, " "first one is run by default.",
)
make_option(
"--dbs",
action="callback",
zeroarg_callback=_list_dbs,
help="List available prefab dbs",
)
make_option(
"--dburi",
action="append",
type="string",
dest="dburi",
help="Database uri. Multiple OK, " "first one is run by default.",
)
make_option(
"--dropfirst",
action="store_true",
dest="dropfirst",
help="Drop all tables in the target database first",
)
make_option(
"--backend-only",
action="store_true",
dest="backend_only",
help="Run only tests marked with __backend__",
)
make_option(
"--postgresql-templatedb",
type="string",
help="name of template database to use for Postgresql "
"CREATE DATABASE (defaults to current database)",
)
make_option(
"--low-connections",
action="store_true",
dest="low_connections",
help="Use a low number of distinct connections - " "i.e. for Oracle TNS",
)
make_option(
"--write-idents",
type="string",
dest="write_idents",
help="write out generated follower idents to <file>, " "when -n<num> is used",
)
make_option(
"--reversetop",
action="store_true",
dest="reversetop",
default=False,
help="Use a random-ordering set implementation in the ORM "
"(helps reveal dependency issues)",
)
make_option(
"--requirements",
action="callback",
type="string",
callback=_requirements_opt,
help="requirements class for testing, overrides setup.cfg",
)
make_option(
"--with-cdecimal",
action="store_true",
dest="cdecimal",
default=False,
help="Monkeypatch the cdecimal library into Python 'decimal' " "for all tests",
)
make_option(
"--include-tag",
action="callback",
callback=_include_tag,
type="string",
help="Include tests with tag <tag>",
)
make_option(
"--exclude-tag",
action="callback",
callback=_exclude_tag,
type="string",
help="Exclude tests with tag <tag>",
)
make_option(
"--mysql-engine",
action="store",
dest="mysql_engine",
default=None,
help="Use the specified MySQL storage engine for all tables, "
"default is a db-default/InnoDB combo.",
)
def configure_follower(follower_ident):
"""Configure required state for a follower.
This invokes in the parent process and typically includes
database creation.
"""
from alembic.testing import provision
provision.FOLLOWER_IDENT = follower_ident
def memoize_important_follower_config(dict_):
"""Store important configuration we will need to send to a follower.
This invokes in the parent process after normal config is set up.
This is necessary as py.test seems to not be using forking, so we
start with nothing in memory, *but* it isn't running our argparse
callables, so we have to just copy all of that over.
"""
dict_["memoized_config"] = {
"include_tags": include_tags,
"exclude_tags": exclude_tags,
}
def restore_important_follower_config(dict_):
"""Restore important configuration needed by a follower.
This invokes in the follower process.
"""
include_tags.update(dict_["memoized_config"]["include_tags"])
exclude_tags.update(dict_["memoized_config"]["exclude_tags"])
def read_config():
global file_config
file_config = configparser.ConfigParser()
file_config.read(["setup.cfg", "test.cfg"])
def pre_begin(opt):
"""things to set up early, before coverage might be setup."""
global options
options = opt
for fn in pre_configure:
fn(options, file_config)
def set_coverage_flag(value):
options.has_coverage = value
def post_begin():
"""things to set up later, once we know coverage is running."""
# Lazy setup of other options (post coverage)
for fn in post_configure:
fn(options, file_config)
# late imports, has to happen after config as well
# as nose plugins like coverage
global util, fixtures, engines, exclusions, assertions, warnings, profiling, config, testing
from alembic.testing import config, warnings, exclusions # noqa
from alembic.testing import engines, fixtures # noqa
from sqlalchemy import util # noqa
warnings.setup_filters()
def _log(opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith("-info"):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith("-debug"):
logging.getLogger(value).setLevel(logging.DEBUG)
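# Illustrative usage (the logger name is hypothetical): passing
# "--log-debug=sqlalchemy.engine" on the test command line is routed through
# the _log callback above, which sets logging.getLogger("sqlalchemy.engine")
# to DEBUG because the option string ends with "-debug".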
def _list_dbs(*args):
print("Available --db options (use --dburi to override)")
for macro in sorted(file_config.options("db")):
print("%20s\t%s" % (macro, file_config.get("db", macro)))
sys.exit(0)
def _requirements_opt(opt_str, value, parser):
_setup_requirements(value)
def _exclude_tag(opt_str, value, parser):
exclude_tags.add(value.replace("-", "_"))
def _include_tag(opt_str, value, parser):
include_tags.add(value.replace("-", "_"))
pre_configure = []
post_configure = []
def pre(fn):
pre_configure.append(fn)
return fn
def post(fn):
post_configure.append(fn)
return fn
@pre
def _setup_options(opt, file_config):
global options
options = opt
@pre
def _monkeypatch_cdecimal(options, file_config):
if options.cdecimal:
import cdecimal
sys.modules["decimal"] = cdecimal
@post
def _engine_uri(options, file_config):
from alembic.testing import config
from alembic.testing import provision
if options.dburi:
db_urls = list(options.dburi)
else:
db_urls = []
if options.db:
for db_token in options.db:
for db in re.split(r"[,\s]+", db_token):
if db not in file_config.options("db"):
raise RuntimeError(
"Unknown URI specifier '%s'. "
"Specify --dbs for known uris." % db
)
else:
db_urls.append(file_config.get("db", db))
if not db_urls:
db_urls.append(file_config.get("db", "default"))
for db_url in db_urls:
if options.write_idents and provision.FOLLOWER_IDENT: # != 'master':
with open(options.write_idents, "a") as file_:
file_.write(provision.FOLLOWER_IDENT + " " + db_url + "\n")
cfg = provision.setup_config(
db_url, options, file_config, provision.FOLLOWER_IDENT
)
if not config._current:
cfg.set_as_current(cfg)
@post
def _requirements(options, file_config):
requirement_cls = file_config.get("sqla_testing", "requirement_cls")
_setup_requirements(requirement_cls)
def _setup_requirements(argument):
from alembic.testing import config
if config.requirements is not None:
return
modname, clsname = argument.split(":")
# importlib.import_module() only introduced in 2.7, a little
# late
mod = __import__(modname)
for component in modname.split(".")[1:]:
mod = getattr(mod, component)
req_cls = getattr(mod, clsname)
config.requirements = req_cls()
@post
def _prep_testing_database(options, file_config):
from alembic.testing import config
from alembic.testing.exclusions import against
from sqlalchemy import schema
from alembic import util
from sqlalchemy import inspect
if options.dropfirst:
for cfg in config.Config.all_configs():
e = cfg.db
inspector = inspect(e)
try:
view_names = inspector.get_view_names()
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(schema._DropView(schema.Table(vname, schema.MetaData())))
if config.requirements.schemas.enabled_for_config(cfg):
try:
view_names = inspector.get_view_names(schema="test_schema")
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(
schema._DropView(
schema.Table(
vname, schema.MetaData(), schema="test_schema"
)
)
)
for tname in reversed(inspector.get_table_names(order_by="foreign_key")):
e.execute(schema.DropTable(schema.Table(tname, schema.MetaData())))
if config.requirements.schemas.enabled_for_config(cfg):
for tname in reversed(
inspector.get_table_names(
order_by="foreign_key", schema="test_schema"
)
):
e.execute(
schema.DropTable(
schema.Table(tname, schema.MetaData(), schema="test_schema")
)
)
if against(cfg, "postgresql") and util.sqla_100:
from sqlalchemy.dialects import postgresql
for enum in inspector.get_enums("*"):
e.execute(
postgresql.DropEnumType(
postgresql.ENUM(name=enum["name"], schema=enum["schema"])
)
)
@post
def _reverse_topological(options, file_config):
if options.reversetop:
from sqlalchemy.orm.util import randomize_unitofwork
randomize_unitofwork()
@post
def _post_setup_options(opt, file_config):
from alembic.testing import config
config.options = options
config.file_config = file_config
def want_class(cls):
if not issubclass(cls, fixtures.TestBase):
return False
elif cls.__name__.startswith("_"):
return False
elif config.options.backend_only and not getattr(cls, "__backend__", False):
return False
else:
return True
def want_method(cls, fn):
if not fn.__name__.startswith("test_"):
return False
elif fn.__module__ is None:
return False
elif include_tags:
return (
hasattr(cls, "__tags__")
and exclusions.tags(cls.__tags__).include_test(include_tags, exclude_tags)
) or (
hasattr(fn, "_sa_exclusion_extend")
and fn._sa_exclusion_extend.include_test(include_tags, exclude_tags)
)
elif exclude_tags and hasattr(cls, "__tags__"):
return exclusions.tags(cls.__tags__).include_test(include_tags, exclude_tags)
elif exclude_tags and hasattr(fn, "_sa_exclusion_extend"):
return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags)
else:
return True
def generate_sub_tests(cls, module):
if getattr(cls, "__backend__", False):
for cfg in _possible_configs_for_cls(cls):
orig_name = cls.__name__
# we can have special chars in these names except for the
# pytest junit plugin, which is tripped up by the brackets
# and periods, so sanitize
alpha_name = re.sub(r"[_\[\]\.]+", "_", cfg.name)
alpha_name = re.sub("_+$", "", alpha_name)
name = "%s_%s" % (cls.__name__, alpha_name)
subcls = type(
name,
(cls,),
{"_sa_orig_cls_name": orig_name, "__only_on_config__": cfg},
)
setattr(module, name, subcls)
yield subcls
else:
yield cls
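# Illustrative only (class and config names are hypothetical): for a backend
# test class "RoundTripTest" and a config named "postgresql+psycopg2_[xdist1]",
# the two re.sub calls above yield alpha_name "postgresql+psycopg2_xdist1" and
# the generated subclass is named "RoundTripTest_postgresql+psycopg2_xdist1".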
def start_test_class(cls):
_do_skips(cls)
_setup_engine(cls)
def stop_test_class(cls):
# from sqlalchemy import inspect
# assert not inspect(testing.db).get_table_names()
_restore_engine()
def _restore_engine():
config._current.reset()
def _setup_engine(cls):
if getattr(cls, "__engine_options__", None):
eng = engines.testing_engine(options=cls.__engine_options__)
config._current.push_engine(eng)
def before_test(test, test_module_name, test_class, test_name):
pass
def after_test(test):
pass
def _possible_configs_for_cls(cls, reasons=None):
all_configs = set(config.Config.all_configs())
if cls.__unsupported_on__:
spec = exclusions.db_spec(*cls.__unsupported_on__)
for config_obj in list(all_configs):
if spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, "__only_on__", None):
spec = exclusions.db_spec(*util.to_list(cls.__only_on__))
for config_obj in list(all_configs):
if not spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, "__only_on_config__", None):
all_configs.intersection_update([cls.__only_on_config__])
if hasattr(cls, "__requires__"):
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__requires__:
check = getattr(requirements, requirement)
skip_reasons = check.matching_config_reasons(config_obj)
if skip_reasons:
all_configs.remove(config_obj)
if reasons is not None:
reasons.extend(skip_reasons)
break
if hasattr(cls, "__prefer_requires__"):
non_preferred = set()
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__prefer_requires__:
check = getattr(requirements, requirement)
if not check.enabled_for_config(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
return all_configs
def _do_skips(cls):
reasons = []
all_configs = _possible_configs_for_cls(cls, reasons)
if getattr(cls, "__skip_if__", False):
for c in getattr(cls, "__skip_if__"):
if c():
raise SkipTest("'%s' skipped by %s" % (cls.__name__, c.__name__))
if not all_configs:
msg = "'%s' unsupported on any DB implementation %s%s" % (
cls.__name__,
", ".join(
"'%s(%s)+%s'"
% (
config_obj.db.name,
".".join(
str(dig) for dig in config_obj.db.dialect.server_version_info
),
config_obj.db.driver,
)
for config_obj in config.Config.all_configs()
),
", ".join(reasons),
)
raise SkipTest(msg)
elif hasattr(cls, "__prefer_backends__"):
non_preferred = set()
spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
for config_obj in all_configs:
if not spec(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if config._current not in all_configs:
_setup_config(all_configs.pop(), cls)
def _setup_config(config_obj, ctx):
config._current.push(config_obj)
| 28.337662 | 96 | 0.610907 |
| e6813863dc09412eb63ac6859e7b165769171c28 | 142 | py | Python | Code/odooerp/odoo-8.0/openerp/addons/test_impex/tests/__init__.py | zhupangithub/WEBERP @ 714512082ec5c6db07cbf6af0238ceefe2d2c1a5 | MIT | stars: 1 (2019-12-29) | forks: 3 via tuanquanghpvn/odoo8-tutorial @ 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e (odoo/openerp/addons/test_impex/tests/__init__.py) |
# -*- coding: utf-8 -*-
from . import test_export, test_import, test_load
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 23.666667 | 65 | 0.739437 |
| dcf99296809e0cfce7fd722a47d82fa664197090 | 1,440 | py | Python | pythonbox.py | slacke/red-text @ 2e9a492b67392e7379550b336ae9559c4f63c824 | MIT | stars: 3 (2021-01-16 .. 2022-03-09) | also: slacke/pythonbox @ 64bb30a6efa8a28c7eacc919395d8be9ea26942d |
import sys,os
# get current path
pythonbox_path = os.path.dirname(__file__)
customs_path = pythonbox_path + r"\customs"
packages_path = pythonbox_path + r"\packages"
# add path to python sys path
sys.path.append(customs_path)
sys.path.append(packages_path)
print("Modify PYTHONPATH...")
# read pythonbox.ini and add to environment path
if os.path.exists("pythonbox.ini"):
    with open("pythonbox.ini", "r") as f:
        raw_lines = f.read()
raw_lines = raw_lines.replace("\r", "")
raw_lines = raw_lines.rstrip("\n ")
raw_lines = raw_lines.split("\n")
os_path_splitted = os.environ["PATH"].split(";")
reading_path = False
for line in raw_lines:
if (len(line) == 0 or line[0] == "*"):
continue
if (len(line) == 0 or line[0] == "["):
if reading_path:
break
if line == "[System environment path]":
reading_path = True
continue
if reading_path:
path_splitted = line.split("\\")
if path_splitted[0] == ".":
path_splitted[0] = pythonbox_path
line = "\\".join(path_splitted)
os_path_splitted.append(line)
os.environ["PATH"] = ";".join(os_path_splitted)
print("Modify system environment PATH")
else:
print("File: pythonbox.ini not found, system environment PATH will not be modified.")
| 34.285714 | 90 | 0.5875 |
| 9b9d2e70d145a7dd8c06d8dcc58b5f5b66068cfa | 3,958 | py | Python | src/tests/fidl/source_compatibility/gen/scaffolding.py | liexusong/fuchsia @ 81897680af92a1848a063e3c20ff3a4892ccff07 | BSD-2-Clause | stars: 3 | issues: 56 | forks: 2 |
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Functions responsible for actually writing out source files, driven by the
generate_test.py code.
"""
import datetime
import os
from pathlib import Path
from types_ import (HLCPP, LLCPP, RUST, DART, GO)
year = datetime.datetime.now().year
fuchsia_copyright = '''
// Copyright {year} The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'''.format(year=year).strip()
fidl_file = '''
{lib_decl};
// [START contents]
// INSERT FIDL HERE
// [END contents]
'''
hlcpp_init = '''
#include <fidl/test/{library_name}/cpp/fidl.h> // nogncheck
namespace fidl_test = fidl::test::{library_name};
// [START contents]
// INSERT TEST CODE HERE
// [END contents]
int main(int argc, const char** argv) {{ return 0; }}
'''
llcpp_init = '''
#include <fidl/test/{library_name}/llcpp/fidl.h> // nogncheck
namespace fidl_test = fidl_test::{library_name};
// [START contents]
// INSERT TEST CODE HERE
// [END contents]
int main(int argc, const char** argv) {{ return 0; }}
'''
rust_init = '''
#![allow(dead_code)]
use fidl_fidl_test_{library_name} as fidl_lib;
// [START contents]
// INSERT TEST CODE HERE
// [END contents]
fn main() {{}}
'''
go_init = '''
// +build !build_with_native_toolchain
package main
import (
lib "fidl/fidl/test/{library_name}"
"syscall/zx/fidl"
)
// [START contents]
// INSERT TEST CODE HERE
// [END contents]
func main() {{}}
'''
dart_init = '''
import 'package:fidl_fidl_test_{library_name}/fidl_async.dart' as fidllib;
// [START contents]
// INSERT TEST CODE HERE
// [END contents]
'''
init_by_binding = {
HLCPP: hlcpp_init,
LLCPP: llcpp_init,
RUST: rust_init,
GO: go_init,
DART: dart_init,
}
gn_template = f'# Copyright {year}' + ''' The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//src/tests/fidl/source_compatibility/fidl_source_compatibility.gni")
source_compatibility_test("{library_name}") {{
json = "test.json"
sidecar = "test_gn_sidecar.json"
}}
group("tests") {{
deps = [ ":{library_name}" ]
}}
'''
def initialize_fidl(path: Path, library_name: str):
os.makedirs(path.parent, exist_ok=True)
initialize_file(path, get_fidl(library_name))
def initialize_src(path: Path, binding: str, fidl_library_name: str):
os.makedirs(path.parent, exist_ok=True)
initialize_file(path, get_src(binding, fidl_library_name))
def initialize_file(path: Path, contents: str):
if path.exists():
return
with open(path, 'w') as f:
f.write(contents)
block_on_prompt(
f'Add starter code to {path.name} as desired, then press enter to continue '
)
def add_file(src_dir, prev: str, curr: str):
if (src_dir / curr).exists():
return
with open(src_dir / prev, 'r') as previous_f:
with open(src_dir / curr, 'w+') as current_f:
contents = previous_f.read()
current_f.write(contents)
block_on_prompt(f'Modify {curr} as desired, then press enter to continue ')
def get_fidl(library_name: str) -> str:
return fuchsia_copyright + fidl_file.format(
lib_decl=fidl_lib_decl(library_name))
def fidl_lib_decl(library_name: str) -> str:
return f'library fidl.test.{library_name}'
def get_src(binding: str, library_name: str) -> str:
return fuchsia_copyright + init_by_binding[binding].format(
library_name=library_name)
def block_on_prompt(prompt: str):
""" Prints the prompt, and blocks on user input, then clears the prompt. """
input(prompt)
# clear the prompt
move_cursor_up_n(2)
print('\r' + ' ' * len(prompt), end='\r')
def move_cursor_up_n(n: int):
print(f'\033[{n}A')
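# Hedged usage sketch (added; the library name and file paths here are made up,
# and the real driver is generate_test.py, which is not shown in this file):
#
#   from pathlib import Path
#   initialize_fidl(Path('fidl/before.test.fidl'), 'mylibrary')
#   initialize_src(Path('hlcpp/before.cc'), HLCPP, 'mylibrary')
#   add_file(Path('hlcpp'), 'before.cc', 'during.cc')
#
# The initialize_* helpers write the Fuchsia copyright header plus the matching
# per-binding template; all three then block on an interactive prompt so the
# starter code can be edited by hand before generation continues.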
| 22.617143 | 84 | 0.683931 |
| efe826f9128f5e9ba85859ea625e6cd154f25d7e | 224 | py | Python | submissions/arc024/a.py | m-star18/atcoder @ 08e475810516602fa088f87daf1eba590b4e07cc | Unlicense | stars: 1 | issues: 3 |
import sys
input = sys.stdin.readline
L, R = map(int, input().split())
l = list(map(int, input().split()))
r = list(map(int, input().split()))
ans = 0
for i in l:
if i in r:
r.remove(i)
ans += 1
print(ans)
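# Added note: the loop above is O(L*R) because of the linear "in" check and
# remove(); an equivalent multiset formulation (same inputs assumed) would be:
#   from collections import Counter
#   print(sum((Counter(l) & Counter(r)).values()))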
| 14.933333 | 35 | 0.566964 |
| 7f40af94eed4c69feac235dac0359f4933bd827e | 630 | py | Python | huobi/service/wallet/get_account_withdraw_address.py | xujunhuii/huobi_Python @ 958df8b22ce774329c7e15a1ecf2f52eea5f6af8 | Apache-2.0 |
from huobi.connection.restapi_sync_client import RestApiSyncClient
from huobi.constant import *
from huobi.model.wallet import *
from huobi.utils import *
class GetAccountWithdrawAddressService:
def __init__(self, params):
self.params = params
def request(self, **kwargs):
channel = "/v2/account/withdraw/address"
def parse(dict_data):
data_list = dict_data.get("data", [])
return default_parse_list_dict(data_list, ChainWithdrawAddress)
return RestApiSyncClient(**kwargs).request_process(
HttpMethod.GET_SIGN, channel, self.params, parse
)
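# Hedged usage sketch (added; the "currency" parameter and the keyword names
# forwarded through **kwargs to RestApiSyncClient are assumptions, not taken
# from this file):
#
#   service = GetAccountWithdrawAddressService({"currency": "usdt"})
#   addresses = service.request(api_key="...", secret_key="...")
#
# request() issues a signed GET to /v2/account/withdraw/address and parses the
# response's "data" list into ChainWithdrawAddress objects.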
| 30 | 75 | 0.696825 |
| 0f5f568e55cfa4aa27360728d9e6b15a02e059d2 | 39,098 | py | Python | vtgs/v1.1/vtgs_trx_file.py | vt-rocksat-2017/waveforms @ a44638ad79744007cf58aaf54f5d9517742004cc | MIT | forks: 1 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: VTGS Rocksat-X 2017 Transceiver v2.0
# Generated: Sat Aug 12 12:10:38 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from grc_gnuradio import blks2 as grc_blks2
from optparse import OptionParser
import kiss
import mapper
import pmt,struct,numpy,math ; from datetime import datetime as dt; import string
import pyqt
import sip
import sys
import vtgs
from gnuradio import qtgui
class vtgs_trx_file(gr.top_block, Qt.QWidget):
def __init__(self, gs_name='VTGS', ip='0.0.0.0', meta_rate=.1, port='52003', record_iq=0, record_rfo=0, record_snr=0, tx_freq=1265e6, tx_offset=250e3):
gr.top_block.__init__(self, "VTGS Rocksat-X 2017 Transceiver v2.0")
Qt.QWidget.__init__(self)
self.setWindowTitle("VTGS Rocksat-X 2017 Transceiver v2.0")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "vtgs_trx_file")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Parameters
##################################################
self.gs_name = gs_name
self.ip = ip
self.meta_rate = meta_rate
self.port = port
self.record_iq = record_iq
self.record_rfo = record_rfo
self.record_snr = record_snr
self.tx_freq = tx_freq
self.tx_offset = tx_offset
##################################################
# Variables
##################################################
self.ts_str = ts_str = dt.strftime(dt.utcnow(), "%Y%m%d_%H%M%S.%f" )+'_UTC'
self.samp_rate = samp_rate = 500e3
self.baud = baud = 125e3
self.snr_fn = snr_fn = "{:s}_{:s}.snr".format(gs_name, ts_str)
self.samps_per_symb = samps_per_symb = int(samp_rate/baud)
self.rx_freq = rx_freq = 2395e6
self.rfo_fn = rfo_fn = "{:s}_{:s}.rfo".format(gs_name, ts_str)
self.iq_fn = iq_fn = "{:s}_{:s}_{:s}k.fc32".format(gs_name, ts_str, str(int(samp_rate)/1000))
self.alpha = alpha = 0.5
self.uplink_label = uplink_label = ''
self.tx_gain = tx_gain = 25
self.tx_correct = tx_correct = 2000
self.snr_fp = snr_fp = "/captures/rocksat/{:s}".format(snr_fn)
self.rx_offset = rx_offset = 250e3
self.rx_gain = rx_gain = 1
self.rx_freq_lbl = rx_freq_lbl = "{:4.3f}".format(rx_freq/1e6)
self.rrc_filter_taps = rrc_filter_taps = firdes.root_raised_cosine(32, 1.0, 1.0/(samps_per_symb*32), alpha, samps_per_symb*32)
self.rfo_fp = rfo_fp = "/captures/rocksat/{:s}".format(rfo_fn)
self.mult = mult = (samp_rate)/2/3.141593
self.lpf_taps = lpf_taps = firdes.low_pass(1.0, samp_rate, samp_rate/2, 1000, firdes.WIN_HAMMING, 6.76)
self.lo = lo = 1833e6
self.khz_offset = khz_offset = 0
self.iq_fp = iq_fp = "/captures/rocksat/{:s}".format(iq_fn)
self.bb_gain = bb_gain = .75
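        # Added note: with the defaults above (samp_rate = 500 kS/s, baud = 125 kbaud),
        # samps_per_symb works out to 4, and mult = samp_rate / (2*pi) ~ 79577, which
        # rescales the Costas loop's frequency output from rad/sample to Hz before it
        # is averaged and displayed/recorded as the RX frequency offset.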
##################################################
# Blocks
##################################################
self._khz_offset_range = Range(-150, 150, 1, 0, 200)
self._khz_offset_win = RangeWidget(self._khz_offset_range, self.set_khz_offset, 'Offset [kHz]', "counter_slider", float)
self.top_grid_layout.addWidget(self._khz_offset_win, 4,8,1,4)
self._bb_gain_range = Range(0, 1, .01, .75, 200)
self._bb_gain_win = RangeWidget(self._bb_gain_range, self.set_bb_gain, 'bb_gain', "counter_slider", float)
self.top_grid_layout.addWidget(self._bb_gain_win, 11,8,1,4)
self.vtgs_mult_descrambler_0 = vtgs.mult_descrambler(17, 0x3FFFF)
self.vtgs_ao40_decoder_0_0 = vtgs.ao40_decoder()
self._uplink_label_tool_bar = Qt.QToolBar(self)
if None:
self._uplink_label_formatter = None
else:
self._uplink_label_formatter = lambda x: str(x)
self._uplink_label_tool_bar.addWidget(Qt.QLabel('TX MSG'+": "))
self._uplink_label_label = Qt.QLabel(str(self._uplink_label_formatter(self.uplink_label)))
self._uplink_label_tool_bar.addWidget(self._uplink_label_label)
self.top_grid_layout.addWidget(self._uplink_label_tool_bar, 9,8,1,1)
self._tx_gain_range = Range(0, 86, 1, 25, 200)
self._tx_gain_win = RangeWidget(self._tx_gain_range, self.set_tx_gain, 'TX Gain', "counter_slider", float)
self.top_grid_layout.addWidget(self._tx_gain_win, 10,8,1,4)
self._tx_correct_range = Range(-10000, 10000, 1, 2000, 200)
self._tx_correct_win = RangeWidget(self._tx_correct_range, self.set_tx_correct, "tx_correct", "counter_slider", float)
self.top_grid_layout.addWidget(self._tx_correct_win, 12,8,1,4)
self._rx_gain_range = Range(0, 86, 1, 1, 200)
self._rx_gain_win = RangeWidget(self._rx_gain_range, self.set_rx_gain, 'RX Gain', "counter_slider", float)
self.top_grid_layout.addWidget(self._rx_gain_win, 3,8,1,4)
self._rx_freq_lbl_tool_bar = Qt.QToolBar(self)
if None:
self._rx_freq_lbl_formatter = None
else:
self._rx_freq_lbl_formatter = lambda x: str(x)
self._rx_freq_lbl_tool_bar.addWidget(Qt.QLabel('RX Freq [MHz]'+": "))
self._rx_freq_lbl_label = Qt.QLabel(str(self._rx_freq_lbl_formatter(self.rx_freq_lbl)))
self._rx_freq_lbl_tool_bar.addWidget(self._rx_freq_lbl_label)
self.top_grid_layout.addWidget(self._rx_freq_lbl_tool_bar, 0,10,1,2)
self.rational_resampler_xxx_2 = filter.rational_resampler_ccc(
interpolation=1,
decimation=10,
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_1 = filter.rational_resampler_ccc(
interpolation=1,
decimation=8,
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_0 = filter.rational_resampler_ccc(
interpolation=1,
decimation=8,
taps=None,
fractional_bw=None,
)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
4096, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
'', #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0.enable_grid(True)
self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
if not False:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ['pre-d', 'post', '', '', '',
'', '', '', '', '']
colors = [0, 1, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-130, -20)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_win, 5,0,4,8)
self.qtgui_number_sink_2 = qtgui.number_sink(
gr.sizeof_float,
0,
qtgui.NUM_GRAPH_HORIZ,
1
)
self.qtgui_number_sink_2.set_update_time(0.10)
self.qtgui_number_sink_2.set_title("")
labels = ['EVM', '', '', '', '',
'', '', '', '', '']
units = ['', '', '', '', '',
'', '', '', '', '']
colors = [("blue", "red"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"),
("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black")]
factor = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
for i in xrange(1):
self.qtgui_number_sink_2.set_min(i, -1)
self.qtgui_number_sink_2.set_max(i, 1)
self.qtgui_number_sink_2.set_color(i, colors[i][0], colors[i][1])
if len(labels[i]) == 0:
self.qtgui_number_sink_2.set_label(i, "Data {0}".format(i))
else:
self.qtgui_number_sink_2.set_label(i, labels[i])
self.qtgui_number_sink_2.set_unit(i, units[i])
self.qtgui_number_sink_2.set_factor(i, factor[i])
self.qtgui_number_sink_2.enable_autoscale(False)
self._qtgui_number_sink_2_win = sip.wrapinstance(self.qtgui_number_sink_2.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_number_sink_2_win, 2,8,1,4)
self.qtgui_number_sink_0_0_0_0 = qtgui.number_sink(
gr.sizeof_float,
0,
qtgui.NUM_GRAPH_HORIZ,
1
)
self.qtgui_number_sink_0_0_0_0.set_update_time(0.10)
self.qtgui_number_sink_0_0_0_0.set_title("")
labels = ['SNR', '', '', '', '',
'', '', '', '', '']
units = ['dB', '', '', '', '',
'', '', '', '', '']
colors = [("blue", "red"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"),
("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black")]
factor = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
for i in xrange(1):
self.qtgui_number_sink_0_0_0_0.set_min(i, 0)
self.qtgui_number_sink_0_0_0_0.set_max(i, 30)
self.qtgui_number_sink_0_0_0_0.set_color(i, colors[i][0], colors[i][1])
if len(labels[i]) == 0:
self.qtgui_number_sink_0_0_0_0.set_label(i, "Data {0}".format(i))
else:
self.qtgui_number_sink_0_0_0_0.set_label(i, labels[i])
self.qtgui_number_sink_0_0_0_0.set_unit(i, units[i])
self.qtgui_number_sink_0_0_0_0.set_factor(i, factor[i])
self.qtgui_number_sink_0_0_0_0.enable_autoscale(False)
self._qtgui_number_sink_0_0_0_0_win = sip.wrapinstance(self.qtgui_number_sink_0_0_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_number_sink_0_0_0_0_win, 1,8,1,4)
self.qtgui_number_sink_0 = qtgui.number_sink(
gr.sizeof_float,
0,
qtgui.NUM_GRAPH_NONE,
1
)
self.qtgui_number_sink_0.set_update_time(0.10)
self.qtgui_number_sink_0.set_title("")
labels = ['RX Freq Offset', 'SNR', '', '', '',
'', '', '', '', '']
units = ['Hz', 'dB', '', '', '',
'', '', '', '', '']
colors = [("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"),
("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black")]
factor = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
for i in xrange(1):
self.qtgui_number_sink_0.set_min(i, -1)
self.qtgui_number_sink_0.set_max(i, 1)
self.qtgui_number_sink_0.set_color(i, colors[i][0], colors[i][1])
if len(labels[i]) == 0:
self.qtgui_number_sink_0.set_label(i, "Data {0}".format(i))
else:
self.qtgui_number_sink_0.set_label(i, labels[i])
self.qtgui_number_sink_0.set_unit(i, units[i])
self.qtgui_number_sink_0.set_factor(i, factor[i])
self.qtgui_number_sink_0.enable_autoscale(False)
self._qtgui_number_sink_0_win = sip.wrapinstance(self.qtgui_number_sink_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_number_sink_0_win, 0,8,1,2)
self.qtgui_freq_sink_x_1 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate/10, #bw
"TX Spectrum", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_1.set_update_time(0.10)
self.qtgui_freq_sink_x_1.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_1.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1.enable_autoscale(True)
self.qtgui_freq_sink_x_1.enable_grid(False)
self.qtgui_freq_sink_x_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_1.enable_axis_labels(True)
self.qtgui_freq_sink_x_1.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_1.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_1.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_1.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_1_win, 9,0,4,8)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
1024*4, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate , #bw
"", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.0010)
self.qtgui_freq_sink_x_0.set_y_axis(-140, -20)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(0.2)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['pre-d', 'post', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_win, 0,0,5,8)
self.qtgui_const_sink_x_0 = qtgui.const_sink_c(
1024, #size
"", #name
1 #number of inputs
)
self.qtgui_const_sink_x_0.set_update_time(0.10)
self.qtgui_const_sink_x_0.set_y_axis(-1, 1)
self.qtgui_const_sink_x_0.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0.enable_autoscale(False)
self.qtgui_const_sink_x_0.enable_grid(True)
self.qtgui_const_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_const_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_win, 5,8,4,4)
self.pyqt_text_input_0 = pyqt.text_input()
self._pyqt_text_input_0_win = self.pyqt_text_input_0;
self.top_grid_layout.addWidget(self._pyqt_text_input_0_win, 9,9,1,3)
self.mapper_demapper_soft_0 = mapper.demapper_soft(mapper.BPSK, ([0,1]))
self.low_pass_filter_0_0 = filter.fir_filter_ccf(1, firdes.low_pass(
1, samp_rate, (baud *(1+alpha) )/2, 1000, firdes.WIN_HAMMING, 6.76))
self.kiss_hdlc_framer_0 = kiss.hdlc_framer(preamble_bytes=64, postamble_bytes=16)
self.freq_xlating_fir_filter_xxx_0 = filter.freq_xlating_fir_filter_ccc(1, (lpf_taps), khz_offset*1000, samp_rate)
self.digital_scrambler_bb_0 = digital.scrambler_bb(0x21, 0x0, 16)
self.digital_pfb_clock_sync_xxx_0_0 = digital.pfb_clock_sync_ccf(samps_per_symb, math.pi*2/100, (rrc_filter_taps), 32, 16, 1.5, 1)
self.digital_gmsk_mod_0 = digital.gmsk_mod(
samples_per_symbol=50,
bt=alpha,
verbose=False,
log=False,
)
self.digital_diff_decoder_bb_0 = digital.diff_decoder_bb(2)
self.digital_costas_loop_cc_0_0 = digital.costas_loop_cc(math.pi*2/100, 2, False)
self.digital_costas_loop_cc_0 = digital.costas_loop_cc(math.pi*2/100, 2, False)
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_socket_pdu_0_2 = blocks.socket_pdu("UDP_SERVER", ip, '52002', 1024, False)
self.blocks_socket_pdu_0_1 = blocks.socket_pdu("TCP_SERVER", ip, port, 1024, False)
self.blocks_pdu_to_tagged_stream_0_0 = blocks.pdu_to_tagged_stream(blocks.byte_t, 'packet_len')
self.blocks_pack_k_bits_bb_0 = blocks.pack_k_bits_bb(8)
self.blocks_null_sink_0_0_0 = blocks.null_sink(gr.sizeof_float*1)
self.blocks_null_sink_0_0 = blocks.null_sink(gr.sizeof_float*1)
self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_gr_complex*1)
self.blocks_nlog10_ff_0_1 = blocks.nlog10_ff(10, 1, 0)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_multiply_const_vxx_0_0 = blocks.multiply_const_vcc((bb_gain, ))
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((mult, ))
self.blocks_moving_average_xx_0_0_1 = blocks.moving_average_ff(100000, 0.00001, 4000)
self.blocks_moving_average_xx_0_0 = blocks.moving_average_ff(1000, 0.001, 4000)
self.blocks_moving_average_xx_0 = blocks.moving_average_ff(100000, 0.00001, 4000)
self.blocks_keep_one_in_n_0_0 = blocks.keep_one_in_n(gr.sizeof_float*1, int(samp_rate*meta_rate))
self.blocks_keep_one_in_n_0 = blocks.keep_one_in_n(gr.sizeof_float*1, int(samp_rate/8*meta_rate))
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, '/captures/rocksat/testing/trimmed_500k.fc32', True)
self.blocks_file_sink_1_0 = blocks.file_sink(gr.sizeof_float*1, rfo_fp, False)
self.blocks_file_sink_1_0.set_unbuffered(False)
self.blocks_file_sink_1 = blocks.file_sink(gr.sizeof_float*1, snr_fp, False)
self.blocks_file_sink_1.set_unbuffered(False)
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_gr_complex*1, iq_fp, False)
self.blocks_file_sink_0.set_unbuffered(False)
self.blocks_divide_xx_0 = blocks.divide_ff(1)
self.blocks_complex_to_mag_squared_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
self.blocks_add_const_vxx_0 = blocks.add_const_vff((-1, ))
self.blks2_selector_0_0_0 = grc_blks2.selector(
item_size=gr.sizeof_float*1,
num_inputs=1,
num_outputs=2,
input_index=0,
output_index=int(record_snr),
)
self.blks2_selector_0_0 = grc_blks2.selector(
item_size=gr.sizeof_float*1,
num_inputs=1,
num_outputs=2,
input_index=0,
output_index=int(record_rfo),
)
self.blks2_selector_0 = grc_blks2.selector(
item_size=gr.sizeof_gr_complex*1,
num_inputs=1,
num_outputs=2,
input_index=0,
output_index=int(record_iq),
)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, 125e3, 1, 0)
self.analog_agc2_xx_0_0 = analog.agc2_cc(1e-3, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0_0.set_max_gain(65536)
##################################################
# Connections
##################################################
self.msg_connect((self.blocks_socket_pdu_0_1, 'pdus'), (self.kiss_hdlc_framer_0, 'in'))
self.msg_connect((self.blocks_socket_pdu_0_2, 'pdus'), (self.kiss_hdlc_framer_0, 'in'))
self.msg_connect((self.kiss_hdlc_framer_0, 'out'), (self.blocks_pdu_to_tagged_stream_0_0, 'pdus'))
self.msg_connect((self.pyqt_text_input_0, 'pdus'), (self.kiss_hdlc_framer_0, 'in'))
self.msg_connect((self.vtgs_ao40_decoder_0_0, 'valid_frames'), (self.blocks_socket_pdu_0_1, 'pdus'))
self.connect((self.analog_agc2_xx_0_0, 0), (self.digital_costas_loop_cc_0_0, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.blks2_selector_0, 1), (self.blocks_file_sink_0, 0))
self.connect((self.blks2_selector_0, 0), (self.blocks_null_sink_0, 0))
self.connect((self.blks2_selector_0_0, 1), (self.blocks_file_sink_1_0, 0))
self.connect((self.blks2_selector_0_0, 0), (self.blocks_null_sink_0_0, 0))
self.connect((self.blks2_selector_0_0_0, 1), (self.blocks_file_sink_1, 0))
self.connect((self.blks2_selector_0_0_0, 0), (self.blocks_null_sink_0_0_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.qtgui_number_sink_2, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.blocks_moving_average_xx_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_divide_xx_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0, 0), (self.blocks_divide_xx_0, 1))
self.connect((self.blocks_divide_xx_0, 0), (self.blocks_nlog10_ff_0_1, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_keep_one_in_n_0, 0), (self.blks2_selector_0_0_0, 0))
self.connect((self.blocks_keep_one_in_n_0_0, 0), (self.blks2_selector_0_0, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.blocks_keep_one_in_n_0_0, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.qtgui_number_sink_0, 0))
self.connect((self.blocks_moving_average_xx_0_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.blocks_moving_average_xx_0_0_1, 0), (self.blocks_keep_one_in_n_0, 0))
self.connect((self.blocks_moving_average_xx_0_0_1, 0), (self.qtgui_number_sink_0_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.blocks_moving_average_xx_0, 0))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.rational_resampler_xxx_2, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.rational_resampler_xxx_1, 0))
self.connect((self.blocks_nlog10_ff_0_1, 0), (self.blocks_moving_average_xx_0_0_1, 0))
self.connect((self.blocks_pack_k_bits_bb_0, 0), (self.digital_gmsk_mod_0, 0))
self.connect((self.blocks_pdu_to_tagged_stream_0_0, 0), (self.digital_scrambler_bb_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.blks2_selector_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.freq_xlating_fir_filter_xxx_0, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.digital_diff_decoder_bb_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.mapper_demapper_soft_0, 0))
self.connect((self.digital_costas_loop_cc_0, 0), (self.qtgui_const_sink_x_0, 0))
self.connect((self.digital_costas_loop_cc_0_0, 1), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self.digital_costas_loop_cc_0_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.digital_costas_loop_cc_0_0, 0), (self.low_pass_filter_0_0, 0))
self.connect((self.digital_costas_loop_cc_0_0, 0), (self.rational_resampler_xxx_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.vtgs_mult_descrambler_0, 0))
self.connect((self.digital_gmsk_mod_0, 0), (self.blocks_multiply_const_vxx_0_0, 0))
self.connect((self.digital_pfb_clock_sync_xxx_0_0, 0), (self.digital_costas_loop_cc_0, 0))
self.connect((self.digital_scrambler_bb_0, 0), (self.blocks_pack_k_bits_bb_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.analog_agc2_xx_0_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.qtgui_waterfall_sink_x_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.digital_pfb_clock_sync_xxx_0_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.qtgui_freq_sink_x_0, 1))
self.connect((self.mapper_demapper_soft_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.blocks_complex_to_mag_squared_0_0, 0))
self.connect((self.rational_resampler_xxx_2, 0), (self.qtgui_freq_sink_x_1, 0))
self.connect((self.vtgs_mult_descrambler_0, 0), (self.vtgs_ao40_decoder_0_0, 0))
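        # Added summary (derived from the connections above): the receive chain runs
        # file source -> throttle -> frequency-translating FIR -> AGC -> Costas loop ->
        # low-pass -> polyphase clock sync -> second Costas loop -> BPSK soft demapper ->
        # slicer -> differential decoder -> descrambler -> AO-40 decoder -> socket PDUs,
        # while the transmit chain runs socket/text PDUs -> HDLC framer -> scrambler ->
        # pack bits -> GMSK modulator -> baseband gain -> resampler -> TX spectrum sink.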
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "vtgs_trx_file")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_gs_name(self):
return self.gs_name
def set_gs_name(self, gs_name):
self.gs_name = gs_name
self.set_snr_fn("{:s}_{:s}.snr".format(self.gs_name, self.ts_str))
self.set_rfo_fn("{:s}_{:s}.rfo".format(self.gs_name, self.ts_str))
self.set_iq_fn("{:s}_{:s}_{:s}k.fc32".format(self.gs_name, self.ts_str, str(int(self.samp_rate)/1000)))
def get_ip(self):
return self.ip
def set_ip(self, ip):
self.ip = ip
def get_meta_rate(self):
return self.meta_rate
def set_meta_rate(self, meta_rate):
self.meta_rate = meta_rate
self.blocks_keep_one_in_n_0_0.set_n(int(self.samp_rate*self.meta_rate))
self.blocks_keep_one_in_n_0.set_n(int(self.samp_rate/8*self.meta_rate))
def get_port(self):
return self.port
def set_port(self, port):
self.port = port
def get_record_iq(self):
return self.record_iq
def set_record_iq(self, record_iq):
self.record_iq = record_iq
self.blks2_selector_0.set_output_index(int(int(self.record_iq)))
def get_record_rfo(self):
return self.record_rfo
def set_record_rfo(self, record_rfo):
self.record_rfo = record_rfo
self.blks2_selector_0_0.set_output_index(int(int(self.record_rfo)))
def get_record_snr(self):
return self.record_snr
def set_record_snr(self, record_snr):
self.record_snr = record_snr
self.blks2_selector_0_0_0.set_output_index(int(int(self.record_snr)))
def get_tx_freq(self):
return self.tx_freq
def set_tx_freq(self, tx_freq):
self.tx_freq = tx_freq
def get_tx_offset(self):
return self.tx_offset
def set_tx_offset(self, tx_offset):
self.tx_offset = tx_offset
def get_ts_str(self):
return self.ts_str
def set_ts_str(self, ts_str):
self.ts_str = ts_str
self.set_snr_fn("{:s}_{:s}.snr".format(self.gs_name, self.ts_str))
self.set_rfo_fn("{:s}_{:s}.rfo".format(self.gs_name, self.ts_str))
self.set_iq_fn("{:s}_{:s}_{:s}k.fc32".format(self.gs_name, self.ts_str, str(int(self.samp_rate)/1000)))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.set_samps_per_symb(int(self.samp_rate/self.baud))
self.set_mult((self.samp_rate)/2/3.141593)
self.qtgui_waterfall_sink_x_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_1.set_frequency_range(0, self.samp_rate/10)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate )
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate, (self.baud *(1+self.alpha) )/2, 1000, firdes.WIN_HAMMING, 6.76))
self.set_iq_fn("{:s}_{:s}_{:s}k.fc32".format(self.gs_name, self.ts_str, str(int(self.samp_rate)/1000)))
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.blocks_keep_one_in_n_0_0.set_n(int(self.samp_rate*self.meta_rate))
self.blocks_keep_one_in_n_0.set_n(int(self.samp_rate/8*self.meta_rate))
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
def get_baud(self):
return self.baud
def set_baud(self, baud):
self.baud = baud
self.set_samps_per_symb(int(self.samp_rate/self.baud))
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate, (self.baud *(1+self.alpha) )/2, 1000, firdes.WIN_HAMMING, 6.76))
def get_snr_fn(self):
return self.snr_fn
def set_snr_fn(self, snr_fn):
self.snr_fn = snr_fn
self.set_snr_fp("/captures/rocksat/{:s}".format(self.snr_fn))
def get_samps_per_symb(self):
return self.samps_per_symb
def set_samps_per_symb(self, samps_per_symb):
self.samps_per_symb = samps_per_symb
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
self.set_rx_freq_lbl(self._rx_freq_lbl_formatter("{:4.3f}".format(self.rx_freq/1e6)))
def get_rfo_fn(self):
return self.rfo_fn
def set_rfo_fn(self, rfo_fn):
self.rfo_fn = rfo_fn
self.set_rfo_fp("/captures/rocksat/{:s}".format(self.rfo_fn))
def get_iq_fn(self):
return self.iq_fn
def set_iq_fn(self, iq_fn):
self.iq_fn = iq_fn
self.set_iq_fp("/captures/rocksat/{:s}".format(self.iq_fn))
def get_alpha(self):
return self.alpha
def set_alpha(self, alpha):
self.alpha = alpha
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate, (self.baud *(1+self.alpha) )/2, 1000, firdes.WIN_HAMMING, 6.76))
def get_uplink_label(self):
return self.uplink_label
def set_uplink_label(self, uplink_label):
self.uplink_label = uplink_label
Qt.QMetaObject.invokeMethod(self._uplink_label_label, "setText", Qt.Q_ARG("QString", self.uplink_label))
def get_tx_gain(self):
return self.tx_gain
def set_tx_gain(self, tx_gain):
self.tx_gain = tx_gain
def get_tx_correct(self):
return self.tx_correct
def set_tx_correct(self, tx_correct):
self.tx_correct = tx_correct
def get_snr_fp(self):
return self.snr_fp
def set_snr_fp(self, snr_fp):
self.snr_fp = snr_fp
self.blocks_file_sink_1.open(self.snr_fp)
def get_rx_offset(self):
return self.rx_offset
def set_rx_offset(self, rx_offset):
self.rx_offset = rx_offset
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
def get_rx_freq_lbl(self):
return self.rx_freq_lbl
def set_rx_freq_lbl(self, rx_freq_lbl):
self.rx_freq_lbl = rx_freq_lbl
Qt.QMetaObject.invokeMethod(self._rx_freq_lbl_label, "setText", Qt.Q_ARG("QString", self.rx_freq_lbl))
def get_rrc_filter_taps(self):
return self.rrc_filter_taps
def set_rrc_filter_taps(self, rrc_filter_taps):
self.rrc_filter_taps = rrc_filter_taps
self.digital_pfb_clock_sync_xxx_0_0.update_taps((self.rrc_filter_taps))
def get_rfo_fp(self):
return self.rfo_fp
def set_rfo_fp(self, rfo_fp):
self.rfo_fp = rfo_fp
self.blocks_file_sink_1_0.open(self.rfo_fp)
def get_mult(self):
return self.mult
def set_mult(self, mult):
self.mult = mult
self.blocks_multiply_const_vxx_0.set_k((self.mult, ))
def get_lpf_taps(self):
return self.lpf_taps
def set_lpf_taps(self, lpf_taps):
self.lpf_taps = lpf_taps
self.freq_xlating_fir_filter_xxx_0.set_taps((self.lpf_taps))
def get_lo(self):
return self.lo
def set_lo(self, lo):
self.lo = lo
def get_khz_offset(self):
return self.khz_offset
def set_khz_offset(self, khz_offset):
self.khz_offset = khz_offset
self.freq_xlating_fir_filter_xxx_0.set_center_freq(self.khz_offset*1000)
def get_iq_fp(self):
return self.iq_fp
def set_iq_fp(self, iq_fp):
self.iq_fp = iq_fp
self.blocks_file_sink_0.open(self.iq_fp)
def get_bb_gain(self):
return self.bb_gain
def set_bb_gain(self, bb_gain):
self.bb_gain = bb_gain
self.blocks_multiply_const_vxx_0_0.set_k((self.bb_gain, ))
def argument_parser():
parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
parser.add_option(
"", "--gs-name", dest="gs_name", type="string", default='VTGS',
help="Set gs_name [default=%default]")
parser.add_option(
"-a", "--ip", dest="ip", type="string", default='0.0.0.0',
help="Set 0.0.0.0 [default=%default]")
parser.add_option(
"", "--meta-rate", dest="meta_rate", type="eng_float", default=eng_notation.num_to_str(.1),
help="Set meta_rate [default=%default]")
parser.add_option(
"-p", "--port", dest="port", type="string", default='52003',
help="Set 52003 [default=%default]")
parser.add_option(
"", "--record-iq", dest="record_iq", type="intx", default=0,
help="Set record_iq [default=%default]")
parser.add_option(
"", "--record-rfo", dest="record_rfo", type="intx", default=0,
help="Set record_rfo [default=%default]")
parser.add_option(
"", "--record-snr", dest="record_snr", type="intx", default=0,
help="Set record_snr [default=%default]")
parser.add_option(
"", "--tx-freq", dest="tx_freq", type="eng_float", default=eng_notation.num_to_str(1265e6),
help="Set tx_freq [default=%default]")
parser.add_option(
"", "--tx-offset", dest="tx_offset", type="eng_float", default=eng_notation.num_to_str(250e3),
help="Set tx_offset [default=%default]")
return parser
def main(top_block_cls=vtgs_trx_file, options=None):
if options is None:
options, _ = argument_parser().parse_args()
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls(gs_name=options.gs_name, ip=options.ip, meta_rate=options.meta_rate, port=options.port, record_iq=options.record_iq, record_rfo=options.record_rfo, record_snr=options.record_snr, tx_freq=options.tx_freq, tx_offset=options.tx_offset)
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
| 45.304751 | 255 | 0.640877 |
| c790b97615e2a41170f1dfe1d138d73030454665 | 2,367 | py | Python | tests/test_visitors/test_ast/test_exceptions/test_duplicate_exceptions.py | cdhiraj40/wemake-python-styleguide @ 7cef9be081d594c30045b7a98cae77a9be46e1aa | MIT | stars: 1,931 | issues: 2,231 (amansr02/wemake-python-styleguide @ 681035ed21fbe28ebfb32b8807b98e8de76b64aa) | forks: 492 |
import pytest
from wemake_python_styleguide.violations.best_practices import (
DuplicateExceptionViolation,
)
from wemake_python_styleguide.visitors.ast.exceptions import (
WrongTryExceptVisitor,
)
# Correct:
correct_bare_except = """
try:
...
except:
...
"""
correct_simple_except = """
try:
...
except Exception:
...
"""
correct_simple_except_with_name = """
try:
...
except Exception as ex:
...
"""
correct_two_exceptions = """
try:
...
except (IndexError, ValueError):
...
"""
correct_two_exceptions_with_names = """
try:
...
except (IndexError, ValueError) as ex:
...
"""
correct_two_excepts = """
try:
...
except ValueError:
...
except IndexError:
...
"""
correct_two_excepts_with_names = """
try:
...
except ValueError as ex:
...
except IndexError as ex:
...
"""
correct_two_complex_excepts = """
try:
...
except ValueError as ex:
...
except (IndexError, model.DoesNotExist) as ex:
...
"""
# Wrong:
wrong_simple = """
try:
...
except ValueError as ex:
...
except ValueError:
...
"""
wrong_single_tuple = """
try:
...
except (some.ValueError, some.ValueError) as ex:
...
"""
wrong_different_tuples = """
try:
...
except (exc['type'], ValueError) as ex:
...
except (exc['type'], IndexError):
...
"""
@pytest.mark.parametrize('code', [
correct_bare_except,
correct_simple_except,
correct_simple_except_with_name,
correct_two_exceptions,
correct_two_exceptions_with_names,
correct_two_excepts,
correct_two_excepts_with_names,
correct_two_complex_excepts,
])
def test_correct_exceptions(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Violations without duplicates."""
tree = parse_ast_tree(code)
visitor = WrongTryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
wrong_simple,
wrong_single_tuple,
wrong_different_tuples,
])
def test_duplicate_exceptions(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Duplicate exception classes should raise a violation."""
tree = parse_ast_tree(code)
visitor = WrongTryExceptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [DuplicateExceptionViolation])
| 16.324138 | 64 | 0.666244 |
| 0dac5ba15c7a842fc7b191ecf676879c24c43fbd | 1,478 | py | Python | Code/tcga_normalizedDataToPickle.py | HalforcNull/Research_PatternRecognition @ e9cbe8df75ae775e0ed813ac4956973b4e857979 | MIT |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 06 15:36:31 2017
@author: runan.yao
"""
import csv
import pickle
import numpy as np
def __loadData(dataFile, isNumericData = False):
data = []
with open(dataFile, 'rt') as csvfile:
datas = csv.reader(csvfile, delimiter = ',')
for row in datas:
if row is None or len(row) == 0:
continue
if isNumericData:
                data.append(list(map(float, row)))  # list() so rows hold numbers, not lazy map objects (Python 3)
else:
data.append(row)
return data
def fileLoad():
DataSet = {}
DataList = __loadData('tcga_data.csv', isNumericData = True)
LabelList = __loadData('tcga_label.csv')
#LabelList = __loadData('label.csv')
    for i in range(len(LabelList)):  # iterate over all samples (originally a hard-coded 9662)
lb = LabelList[i][0]
#if lb in ['small', 'minor', 'whole']:
# meaning less label are removed from the tests
# continue
if lb in DataSet.keys():
DataSet[lb].append(DataList[i])
else:
DataSet[lb] = [DataList[i]]
return DataSet
def normalization(sample):
"""one sample pass in"""
sample = sample + 100
# 2^20 = 1048576
return np.log2(sample * 1048576/np.sum(sample))
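# Added note: normalization() is a log2 "counts per 2^20" style scaling -- a
# pseudo-count of 100 is added to every gene, each sample is divided by its own
# total, multiplied by 1048576 (2^20), and log2-transformed so samples with
# different sequencing depths become comparable.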
DataSet = fileLoad()
for label in DataSet.keys():
with open('./data/tcga/pickledNormalizedData/'+label+'.pkl', 'wb') as tr:
        t = np.array(DataSet[label]).astype(float)  # np.float was removed from modern NumPy; use the builtin
pickle.dump(normalization(t), tr, pickle.HIGHEST_PROTOCOL)
| 25.929825 | 77 | 0.577131 |
| 1e2ff05b6031910294b63cb8e0e4e15add184a2f | 9,845 | py | Python | airflow/providers/opsgenie/operators/opsgenie.py | holly-evans/airflow @ 865406cbab4defd35c95afbf0a8d5987ff7788b1 | Apache-2.0 | stars: 3 | issues: 20 | forks: 3 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
from airflow.models import BaseOperator
from airflow.providers.opsgenie.hooks.opsgenie import OpsgenieAlertHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class OpsgenieCreateAlertOperator(BaseOperator):
"""
This operator allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieCreateAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param message: The Message of the Opsgenie alert (templated)
:param alias: Client-defined identifier of the alert (templated)
:param description: Description field of the alert (templated)
:param responders: Teams, users, escalations and schedules that
the alert will be routed to send notifications.
:param visible_to: Teams and users that the alert will become visible
to without sending any notification.
:param actions: Custom actions that will be available for the alert.
:param tags: Tags of the alert.
:param details: Map of key-value pairs to use as custom properties of the alert.
:param entity: Entity field of the alert that is
generally used to specify which domain alert is related to. (templated)
:param source: Source field of the alert. Default value is
IP address of the incoming request.
:param priority: Priority level of the alert. Default value is P3. (templated)
:param user: Display name of the request owner.
:param note: Additional note that will be added while creating the alert. (templated)
"""
template_fields: Sequence[str] = ('message', 'alias', 'description', 'entity', 'priority', 'note')
def __init__(
self,
*,
message: str,
opsgenie_conn_id: str = 'opsgenie_default',
alias: Optional[str] = None,
description: Optional[str] = None,
responders: Optional[List[dict]] = None,
visible_to: Optional[List[dict]] = None,
actions: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
details: Optional[dict] = None,
entity: Optional[str] = None,
source: Optional[str] = None,
priority: Optional[str] = None,
user: Optional[str] = None,
note: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.message = message
self.opsgenie_conn_id = opsgenie_conn_id
self.alias = alias
self.description = description
self.responders = responders
self.visible_to = visible_to
self.actions = actions
self.tags = tags
self.details = details
self.entity = entity
self.source = source
self.priority = priority
self.user = user
self.note = note
self.hook: Optional[OpsgenieAlertHook] = None
def _build_opsgenie_payload(self) -> Dict[str, Any]:
"""
Construct the Opsgenie JSON payload. All relevant parameters are combined here
to a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in [
"message",
"alias",
"description",
"responders",
"visible_to",
"actions",
"tags",
"details",
"entity",
"source",
"priority",
"user",
"note",
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload
def execute(self, context: 'Context') -> None:
"""Call the OpsgenieAlertHook to post message"""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.create_alert(self._build_opsgenie_payload())
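# Hedged usage sketch (added; the task id, connection name and field values are
# illustrative only):
#
#   create_alert = OpsgenieCreateAlertOperator(
#       task_id="opsgenie_create_alert",
#       opsgenie_conn_id="opsgenie_default",
#       message="Nightly ETL run failed",
#       priority="P2",
#       tags=["etl", "nightly"],
#   )
#
# placed inside a DAG definition; the hook reads the API key from the Opsgenie
# connection's password and posts the payload built by _build_opsgenie_payload().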
class OpsgenieCloseAlertOperator(BaseOperator):
"""
This operator allows you to close alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieCloseAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: display name of the request owner
:param note: additional note that will be added while creating the alert
:param source: source field of the alert. Default value is IP address of the incoming request
:param close_alert_kwargs: additional params to pass
"""
def __init__(
self,
*,
identifier: str,
opsgenie_conn_id: str = 'opsgenie_default',
identifier_type: Optional[str] = None,
user: Optional[str] = None,
note: Optional[str] = None,
source: Optional[str] = None,
close_alert_kwargs: Optional[dict] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.opsgenie_conn_id = opsgenie_conn_id
self.identifier = identifier
self.identifier_type = identifier_type
self.user = user
self.note = note
self.source = source
self.close_alert_kwargs = close_alert_kwargs
self.hook: Optional[OpsgenieAlertHook] = None
def _build_opsgenie_close_alert_payload(self) -> Dict[str, Any]:
"""
Construct the Opsgenie JSON payload. All relevant parameters are combined here
to a valid Opsgenie JSON payload.
:return: Opsgenie close alert payload (dict) to send
"""
payload = {}
for key in [
"user",
"note",
"source",
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload
def execute(self, context: 'Context') -> None:
"""Call the OpsgenieAlertHook to close alert"""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.close_alert(
identifier=self.identifier,
identifier_type=self.identifier_type,
payload=self._build_opsgenie_close_alert_payload(),
**(self.close_alert_kwargs or {}),
)
class OpsgenieDeleteAlertOperator(BaseOperator):
"""
This operator allows you to delete alerts in Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieDeleteAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: Display name of the request owner
:param source: Display name of the request source
"""
template_fields: Sequence[str] = ('identifier',)
def __init__(
self,
*,
identifier: str,
opsgenie_conn_id: str = 'opsgenie_default',
identifier_type: Optional[str] = None,
user: Optional[str] = None,
source: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.opsgenie_conn_id = opsgenie_conn_id
self.identifier = identifier
self.identifier_type = identifier_type
self.user = user
self.source = source
def execute(self, context: 'Context') -> None:
"""Call the OpsgenieAlertHook to delete alert"""
hook = OpsgenieAlertHook(self.opsgenie_conn_id)
hook.delete_alert(
identifier=self.identifier,
identifier_type=self.identifier_type,
user=self.user,
source=self.source,
)
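# A minimal usage sketch (hypothetical task id and alert alias, not part of this module):
#
#     delete_alert = OpsgenieDeleteAlertOperator(
#         task_id="opsgenie_delete_alert",
#         identifier="example-alert-alias",
#         identifier_type="alias",
#         user="airflow",
#     )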
| 37.291667
| 102
| 0.653834
|
d2cca1eac96d2fdcbea1a96bf2232aca7392fc91
| 4,437
|
py
|
Python
|
aws-functions/HttpTriggerApiUsersUseridTasks/lambda_function.py
|
jaguiniga/ad440-winter2021-thursday-repo
|
d099b06488e7a866af5f4bf4f776b32807233994
|
[
"Apache-2.0"
] | 4
|
2021-01-08T02:01:53.000Z
|
2021-01-09T23:06:55.000Z
|
aws-functions/HttpTriggerApiUsersUseridTasks/lambda_function.py
|
jaguiniga/ad440-winter2021-thursday-repo
|
d099b06488e7a866af5f4bf4f776b32807233994
|
[
"Apache-2.0"
] | 242
|
2021-01-08T02:41:43.000Z
|
2021-03-28T20:18:43.000Z
|
aws-functions/HttpTriggerApiUsersUseridTasks/lambda_function.py
|
jaguiniga/ad440-winter2021-thursday-repo
|
d099b06488e7a866af5f4bf4f776b32807233994
|
[
"Apache-2.0"
] | 45
|
2021-01-08T02:44:21.000Z
|
2021-03-26T01:36:53.000Z
|
import os
import json
import logging
import pymysql
import datetime
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def default(dateHandle):
if isinstance(dateHandle, (datetime.datetime, datetime.date)):
return dateHandle.isoformat()
def lambda_handler(event, context):
logger.info('Beginning function execution...')
method = event['requestContext']['http']['method']
userId = event['pathParameters']['userId']
# Connect to the db
db_name = os.environ['rds_db_name']
logger.info(f'Attempting to connect to DB: {db_name}')
try:
conn = pymysql.connect(
host = os.environ['rds_hostname'],
user = os.environ['rds_username'],
password = os.environ['rds_password'],
database = os.environ['rds_db_name'],
connect_timeout = 5
)
except pymysql.MySQLError as e:
logger.error('ERROR: Unexpected error: Could not connect to MySQL instance.')
logger.error(e)
return {'statusCode': 500}
logger.info('Connected to DB successfully!')
# Respond to the method
try:
if method == 'GET':
logger.info('Attempting to retrieve tasks...')
all_tasks_http_response = get_tasks(conn, userId)
logging.info(all_tasks_http_response)
logger.info('tasks retrieved successfully!')
return all_tasks_http_response
elif method == 'POST':
logging.info('Attempting to add task...')
task_req_body = json.loads(event['body'])
new_task_id_http_response = add_task(conn, task_req_body, userId)
logging.info('task added successfully!')
return new_task_id_http_response
else:
            logging.warning(f'Request with method {method} has been received, but that method is not allowed for this endpoint')
return {'statusCode': 405}
except Exception as e:
return {
'error': f'Error: {str(e)}',
'statusCode': 500
}
finally:
conn.close()
logging.debug('Connection to DB closed')
logger.info('Function execution completed successfully!')
def get_tasks(conn, userId):
with conn.cursor() as cursor:
cursor.execute("SELECT * FROM tasks WHERE userId = %s", (userId))
tasks_table = list(cursor.fetchall())
tasks = []
tasks_columns = [column[0] for column in cursor.description]
for task in tasks_table:
tasks.append(dict(zip(tasks_columns, task)))
return json.dumps(tasks, default=default)
def add_task(conn, task_req_body, userId):
logging.info('Testing the add new task request body for necessary fields...')
try:
assert "title" in task_req_body, "New task request body did not contain field: 'title'"
assert "description" in task_req_body, "New task request body did not contain field: 'description'"
except AssertionError as task_req_body_content_error:
logging.critical('New task request body did not contain the necessary fields!')
return {
'error': task_req_body_content_error.args[0],
'statusCode': 400
}
logging.info('New task request body contains all the necessary fields!')
with conn.cursor() as cursor:
# Unpack task data
title = task_req_body['title']
description = task_req_body['description']
task_params = [userId, title, description]
# Create the query
add_task_query = """
INSERT INTO tasks (userId, title, description)
VALUES(%s, %s, %s);
"""
logging.info('Using connection cursor to execute query (add a new task and get id)')
cursor.execute(add_task_query, task_params)
conn.commit()
# Get the task id from cursor
task_id = cursor.lastrowid
logging.info(
f'task added and new task id ({task_id}) retrieved, returning information from add_task function'
)
return {'taskId': task_id}
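# Illustrative local-invocation sketch (the event shape mirrors what the handler reads;
# all values are hypothetical and the rds_* environment variables must be set):
#
#     event = {
#         "requestContext": {"http": {"method": "POST"}},
#         "pathParameters": {"userId": "42"},
#         "body": json.dumps({"title": "Buy milk", "description": "2 litres"}),
#     }
#     lambda_handler(event, None)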
| 32.152174
| 119
| 0.577868
|
a23fafe4fdff351e2562841a3066cbefb75ed74d
| 106
|
py
|
Python
|
tests/integration/client/__init__.py
|
jaebradley/draftkings_client
|
8db4484f293df3c65c48d62d972b71df95f5ea3d
|
[
"MIT"
] | 111
|
2017-01-07T13:32:00.000Z
|
2022-03-07T22:58:11.000Z
|
tests/integration/client/__init__.py
|
jaebradley/draftkings_client
|
8db4484f293df3c65c48d62d972b71df95f5ea3d
|
[
"MIT"
] | 56
|
2016-11-14T05:50:44.000Z
|
2022-01-18T23:27:44.000Z
|
tests/integration/client/__init__.py
|
jaebradley/draftkings_client
|
8db4484f293df3c65c48d62d972b71df95f5ea3d
|
[
"MIT"
] | 39
|
2017-01-25T01:57:09.000Z
|
2021-12-29T06:57:31.000Z
|
"""
Represents "end-to-end" tests that test the public API definition of the draft_kings.client file.
"""
| 26.5
| 97
| 0.745283
|
0fbcdf471d64d6d8bbccb514788b32e8dd14f8f7
| 45,068
|
py
|
Python
|
tencent-fusai/src/utils.py
|
tangaqi/tencent2019-rank88
|
9b2295e6405fc0958527c509bf1623ea5b67e6b7
|
[
"Apache-2.0"
] | null | null | null |
tencent-fusai/src/utils.py
|
tangaqi/tencent2019-rank88
|
9b2295e6405fc0958527c509bf1623ea5b67e6b7
|
[
"Apache-2.0"
] | null | null | null |
tencent-fusai/src/utils.py
|
tangaqi/tencent2019-rank88
|
9b2295e6405fc0958527c509bf1623ea5b67e6b7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@file:utils.py
@time:2019/6/1 21:57
@author:Tangj
@software:Pycharm
@Desc
"""
import os
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
import random
import pandas as pd
def frame_to_dict(train):
    train_dict = {}
    for col in train.columns:
        train_dict[col] = train[col].values
    return train_dict
def del_adSize(ad_Size):
ad_size_mean = []
ad_size_max = []
ad_size_min = []
for adSize in ad_Size:
if not isinstance(adSize, str):
# print(adSize)
ad_size_mean.append(adSize)
ad_size_max.append(adSize)
ad_size_min.append(adSize)
continue
size = adSize.split(',')
s = []
for i in size:
s.append(int(i))
ad_size_mean.append(np.mean(s))
ad_size_max.append(np.max(s))
ad_size_min.append(np.min(s))
    return ad_size_mean, ad_size_max, ad_size_min
def write_data_into_parts(data, root_path, nums=5100000):
l = data.shape[0] // nums
for i in range(l + 1):
begin = i * nums
end = min(nums * (i + 1), data.shape[0])
t_data = data[begin:end]
        t_data.tofile(root_path + '_' + str(i) + '.bin')  # one file per part; a single root_path would be overwritten on every iteration
def write_dict(path, data):
fw = open(path, 'w')
for key in data:
fw.write(str(key) + ',' + str(data[key]) + '\n')
fw.close()
def read_allfea(path):
    # read the vocabulary line of the file and build a value -> index mapping
    f = open(path, 'r')
    fea = '0'
    for line in f:
        fea = line
    f.close()
    fea_val = fea.split(',')
    index_dict = {}
    for i, val in enumerate(fea_val):
        index_dict[val] = i + 1
    if '-1' not in index_dict:
        index_dict['-1'] = len(fea_val)
    return fea, index_dict
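# Illustrative sketch (hypothetical vocabulary file): if the last line of the file is
# "a,b,c", read_allfea returns that line together with
# index_dict == {'a': 1, 'b': 2, 'c': 3, '-1': 3}.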
def one_hot_feature_concat(train, test, fea1, fea2, filter_num=100):
train1 = train[fea1].values
train2 = train[fea2].values
test1 = test[fea1].values
test2 = test[fea2].values
train_data = []
test_data = []
train_res = []
test_res = []
for i, values in enumerate(train1):
new = str(values) + '|' + str(train2[i])
train_data.append(new)
for i, values in enumerate(test1):
new = str(values) + '|' + str(test2[i])
# print(new)
test_data.append(new)
count_dict = {}
for d in train_data:
if d not in count_dict:
count_dict[d] = 0
count_dict[d] += 1
filter_set = []
for i in count_dict:
        if count_dict[i] < filter_num:
filter_set.append(i)
index_dict = {}
begin_index = 1
for d in train_data:
        # assign an index to each value that appears
if d in filter_set:
d = '-1'
if d not in index_dict:
index_dict[d] = begin_index
begin_index += 1
train_res.append(index_dict[d])
if '-1' not in index_dict:
index_dict['-1'] = begin_index
for d in test_data:
if d not in index_dict or d in filter_set:
d = '-1'
test_res.append(index_dict[d])
print(test_res)
return np.array(train_res), np.array(test_res)
def one_hot_feature_process(train_data, val_data, test2_data, begin_num, filter_num=0):
index_dict = {}
begin_index = begin_num
train_res = []
for d in train_data:
# print(d)
        # assign an index to each value that appears
if d not in index_dict:
index_dict[d] = begin_index
begin_index += 1
# print(index_dict[d])
train_res.append(index_dict[d])
if '-1' not in index_dict:
index_dict['-1'] = begin_index
val_res = []
for d in val_data:
if d not in index_dict:
index_dict[d] = begin_index
begin_index += 1
val_res.append(index_dict[d])
test2_res = []
for d in test2_data:
if d not in index_dict:
d = '-1'
test2_res.append(index_dict[d])
# print(np.array(train_res))
return np.array(train_res), np.array(val_res), np.array(test2_res), index_dict
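# Illustrative sketch (hypothetical toy arrays):
#
#     tr, va, te, idx = one_hot_feature_process(
#         np.array(['a', 'b', 'a']), np.array(['b']), np.array(['c']), begin_num=1)
#     # tr == [1, 2, 1], va == [2], te == [3]; the unseen 'c' falls back to '-1'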
def vector_feature_process(train_data, val_data, test2_data, begin_num, max_len, index_dict):
train_res = []
train_res2 = []
val_res2 = []
test2_res2 = []
train_rate = []
val_rate = []
test2_rate = []
for d in train_data:
lx = d.split(',')
row = [0] * max_len
row2 = [0] * max_len
if len(lx) > max_len or d == 'all':
j = 0
for i in index_dict:
if j >= max_len:
break
row[j] = index_dict[i]
j += 1
train_res.append(row)
row2 = [1] * max_len
train_res2.append(row2)
train_rate.append(1)
continue
for i, x in enumerate(lx):
if x not in index_dict:
x = '-1'
row[i] = index_dict[x]
row2[row[i]] = 1
train_res.append(row)
train_res2.append(row2)
train_rate.append(len(lx) / max_len)
val_res = []
for d in val_data:
lx = d.split(',')
row = [0] * max_len
row2 = [0] * max_len
if len(lx) > max_len or d == 'all':
j = 0
for i in index_dict:
if j >= max_len:
break
row[j] = index_dict[i]
j += 1
val_res.append(row)
row2 = [1] * max_len
val_res2.append(row2)
val_rate.append(1)
continue
for i, x in enumerate(lx):
if x not in index_dict:
x = '-1'
row[i] = index_dict[x]
row2[row[i]] = 1
val_res.append(row)
val_res2.append(row2)
val_rate.append(len(lx) / max_len)
test2_res = []
for d in test2_data:
lx = d.split(',')
row = [0] * max_len
row2 = [0] * max_len
if len(lx) > max_len or d == 'all':
j = 0
for i in index_dict:
if j >= max_len:
break
row[j] = index_dict[i]
j += 1
test2_res.append(row)
row2 = [1] * max_len
test2_res2.append(row2)
test2_rate.append(1)
continue
for i, x in enumerate(lx):
if x not in index_dict:
x = '-1'
row[i] = index_dict[x]
row2[row[i]] = 1
test2_res.append(row)
test2_res2.append(row2)
test2_rate.append(len(lx) / max_len)
return np.array(train_res), np.array(val_res), np.array(test2_res), index_dict, np.array(train_res2), np.array(
val_res2), np.array(test2_res2), np.array(train_rate), np.array(val_rate), np.array(test2_rate),
def count_one_feature_times(train, test, fea):
count_dict = {}
test_res = []
train_res = []
for val in train[fea].values:
if val not in count_dict:
count_dict[val] = 0
count_dict[val] += 1
if '-1' not in count_dict:
count_dict['-1'] = 1
for i in train[fea].values:
train_res.append(count_dict[i])
for i in test:
if i not in count_dict:
i = '-1'
test_res.append(count_dict[i])
return np.array(train_res), np.array(test_res)
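# Illustrative sketch (hypothetical toy frame): with
#
#     df = pd.DataFrame({'ad_id': ['x', 'x', 'y']})
#     tr, te = count_one_feature_times(df, np.array(['x', 'z']), 'ad_id')
#
# tr == [2, 2, 1] and te == [2, 1]; the unseen 'z' falls back to the '-1' count of 1.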
def count_vector_feature_times(train, val_data, test, fea):
count_dict = {}
val_res = []
test_res = []
train_res = []
Train = pd.concat([train, val_data])
for val in Train[fea].values:
vals = val.split(',')
for i in vals:
if i not in count_dict:
count_dict[i] = 0
count_dict[i] += 1
if '-1' not in count_dict:
count_dict['-1'] = 1
for val in train[fea].values:
vals = val.split(',')
l = []
for i in vals:
l.append(count_dict[i])
# ['max', 'mean', 'min', 'median']
max_l = np.max(l)
mean_l = np.mean(l)
min_l = np.min(l)
median_l = np.median(l)
train_res.append([max_l, mean_l, min_l, median_l])
for val in val_data[fea].values:
vals = val.split(',')
l = []
for i in vals:
l.append(count_dict[i])
# ['max', 'mean', 'min', 'median']
max_l = np.max(l)
mean_l = np.mean(l)
min_l = np.min(l)
median_l = np.median(l)
val_res.append([max_l, mean_l, min_l, median_l])
for val in test:
vals = val.split(',')
l = []
for i in vals:
if i not in count_dict:
i = '-1'
l.append(count_dict[i])
# ['max', 'mean', 'min', 'median']
max_l = np.max(l)
mean_l = np.mean(l)
min_l = np.min(l)
median_l = np.median(l)
test_res.append([max_l, mean_l, min_l, median_l])
return np.array(train_res), np.array(val_res), np.array(test_res)
# features built from exposure, pctr, ecpm and bid
def one_feature_exposure2(Train, test, fea, date):
    # returns four values for exposure: max, min, mean, median
    # returns four values for bid: max, min, mean, median
test_res = []
train_res = []
id_res = []
reqday_res = []
train = Train
num1 = train[train['day'] == 20190410].shape[0]
id_res.extend(train[train['day'] == 20190410]['ad_id'].values)
reqday_res.extend(train[train['day'] == 20190410]['day'].values)
for i in range(num1):
train_res.append([0, 0, 0, 0])
for i in range(len(date) - 1):
day = int(date[i + 1])
train_compute = Train[Train['day'] == day]
train_count = Train[Train['day'] < day]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
train1 = train_count[train_count[fea] == value]['sucess_rate'].values
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0, 0, 0, 0]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_count = Train[Train['day'] > 20190414]
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['sucess_rate'].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0, 0, 0, 0]
for value in test:
if value not in exposure_dict:
value = '-1'
test_res.append(exposure_dict[value])
return np.array(train_res), np.array(test_res), np.array(id_res), np.array(reqday_res)
def one_feature_exposure4(Train, test, fea, date):
test_res = []
train_res = []
id_res = []
reqday_res = []
train = Train
train_count = train[train['day'] == 20190410]
train_compute = train[train['day'] == 20190410]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value] = []
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_count = train[train['day'] == 20190410]
train_compute = train[train['day'] == 20190411]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value] = []
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
for i in range(len(date) - 2):
day1 = int(date[i + 2])
day2 = int(date[i + 1])
day3 = int(date[i])
train1 = Train[Train['day'] == day3]
train2 = Train[Train['day'] == day2]
train_compute = Train[Train['day'] == day1]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train1 = train[train['day'] == 20190421]
train2 = train[train['day'] == 20190422]
train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
# print(train_count[train_count[fea] == value].shape[0])
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value] = []
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
num_dis = 0
for value in test:
# print(value)
if value not in exposure_dict:
num_dis += 1
value = '-1'
test_res.append(exposure_dict[value])
print(num_dis)
return np.array(train_res), np.array(test_res), \
np.array(id_res), np.array(reqday_res)
def one_feature_exposure3(Train, test, fea, date):
    # returns four values for exposure: max, min, mean, median
    # returns four values for bid: max, min, mean, median
test_res = []
train_res = []
id_res = []
reqday_res = []
train = Train
# train_count = train[train['day'] == 20190410]
# train_compute = train[train['day'] == 20190410]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value] = []
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
#
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
#
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
#
# train_count = train[train['day'] == 20190410]
# train_compute = train[train['day'] == 20190411]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value] = []
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
#
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
#
# for i in range(len(date) - 2):
# day1 = int(date[i + 2])
# day2 = int(date[i + 1])
# day3 = int(date[i])
#
# train1 = Train[Train['day'] == day3]
# train2 = Train[Train['day'] == day2]
# train_compute = Train[Train['day'] == day1]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# train_count = pd.concat([train1, train2])
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# exposure_dict[value] = []
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
#
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
# train1 = train[train['day'] == 20190421]
train_count = train[train['day'] == 20190422]
# train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['sucess_rate'].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0, 0, 0, 0]
num_dis = 0
for value in test:
# print(value)
if value not in exposure_dict:
num_dis += 1
value = '-1'
test_res.append(exposure_dict[value])
print(num_dis)
return np.array(train_res), np.array(test_res), \
np.array(id_res), np.array(reqday_res)
def one_feature_exposure(train, val, test, fea, date):
    # returns four values for exposure: max, min, mean, median
    # returns four values for bid: max, min, mean, median
val_res = []
test_res = []
train_res = []
val_res2 = []
test_res2 = []
train_res2 = []
train_res3 = []
id_res = []
reqday_res = []
Train = pd.concat([train, val])
num1 = train[train['Reqday'] == '02_16'].shape[0]
id_res.extend(train[train['Reqday'] == '02_16']['ad_id'].values)
reqday_res.extend(train[train['Reqday'] == '02_16']['Reqday'].values)
for i in range(num1):
train_res.append([8, 8, 8, 8])
train_res2.append([8, 8, 8, 8])
train_count = train[train['Reqday'] == '02_16']
train_compute = train[train['Reqday'] == '02_17']
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['Reqday'].values)
exposure_dict = {}
bid_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['exposure'].values
exposure_dict[value] = []
bid_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
train2 = train_count[train_count[fea] == value]['adBid'].values
bid_dict[value].append(np.max(train2))
bid_dict[value].append(np.min(train2))
bid_dict[value].append(np.mean(train2))
bid_dict[value].append(np.median(train2))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
bid_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_res2.append(bid_dict[value])
for i in range(len(date) - 2):
day1 = date[i + 2]
day2 = date[i + 1]
day3 = date[i]
train1 = Train[Train['Reqday'] == day3]
train2 = Train[Train['Reqday'] == day2]
train_compute = Train[Train['Reqday'] == day1]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['Reqday'].values)
train_count = pd.concat([train1, train2])
exposure_dict = {}
bid_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
bid_dict[value] = []
train1 = train_count[train_count[fea] == value]['exposure'].values
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
train2 = train_count[train_count[fea] == value]['adBid'].values
bid_dict[value].append(np.max(train2))
bid_dict[value].append(np.min(train2))
bid_dict[value].append(np.mean(train2))
bid_dict[value].append(np.median(train2))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
bid_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_res2.append(bid_dict[value])
train_res1 = train_res[:(Train.shape[0] - val.shape[0])]
val_res = train_res[-val.shape[0]:]
train_res3 = train_res2[:(Train.shape[0] - val.shape[0])]
val_res2 = train_res2[-val.shape[0]:]
train1 = train[train['Reqday'] == '03_19']
train2 = train[train['Reqday'] == '03_18']
train_count = pd.concat([train1, train2])
exposure_dict = {}
bid_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['exposure'].values
exposure_dict[value] = []
bid_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
train2 = train_count[train_count[fea] == value]['adBid'].values
bid_dict[value].append(np.max(train2))
bid_dict[value].append(np.min(train2))
bid_dict[value].append(np.mean(train2))
bid_dict[value].append(np.median(train2))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
bid_dict['-1'] = [8, 8, 8, 8]
for value in test:
if value not in exposure_dict:
value = '-1'
test_res.append(exposure_dict[value])
test_res2.append(bid_dict[value])
return np.array(train_res1), np.array(val_res), np.array(test_res), np.array(train_res3), np.array(val_res2), \
np.array(test_res2), np.array(id_res), np.array(reqday_res)
def one_feature_pctr2(train, val, test, fea, date, count_fea):
    # returns four values for pctr: max, min, mean, median
val_res = []
test_res = []
train_res = []
train_res2 = []
Train = pd.concat([train, val])
num1 = train[train['Reqday'] == '02_16'].shape[0]
for i in range(num1):
train_res.append([8, 8, 8, 8])
train_res2.append([8, 8, 8, 8])
for i in range(len(date) - 1):
day = date[i + 1]
train_compute = Train[Train['Reqday'] == day]
train_count = Train[Train['Reqday'] < day]
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
train1 = train_count[train_count[fea] == value][count_fea].values
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_res1 = train_res[:(Train.shape[0] - val.shape[0])]
val_res = train_res[-val.shape[0]:]
# train1 = train[train['Reqday'] == '03_19']
# train2 = train[train['Reqday'] == '03_18']
train_count = Train
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value][count_fea].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
for value in test:
if value not in exposure_dict:
value = '-1'
test_res.append(exposure_dict[value])
return np.array(train_res1), np.array(val_res), np.array(test_res)
def one_feature_pctr(train, val, test, fea, date, count_fea):
    # returns four values for pctr: max, min, mean, median
val_res = []
test_res = []
train_res = []
train_res2 = []
Train = pd.concat([train, val])
num1 = train[train['Reqday'] == '02_16'].shape[0]
for i in range(num1):
train_res.append([8, 8, 8, 8])
train_res2.append([8, 8, 8, 8])
train_count = train[train['Reqday'] == '02_16']
train_compute = train[train['Reqday'] == '02_17']
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value][count_fea].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
for i in range(len(date) - 2):
day1 = date[i + 2]
day2 = date[i + 1]
day3 = date[i]
train1 = Train[Train['Reqday'] == day3]
train2 = Train[Train['Reqday'] == day2]
train_compute = Train[Train['Reqday'] == day1]
train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
train1 = train_count[train_count[fea] == value][count_fea].values
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_res1 = train_res[:(Train.shape[0] - val.shape[0])]
val_res = train_res[-val.shape[0]:]
train1 = train[train['Reqday'] == '03_19']
train2 = train[train['Reqday'] == '03_18']
train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value][count_fea].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
for value in test:
if value not in exposure_dict:
value = '-1'
test_res.append(exposure_dict[value])
return np.array(train_res1), np.array(val_res), np.array(test_res)
def create_mask(value, train):
mask = []
    # here train is the array of values of one multi-value feature
for i in train:
vals = i.split(',')
flag = 0
for j in vals:
if j == value:
flag = 1
mask.append(True)
break
if flag == 0:
mask.append(False)
return mask
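# Illustrative sketch: create_mask('2', np.array(['1,2', '3', '2,4'])) returns
# [True, False, True], i.e. a boolean mask of the rows whose multi-value field
# contains the given value.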
def vector_feature_exposure(train, val, test, fea, date):
    # returns four values for exposure: mean, max, min, median
    # returns four values for bid: mean, max, min, median
val_res = []
test_res = []
train_res = []
val_res2 = []
test_res2 = []
train_res2 = []
train_res3 = []
Train = pd.concat([train, val])
num1 = train[train['Reqday'] == '02_16'].shape[0]
for i in range(num1):
train_res.append([8, 8, 8, 8])
train_res2.append([8, 8, 8, 8])
for i in range(len(date) - 1):
day = date[i + 1]
train_compute = Train[Train['Reqday'] == day]
train_count = Train[Train['Reqday'] < day]
exposure_max = {}
exposure_min = {}
exposure_mean = {}
exposure_median = {}
bid_max = {}
bid_min = {}
bid_mean = {}
bid_median = {}
mask_dict = {}
for vals in train_count[fea].values:
valss = vals.split(',')
for value in valss:
if value not in exposure_max:
if value not in mask_dict:
mask_dict[value] = create_mask(value, train_count[fea].values)
train1 = train_count[mask_dict[value]]['exposure'].values
exposure_max[value] = np.max(train1)
exposure_min[value] = np.min(train1)
exposure_mean[value] = np.mean(train1)
exposure_median[value] = np.median(train1)
train2 = train_count[mask_dict[value]]['adBid'].values
bid_max[value] = np.max(train2)
bid_min[value] = np.min(train2)
bid_mean[value] = np.mean(train2)
bid_median[value] = np.median(train2)
if '-1' not in exposure_max:
exposure_max['-1'] = 8
bid_max['-1'] = 8
if '-1' not in exposure_min:
exposure_min['-1'] = 8
bid_min['-1'] = 8
if '-1' not in exposure_mean:
exposure_mean['-1'] = 8
bid_mean['-1'] = 8
if '-1' not in exposure_median:
exposure_median['-1'] = 8
bid_median['-1'] = 8
for vals in train_compute[fea].values:
max_list = []
min_list = []
mean_list = []
median_list = []
max_list2 = []
min_list2 = []
mean_list2 = []
median_list2 = []
valuess = vals.split(',')
for value in valuess:
if value not in exposure_max:
value = '-1'
max_list.append(exposure_max[value])
min_list.append(exposure_min[value])
mean_list.append(exposure_mean[value])
median_list.append(exposure_median[value])
max_list2.append(bid_max[value])
min_list2.append(bid_min[value])
mean_list2.append(bid_mean[value])
median_list2.append(bid_median[value])
max1 = np.max(max_list)
min1 = np.min(min_list)
mean1 = np.mean(mean_list)
median1 = np.median(median_list)
train_res.append([max1, min1, mean1, median1])
max1 = np.max(max_list2)
min1 = np.min(min_list2)
mean1 = np.mean(mean_list2)
median1 = np.median(median_list2)
train_res2.append([max1, min1, mean1, median1])
train_res1 = train_res[:(Train.shape[0] - val.shape[0])]
val_res = train_res[-val.shape[0]:]
train_res3 = train_res2[:(Train.shape[0] - val.shape[0])]
val_res2 = train_res2[-val.shape[0]:]
train_count = Train
exposure_max = {}
exposure_min = {}
exposure_mean = {}
exposure_median = {}
bid_max = {}
bid_min = {}
bid_mean = {}
bid_median = {}
mask_dict = {}
for vals in train_count[fea].values:
valss = vals.split(',')
for value in valss:
if value not in exposure_max:
if value not in mask_dict:
mask_dict[value] = create_mask(value, train_count[fea].values)
train1 = train_count[mask_dict[value]]['exposure'].values
exposure_max[value] = np.max(train1)
exposure_min[value] = np.min(train1)
exposure_mean[value] = np.mean(train1)
exposure_median[value] = np.median(train1)
train2 = train_count[mask_dict[value]]['adBid'].values
bid_max[value] = np.max(train2)
bid_min[value] = np.min(train2)
bid_mean[value] = np.mean(train2)
bid_median[value] = np.median(train2)
if '-1' not in exposure_max:
exposure_max['-1'] = 8
bid_max['-1'] = 8
if '-1' not in exposure_min:
exposure_min['-1'] = 8
bid_min['-1'] = 8
if '-1' not in exposure_mean:
exposure_mean['-1'] = 8
bid_mean['-1'] = 8
if '-1' not in exposure_median:
exposure_median['-1'] = 8
bid_median['-1'] = 8
for vals in test:
max_list = []
min_list = []
mean_list = []
median_list = []
max_list2 = []
min_list2 = []
mean_list2 = []
median_list2 = []
valuess = vals.split(',')
for value in valuess:
if value not in exposure_max:
value = '-1'
max_list.append(exposure_max[value])
min_list.append(exposure_min[value])
mean_list.append(exposure_mean[value])
median_list.append(exposure_median[value])
max_list2.append(bid_max[value])
min_list2.append(bid_min[value])
mean_list2.append(bid_mean[value])
median_list2.append(bid_median[value])
max1 = np.max(max_list)
min1 = np.min(min_list)
mean1 = np.mean(mean_list)
median1 = np.median(median_list)
test_res.append([max1, min1, mean1, median1])
max1 = np.max(max_list2)
min1 = np.min(min_list2)
mean1 = np.mean(mean_list2)
median1 = np.median(median_list2)
test_res2.append([max1, min1, mean1, median1])
return np.array(train_res1), np.array(val_res), np.array(test_res), np.array(train_res3), \
np.array(val_res2), np.array(test_res2)
def vector_feature_pctr(train, val, test, fea, date, count_fea):
    # returns four values for exposure: mean, max, min, median
val_res = []
test_res = []
train_res = []
train_res2 = []
Train = pd.concat([train, val])
num1 = train[train['Reqday'] == '02_16'].shape[0]
for i in range(num1):
train_res.append([8, 8, 8, 8])
train_res2.append([8, 8, 8, 8])
for i in range(len(date) - 1):
day = date[i + 1]
train_compute = Train[Train['Reqday'] == day]
train_count = Train[Train['Reqday'] < day]
exposure_max = {}
exposure_min = {}
exposure_mean = {}
exposure_median = {}
bid_max = {}
bid_min = {}
bid_mean = {}
bid_median = {}
mask_dict = {}
for vals in train_count[fea].values:
valss = vals.split(',')
for value in valss:
if value not in exposure_max:
if value not in mask_dict:
mask_dict[value] = create_mask(value, train_count[fea].values)
train1 = train_count[mask_dict[value]][count_fea].values
exposure_max[value] = np.max(train1)
exposure_min[value] = np.min(train1)
exposure_mean[value] = np.mean(train1)
exposure_median[value] = np.median(train1)
if '-1' not in exposure_max:
exposure_max['-1'] = 8
bid_max['-1'] = 8
if '-1' not in exposure_min:
exposure_min['-1'] = 8
bid_min['-1'] = 8
if '-1' not in exposure_mean:
exposure_mean['-1'] = 8
bid_mean['-1'] = 8
if '-1' not in exposure_median:
exposure_median['-1'] = 8
bid_median['-1'] = 8
for vals in train_compute[fea].values:
max_list = []
min_list = []
mean_list = []
median_list = []
valuess = vals.split(',')
for value in valuess:
if value not in exposure_max:
value = '-1'
max_list.append(exposure_max[value])
min_list.append(exposure_min[value])
mean_list.append(exposure_mean[value])
median_list.append(exposure_median[value])
max1 = np.max(max_list)
min1 = np.min(min_list)
mean1 = np.mean(mean_list)
median1 = np.median(median_list)
train_res.append([max1, min1, mean1, median1])
train_res1 = train_res[:(Train.shape[0] - val.shape[0])]
val_res = train_res[-val.shape[0]:]
train_count = Train
exposure_max = {}
exposure_min = {}
exposure_mean = {}
exposure_median = {}
mask_dict = {}
for vals in train_count[fea].values:
valss = vals.split(',')
for value in valss:
if value not in exposure_max:
if value not in mask_dict:
mask_dict[value] = create_mask(value, train_count[fea].values)
train1 = train_count[mask_dict[value]][count_fea].values
exposure_max[value] = np.max(train1)
exposure_min[value] = np.min(train1)
exposure_mean[value] = np.mean(train1)
exposure_median[value] = np.median(train1)
if '-1' not in exposure_max:
exposure_max['-1'] = 8
bid_max['-1'] = 8
if '-1' not in exposure_min:
exposure_min['-1'] = 8
bid_min['-1'] = 8
if '-1' not in exposure_mean:
exposure_mean['-1'] = 8
bid_mean['-1'] = 8
if '-1' not in exposure_median:
exposure_median['-1'] = 8
bid_median['-1'] = 8
for vals in test:
max_list = []
min_list = []
mean_list = []
median_list = []
valuess = vals.split(',')
for value in valuess:
if value not in exposure_max:
value = '-1'
max_list.append(exposure_max[value])
min_list.append(exposure_min[value])
mean_list.append(exposure_mean[value])
median_list.append(exposure_median[value])
max1 = np.max(max_list)
min1 = np.min(min_list)
mean1 = np.mean(mean_list)
median1 = np.median(median_list)
test_res.append([max1, min1, mean1, median1])
return np.array(train_res1), np.array(val_res), np.array(test_res)
def one_feature_exposure5(Train, test, fea, date, new_day):
    # returns four values for exposure: max, min, mean, median
    # returns four values for bid: max, min, mean, median
test_res = []
train_res = []
id_res = []
reqday_res = []
train = Train
# train_count = train[train['day'] == 20190410]
# train_compute = train[train['day'] == 20190410]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value] = []
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
#
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
#
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
#
# train_count = train[train['day'] == 20190410]
# train_compute = train[train['day'] == 20190411]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value] = []
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
#
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
#
# for i in range(len(date) - 2):
# day1 = int(date[i + 2])
# day2 = int(date[i + 1])
# day3 = int(date[i])
#
# train1 = Train[Train['day'] == day3]
# train2 = Train[Train['day'] == day2]
# train_compute = Train[Train['day'] == day1]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# train_count = pd.concat([train1, train2])
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# exposure_dict[value] = []
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
#
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
# train_count = pd.concat([train1, train2])
train_count = train[train['day'] >= new_day]
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['sucess_rate'].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0, 0, 0, 0]
num_dis = 0
for value in test:
if value not in exposure_dict:
num_dis += 1
value = '-1'
test_res.append(exposure_dict[value])
print(num_dis)
return np.array(train_res), np.array(test_res), \
np.array(id_res), np.array(reqday_res)
| 36.5515
| 115
| 0.558844
|
76b4ff3525a608bb25570d13663084bd0939dc93
| 3,165
|
py
|
Python
|
data_preprocess.py
|
William-Zhanng/Protein_affinity
|
8abd12073b182274bf464ff23fd3be406c4e39ac
|
[
"MIT"
] | 1
|
2022-01-09T12:23:16.000Z
|
2022-01-09T12:23:16.000Z
|
data_preprocess.py
|
William-Zhanng/Protein_affinity
|
8abd12073b182274bf464ff23fd3be406c4e39ac
|
[
"MIT"
] | null | null | null |
data_preprocess.py
|
William-Zhanng/Protein_affinity
|
8abd12073b182274bf464ff23fd3be406c4e39ac
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import tqdm
import argparse
import random
import os
def create_data(protein_idx,all_data,pseq_dict,out_path,sample_ratio=100):
# Postive sample
items = all_data[all_data['item_id_a'] == protein_idx]
df_out = pd.DataFrame(columns = ['item_id_a','item_id_b','sequence_a','sequence_b','label'])
target_proteins = pd.Series(items['item_id_b'].values)
df_out['item_id_b'] = target_proteins
df_out['item_id_a'] = protein_idx
df_out['label'] = 1
df_out['sequence_a'] = pseq_dict[protein_idx]
seq_list = []
for i in range(df_out.shape[0]):
target = target_proteins[i]
target_seq = pseq_dict[target]
seq_list.append(target_seq)
seq_list = pd.Series(seq_list)
df_out['sequence_b'] = seq_list
# Negative sample
# Neg sample compute
all_idx = set(all_data['item_id_a'])
target_idx = set(df_out['item_id_b'])
neg_idx = all_idx - target_idx
    sample_num = sample_ratio * len(target_idx)  # use the sample_ratio argument rather than a hard-coded value
    neg_prot = random.sample(sorted(neg_idx), min(len(neg_idx), sample_num))  # random.sample requires a sequence, not a set
# Create neg sample dataframe
df_neg = pd.DataFrame(columns = ['item_id_a','item_id_b','sequence_a','sequence_b','label'])
df_neg['item_id_b'] = pd.Series(neg_prot)
df_neg['item_id_a'] = protein_idx
df_neg['label'] = 0
df_neg['sequence_a'] = pseq_dict[protein_idx]
seq_list = []
for i in range(df_neg.shape[0]):
target = neg_prot[i]
target_seq = pseq_dict[target]
seq_list.append(target_seq)
seq_list = pd.Series(seq_list)
df_neg['sequence_b'] = seq_list
df_out = pd.concat([df_out,df_neg],axis=0)
df_out.to_csv(out_path)
return df_out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--actions_data_path', type=str, default='./data/9606.protein.actions.all_connected.txt')
parser.add_argument('--pseq_path', type=str, default='./data/protein.STRING_all_connected.sequences.dictionary.tsv',
help='The protein sequence data file path')
parser.add_argument('--protein_idx', type=str, default='9606.ENSP00000217109',
help='The name of protein in STRING dataset')
parser.add_argument('--out_dir', type=str, default='./data',
help='The protein sequence data file path')
args = parser.parse_args()
os.makedirs(args.out_dir,exist_ok=True)
all_data = pd.read_csv(args.actions_data_path,sep='\t')
# Create protein sequence dict
pseq_dict = {}
for line in open(args.pseq_path):
line = line.strip().split('\t')
pseq_dict[line[0]] = line[1]
all_data.drop(columns=['mode','action','is_directional','a_is_acting','score'],axis=1,inplace=True)
all_data.drop_duplicates(inplace = True)
df_each_cnt = all_data.groupby('item_id_a').count()['item_id_b'].sort_values()
protein_idxs = list(df_each_cnt[(df_each_cnt >= 150) & (df_each_cnt < 300)].index)
df_out = create_data(args.protein_idx,all_data,pseq_dict,os.path.join(args.out_dir,'{}_data.csv'.format(args.protein_idx)))
| 40.063291
| 127
| 0.671722
|
0aabc1e47715692927742952898209806e93ecb1
| 115,487
|
py
|
Python
|
snmp/tests/test_profiles.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
snmp/tests/test_profiles.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
snmp/tests/test_profiles.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import logging
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.snmp import SnmpCheck
from datadog_checks.snmp.utils import (
_get_profile_name,
_is_abstract_profile,
_iter_default_profile_file_paths,
get_profile_definition,
recursively_expand_base_profiles,
)
from . import common, metrics
from .metrics import (
ADAPTER_IF_COUNTS,
CCCA_ROUTER_GAUGES,
CIE_METRICS,
COS_COUNTS,
COS_RATES,
CPU_METRICS,
DCU_COUNTS,
DISK_GAUGES,
DRS_GAUGES,
FIREWALL_COUNTS,
FRU_METRICS,
IDRAC_SYSTEM_STATUS_GAUGES,
IF_BANDWIDTH_USAGE,
IF_COUNTS,
IF_GAUGES,
IF_RATES,
IF_SCALAR_GAUGE,
IP_COUNTS,
IP_IF_COUNTS,
IPX_COUNTS,
LTM_GAUGES,
LTM_NODES_COUNTS,
LTM_NODES_GAUGES,
LTM_NODES_RATES,
LTM_POOL_COUNTS,
LTM_POOL_GAUGES,
LTM_POOL_MEMBER_COUNTS,
LTM_POOL_MEMBER_GAUGES,
LTM_POOL_MEMBER_RATES,
LTM_POOL_RATES,
LTM_VIRTUAL_SERVER_COUNTS,
LTM_VIRTUAL_SERVER_GAUGES,
LTM_VIRTUAL_SERVER_RATES,
MEMORY_METRICS,
PEER_GAUGES,
PEER_RATES,
POWEREDGE_SYSTEM_STATUS_GAUGES,
PROBE_GAUGES,
SCU_COUNTS,
TCP_COUNTS,
TCP_GAUGES,
UDP_COUNTS,
USER_FIREWALL,
VIRTUAL_CHASSIS_COUNTS,
VIRTUAL_CHASSIS_RATES,
VOLTAGE_GAUGES,
)
pytestmark = common.snmp_integration_only
def test_load_profiles(caplog):
instance = common.generate_instance_config([])
check = SnmpCheck('snmp', {}, [instance])
caplog.at_level(logging.WARNING)
for name, profile in check.profiles.items():
try:
check._config.refresh_with_profile(profile)
except ConfigurationError as e:
pytest.fail("Profile `{}` is not configured correctly: {}".format(name, e))
assert "table doesn't have a 'metric_tags' section" not in caplog.text
caplog.clear()
def test_profile_hierarchy():
"""
* Only concrete profiles MUST inherit from '_base.yaml'.
* Only concrete profiles MUST define a `sysobjectid` field.
"""
errors = []
compat_base_profiles = ['_base_cisco', '_base_cisco_voice']
for path in _iter_default_profile_file_paths():
name = _get_profile_name(path)
definition = get_profile_definition({'definition_file': path})
extends = definition.get('extends', [])
sysobjectid = definition.get('sysobjectid')
if _is_abstract_profile(name):
if '_base.yaml' in extends and name not in compat_base_profiles:
errors.append("'{}': mixin wrongly extends '_base.yaml'".format(name))
if sysobjectid is not None:
errors.append("'{}': mixin wrongly defines a `sysobjectid`".format(name))
else:
if '_base.yaml' not in extends:
errors.append("'{}': concrete profile must directly extend '_base.yaml'".format(name))
if sysobjectid is None:
errors.append("'{}': concrete profile must define a `sysobjectid`".format(name))
if errors:
pytest.fail('\n'.join(sorted(errors)))
def run_profile_check(recording_name, profile_name=None):
"""
Run a single check with the provided `recording_name` used as
`community_string` by the docker SNMP endpoint.
"""
instance = common.generate_instance_config([])
instance['community_string'] = recording_name
instance['enforce_mib_constraints'] = False
check = SnmpCheck('snmp', {}, [instance])
# First, see if recording name is a profile, then use profile as definition.
if profile_name is not None:
profile = check.profiles.get(profile_name)
else:
profile = check.profiles.get(recording_name)
if profile:
try:
test_check = SnmpCheck('snmp', {}, [common.generate_instance_config([])])
test_check._config.refresh_with_profile(profile)
except ConfigurationError as e:
pytest.fail("Profile `{}` is not configured correctly: {}".format(recording_name, e))
check.check(instance)
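# Illustrative calls, mirroring the tests below: run_profile_check('cisco_icm') replays the
# 'cisco_icm' recording, while run_profile_check('f5-big-ip') also validates the profile of
# the same name before running the check.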
@pytest.mark.unit
@pytest.mark.parametrize(
'definition_file, equivalent_definition',
[
pytest.param('_base_cisco.yaml', {'extends': ['_base.yaml', '_cisco-generic.yaml']}, id='generic'),
pytest.param(
'_base_cisco_voice.yaml',
{'extends': ['_base.yaml', '_cisco-generic.yaml', '_cisco-voice.yaml']},
id='voice',
),
],
)
def test_compat_cisco_base_profiles(definition_file, equivalent_definition):
# type: (str, dict) -> None
"""
Cisco and Cisco Voice base profiles were replaced by mixins (see Pull #6792).
But their definition files should still be present and contain equivalent metrics to ensure backward compatibility.
"""
definition = get_profile_definition({'definition_file': definition_file})
recursively_expand_base_profiles(definition)
recursively_expand_base_profiles(equivalent_definition)
assert definition == equivalent_definition
@pytest.mark.usefixtures("dd_environment")
def test_cisco_voice(aggregator):
run_profile_check('cisco_icm')
tags = [
'snmp_profile:cisco_icm',
'snmp_host:test',
'device_vendor:cisco',
] + common.CHECK_TAGS
resources = ["hrSWRunPerfMem", "hrSWRunPerfCPU"]
common.assert_common_metrics(aggregator, tags)
for resource in resources:
aggregator.assert_metric('snmp.{}'.format(resource), metric_type=aggregator.GAUGE, tags=tags)
run_indices = [4, 7, 8, 9, 10, 18, 24, 29, 30]
for index in run_indices:
status_tags = tags + ['run_index:{}'.format(index)]
aggregator.assert_metric('snmp.hrSWRunStatus', metric_type=aggregator.GAUGE, tags=status_tags)
cvp_gauges = [
"ccvpSipIntAvgLatency1",
"ccvpSipIntAvgLatency2",
"ccvpSipIntConnectsRcv",
"ccvpSipIntNewCalls",
"ccvpSipRtActiveCalls",
"ccvpSipRtTotalCallLegs",
"ccvpLicRtPortsInUse",
"ccvpLicAggMaxPortsInUse",
]
for cvp in cvp_gauges:
aggregator.assert_metric('snmp.{}'.format(cvp), metric_type=aggregator.GAUGE, tags=tags)
ccms_counts = ["ccmRejectedPhones", "ccmUnregisteredPhones"]
ccms_gauges = ["ccmRegisteredGateways", "ccmRegisteredPhones"]
for ccm in ccms_counts:
aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.RATE, tags=tags)
for ccm in ccms_gauges:
aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.GAUGE, tags=tags)
calls = [
"cvCallVolPeerIncomingCalls",
"cvCallVolPeerOutgoingCalls",
]
peers = [4, 13, 14, 17, 18, 22, 25, 30, 31]
for call in calls:
for peer in peers:
peer_tags = tags + ["peer_index:{}".format(peer)]
aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=peer_tags)
calls = [
"cvCallVolMediaIncomingCalls",
"cvCallVolMediaOutgoingCalls",
]
for call in calls:
aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=tags)
dial_controls = [
"dialCtlPeerStatsAcceptCalls",
"dialCtlPeerStatsFailCalls",
"dialCtlPeerStatsRefuseCalls",
"dialCtlPeerStatsSuccessCalls",
]
for ctl in dial_controls:
aggregator.assert_metric(
'snmp.{}'.format(ctl), metric_type=aggregator.MONOTONIC_COUNT, tags=["peer_index:7"] + tags
)
pim_tags = tags + ['pim_host:test', 'pim_name:name', 'pim_num:2']
aggregator.assert_metric('snmp.{}'.format("cccaPimStatus"), metric_type=aggregator.GAUGE, tags=pim_tags)
aggregator.assert_metric('snmp.{}'.format("sysUpTimeInstance"), metric_type=aggregator.GAUGE, tags=tags, count=1)
instance_numbers = ['4446', '5179', '12093', '19363', '25033', '37738', '42562', '51845', '62906', '63361']
for metric in CCCA_ROUTER_GAUGES:
for instance_number in instance_numbers:
instance_tags = tags + ['instance_number:{}'.format(instance_number)]
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=instance_tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5(aggregator):
profile = 'f5-big-ip'
run_profile_check(profile)
gauges = [
'sysStatMemoryTotal',
'sysStatMemoryUsed',
'sysGlobalTmmStatMemoryTotal',
'sysGlobalTmmStatMemoryUsed',
'sysGlobalHostOtherMemoryTotal',
'sysGlobalHostOtherMemoryUsed',
'sysGlobalHostSwapTotal',
'sysGlobalHostSwapUsed',
'sysTcpStatOpen',
'sysTcpStatCloseWait',
'sysTcpStatFinWait',
'sysTcpStatTimeWait',
'sysUdpStatOpen',
'sysClientsslStatCurConns',
]
counts = [
'sysTcpStatAccepts',
'sysTcpStatAcceptfails',
'sysTcpStatConnects',
'sysTcpStatConnfails',
'sysUdpStatAccepts',
'sysUdpStatAcceptfails',
'sysUdpStatConnects',
'sysUdpStatConnfails',
'sysClientsslStatEncryptedBytesIn',
'sysClientsslStatEncryptedBytesOut',
'sysClientsslStatDecryptedBytesIn',
'sysClientsslStatDecryptedBytesOut',
'sysClientsslStatHandshakeFailures',
]
cpu_rates = [
'sysMultiHostCpuUser',
'sysMultiHostCpuNice',
'sysMultiHostCpuSystem',
'sysMultiHostCpuIdle',
'sysMultiHostCpuIrq',
'sysMultiHostCpuSoftirq',
'sysMultiHostCpuIowait',
]
cpu_gauges = [
'sysMultiHostCpuUsageRatio',
]
interfaces = [
('1.0', 'desc2'),
('mgmt', 'desc1'),
('/Common/internal', 'desc5'),
('/Common/http-tunnel', 'desc3'),
('/Common/socks-tunnel', 'desc4'),
]
interfaces_with_bandwidth_usage = {
'1.0',
'mgmt',
'/Common/internal',
}
tags = [
'snmp_profile:' + profile,
'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal',
'device_vendor:f5',
]
tags += common.CHECK_TAGS
common.assert_common_metrics(aggregator, tags)
for metric in gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in counts:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
for metric in cpu_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:0'] + tags, count=1)
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:1'] + tags, count=1)
for metric in cpu_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=['cpu:0'] + tags, count=1)
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=['cpu:1'] + tags, count=1)
for metric in IF_SCALAR_GAUGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for interface, desc in interfaces:
interface_tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=interface_tags, count=1
)
for metric in IF_RATES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=interface_tags, count=1
)
if interface in interfaces_with_bandwidth_usage:
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=interface_tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric(
'snmp.{}'.format(metric),
metric_type=aggregator.GAUGE,
tags=interface_tags,
count=1,
)
for version in ['ipv4', 'ipv6']:
ip_tags = ['ipversion:{}'.format(version)] + tags
for metric in IP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=ip_tags, count=1
)
for metric in LTM_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
servers = ['server1', 'server2', 'server3']
for server in servers:
server_tags = tags + ['server:{}'.format(server)]
for metric in LTM_VIRTUAL_SERVER_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=server_tags, count=1)
for metric in LTM_VIRTUAL_SERVER_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=server_tags, count=1
)
for metric in LTM_VIRTUAL_SERVER_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=server_tags, count=1)
nodes = ['node1', 'node2', 'node3']
for node in nodes:
node_tags = tags + ['node:{}'.format(node)]
for metric in LTM_NODES_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=node_tags, count=1)
for metric in LTM_NODES_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=node_tags, count=1
)
for metric in LTM_NODES_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=node_tags, count=1)
pools = ['pool1', 'pool2']
for pool in pools:
pool_tags = tags + ['pool:{}'.format(pool)]
for metric in LTM_POOL_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_tags, count=1)
for metric in LTM_POOL_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_tags, count=1
)
for metric in LTM_POOL_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_tags, count=1)
pool_members = [('pool1', 'node1'), ('pool1', 'node2'), ('pool2', 'node3')]
for pool, node in pool_members:
pool_member_tags = tags + ['pool:{}'.format(pool), 'node:{}'.format(node)]
for metric in LTM_POOL_MEMBER_GAUGES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_member_tags, count=1
)
for metric in LTM_POOL_MEMBER_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_member_tags, count=1
)
for metric in LTM_POOL_MEMBER_RATES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_member_tags, count=1
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
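# generic-router profile: per-interface IF-MIB counters/rates/gauges, TCP/UDP scalars, and per-IP-version IP counters.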
@pytest.mark.usefixtures("dd_environment")
def test_router(aggregator):
profile = "generic-router"
run_profile_check(profile)
common_tags = common.CHECK_TAGS + ['snmp_profile:' + profile]
common.assert_common_metrics(aggregator, common_tags)
for metric in IF_SCALAR_GAUGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
interfaces = [
('eth0', 'kept'),
('eth1', 'their forward oxen'),
]
for interface, if_desc in interfaces:
tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(if_desc)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for version in ['ipv4', 'ipv6']:
tags = ['ipversion:{}'.format(version)] + common_tags
for metric in IP_COUNTS + IPX_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IP_IF_COUNTS:
for interface in ['17', '21']:
tags = ['ipversion:{}'.format(version), 'interface:{}'.format(interface)] + common_tags
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5_router(aggregator):
# Use the generic profile against the f5 device
instance = common.generate_instance_config([])
instance['community_string'] = 'f5-big-ip'
instance['enforce_mib_constraints'] = False
init_config = {'profiles': {'router': {'definition_file': 'generic-router.yaml'}}}
check = SnmpCheck('snmp', init_config, [instance])
check.check(instance)
interfaces = [
('1.0', 'desc2'),
('mgmt', 'desc1'),
('/Common/internal', 'desc5'),
('/Common/http-tunnel', 'desc3'),
('/Common/socks-tunnel', 'desc4'),
]
interfaces_with_bandwidth_usage = {
'1.0',
'mgmt',
'/Common/internal',
}
common_tags = [
'snmp_profile:router',
'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal',
]
common_tags.extend(common.CHECK_TAGS)
common.assert_common_metrics(aggregator, common_tags)
for metric in IF_SCALAR_GAUGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for interface, desc in interfaces:
tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
if interface in interfaces_with_bandwidth_usage:
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for version in ['ipv4', 'ipv6']:
tags = ['ipversion:{}'.format(version)] + common_tags
for metric in IP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
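# Cisco Catalyst 3850: interface metrics plus entity sensors, FRUs, CPU/memory pools,
# environment monitors (temperature, power, fans), stack/switch state and OSPF neighbor/interface metrics.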
@pytest.mark.usefixtures("dd_environment")
def test_cisco_3850(aggregator):
profile = "cisco-3850"
run_profile_check(profile)
# We're not covering all interfaces
interfaces = ["Gi1/0/{}".format(i) for i in range(1, 48)]
common_tags = common.CHECK_TAGS + [
'snmp_host:Cat-3850-4th-Floor.companyname.local',
'snmp_profile:' + profile,
'device_vendor:cisco',
]
common.assert_common_metrics(aggregator, common_tags)
aliases = {
'Gi1/0/24': 'LWAP-example',
'Gi1/0/33': 'switchboard console',
'Gi1/0/38': 'Mitel Console',
'Gi1/1/3': 'Link to Switch',
'Gi2/0/13': 'AP01',
'Gi2/0/14': 'AP02',
'Gi2/0/15': 'AP03',
'Gi2/0/16': 'AP04',
'Gi2/0/17': 'AP05',
'Gi2/0/18': 'AP06',
'Gi2/1/4': 'Link to Switch',
}
for metric in IF_SCALAR_GAUGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for interface in interfaces:
alias = aliases.get(interface, '')
tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(alias)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IP_COUNTS + IPX_COUNTS:
tags = common_tags + ['ipversion:ipv6']
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
sensors = [1006, 1007, 1008, 2006, 2007, 2008]
for sensor in sensors:
tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
frus = [1001, 1010, 2001, 2010]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpus = [1000, 2000]
for cpu in cpus:
tags = ['cpu:{}'.format(cpu)] + common_tags
for metric in CPU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in CIE_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
power_supplies = [
(1, 'Switch 1 - Power Supply B, NotExist'),
(1, 'Switch 2 - Power Supply B, NotExist'),
(2, 'Switch 1 - Power Supply A, Normal'),
(2, 'Switch 2 - Power Supply A, Normal'),
]
for source, descr in power_supplies:
env_tags = ['power_source:{}'.format(source), 'power_status_descr:{}'.format(descr)]
aggregator.assert_metric(
'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=env_tags + common_tags
)
aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
for switch, mac_addr in [(1, '0x046c9d42b080'), (2, '0xdccec1430680')]:
tags = ['entity_name:Switch {}'.format(switch), 'mac_addr:{}'.format(mac_addr)] + common_tags
aggregator.assert_metric('snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=tags)
frus = [1011, 1012, 1013, 2011, 2012, 2013]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
for mem_metric in MEMORY_METRICS:
for pool in ['Processor', 'IOS Process stack']:
tags = ['mem_pool_name:{}'.format(pool)] + common_tags
aggregator.assert_metric('snmp.{}'.format(mem_metric), metric_type=aggregator.GAUGE, tags=tags)
neighbor_metrics = [
('ospfNbrEvents', aggregator.RATE),
('ospfNbrState', aggregator.GAUGE),
('ospfNbrLsRetransQLen', aggregator.GAUGE),
]
for metric, metric_type in neighbor_metrics:
tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
lls_metrics = ['ospfIfRetransInterval', 'ospfIfState']
for metric in lls_metrics:
tags = ['ospf_ip_addr:192.29.116.25'] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for temp_index in [1006, 1007, 1008, 2006, 2007, 2008]:
env_tag = ['temp_index:{}'.format(temp_index), 'temp_state:1']
aggregator.assert_metric(
'snmp.ciscoEnvMonTemperatureStatusValue', metric_type=aggregator.GAUGE, tags=env_tag + common_tags
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
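# Meraki cloud controller: per-device and per-device-interface gauges, plus standard IF-MIB metrics for eth0.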
@pytest.mark.usefixtures("dd_environment")
def test_meraki_cloud_controller(aggregator):
run_profile_check('meraki-cloud-controller')
common_tags = common.CHECK_TAGS + [
'snmp_profile:meraki-cloud-controller',
'snmp_host:dashboard.meraki.com',
'device_vendor:meraki',
]
common.assert_common_metrics(aggregator, common_tags)
dev_metrics = ['devStatus', 'devClientCount']
dev_tags = ['device:Gymnasium', 'product:MR16-HW', 'network:L_NETWORK', 'mac_address:0x02020066f57f'] + common_tags
for metric in dev_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=dev_tags, count=1)
if_tags = ['interface:wifi0', 'index:4', 'mac_address:0x02020066f500'] + common_tags
if_metrics = ['devInterfaceSentPkts', 'devInterfaceRecvPkts', 'devInterfaceSentBytes', 'devInterfaceRecvBytes']
for metric in if_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
# IF-MIB
if_tags = ['interface:eth0'] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
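# Dell iDRAC: adapter counters plus component health gauges (power supplies, batteries, controllers,
# probes, cooling, disks, memory), each tagged with its identifying table columns.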
@pytest.mark.usefixtures("dd_environment")
def test_idrac(aggregator):
run_profile_check('idrac')
interfaces = ['eth0', 'en1']
common_tags = common.CHECK_TAGS + ['snmp_profile:idrac', 'device_vendor:dell']
common.assert_common_metrics(aggregator, common_tags)
for interface in interfaces:
tags = ['adapter:{}'.format(interface)] + common_tags
for count in ADAPTER_IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(count), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
indexes = ['26', '29']
for index in indexes:
tags = ['chassis_index:{}'.format(index)] + common_tags
for gauge in IDRAC_SYSTEM_STATUS_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
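# Enclosure power supplies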
tag_mappings = [
('supply1', '13', 'forward their oxen acted acted'),
('supply2', '16', 'quaintly but acted'),
]
for name, number, fqdd in tag_mappings:
tags = [
'supply_name:{}'.format(name),
'enclosure_power_supply_number:{}'.format(number),
'enclosure_power_supply_fqdd:{}'.format(fqdd),
] + common_tags
aggregator.assert_metric('snmp.enclosurePowerSupplyState', metric_type=aggregator.GAUGE, tags=tags, count=1)
disks = ['disk1', 'disk2']
for disk in disks:
tags = ['disk_name:{}'.format(disk)] + common_tags
for gauge in DISK_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
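# Batteries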
tag_mappings = [
('battery1', 'but but acted driving driving'),
('battery2', 'oxen acted Jaded quaintly kept forward quaintly forward Jaded'),
]
for name, fqdd in tag_mappings:
tags = [
'battery_name:{}'.format(name),
'battery_fqdd:{}'.format(fqdd),
] + common_tags
aggregator.assert_metric('snmp.batteryState', metric_type=aggregator.GAUGE, tags=tags, count=1)
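# Storage controllers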
tag_mappings = [
(
'controller1',
'4',
'quaintly kept acted acted but acted zombies quaintly forward',
'quaintly zombies acted driving oxen',
),
('controller2', '21', 'acted', 'driving quaintly'),
]
for name, number, pci_slot, fqdd in tag_mappings:
tags = [
'controller_name:{}'.format(name),
'controller_number:{}'.format(number),
'controller_pci_slot:{}'.format(pci_slot),
'controller_fqdd:{}'.format(fqdd),
] + common_tags
aggregator.assert_metric('snmp.controllerRollUpStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
devices = ['device1', 'device2']
indexes = ['10', '20']
for device, index in zip(devices, indexes):
tags = ['device_descr_name:{}'.format(device), 'chassis_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.pCIDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
slots = ['slot1', 'slot2']
indexes = ['19', '21']
for slot, index in zip(slots, indexes):
tags = ['slot_name:{}'.format(slot), 'chassis_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.systemSlotStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('29', 'device2', '0x9e00e0291401'), ('3', 'device1', '0x9e00e0291401')]
for index, device, mac in tag_mappings:
tags = [
'chassis_index:{}'.format(index),
'device_fqdd:{}'.format(device),
'mac_addr:{}'.format(mac),
] + common_tags
aggregator.assert_metric('snmp.networkDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('3', '26'), ('31', '19')]
for chassis_index, bios_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'system_bios_index:{}'.format(bios_index),
] + common_tags
aggregator.assert_metric('snmp.systemBIOSStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
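# Amperage probes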
tag_mappings = [('9', '26', '28'), ('18', '26', '4')]
for chassis_index, probe_type, probe_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'probe_type:{}'.format(probe_type),
'amperage_probe_index:{}'.format(probe_index),
] + common_tags
for gauge in PROBE_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
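# Voltage probes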
tag_mappings = [('12', '6', '15'), ('22', '3', '19')]
for chassis_index, probe_type, probe_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'probe_type:{}'.format(probe_type),
'voltage_probe_index:{}'.format(probe_index),
] + common_tags
for gauge in VOLTAGE_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('18', '23', 'Jaded oxen driving zombies acted oxen'),
('29', '21', 'kept zombies oxen kept driving forward oxen'),
]
for chassis_index, intrusion_index, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'intrusion_index:{}'.format(intrusion_index),
'intrusion_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.intrusionStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.intrusionReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('12', '14', 'zombies quaintly forward acted quaintly acted Jaded zombies'),
('22', '22', 'acted quaintly their Jaded oxen forward forward'),
]
for chassis_index, power_usage_index, power_usage_entity_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'power_usage_index:{}'.format(power_usage_index),
'power_usage_entity_name:{}'.format(power_usage_entity_name),
] + common_tags
aggregator.assert_metric('snmp.powerUsageStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('20', '31', 'quaintly but oxen Jaded driving'),
('21', '13', 'kept kept their but quaintly kept quaintly driving'),
]
for chassis_index, battery_index, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'system_battery_index:{}'.format(battery_index),
'system_battery_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.systemBatteryStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.systemBatteryReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('1', '19', 'driving oxen forward'),
('6', '31', 'their Jaded quaintly but but their quaintly kept acted'),
]
for chassis_index, cooling_unit_index, cooling_unit_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'cooling_unit_index:{}'.format(cooling_unit_index),
'cooling_unit_name:{}'.format(cooling_unit_name),
] + common_tags
aggregator.assert_metric('snmp.coolingUnitRedundancyStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.coolingUnitStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('8', '11', '9', 'acted', 'acted'),
('19', '3', '10', 'acted oxen but zombies driving acted Jaded', 'quaintly kept'),
]
for chassis_index, device_name, device_type, location_name, cooling_device_fqdd in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'cooling_device_name:{}'.format(device_name),
'cooling_device_type:{}'.format(device_type),
'cooling_device_location_name:{}'.format(location_name),
'cooling_device_fqdd:{}'.format(cooling_device_fqdd),
] + common_tags
aggregator.assert_metric('snmp.coolingDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.coolingDeviceReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.coolingDeviceDiscreteReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('7', '28', '2', 'quaintly their but forward acted acted kept Jaded forward'),
('15', '28', '2', 'but driving quaintly kept Jaded'),
]
for chassis_index, probe_index, probe_type, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'temperature_probe_index:{}'.format(probe_index),
'temperature_probe_type:{}'.format(probe_type),
'temperature_probe_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.temperatureProbeStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.temperatureProbeReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric(
'snmp.temperatureProbeDiscreteReading', metric_type=aggregator.GAUGE, tags=tags, count=1
)
tag_mappings = [
('4', '24', 'but oxen forward', 'their forward oxen'),
(
'19',
'1',
'but driving oxen but driving oxen oxen oxen forward',
'zombies quaintly Jaded but Jaded driving acted forward',
),
]
for chassis_index, device_index, brand_name, device_fqdd in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'processor_device_index:{}'.format(device_index),
'processor_device_brand_name:{}'.format(brand_name),
'processor_device_fqdd:{}'.format(device_fqdd),
] + common_tags
aggregator.assert_metric('snmp.processorDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.processorDeviceMaximumSpeed', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.processorDeviceCurrentSpeed', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.processorDeviceVoltage', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('3', '11', 'driving zombies oxen driving kept Jaded driving'),
(
'18',
'21',
'kept kept',
),
]
for chassis_index, status_index, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'processor_device_status_index:{}'.format(status_index),
'processor_device_status_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.processorDeviceStatusStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.processorDeviceStatusReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('2', '11', 'but kept Jaded'),
(
'8',
'3',
'quaintly quaintly oxen oxen kept kept their acted forward',
),
]
for chassis_index, fru_index, fru_fqdd in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'fru_index:{}'.format(fru_index),
'fru_fqdd:{}'.format(fru_fqdd),
] + common_tags
aggregator.assert_metric('snmp.fruInformationStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('2', 'driving quaintly kept Jaded forward but forward kept', 'Jaded but Jaded their'),
(
'8',
'zombies quaintly kept kept but quaintly forward quaintly oxen',
'oxen acted their their forward but Jaded zombies oxen',
),
]
for disk_number, disk_name, disk_fqdd in tag_mappings:
tags = [
'virtual_disk_number:{}'.format(disk_number),
'virtual_disk_name:{}'.format(disk_name),
'virtual_disk_fqdd:{}'.format(disk_fqdd),
] + common_tags
aggregator.assert_metric('snmp.virtualDiskState', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.virtualDiskSizeInMB', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.virtualDiskComponentStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.virtualDiskT10PIStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('2', '27'),
(
'83',
'86',
),
]
for chassis_index, psu_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'drs_psu_index:{}'.format(psu_index),
] + common_tags
aggregator.assert_metric('snmp.drsWattsReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.drsAmpsReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.drsKWhCumulative', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
indexes = ['29', '22']
device_types = ['26', '4']
device_indexes = ['4', '21']
for index, device_type, device_index in zip(indexes, device_types, device_indexes):
tags = [
'chassis_index:{}'.format(index),
'device_type:{}'.format(device_type),
'device_index:{}'.format(device_index),
] + common_tags
aggregator.assert_metric('snmp.memoryDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
for gauge in DRS_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
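# Cisco Nexus: interface, TCP/UDP, entity sensor, FRU, CPU and environment-monitor metrics.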
@pytest.mark.usefixtures("dd_environment")
def test_cisco_nexus(aggregator):
profile = "cisco-nexus"
run_profile_check(profile)
interfaces = ["GigabitEthernet1/0/{}".format(i) for i in range(1, 9)]
common_tags = common.CHECK_TAGS + [
'snmp_host:Nexus-eu1.companyname.managed',
'snmp_profile:' + profile,
'device_vendor:cisco',
]
common.assert_common_metrics(aggregator, common_tags)
for metric in IF_SCALAR_GAUGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
for interface in interfaces:
tags = ['interface:{}'.format(interface), 'interface_alias:'] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
sensors = [1, 9, 11, 12, 12, 14, 17, 26, 29, 31]
for sensor in sensors:
tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
frus = [6, 7, 15, 16, 19, 27, 30, 31]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpus = [3173, 6692, 11571, 19529, 30674, 38253, 52063, 54474, 55946, 63960]
for cpu in cpus:
tags = ['cpu:{}'.format(cpu)] + common_tags
for metric in CPU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
aggregator.assert_metric(
'snmp.ciscoEnvMonTemperatureStatusValue',
metric_type=aggregator.GAUGE,
tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
)
power_supply_tags = ['power_source:1', 'power_status_descr:Jaded driving their their their'] + common_tags
aggregator.assert_metric('snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=power_supply_tags)
fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
for index in fan_indices:
tags = ['fan_status_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags)
aggregator.assert_metric(
'snmp.cswStackPortOperStatus',
metric_type=aggregator.GAUGE,
tags=common_tags + ['interface:GigabitEthernet1/0/1'],
)
aggregator.assert_metric(
'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
)
frus = [2, 7, 8, 21, 26, 27, 30, 31]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
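# Dell PowerEdge: system memory, power supply, temperature probe, processor, cache and memory device
# gauges, plus iDRAC-style component health assertions.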
@pytest.mark.usefixtures("dd_environment")
def test_dell_poweredge(aggregator):
run_profile_check('dell-poweredge')
# PowerEdge
sys_mem_gauges = [
'operatingSystemMemoryAvailablePhysicalSize',
'operatingSystemMemoryTotalPageFileSize',
'operatingSystemMemoryAvailablePageFileSize',
'operatingSystemMemoryTotalVirtualSize',
'operatingSystemMemoryAvailableVirtualSize',
]
power_supply_gauges = [
'powerSupplyStatus',
'powerSupplyOutputWatts',
'powerSupplyMaximumInputVoltage',
'powerSupplyCurrentInputVoltage',
]
temperature_probe_gauges = ['temperatureProbeStatus', 'temperatureProbeReading']
processor_device_gauges = ['processorDeviceStatus', 'processorDeviceThreadCount']
cache_device_gauges = ['cacheDeviceStatus', 'cacheDeviceMaximumSize', 'cacheDeviceCurrentSize']
memory_device_gauges = ['memoryDeviceStatus', 'memoryDeviceFailureModes']
common_tags = common.CHECK_TAGS + [
'snmp_profile:dell-poweredge',
'device_vendor:dell',
]
common.assert_common_metrics(aggregator, common_tags)
chassis_indexes = [29, 31]
for chassis_index in chassis_indexes:
tags = ['chassis_index:{}'.format(chassis_index)] + common_tags
for metric in sys_mem_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
indexes = [5, 17]
for index in indexes:
tags = ['chassis_index:4', 'index:{}'.format(index)] + common_tags
for metric in power_supply_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
tag_mappings = [
('14', '8', '2', 'but their forward oxen oxen'),
('18', '13', '16', 'acted Jaded kept kept but quaintly quaintly zombies'),
('21', '13', '1', 'kept oxen oxen forward'),
('22', '4', '3', 'but but oxen zombies quaintly quaintly but Jaded'),
('23', '23', '3', 'kept driving driving Jaded zombies forward quaintly zombies but'),
('24', '10', '3', 'acted their kept forward forward'),
('25', '17', '1', 'oxen their their oxen'),
]
for chassis_index, probe_index, probe_type, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'index:{}'.format(probe_index),
'temperature_probe_type:{}'.format(probe_type),
'temperature_probe_location_name:{}'.format(location_name),
] + common_tags
for metric in temperature_probe_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
indexes = [17, 28]
for index in indexes:
tags = ['chassis_index:5', 'index:{}'.format(index)] + common_tags
for metric in processor_device_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
indexes = [15, 27]
for index in indexes:
tags = ['chassis_index:11', 'index:{}'.format(index)] + common_tags
for metric in cache_device_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
serial_numbers = ['forward zombies acted Jaded', 'kept oxen their their oxen oxen']
for serial_number in serial_numbers:
tags = ['serial_number_name:{}'.format(serial_number), 'chassis_index:1'] + common_tags
for metric in memory_device_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
ip_addresses = ['66.97.1.103', '62.148.76.32', '45.3.243.155']
for ip_address in ip_addresses:
tags = ['ip_address:{}'.format(ip_address)] + common_tags
aggregator.assert_metric('snmp.networkDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, at_least=1)
# Intel Adapter
interfaces = ['eth0', 'en1']
for interface in interfaces:
tags = ['adapter:{}'.format(interface)] + common_tags
for count in ADAPTER_IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(count), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
indexes = ['26', '29']
for index in indexes:
tags = ['chassis_index:{}'.format(index)] + common_tags
for gauge in POWEREDGE_SYSTEM_STATUS_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('3', '17', 'zombies kept their quaintly but'),
('6', '19', 'zombies'),
]
for chassis_index, battery_index, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'battery_index:{}'.format(battery_index),
'battery_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.batteryStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.batteryReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('3', '26'), ('31', '19')]
for chassis_index, bios_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'system_bios_index:{}'.format(bios_index),
] + common_tags
aggregator.assert_metric('snmp.systemBIOSStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('9', '26', '28'), ('18', '26', '4')]
for chassis_index, probe_type, probe_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'probe_type:{}'.format(probe_type),
'amperage_probe_index:{}'.format(probe_index),
] + common_tags
for gauge in PROBE_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('12', '6', '15'), ('22', '3', '19')]
for chassis_index, probe_type, probe_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'probe_type:{}'.format(probe_type),
'voltage_probe_index:{}'.format(probe_index),
] + common_tags
for gauge in VOLTAGE_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('18', '23', 'Jaded oxen driving zombies acted oxen'),
('29', '21', 'kept zombies oxen kept driving forward oxen'),
]
for chassis_index, intrusion_index, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'intrusion_index:{}'.format(intrusion_index),
'intrusion_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.intrusionStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.intrusionReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('12', '14', 'zombies quaintly forward acted quaintly acted Jaded zombies'),
('22', '22', 'acted quaintly their Jaded oxen forward forward'),
]
for chassis_index, power_usage_index, power_usage_entity_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'power_usage_index:{}'.format(power_usage_index),
'power_usage_entity_name:{}'.format(power_usage_entity_name),
] + common_tags
aggregator.assert_metric('snmp.powerUsageStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('1', '19', 'driving oxen forward'),
('6', '31', 'their Jaded quaintly but but their quaintly kept acted'),
]
for chassis_index, cooling_unit_index, cooling_unit_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'cooling_unit_index:{}'.format(cooling_unit_index),
'cooling_unit_name:{}'.format(cooling_unit_name),
] + common_tags
aggregator.assert_metric('snmp.coolingUnitRedundancyStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.coolingUnitStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('8', '11', '9', 'acted'),
('19', '3', '10', 'acted oxen but zombies driving acted Jaded'),
]
for chassis_index, device_name, device_type, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'cooling_device_name:{}'.format(device_name),
'cooling_device_type:{}'.format(device_type),
'cooling_device_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.coolingDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.coolingDeviceReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.coolingDeviceDiscreteReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('3', '11', 'driving zombies oxen driving kept Jaded driving'),
(
'18',
'21',
'kept kept',
),
]
for chassis_index, status_index, location_name in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'processor_device_status_index:{}'.format(status_index),
'processor_device_status_location_name:{}'.format(location_name),
] + common_tags
aggregator.assert_metric('snmp.processorDeviceStatusStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.processorDeviceStatusReading', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [
('2', '11'),
(
'8',
'3',
),
]
for chassis_index, fru_index in tag_mappings:
tags = [
'chassis_index:{}'.format(chassis_index),
'fru_index:{}'.format(fru_index),
] + common_tags
aggregator.assert_metric('snmp.fruInformationStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('19', 'their kept kept zombies kept zombies their'), ('21', 'zombies their')]
for index, slot in tag_mappings:
tags = ['slot_name:{}'.format(slot), 'chassis_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.systemSlotStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('2', 'driving oxen oxen but'), ('7', 'kept but Jaded oxen quaintly Jaded zombies')]
for index, descr_name in tag_mappings:
tags = ['device_descr_name:{}'.format(descr_name), 'chassis_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.pCIDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
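# HP iLO4: health/status gauges, cpqHe/cpqSm2 counters, per-sensor temperatures, batteries,
# NIC card and physical adapter counters, and per-drive error counters and status gauges.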
@pytest.mark.usefixtures("dd_environment")
def test_hp_ilo4(aggregator):
profile = "hp-ilo4"
run_profile_check(profile)
status_gauges = [
'cpqHeCritLogCondition',
'cpqHeCorrMemLogStatus',
'cpqHeCorrMemLogCondition',
'cpqHeAsrStatus',
'cpqHeAsrPost',
'cpqHeAsrCondition',
'cpqHeAsrNetworkAccessStatus',
'cpqHeThermalCondition',
'cpqHeThermalTempStatus',
'cpqHeThermalSystemFanStatus',
'cpqHeThermalCpuFanStatus',
'cpqNicVtVirusActivity',
'cpqSm2CntlrServerPowerState',
'cpqSm2CntlrBatteryStatus',
'cpqSm2CntlrRemoteSessionStatus',
'cpqSm2CntlrInterfaceStatus',
]
cpqhlth_counts = ['cpqHeAsrRebootCount', 'cpqHeCorrMemTotalErrs']
cpqhlth_gauges = ['cpqHeSysUtilEisaBusMin', 'cpqHePowerMeterCurrReading', 'cpqHeSysUtilLifeTime']
cpqsm2_gauges = [
'cpqSm2CntlrBatteryPercentCharged',
'cpqSm2CntlrSelfTestErrors',
'cpqSm2EventTotalEntries',
]
EMBEDDED = 2
PCMCIA = 3
card_locations = [EMBEDDED, PCMCIA]
network_card_counts = [
'cpqSm2NicXmitBytes',
'cpqSm2NicXmitTotalPackets',
'cpqSm2NicXmitDiscardPackets',
'cpqSm2NicXmitErrorPackets',
'cpqSm2NicXmitQueueLength',
'cpqSm2NicRecvBytes',
'cpqSm2NicRecvTotalPackets',
'cpqSm2NicRecvDiscardPackets',
'cpqSm2NicRecvErrorPackets',
'cpqSm2NicRecvUnknownPackets',
]
interfaces = ['eth0', 'en1']
phys_adapter_counts = [
'cpqNicIfPhysAdapterGoodTransmits',
'cpqNicIfPhysAdapterGoodReceives',
'cpqNicIfPhysAdapterBadTransmits',
'cpqNicIfPhysAdapterBadReceives',
'cpqNicIfPhysAdapterInOctets',
'cpqNicIfPhysAdapterOutOctets',
]
phys_adapter_gauges = ['cpqNicIfPhysAdapterSpeed', 'cpqNicIfPhysAdapterSpeedMbps']
temperature_sensors = [1, 13, 28]
batteries = [1, 3, 4, 5]
common_tags = common.CHECK_TAGS + ['snmp_profile:' + profile, 'device_vendor:hp', 'snmp_host:hp-ilo4.example']
common.assert_common_metrics(aggregator, common_tags)
for metric in status_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in cpqhlth_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in cpqhlth_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in cpqsm2_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for index in temperature_sensors:
tags = ['temperature_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.cpqHeTemperatureCelsius', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.cpqHeTemperatureCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
for index in batteries:
tags = ['battery_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.cpqHeSysBatteryCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.cpqHeSysBatteryStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
for location in card_locations:
tags = ['nic_stats_location:{}'.format(location)] + common_tags
for metric in network_card_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in phys_adapter_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in phys_adapter_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
drive_counts = [
"cpqDaPhyDrvUsedReallocs",
"cpqDaPhyDrvRefHours",
"cpqDaPhyDrvHardReadErrs",
"cpqDaPhyDrvRecvReadErrs",
"cpqDaPhyDrvHardWriteErrs",
"cpqDaPhyDrvRecvWriteErrs",
"cpqDaPhyDrvHSeekErrs",
"cpqDaPhyDrvSeekErrs",
]
drive_gauges = [
"cpqDaPhyDrvStatus",
"cpqDaPhyDrvFactReallocs",
"cpqDaPhyDrvSpinupTime",
"cpqDaPhyDrvSize",
"cpqDaPhyDrvSmartStatus",
"cpqDaPhyDrvCurrentTemperature",
]
drive_idx = [(0, 2), (0, 28), (8, 31), (9, 24), (9, 28), (10, 17), (11, 4), (12, 20), (18, 22), (23, 2)]
for drive_cntrl_idx, drive_index in drive_idx:
tags = ['drive_cntrl_idx:{}'.format(drive_cntrl_idx), "drive_index:{}".format(drive_index)] + common_tags
for metric in drive_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in drive_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
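# HPE ProLiant: TCP/UDP scalars, per-CPU and per-filesystem gauges, memory module and drive metrics,
# IF-MIB interfaces, and power/controller/thermal conditions.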
@pytest.mark.usefixtures("dd_environment")
def test_proliant(aggregator):
run_profile_check('hpe-proliant')
common_tags = common.CHECK_TAGS + [
'snmp_profile:hpe-proliant',
'device_vendor:hp',
'snmp_host:hpe-proliant.example',
]
common.assert_common_metrics(aggregator, common_tags)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
cpu_gauges = [
"cpqSeCpuSlot",
"cpqSeCpuSpeed",
"cpqSeCpuStatus",
"cpqSeCpuExtSpeed",
"cpqSeCpuCore",
"cpqSeCPUCoreMaxThreads",
"cpqSeCpuPrimary",
]
cpu_indexes = [0, 4, 6, 8, 13, 15, 26, 27]
for idx in cpu_indexes:
tags = ['cpu_index:{}'.format(idx)] + common_tags
for metric in cpu_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpu_util_gauges = ["cpqHoCpuUtilMin", "cpqHoCpuUtilFiveMin", "cpqHoCpuUtilThirtyMin", "cpqHoCpuUtilHour"]
cpu_unit_idx = [4, 7, 13, 20, 22, 23, 29]
for idx in cpu_unit_idx:
tags = ['cpu_unit_index:{}'.format(idx)] + common_tags
for metric in cpu_util_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
file_sys_gauges = [
"cpqHoFileSysSpaceTotal",
"cpqHoFileSysSpaceUsed",
"cpqHoFileSysPercentSpaceUsed",
"cpqHoFileSysAllocUnitsTotal",
"cpqHoFileSysAllocUnitsUsed",
"cpqHoFileSysStatus",
]
file_sys_idx = [5, 8, 11, 15, 19, 21, 28, 30]
for idx in file_sys_idx:
tags = ['file_sys_index:{}'.format(idx)] + common_tags
for metric in file_sys_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
memory_gauges = [
"cpqSiMemModuleSize",
"cpqSiMemModuleType",
"cpqSiMemModuleSpeed",
"cpqSiMemModuleTechnology",
"cpqSiMemModuleECCStatus",
"cpqSiMemModuleFrequency",
"cpqSiMemModuleCellStatus",
]
memory_idx = [(6, 16), (7, 17), (7, 30), (8, 20), (10, 4), (15, 27), (20, 14), (21, 14), (23, 0), (28, 20)]
for board_idx, mem_module_index in memory_idx:
tags = ['mem_board_index:{}'.format(board_idx), "mem_module_index:{}".format(mem_module_index)] + common_tags
for metric in memory_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
drive_counts = [
"cpqDaPhyDrvUsedReallocs",
"cpqDaPhyDrvRefHours",
"cpqDaPhyDrvHardReadErrs",
"cpqDaPhyDrvRecvReadErrs",
"cpqDaPhyDrvHardWriteErrs",
"cpqDaPhyDrvRecvWriteErrs",
"cpqDaPhyDrvHSeekErrs",
"cpqDaPhyDrvSeekErrs",
]
drive_gauges = [
"cpqDaPhyDrvStatus",
"cpqDaPhyDrvFactReallocs",
"cpqDaPhyDrvSpinupTime",
"cpqDaPhyDrvSize",
"cpqDaPhyDrvSmartStatus",
"cpqDaPhyDrvCurrentTemperature",
]
drive_idx = [(0, 2), (0, 28), (8, 31), (9, 24), (9, 28), (10, 17), (11, 4), (12, 20), (18, 22), (23, 2)]
for drive_cntrl_idx, drive_index in drive_idx:
tags = ['drive_cntrl_idx:{}'.format(drive_cntrl_idx), "drive_index:{}".format(drive_index)] + common_tags
for metric in drive_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in drive_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
interfaces = [
('eth0', 'quaintly zombies quaintly forward'),
('eth1', 'quaintly but quaintly quaintly'),
]
for interface, desc in interfaces:
if_tags = ['interface:{}'.format(interface), 'interface_alias:{}'.format(desc)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
mem_boards = ['11', '12']
for board in mem_boards:
tags = ['mem_board_index:{}'.format(board)] + common_tags
aggregator.assert_metric('snmp.cpqHeResMem2ModuleCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
adapter_gauges = ['cpqNicIfPhysAdapterStatus', 'cpqNicIfPhysAdapterState']
for gauge in adapter_gauges:
tags = ['adapter_name:adapter', 'adapter_mac_addr:mac'] + common_tags
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
power_metrics = [
'cpqHeFltTolPowerSupplyStatus',
'cpqHeFltTolPowerSupplyCapacityUsed',
'cpqHeFltTolPowerSupplyCapacityMaximum',
]
for gauge in power_metrics:
tags = ['chassis_num:30'] + common_tags
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
controller_tags = ['controller_index:3'] + common_tags
aggregator.assert_metric('snmp.cpqDaCntlrCondition', metric_type=aggregator.GAUGE, tags=controller_tags, count=1)
thermal_metrics = ['cpqHeThermalCondition', 'cpqHeSysUtilLifeTime', 'cpqHeFltTolPwrSupplyStatus']
for metric in thermal_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
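# Generic HOST-RESOURCES-MIB profile, exercised directly through SnmpCheck instead of run_profile_check.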
@pytest.mark.usefixtures("dd_environment")
def test_generic_host_resources(aggregator):
instance = common.generate_instance_config([])
instance['community_string'] = 'generic_host'
instance['enforce_mib_constraints'] = False
instance['profile'] = 'generic'
init_config = {'profiles': {'generic': {'definition_file': '_generic-host-resources.yaml'}}}
check = SnmpCheck('snmp', init_config, [instance])
check.check(instance)
common_tags = common.CHECK_TAGS + ['snmp_profile:generic']
common.assert_common_metrics(aggregator, common_tags)
sys_metrics = [
'snmp.hrSystemUptime',
'snmp.hrSystemNumUsers',
'snmp.hrSystemProcesses',
'snmp.hrSystemMaxProcesses',
]
for metric in sys_metrics:
aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, tags=common_tags, count=1)
storages = [
('1.3.6.1.2.1.25.2.1.3', 'oxen their driving forward quaintly'),
('1.3.6.1.2.1.25.2.1.4', 'quaintly driving Jaded forward their quaintly zombies'),
]
for storage_type, storage_desc in storages:
tags = common_tags + ['storagetype:{}'.format(storage_type), 'storagedesc:{}'.format(storage_desc)]
aggregator.assert_metric('snmp.hrStorageAllocationUnits', count=1, tags=tags)
aggregator.assert_metric('snmp.hrStorageSize', count=1, tags=tags)
aggregator.assert_metric('snmp.hrStorageUsed', count=1, tags=tags)
aggregator.assert_metric('snmp.hrStorageAllocationFailures', count=1, tags=tags)
processors = [
'1.3.6.1.3.81.16',
'1.3.6.1.3.95.73.140.186.121.144.199',
]
for proc in processors:
tags = common_tags + ['processorid:{}'.format(proc)]
aggregator.assert_metric('snmp.hrProcessorLoad', count=1, tags=tags)
aggregator.assert_all_metrics_covered()
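# Palo Alto: session, GlobalProtect gateway and entity/FRU power gauges.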
@pytest.mark.usefixtures("dd_environment")
def test_palo_alto(aggregator):
profile = "palo-alto"
run_profile_check(profile)
common_tags = common.CHECK_TAGS + [
'snmp_profile:' + profile,
'device_vendor:paloaltonetworks',
]
common.assert_common_metrics(aggregator, common_tags)
session = [
'panSessionUtilization',
'panSessionMax',
'panSessionActive',
'panSessionActiveTcp',
'panSessionActiveUdp',
'panSessionActiveICMP',
'panSessionActiveSslProxy',
'panSessionSslProxyUtilization',
]
global_protect = [
'panGPGWUtilizationPct',
'panGPGWUtilizationMaxTunnels',
'panGPGWUtilizationActiveTunnels',
]
entity = [
'panEntityTotalPowerAvail',
'panEntityTotalPowerUsed',
]
entry = ['panEntryFRUModulePowerUsed', 'panEntryFRUModuleNumPorts']
for metric in session:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in global_protect:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in entity:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in entry:
# Needs a cross-table entPhysicalIsFRU tag
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
# Needs a cross-table entLogicalDescr tag
aggregator.assert_metric('snmp.panEntryFanTrayPowerUsed', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_all_metrics_covered()
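# cisco-asa and cisco-asa-5525 share the same expected metrics, factored into assert_cisco_asa below.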
@pytest.mark.usefixtures("dd_environment")
def test_cisco_asa_all(aggregator):
profile = "cisco-asa"
assert_cisco_asa(aggregator, profile)
@pytest.mark.usefixtures("dd_environment")
def test_cisco_asa_5525(aggregator):
profile = "cisco-asa-5525"
assert_cisco_asa(aggregator, profile)
def assert_cisco_asa(aggregator, profile):
run_profile_check(profile)
common_tags = common.CHECK_TAGS + [
'snmp_profile:' + profile,
'snmp_host:kept',
'device_vendor:cisco',
]
common.assert_common_metrics(aggregator, common_tags)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
if_tags = ['interface:eth0'] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
for metric in IF_BANDWIDTH_USAGE:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1)
frus = [3, 4, 5, 7, 16, 17, 24, 25]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpus = [7746]
for cpu in cpus:
tags = ['cpu:{}'.format(cpu)] + common_tags
for metric in CPU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
sensor_tags = ['sensor_id:31', 'sensor_type:9'] + common_tags
aggregator.assert_metric('snmp.entPhySensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
stat_tags = [(20, 2), (5, 5)]
for (svc, stat) in stat_tags:
aggregator.assert_metric(
'snmp.cfwConnectionStatValue',
metric_type=aggregator.GAUGE,
tags=['stat_type:{}'.format(stat), 'service_type:{}'.format(svc)] + common_tags,
)
aggregator.assert_metric('snmp.crasNumDeclinedSessions', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.crasNumSessions', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.crasNumUsers', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric(
'snmp.crasNumSetupFailInsufResources', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags
)
aggregator.assert_metric('snmp.cipSecGlobalActiveTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.cipSecGlobalHcInOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
aggregator.assert_metric('snmp.cipSecGlobalHcOutOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
aggregator.assert_metric(
'snmp.ciscoEnvMonTemperatureStatusValue',
metric_type=aggregator.GAUGE,
tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
)
power_supply_tags = ['power_source:1', 'power_status_descr:Jaded driving their their their'] + common_tags
aggregator.assert_metric('snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=power_supply_tags)
fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
for index in fan_indices:
tags = ['fan_status_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags)
aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
aggregator.assert_metric(
'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
)
frus = [2, 7, 8, 21, 26, 27, 30, 31]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
for mem_metric in MEMORY_METRICS:
tags = ['mem_pool_name:test_pool'] + common_tags
aggregator.assert_metric('snmp.{}'.format(mem_metric), metric_type=aggregator.GAUGE, tags=tags)
for conn in [1, 2, 5]:
conn_tags = ['connection_type:{}'.format(conn)] + common_tags
aggregator.assert_metric('snmp.cfwConnectionStatCount', metric_type=aggregator.RATE, tags=conn_tags)
hardware_tags = [(3, 'Secondary unit'), (5, 'Primary unit'), (6, 'Failover LAN Interface')]
for (htype, hdesc) in hardware_tags:
aggregator.assert_metric(
'snmp.cfwHardwareStatusValue',
metric_type=aggregator.GAUGE,
tags=['hardware_type:{}'.format(htype), 'hardware_desc:{}'.format(hdesc)] + common_tags,
)
for switch in [4684, 4850, 8851, 9997, 15228, 16580, 24389, 30813, 36264]:
aggregator.assert_metric(
'snmp.cvsChassisUpTime',
metric_type=aggregator.GAUGE,
tags=['chassis_switch_id:{}'.format(switch)] + common_tags,
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
# RTT
rtt_indexes = [1, 7, 10, 13, 15, 18, 20]
rtt_types = [22, 21, 17, 6, 20, 8, 16]
rtt_states = [3, 1, 6, 4, 6, 1, 6]
rtt_gauges = ['rttMonLatestRttOperCompletionTime', 'rttMonLatestRttOperSense', 'rttMonCtrlOperTimeoutOccurred']
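    # rtt_indexes, rtt_types and rtt_states are parallel lists; each triple identifies one RTT probe row.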
    for rtt_index, rtt_type, rtt_state in zip(rtt_indexes, rtt_types, rtt_states):
        tags = [
            "rtt_index:{}".format(rtt_index),
            "rtt_type:{}".format(rtt_type),
            "rtt_state:{}".format(rtt_state),
        ] + common_tags
for rtt in rtt_gauges:
aggregator.assert_metric('snmp.{}'.format(rtt), metric_type=aggregator.GAUGE, tags=tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_csr(aggregator):
run_profile_check('cisco-csr1000v')
common_tags = common.CHECK_TAGS + [
'snmp_profile:cisco-csr1000v',
'device_vendor:cisco',
]
common.assert_common_metrics(aggregator, common_tags)
_check_bgp4(aggregator, common_tags)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_checkpoint_firewall(aggregator):
run_profile_check('checkpoint-firewall')
common_tags = common.CHECK_TAGS + [
'snmp_profile:checkpoint-firewall',
'device_vendor:checkpoint',
]
common.assert_common_metrics(aggregator, common_tags)
cpu_metrics = [
'multiProcUserTime',
'multiProcSystemTime',
'multiProcIdleTime',
'multiProcUsage',
]
cpu_cores = [7097, 13039, 13761, 28994, 29751, 33826, 40053, 48847, 61593, 65044]
for core in cpu_cores:
tags = ['cpu_core:{}'.format(core)] + common_tags
for metric in cpu_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
aggregator.assert_metric('snmp.procNum', metric_type=aggregator.GAUGE, tags=common_tags)
mem_metrics = ['memTotalReal64', 'memActiveReal64', 'memFreeReal64', 'memTotalVirtual64', 'memActiveVirtual64']
for metric in mem_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
disk_metrics = [
'multiDiskSize',
'multiDiskUsed',
'multiDiskFreeTotalBytes',
'multiDiskFreeAvailableBytes',
'multiDiskFreeTotalPercent',
'multiDiskFreeAvailablePercent',
]
appliance_metrics = [
'fanSpeedSensorValue',
'fanSpeedSensorStatus',
'tempertureSensorValue',
'tempertureSensorStatus',
]
common_indices = range(10)
common_names = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']
for idx in common_indices:
name = common_names[idx]
tags = ['disk_index:{}'.format(idx), 'disk_name:{}'.format(name)] + common_tags
for metric in disk_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
tags = ['sensor_index:{}'.format(idx), 'sensor_name:{}'.format(name)] + common_tags
for metric in appliance_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
fw_count_metrics = ['fwAccepted', 'fwDropped', 'fwRejected']
for metric in fw_count_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
fw_gauge_metrics = ['fwNumConn', 'fwPeakNumConn']
for metric in fw_gauge_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_arista(aggregator):
run_profile_check('arista')
common_tags = common.CHECK_TAGS + ['snmp_profile:arista', 'device_vendor:arista', 'snmp_host:DCS-7504-name']
common.assert_common_metrics(aggregator, common_tags)
aggregator.assert_metric(
'snmp.aristaEgressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:13', 'queue_index:10'],
count=1,
)
aggregator.assert_metric(
'snmp.aristaEgressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:28', 'queue_index:22'],
count=1,
)
aggregator.assert_metric(
'snmp.aristaIngressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:7', 'queue_index:25'],
count=1,
)
aggregator.assert_metric(
'snmp.aristaIngressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:8', 'queue_index:24'],
count=1,
)
for (sensor_id, sensor_type) in [(1, 11), (7, 8)]:
sensor_tags = ['sensor_id:{}'.format(sensor_id), 'sensor_type:{}'.format(sensor_type)] + common_tags
aggregator.assert_metric('snmp.entPhySensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
aggregator.assert_metric('snmp.entPhySensorOperStatus', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_aruba(aggregator):
run_profile_check('aruba')
common_tags = common.CHECK_TAGS + ['snmp_profile:aruba-switch', 'device_vendor:aruba']
common.assert_common_metrics(aggregator, common_tags)
for fan in [18, 28]:
fan_tags = common_tags + ['fan_index:{}'.format(fan)]
aggregator.assert_metric('snmp.sysExtFanStatus', metric_type=aggregator.GAUGE, tags=fan_tags, count=1)
for psu in [1, 17]:
psu_tags = common_tags + ['powersupply_index:{}'.format(psu)]
aggregator.assert_metric('snmp.sysExtPowerSupplyStatus', metric_type=aggregator.GAUGE, tags=psu_tags, count=1)
for proc in [11, 26]:
proc_tags = common_tags + ['processor_index:{}'.format(proc)]
aggregator.assert_metric('snmp.sysExtProcessorLoad', metric_type=aggregator.GAUGE, tags=proc_tags, count=1)
for mem in [3, 20]:
mem_tags = common_tags + ['memory_index:{}'.format(mem)]
aggregator.assert_metric('snmp.sysExtMemorySize', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
aggregator.assert_metric('snmp.sysExtMemoryUsed', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
aggregator.assert_metric('snmp.sysExtMemoryFree', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
aggregator.assert_metric(
'snmp.wlsxSysExtPacketLossPercent', metric_type=aggregator.GAUGE, tags=common_tags, count=1
)
# OSPF metrics
neighbor_metrics = [
('ospfNbrEvents', aggregator.RATE),
('ospfNbrState', aggregator.GAUGE),
('ospfNbrLsRetransQLen', aggregator.GAUGE),
]
for metric, metric_type in neighbor_metrics:
tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
virtual_neighbor_metrics = [
('ospfVirtNbrState', aggregator.GAUGE),
('ospfVirtNbrEvents', aggregator.RATE),
('ospfVirtNbrLsRetransQLen', aggregator.GAUGE),
]
for metric, metric_type in virtual_neighbor_metrics:
for ip, nbr in [('74.210.82.1', '194.154.66.112'), ('122.226.86.1', '184.201.101.140')]:
tags = ['neighbor_ip:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
lls_metrics = ['ospfIfRetransInterval', 'ospfIfState', 'ospfIfLsaCount']
for metric in lls_metrics:
for ip, nbr in [('58.115.169.188', '192.29.66.79'), ('18.2.8.29', '118.246.193.247')]:
tags = ['ospf_ip_addr:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
virtual_lls_metrics = ['ospfVirtIfRetransInterval', 'ospfVirtIfState', 'ospfVirtIfLsaCount']
for metric in virtual_lls_metrics:
for nbr in ['194.154.66.112', '184.201.101.140']:
tags = ['neighbor_id:{}'.format(nbr)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_chatsworth(aggregator):
profile = "chatsworth_pdu"
run_profile_check(profile)
# Legacy global tags are applied to all metrics
legacy_global_tags = [
'legacy_pdu_macaddress:00:0E:D3:AA:CC:EE',
'legacy_pdu_model:P10-1234-ABC',
'legacy_pdu_name:legacy-name1',
'legacy_pdu_version:1.2.3',
]
common_tags = common.CHECK_TAGS + legacy_global_tags + ['snmp_profile:' + profile, 'device_vendor:chatsworth']
common.assert_common_metrics(aggregator, common_tags)
# Legacy metrics
legacy_pdu_tags = common_tags
legacy_pdu_gauge_metrics = [
'snmp.pduRole',
'snmp.outOfService',
]
legacy_pdu_monotonic_count_metrics = []
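    # Build the metric names emitted by the legacy PDU tree: per-line currents, per-probe temperature
    # and humidity, per-phase energy/voltage/power readings, and per-outlet energy and current.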
for line in range(1, 4):
legacy_pdu_gauge_metrics.append('snmp.line{}curr'.format(line))
for branch in range(1, 3):
legacy_pdu_gauge_metrics.append('snmp.temperatureProbe{}'.format(branch))
legacy_pdu_gauge_metrics.append('snmp.humidityProbe{}'.format(branch))
for xyz in ['xy', 'yz', 'zx']:
legacy_pdu_monotonic_count_metrics.append('snmp.energy{}{}s'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.voltage{}{}'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.power{}{}'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.powerFact{}{}'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.current{}{}'.format(xyz, branch))
for branch in range(1, 25):
legacy_pdu_monotonic_count_metrics.append('snmp.receptacleEnergyoutlet{}s'.format(branch))
legacy_pdu_gauge_metrics.append('snmp.outlet{}Current'.format(branch))
for metric in legacy_pdu_gauge_metrics:
aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, tags=legacy_pdu_tags, count=1)
for metric in legacy_pdu_monotonic_count_metrics:
aggregator.assert_metric(metric, metric_type=aggregator.MONOTONIC_COUNT, tags=legacy_pdu_tags, count=1)
# New metrics
pdu_tags = common_tags + [
'pdu_cabinetid:cab1',
'pdu_ipaddress:42.2.210.224',
'pdu_macaddress:0x00249b3503f6',
'pdu_model:model1',
'pdu_name:name1',
'pdu_version:v1.1',
]
aggregator.assert_metric('snmp.cpiPduNumberBranches', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduNumberOutlets', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutOfService', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduUpgrade', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduChainRole', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduTotalPower', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
for lock in [1, 2]:
lock_tags = common_tags + ['lock_id:{}'.format(lock)]
aggregator.assert_metric('snmp.cpiPduEasStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
aggregator.assert_metric('snmp.cpiPduDoorStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
aggregator.assert_metric('snmp.cpiPduLockStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
for (sensor_name, sensor_index) in [('sensor1', 4), ('sensor2', 6)]:
sensor_tags = common_tags + [
'sensor_index:{}'.format(sensor_index),
'sensor_name:{}'.format(sensor_name),
'sensor_type:1',
]
aggregator.assert_metric('snmp.cpiPduSensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
for line in [6, 18]:
line_tags = common_tags + ['line_id:{}'.format(line)]
aggregator.assert_metric('snmp.cpiPduLineCurrent', metric_type=aggregator.GAUGE, tags=line_tags, count=1)
for branch in [1, 17]:
branch_tags = common_tags + ['branch_id:{}'.format(branch), 'pdu_name:name1']
aggregator.assert_metric('snmp.cpiPduBranchCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric('snmp.cpiPduBranchMaxCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric('snmp.cpiPduBranchVoltage', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric('snmp.cpiPduBranchPower', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric(
'snmp.cpiPduBranchPowerFactor', metric_type=aggregator.GAUGE, tags=branch_tags, count=1
)
aggregator.assert_metric('snmp.cpiPduBranchStatus', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric(
'snmp.cpiPduBranchEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=branch_tags, count=1
)
for branch in [1]:
branch_tags = common_tags + ['branch_id:{}'.format(branch), 'pdu_name:name2']
aggregator.assert_metric(
'snmp.cpiPduBranchPowerFactor', metric_type=aggregator.GAUGE, tags=branch_tags, count=1
)
aggregator.assert_metric(
'snmp.cpiPduBranchEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=branch_tags, count=1
)
for (outlet_id, outlet_branch, outlet_name) in [(7, 29, 'outlet1'), (16, 23, 'outlet2')]:
outlet_tags = common_tags + [
'outlet_id:{}'.format(outlet_id),
'outlet_branchid:{}'.format(outlet_branch),
'outlet_name:{}'.format(outlet_name),
]
aggregator.assert_metric('snmp.cpiPduOutletCurrent', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutletVoltage', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutletPower', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutletStatus', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric(
'snmp.cpiPduOutletEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=outlet_tags, count=1
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_isilon(aggregator):
run_profile_check('isilon')
common_tags = common.CHECK_TAGS + [
'snmp_profile:isilon',
'cluster_name:testcluster1',
'node_name:node1',
'node_type:1',
'device_vendor:dell',
]
cluster_rates = [
'clusterIfsInBytes',
'clusterIfsOutBytes',
]
node_rates = [
'nodeIfsOutBytes',
'nodeIfsInBytes',
]
protocol_metrics = [
'protocolOpsPerSecond',
'latencyMin',
'latencyMax',
'latencyAverage',
]
quota_metrics = ['quotaHardThreshold', 'quotaSoftThreshold', 'quotaUsage', 'quotaAdvisoryThreshold']
quota_ids_types = [
(422978632, 1),
(153533730, 5),
(3299369987, 4),
(2149993012, 3),
(1424325378, 1),
(4245321451, 0),
(2328145711, 1),
(1198032230, 4),
(1232918362, 1),
(1383990869, 1),
]
common.assert_common_metrics(aggregator, common_tags)
for metric in quota_metrics:
for qid, qtype in quota_ids_types:
tags = ['quota_id:{}'.format(qid), 'quota_type:{}'.format(qtype)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in protocol_metrics:
for num in range(1, 3):
tags = ['protocol_name:testprotocol{}'.format(num)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.clusterHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in cluster_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
aggregator.assert_metric('snmp.nodeHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in node_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
for fan in [4, 6, 10, 11, 14, 21, 22, 23, 25, 30]:
tags = ['fan_name:testfan', 'fan_number:{}'.format(fan)] + common_tags
aggregator.assert_metric('snmp.fanSpeed', metric_type=aggregator.GAUGE, tags=tags, count=1)
for status, bay in [('SMARTFAIL', 1), ('HEALTHY', 2), ('DEAD', 3)]:
        tags = common_tags + ['disk_status:{}'.format(status), 'disk_bay:{}'.format(bay)]
aggregator.assert_metric('snmp.diskSizeBytes', metric_type=aggregator.RATE, tags=tags)
aggregator.assert_metric('snmp.ifsUsedBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
aggregator.assert_metric('snmp.ifsTotalBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_apc_ups(aggregator):
run_profile_check('apc_ups')
profile_tags = [
'snmp_profile:apc_ups',
'model:APC Smart-UPS 600',
'firmware_version:2.0.3-test',
'serial_num:test_serial',
'ups_name:testIdentName',
'device_vendor:apc',
]
tags = common.CHECK_TAGS + profile_tags
common.assert_common_metrics(aggregator, tags)
for metric in metrics.APC_UPS_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric(
'snmp.upsOutletGroupStatusGroupState',
metric_type=aggregator.GAUGE,
tags=['outlet_group_name:test_outlet'] + tags,
)
for metric, value in metrics.APC_UPS_UPS_BASIC_STATE_OUTPUT_STATE_METRICS:
aggregator.assert_metric(metric, value=value, metric_type=aggregator.GAUGE, count=1, tags=tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_fortinet_fortigate(aggregator):
run_profile_check('fortinet-fortigate')
common_tags = common.CHECK_TAGS + [
'snmp_profile:fortinet-fortigate',
'device_vendor:fortinet',
]
common_gauge_metrics = [
'fgSysCpuUsage',
'fgSysMemUsage',
'fgSysMemCapacity',
'fgSysLowMemUsage',
'fgSysLowMemCapacity',
'fgSysDiskUsage',
'fgSysDiskCapacity',
'fgSysSesCount',
'fgSysSesRate1',
'fgSysSes6Count',
'fgSysSes6Rate1',
'fgApHTTPConnections',
'fgApHTTPMaxConnections',
'fgVdNumber',
'fgVdMaxVdoms',
]
processor_gauge_metrics = [
'fgProcessorUsage',
'fgProcessorSysUsage',
]
processor_count_metrics = [
'fgProcessorPktRxCount',
'fgProcessorPktTxCount',
'fgProcessorPktDroppedCount',
]
processor_tags = common_tags + ['processor_index:12']
vd_metrics = [
'fgVdEntOpMode',
'fgVdEntHaState',
'fgVdEntCpuUsage',
'fgVdEntMemUsage',
'fgVdEntSesCount',
'fgVdEntSesRate',
]
vd_tags = common_tags + ['virtualdomain_index:4', 'virtualdomain_name:their oxen quaintly']
common.assert_common_metrics(aggregator, common_tags)
for metric in common_gauge_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in processor_gauge_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=processor_tags, count=1)
for metric in processor_count_metrics:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=processor_tags, count=1
)
aggregator.assert_metric(
'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=processor_tags, count=1
)
for metric in vd_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=vd_tags, count=1)
# Interface
aggregator.assert_metric('snmp.fgIntfEntVdom', metric_type=aggregator.GAUGE, count=1)
# Firewall
firewall_tags = common_tags + ['policy_index:22']
for metric in ['fgFwPolPktCount', 'fgFwPolByteCount']:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall_tags, count=1
)
aggregator.assert_metric(
'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall_tags, count=1
)
# Firewall 6
firewall6_tags = common_tags + ['policy6_index:29']
for metric in ['fgFwPol6PktCount', 'fgFwPol6ByteCount']:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall6_tags, count=1
)
aggregator.assert_metric(
'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall6_tags, count=1
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.usefixtures("dd_environment")
def test_netapp(aggregator):
run_profile_check('netapp')
profile_tags = [
'snmp_profile:netapp',
'snmp_host:example-datacenter.company',
'device_vendor:netapp',
]
common_tags = common.CHECK_TAGS + profile_tags
common.assert_common_metrics(aggregator, common_tags)
gauges = [
'cfInterconnectStatus',
'miscCacheAge',
'ncHttpActiveCliConns',
]
counts = [
'extcache64Hits',
]
for metric in gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
snapvault_counts = [
'svTotalFailures',
]
snapvaults = [('5', '/vol/dir1', '5'), ('6', '/vol/dir3', '2'), ('18', '/vol/dir9', '4')]
for metric in snapvault_counts:
for index, destination, state in snapvaults:
tags = [
'index:{}'.format(index),
'destination:{}'.format(destination),
'state:{}'.format(state),
] + common_tags
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
snapmirrors = [('6', '1'), ('9', '5'), ('29', '1')]
snapmirror_gauges = [
'snapmirrorLag',
]
snapmirror_counts = [
'snapmirrorTotalFailures',
]
for index, state in snapmirrors:
tags = ['index:{}'.format(index), 'state:{}'.format(state)] + common_tags
for metric in snapmirror_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in snapmirror_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
filesystem_gauges = [
'dfHighTotalKBytes',
'dfHighAvailKBytes',
'dfInodesUsed',
'dfInodesFree',
]
filesystem_indexes = [
'1022',
'1023',
'1024',
'1025',
'1026',
'1027',
'1028',
'1029',
'1032',
'1033',
]
filesystems = ['/vol/dir{}'.format(n) for n in range(1, len(filesystem_indexes) + 1)]
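    # Pair each filesystem index with its corresponding /vol/dirN path (one path per index above).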
for metric in filesystem_gauges:
for index, filesystem in zip(filesystem_indexes, filesystems):
tags = ['index:{}'.format(index), 'filesystem:{}'.format(filesystem)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
if_counts = [
'ifHighInOctets',
]
if_rates = [
'ifHighInOctets.rate',
]
interfaces = [
# Interface descriptions will be normalized in the backend, but we receive the raw DisplayString values here.
('6', 'netgear ifX300 v1'),
('7', 'junyper proto12 12.3'),
('23', 'malabar yz42 10.2020'),
]
for index, descr in interfaces:
tags = ['index:{}'.format(index), 'interface:{}'.format(descr)] + common_tags
for metric in if_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in if_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_catalyst(aggregator):
run_profile_check('cisco-catalyst')
common_tags = common.CHECK_TAGS + [
'snmp_host:catalyst-6000.example',
'snmp_profile:cisco-catalyst',
'device_vendor:cisco',
]
sensors = [5, 9]
for sensor in sensors:
tags = ['sensor_id:{}'.format(sensor), 'sensor_type:10'] + common_tags
aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
interfaces = ["Gi1/0/{}".format(i) for i in [6, 10, 12, 18, 22, 25, 27]]
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in CIE_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
frus = [1001, 1010, 2001, 2010]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
common.assert_common_metrics(aggregator, common_tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.parametrize("file", ["juniper-ex", "juniper-ex-variation"])
@pytest.mark.usefixtures("dd_environment")
def test_juniper_ex(aggregator, file):
run_profile_check(file, 'juniper-ex')
common_tags = common.CHECK_TAGS + [
'snmp_profile:juniper-ex',
'device_vendor:juniper-networks',
]
_check_juniper_virtual_chassis(aggregator, common_tags)
_check_juniper_dcu(aggregator, common_tags)
_check_juniper_cos(aggregator, common_tags)
_check_juniper_firewall(aggregator, common_tags)
_check_bgp4(aggregator, common_tags)
common.assert_common_metrics(aggregator, common_tags)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.parametrize("file", ["juniper-mx", "juniper-mx-variation"])
@pytest.mark.usefixtures("dd_environment")
def test_juniper_mx(aggregator, file):
run_profile_check(file, 'juniper-mx')
common_tags = common.CHECK_TAGS + [
'snmp_profile:juniper-mx',
'device_vendor:juniper-networks',
]
_check_juniper_virtual_chassis(aggregator, common_tags)
_check_juniper_firewall(aggregator, common_tags)
_check_bgp4(aggregator, common_tags)
common.assert_common_metrics(aggregator, common_tags)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
@pytest.mark.parametrize("file", ["juniper-srx", "juniper-srx-variation"])
@pytest.mark.usefixtures("dd_environment")
def test_juniper_srx(aggregator, file):
run_profile_check(file, 'juniper-srx')
common_tags = common.CHECK_TAGS + [
'snmp_profile:juniper-srx',
'device_vendor:juniper-networks',
]
_check_juniper_userfirewall(aggregator, common_tags)
_check_juniper_dcu(aggregator, common_tags)
_check_juniper_scu(aggregator, common_tags)
_check_bgp4(aggregator, common_tags)
common.assert_common_metrics(aggregator, common_tags)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
def _check_juniper_scu(aggregator, common_tags):
"""
    Shared testing function for Juniper profiles supporting SCU
"""
scu_tags = [
['address_family:1', 'interface:kept but'],
['address_family:1', 'interface:quaintly driving oxen their zombies oxen acted acted'],
['address_family:1', 'interface:but forward kept but their driving oxen quaintly acted'],
]
for metric in SCU_COUNTS:
for tags in scu_tags:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + tags, count=1
)
def _check_juniper_userfirewall(aggregator, common_tags):
"""
Shared testing function for Juniper profiles supporting userfirewall (user auth)
"""
userfirewall_tags = [
['ldap_domain_name:Mycroft Holmes', 'ldap_host:brother'],
['ldap_domain_name:Jim Moriarty', 'ldap_host:enemy'],
]
for metric in USER_FIREWALL:
for tags in userfirewall_tags:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + tags, count=1
)
def _check_juniper_dcu(aggregator, common_tags):
"""
Shared testing function for Juniper profiles supporting DCU
"""
dcu_tags = [
[
'address_family:1',
'destination_class_name:their',
'interface:quaintly driving oxen their zombies oxen acted acted',
],
[
'address_family:1',
'destination_class_name:acted but forward acted zombies forward',
'interface:but forward kept but their driving oxen quaintly acted',
],
[
'address_family:2',
'destination_class_name:oxen Jaded oxen Jaded forward kept quaintly',
'interface:kept but',
],
]
    for dcu_metric in DCU_COUNTS:
        for tags in dcu_tags:
            aggregator.assert_metric(
                'snmp.{}'.format(dcu_metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + tags, count=1
            )
def _check_juniper_firewall(aggregator, common_tags):
"""
Shared testing function for Juniper profiles supporting firewall metrics
"""
firewall_tags = [
[
'counter_name:Jaded oxen kept their driving but kept',
'counter_type:4',
'firewall_filter_name:their driving quaintly but Jaded oxen',
],
[
'counter_name:but but but their their their kept kept forward',
'counter_type:4',
'firewall_filter_name:driving kept acted Jaded zombies kept acted',
],
]
for metric in FIREWALL_COUNTS:
for tags in firewall_tags:
aggregator.assert_metric(
'snmp.{}'.format(metric),
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + tags,
count=1,
)
def _check_juniper_virtual_chassis(aggregator, common_tags):
"""
Shared testing function for Juniper profiles supporting virtual chassis metrics
"""
virtual_chassis_tags = [
['port_name:but driving but'],
['port_name:Jaded forward but oxen quaintly their their'],
['port_name:forward forward driving driving Jaded Jaded'],
]
for count_and_rate_metric in VIRTUAL_CHASSIS_COUNTS:
for tags in virtual_chassis_tags:
aggregator.assert_metric(
'snmp.{}'.format(count_and_rate_metric),
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + tags,
count=1,
)
for rate_metric in VIRTUAL_CHASSIS_RATES:
for tags in virtual_chassis_tags:
aggregator.assert_metric(
'snmp.{}'.format(rate_metric), metric_type=aggregator.GAUGE, tags=common_tags + tags, count=1
)
def _check_juniper_cos(aggregator, common_tags):
"""
Shared testing function for Juniper profiles supporting COS metrics
"""
cos_tags = [
['interface:acted oxen oxen forward quaintly kept zombies but oxen', 'queue_number:25'],
['interface:acted kept quaintly acted oxen kept', 'queue_number:50'],
['interface:their', 'queue_number:15'],
]
for cos_metric in COS_COUNTS:
for tags in cos_tags:
aggregator.assert_metric(
'snmp.{}'.format(cos_metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags + tags, count=1
)
for cos_metric in COS_RATES:
for tags in cos_tags:
aggregator.assert_metric(
'snmp.{}'.format(cos_metric), metric_type=aggregator.GAUGE, tags=common_tags + tags, count=1
)
def _check_bgp4(aggregator, common_tags):
"""
Shared testing function for profiles supporting BGP4 metrics.
"""
tags = ['neighbor:244.12.239.177'] + common_tags
for metric in PEER_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
for metric in PEER_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags)
| 41.631939
| 120
| 0.66372
|
220ad28a66f95935015a64890be020a67d2f058a
| 96
|
py
|
Python
|
github/methods/auth/__init__.py
|
appheap/PyGithub
|
2442a2512a708c6463f81d8398fd91090e134df1
|
[
"MIT"
] | 1
|
2022-03-16T21:33:37.000Z
|
2022-03-16T21:33:37.000Z
|
github/methods/auth/__init__.py
|
appheap/PyGithub
|
2442a2512a708c6463f81d8398fd91090e134df1
|
[
"MIT"
] | null | null | null |
github/methods/auth/__init__.py
|
appheap/PyGithub
|
2442a2512a708c6463f81d8398fd91090e134df1
|
[
"MIT"
] | 1
|
2022-03-16T21:28:06.000Z
|
2022-03-16T21:28:06.000Z
|
from .authenticate_user import AuthenticateUser
class Auth(
AuthenticateUser,
):
pass
| 12
| 47
| 0.75
|
5131e1ed4eb223cf86d82dd18d494cecd799c7df
| 796
|
py
|
Python
|
gallary/urls.py
|
israelwangila/gallery
|
9bd799a498e556274bd3518ad047183180463bb7
|
[
"MIT"
] | null | null | null |
gallary/urls.py
|
israelwangila/gallery
|
9bd799a498e556274bd3518ad047183180463bb7
|
[
"MIT"
] | null | null | null |
gallary/urls.py
|
israelwangila/gallery
|
9bd799a498e556274bd3518ad047183180463bb7
|
[
"MIT"
] | null | null | null |
"""gallary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url('', include('photos.urls')),
]
| 36.181818
| 77
| 0.704774
|
149e1b969b2a6358de3052612b2af8decb56ae0a
| 7,496
|
py
|
Python
|
saleor/csv/tests/export/products_data/test_prepare_headers.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/csv/tests/export/products_data/test_prepare_headers.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | 4
|
2021-09-06T03:55:32.000Z
|
2021-10-15T08:47:58.000Z
|
saleor/csv/tests/export/products_data/test_prepare_headers.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | null | null | null |
from .....attribute.models import Attribute
from .....channel.models import Channel
from .....graphql.csv.enums import ProductFieldEnum
from ....utils.export import product_max_min_filter_to_export_headers
from ....utils.product_headers import (
get_attributes_headers,
get_channels_headers,
get_export_fields_and_headers_info,
get_product_export_fields_and_headers,
get_warehouses_headers,
)
def test_get_export_fields_and_headers_fields_without_price():
# given
export_info = {
"fields": [
ProductFieldEnum.COLLECTIONS.value,
ProductFieldEnum.DESCRIPTION.value,
ProductFieldEnum.VARIANT_SKU.value,
],
"warehoses": [],
}
# when
export_fields, file_headers = get_product_export_fields_and_headers(export_info)
# then
expected_headers = ["id", "collections", "description", "variant sku"]
assert set(export_fields) == {
"collections__slug",
"id",
"variants__sku",
"description_as_str",
}
assert file_headers == expected_headers
def test_get_export_fields_and_headers_no_fields():
export_fields, file_headers = get_product_export_fields_and_headers({})
assert export_fields == ["id"]
assert file_headers == ["id"]
def test_get_attributes_headers(
product_with_multiple_values_attributes, product_type_without_variant
):
# given
attribute_ids = Attribute.objects.values_list("id", flat=True)
export_info = {"attributes": attribute_ids}
product_type = product_with_multiple_values_attributes.product_type
product_attribute = product_type.product_attributes.first()
product_type_without_variant.product_attributes.add(product_attribute)
# when
attributes_headers = get_attributes_headers(export_info)
# then
product_headers = []
variant_headers = []
for attr in Attribute.objects.all():
if attr.product_types.exists():
product_headers.append(f"{attr.slug} (product attribute)")
if attr.product_variant_types.exists():
variant_headers.append(f"{attr.slug} (variant attribute)")
expected_headers = product_headers + variant_headers
assert attributes_headers == expected_headers
def test_get_attributes_headers_lack_of_attributes_ids():
# given
export_info = {}
# when
attributes_headers = get_attributes_headers(export_info)
# then
assert attributes_headers == []
def test_get_warehouses_headers(warehouses):
# given
warehouse_ids = [warehouses[0].pk]
export_info = {"warehouses": warehouse_ids}
# when
warehouse_headers = get_warehouses_headers(export_info)
# then
assert warehouse_headers == [f"{warehouses[0].slug} (warehouse quantity)"]
def test_get_warehouses_headers_lack_of_warehouse_ids():
# given
export_info = {}
# when
warehouse_headers = get_warehouses_headers(export_info)
# then
assert warehouse_headers == []
def test_get_channels_headers(channel_USD, channel_PLN):
# given
channel_usd_slug = channel_USD.slug
channel_pln_slug = channel_PLN.slug
channel_ids = [channel_USD.pk, channel_PLN.pk]
export_info = {"channels": channel_ids}
# when
channel_headers = get_channels_headers(export_info)
# then
expected_headers = []
for channel_slug in [channel_pln_slug, channel_usd_slug]:
for field in [
"product currency code",
"published",
"publication date",
"searchable",
"available for purchase",
"price amount",
"variant currency code",
"variant cost price",
]:
expected_headers.append(f"{channel_slug} (channel {field})")
assert channel_headers == expected_headers
def test_get_channels_headers_lack_of_channel_ids():
# given
export_info = {}
# when
channel_headers = get_channels_headers(export_info)
# then
assert channel_headers == []
def test_get_export_fields_and_headers_info(
warehouses, product_with_multiple_values_attributes, channel_PLN, channel_USD
):
# given
warehouse_ids = [w.pk for w in warehouses]
attribute_ids = [attr.pk for attr in Attribute.objects.all()]
channel_ids = [channel_PLN.pk, channel_USD.pk]
export_info = {
"fields": [
ProductFieldEnum.COLLECTIONS.value,
ProductFieldEnum.DESCRIPTION.value,
],
"warehouses": warehouse_ids,
"attributes": attribute_ids,
"channels": channel_ids,
}
expected_file_headers = [
"id",
"collections",
"description",
]
# when
export_fields, file_headers, data_headers = get_export_fields_and_headers_info(
export_info
)
# then
expected_fields = [
"id",
"collections__slug",
"description_as_str",
]
product_headers = []
variant_headers = []
for attr in Attribute.objects.all().order_by("slug"):
if attr.product_types.exists():
product_headers.append(f"{attr.slug} (product attribute)")
if attr.product_variant_types.exists():
variant_headers.append(f"{attr.slug} (variant attribute)")
warehouse_headers = [f"{w.slug} (warehouse quantity)" for w in warehouses]
channel_headers = []
for channel in Channel.objects.all().order_by("slug"):
slug = channel.slug
for field in [
"product currency code",
"published",
"publication date",
"searchable",
"available for purchase",
"price amount",
"variant currency code",
"variant cost price",
]:
channel_headers.append(f"{slug} (channel {field})")
    expected_headers = (
expected_fields
+ product_headers
+ variant_headers
+ warehouse_headers
+ channel_headers
)
expected_file_headers += (
product_headers + variant_headers + warehouse_headers + channel_headers
)
assert expected_file_headers == file_headers
assert set(export_fields) == set(expected_fields)
    assert data_headers == expected_headers
def test_product_max_min_get_export_fields_and_headers_no_fields():
export_fields, file_headers = product_max_min_filter_to_export_headers({})
assert export_fields == []
assert file_headers == []
def test_product_max_min_get_export_fields_and_headers_info():
# given
export_info = {
"fields": [
"channel_slug",
"channel_name",
"variant_sku",
"product_name",
"selling_unit",
"item_type",
"current_product_class",
"previous_product_class",
"previous_min_level",
"previous_max_level",
"current_min_level",
"current_max_level",
],
}
expected_file_headers = [
"Channel Slug",
"Channel Name",
"Variant SKU",
"Product Name",
"Selling Unit",
"Item Type",
"Current Product Class Recommendation",
"Previous Product Class Recommendation",
"Previous Min Level",
"Previous Max Level",
"Current Min Level",
"Current Max Level",
]
# when
export_fields, file_headers = product_max_min_filter_to_export_headers(export_info)
# then
assert expected_file_headers == file_headers
| 27.970149
| 87
| 0.657551
|
d7807010f7959f3cd3dd3b4e65f3e4c0b837e7ab
| 3,467
|
py
|
Python
|
aaron_crackingV1.py
|
awharkrider/CPSC3320_Lab_Password_Cracking
|
593ee5efda5dca92360c892b6bb694362ff7e499
|
[
"MIT"
] | 1
|
2018-12-05T15:30:52.000Z
|
2018-12-05T15:30:52.000Z
|
aaron_crackingV1.py
|
awharkrider/CPSC3320_Lab_Password_Cracking
|
593ee5efda5dca92360c892b6bb694362ff7e499
|
[
"MIT"
] | null | null | null |
aaron_crackingV1.py
|
awharkrider/CPSC3320_Lab_Password_Cracking
|
593ee5efda5dca92360c892b6bb694362ff7e499
|
[
"MIT"
] | null | null | null |
"""
Version one of my cracking.py
"""
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
import argparse
import time
def main():
parser = argparse.ArgumentParser()
parser.add_argument("dictionary_file", default="words", help="dictionary file, (defaults to words) ")
parser.add_argument("passwords_file", default=".awharkrider_digests.csv",
help="password file of .csv (defaults to .awharkrider_digests.csv)")
parser.add_argument("cracked_digests", default="cracked_digests.csv",
help="stores the digests found with matched password (defaults to cracked_digests.csv)")
args = parser.parse_args()
# read in dictionary file
dictionary = []
with open(args.dictionary_file) as dict_file:
for line in dict_file:
            # keep only eight-character candidate words
if len(line.strip()) == 8:
dictionary.append(line.strip())
# Read in passwords description file
salty_digests = []
with open(args.passwords_file) as passwords:
for line in passwords:
salty_digests.append(line)
with open(args.cracked_digests, 'r') as found_file:
for line in found_file:
# remove found digest from the digest array
found_salt, found_digest, found_password = line.strip().split(',')
found_salty_digest = found_salt + ',' + found_digest + '\n'
if found_salty_digest in salty_digests:
                print('Digest already cracked, removing it: found_salty_digest = {}'.format(found_salty_digest))
salty_digests.remove(found_salty_digest)
found_file.close()
then = time.time() # Time before the operations start
new_found = crack_password(dictionary, salty_digests)
now = time.time() # Time after it finished
print("It took: ", now - then, " seconds to iterate through the entire dictionary and digests.\n")
    with open(args.cracked_digests, 'a') as found_file:
for password in new_found:
found_file.write(password)
def crack_password(dictionary, digests):
    new_found = []
    for word in dictionary:
for line in digests:
then = time.time() # Time before the operations start
salt, digest = line.strip().split(',')
backend = default_backend()
# Salts should be randomly generated
salt_bytes = bytes.fromhex(salt)
# derive
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),
length=32,
salt=salt_bytes,
iterations=100000,
backend=backend)
key = kdf.derive(word.encode('utf-8'))
digest_bytes = bytes.fromhex(digest)
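            # An exact match between the derived key and the stored digest means this dictionary word is the password.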
if key == digest_bytes:
elapsed_time = time.time() - then
print('FOUND! It took "{}" seconds to crack this password.\n'.format(elapsed_time))
found = salt + ',' + digest
digests.remove(line)
new_found.append(found + ',' + word + '\n')
print('digest: {},\n digest_bytes: {},\n word: {}\n'.format(digest, digest_bytes, word))
return new_found
# Standard boilerplate to call the main function, if executed
if __name__ == '__main__':
main()
| 34.67
| 112
| 0.614076
|
65997a764e7fd48ee588009f688e2d152a8c422f
| 123
|
py
|
Python
|
src/core/models/__init__.py
|
ablil/meistertask-cli
|
6c90802ac5dc7e5ac016e5c61c0e68db043e5784
|
[
"MIT"
] | 3
|
2020-11-03T22:27:18.000Z
|
2021-12-11T23:13:55.000Z
|
src/core/models/__init__.py
|
ablil/meistertask-cli
|
6c90802ac5dc7e5ac016e5c61c0e68db043e5784
|
[
"MIT"
] | 1
|
2021-09-12T13:28:13.000Z
|
2021-09-12T13:28:13.000Z
|
src/core/models/__init__.py
|
ablil/meistertask-cli
|
6c90802ac5dc7e5ac016e5c61c0e68db043e5784
|
[
"MIT"
] | null | null | null |
from .project import Project
from .task import Task
from .section import Section
__all__ = ["Project", "Section", "Task"]
| 20.5
| 40
| 0.739837
|
066f0856199f4b238e6c3a17ba7014f9d3f8e27a
| 2,372
|
py
|
Python
|
source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/rtde/rtde_config.py
|
ebruun/python-samples
|
746e5090f45659c60f01bf831a0308966d713b21
|
[
"BSD-3-Clause"
] | 19
|
2018-07-24T22:44:22.000Z
|
2022-03-26T09:37:08.000Z
|
source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/rtde/rtde_config.py
|
ebruun/python-samples
|
746e5090f45659c60f01bf831a0308966d713b21
|
[
"BSD-3-Clause"
] | 4
|
2018-05-02T12:52:35.000Z
|
2021-02-15T22:59:54.000Z
|
source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/rtde/rtde_config.py
|
ebruun/python-samples
|
746e5090f45659c60f01bf831a0308966d713b21
|
[
"BSD-3-Clause"
] | 4
|
2018-01-22T11:06:28.000Z
|
2020-03-17T08:37:24.000Z
|
# Copyright (c) 2016, Universal Robots A/S,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Universal Robots A/S nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL UNIVERSAL ROBOTS A/S BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import xml.etree.ElementTree as ET
class Recipe(object):
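    """A single <recipe> element: a key plus parallel lists of field names and types."""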
    __slots__ = ['key', 'names', 'types']
@staticmethod
def parse(recipe_node):
rmd = Recipe()
rmd.key = recipe_node.get('key')
rmd.names = [f.get('name') for f in recipe_node.findall('field')]
rmd.types = [f.get('type') for f in recipe_node.findall('field')]
return rmd
class ConfigFile(object):
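    """Parses an RTDE XML configuration file and exposes each recipe's field names and types by key."""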
def __init__(self, filename):
self.__filename = filename
tree = ET.parse(self.__filename)
root = tree.getroot()
recipes = [Recipe.parse(r) for r in root.findall('recipe')]
self.__dictionary = dict()
for r in recipes:
self.__dictionary[r.key] = r
def get_recipe(self, key):
r = self.__dictionary[key]
return r.names, r.types
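# Example usage (illustrative only; the file name and recipe key below are hypothetical):
#     conf = ConfigFile("rtde_configuration.xml")
#     state_names, state_types = conf.get_recipe("state")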
| 46.509804
| 81
| 0.713322
|
b5aaccafa497a39f70914bc860ceb940951c5b37
| 3,776
|
py
|
Python
|
venv/lib/python3.7/site-packages/rinoh/frontend/rst/__init__.py
|
rodrez/jobpy
|
02da7ed62a5f057ce66f7b3c0a5f13223363af9a
|
[
"MIT"
] | 3
|
2020-05-16T01:54:47.000Z
|
2021-04-27T01:37:08.000Z
|
venv/lib/python3.7/site-packages/rinoh/frontend/rst/__init__.py
|
rodrez/jobpy
|
02da7ed62a5f057ce66f7b3c0a5f13223363af9a
|
[
"MIT"
] | 7
|
2020-05-11T01:45:45.000Z
|
2022-03-02T14:58:30.000Z
|
venv/lib/python3.7/site-packages/rinoh/frontend/rst/__init__.py
|
rodrez/jobpy
|
02da7ed62a5f057ce66f7b3c0a5f13223363af9a
|
[
"MIT"
] | 1
|
2020-07-19T04:31:01.000Z
|
2020-07-19T04:31:01.000Z
|
# This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from pathlib import Path
from docutils.core import publish_doctree
from docutils.io import FileInput
from docutils.parsers.rst import Parser as ReStructuredTextParser
from ...document import DocumentTree
from ...text import MixedStyledText
from .. import (TreeNode, TreeNodeMeta, InlineNode, BodyNode, BodySubNode,
GroupingNode, DummyNode, Reader)
__all__ = ['DocutilsNode', 'DocutilsInlineNode',
'DocutilsBodyNode', 'DocutilsBodySubNode',
'DocutilsGroupingNode', 'DocutilsDummyNode',
'ReStructuredTextReader']
class DocutilsNode(TreeNode, metaclass=TreeNodeMeta):
@staticmethod
def node_tag_name(node):
return node.tagname
@staticmethod
def node_parent(node):
return node.parent
@staticmethod
def node_children(node):
return node.children
@staticmethod
def node_location(node):
return node.source, node.line, node.tagname
@property
def _ids(self):
return self.get('ids')
@property
def text(self):
return self.node.astext()
@property
def attributes(self):
return self.node.attributes
def get(self, key, default=None):
return self.node.get(key, default)
def __getitem__(self, name):
return self.node[name]
def process_content(self, style=None):
children_text = (child.styled_text() for child in self.getchildren())
return MixedStyledText([text for text in children_text if text],
style=style)
class DocutilsInlineNode(DocutilsNode, InlineNode):
@property
def text(self):
return super().text.replace('\n', ' ')
def styled_text(self):
styled_text = super().styled_text()
try:
styled_text.classes.extend(self.get('classes'))
except AttributeError:
pass
return styled_text
class DocutilsBodyNode(DocutilsNode, BodyNode):
def flowables(self):
classes = self.get('classes')
for flowable in super().flowables():
flowable.classes.extend(classes)
yield flowable
class DocutilsBodySubNode(DocutilsNode, BodySubNode):
pass
class DocutilsGroupingNode(DocutilsBodyNode, GroupingNode):
pass
class DocutilsDummyNode(DocutilsNode, DummyNode):
pass
from . import nodes
class DocutilsReader(Reader):
parser_class = None
def parse(self, filename_or_file):
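        # filename_or_file may be a path or an open file object; Path() raises TypeError
        # for file objects, which routes parsing to the second branch below.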
try:
filename = Path(filename_or_file)
settings_overrides = dict(input_encoding='utf-8')
doctree = publish_doctree(None, source_path=str(filename),
source_class=FileInput,
settings_overrides=settings_overrides,
parser=self.parser_class())
except TypeError:
filename = getattr(filename_or_file, 'name', None)
doctree = publish_doctree(filename_or_file,
source_class=FileInput,
parser=self.parser_class())
return self.from_doctree(filename, doctree)
def from_doctree(self, filename, doctree):
mapped_tree = DocutilsNode.map_node(doctree.document)
flowables = mapped_tree.children_flowables()
return DocumentTree(flowables, source_file=Path(filename))
class ReStructuredTextReader(DocutilsReader):
extensions = ('rst', )
parser_class = ReStructuredTextParser
| 28.606061
| 77
| 0.652278
|
6d36fda52180852e80eeb7ec15ab1bb6c10c765d
| 17,579
|
py
|
Python
|
test/core/test_operator.py
|
snyxan/Mephisto
|
a48ab19edf25b359379f4d31a990dfe2a3791f37
|
[
"MIT"
] | null | null | null |
test/core/test_operator.py
|
snyxan/Mephisto
|
a48ab19edf25b359379f4d31a990dfe2a3791f37
|
[
"MIT"
] | null | null | null |
test/core/test_operator.py
|
snyxan/Mephisto
|
a48ab19edf25b359379f4d31a990dfe2a3791f37
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import shutil
import shlex
import os
import tempfile
import time
import threading
from mephisto.abstractions.test.utils import get_test_requester
from mephisto.data_model.constants.assignment_state import AssignmentState
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.operations.operator import Operator
from mephisto.abstractions.architects.mock_architect import (
MockArchitect,
MockArchitectArgs,
)
from mephisto.operations.hydra_config import MephistoConfig
from mephisto.abstractions.providers.mock.mock_provider import MockProviderArgs
from mephisto.abstractions.blueprints.mock.mock_blueprint import MockBlueprintArgs
from mephisto.data_model.task_config import TaskConfigArgs
from omegaconf import OmegaConf
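# Upper bound, in seconds, that the tests below wait for mock task runs to finish.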
TIMEOUT_TIME = 10
MOCK_TASK_ARGS = TaskConfigArgs(
task_title="title",
task_description="This is a description",
task_reward="0.3",
task_tags="1,2,3",
)
class TestOperator(unittest.TestCase):
"""
Unit testing for the Mephisto Operator
"""
def setUp(self):
self.data_dir = tempfile.mkdtemp()
database_path = os.path.join(self.data_dir, "mephisto.db")
self.db = LocalMephistoDB(database_path)
self.requester_name, _req_id = get_test_requester(self.db)
self.operator = None
def tearDown(self):
if self.operator is not None:
self.operator.shutdown()
self.db.shutdown()
shutil.rmtree(self.data_dir, ignore_errors=True)
self.assertTrue(
len(threading.enumerate()) == 1,
f"Expected only main thread at teardown, found {threading.enumerate()}",
)
def wait_for_complete_assignment(self, assignment, timeout: int):
start_time = time.time()
while time.time() - start_time < timeout:
if assignment.get_status() == AssignmentState.COMPLETED:
break
time.sleep(0.1)
self.assertLess(
time.time() - start_time, timeout, "Assignment not completed in time"
)
def await_server_start(self, architect: "MockArchitect"):
start_time = time.time()
assert architect.server is not None, "Cannot wait on empty server"
while time.time() - start_time < 5:
if len(architect.server.subs) > 0:
break
time.sleep(0.1)
self.assertLess(time.time() - start_time, 5, "Mock server not up in time")
def test_initialize_supervisor(self):
"""Quick test to ensure that the operator can be initialized"""
self.operator = Operator(self.db)
def test_run_job_concurrent(self):
"""Ensure that the supervisor object can even be created"""
self.operator = Operator(self.db)
config = MephistoConfig(
blueprint=MockBlueprintArgs(num_assignments=1, is_concurrent=True),
provider=MockProviderArgs(requester_name=self.requester_name),
architect=MockArchitectArgs(should_run_server=True),
task=MOCK_TASK_ARGS,
)
self.operator.validate_and_run_config(OmegaConf.structured(config))
tracked_runs = self.operator.get_running_task_runs()
self.assertEqual(len(tracked_runs), 1, "Run not launched")
task_run_id, tracked_run = list(tracked_runs.items())[0]
self.assertIsNotNone(tracked_run)
self.assertIsNotNone(tracked_run.task_launcher)
self.assertIsNotNone(tracked_run.task_runner)
self.assertIsNotNone(tracked_run.architect)
self.assertIsNotNone(tracked_run.task_run)
self.assertEqual(tracked_run.task_run.db_id, task_run_id)
# Create two agents to step through the task
architect = tracked_run.architect
self.assertIsInstance(architect, MockArchitect, "Must use mock in testing")
# Register a worker
mock_worker_name = "MOCK_WORKER"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id = workers[0].db_id
self.assertEqual(len(tracked_run.task_runner.running_assignments), 0)
# Register an agent
mock_agent_details = "FAKE_ASSIGNMENT"
architect.server.register_mock_agent(worker_id, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 1, "Agent was not created properly")
agent = agents[0]
self.assertIsNotNone(agent)
# Register another worker
mock_worker_name = "MOCK_WORKER_2"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id = workers[0].db_id
# Register an agent
mock_agent_details = "FAKE_ASSIGNMENT_2"
architect.server.register_mock_agent(worker_id, mock_agent_details)
        # Give the whole mock task up to TIMEOUT_TIME seconds to complete
start_time = time.time()
while time.time() - start_time < TIMEOUT_TIME:
if len(self.operator.get_running_task_runs()) == 0:
break
time.sleep(0.1)
self.assertLess(
time.time() - start_time, TIMEOUT_TIME, "Task not completed in time"
)
# Ensure the assignment is completed
task_run = tracked_run.task_run
assignment = task_run.get_assignments()[0]
self.assertEqual(assignment.get_status(), AssignmentState.COMPLETED)
def test_run_job_not_concurrent(self):
"""Ensure that the supervisor object can even be created"""
self.operator = Operator(self.db)
config = MephistoConfig(
blueprint=MockBlueprintArgs(num_assignments=1, is_concurrent=False),
provider=MockProviderArgs(requester_name=self.requester_name),
architect=MockArchitectArgs(should_run_server=True),
task=MOCK_TASK_ARGS,
)
self.operator.validate_and_run_config(OmegaConf.structured(config))
tracked_runs = self.operator.get_running_task_runs()
self.assertEqual(len(tracked_runs), 1, "Run not launched")
task_run_id, tracked_run = list(tracked_runs.items())[0]
self.assertIsNotNone(tracked_run)
self.assertIsNotNone(tracked_run.task_launcher)
self.assertIsNotNone(tracked_run.task_runner)
self.assertIsNotNone(tracked_run.architect)
self.assertIsNotNone(tracked_run.task_run)
self.assertEqual(tracked_run.task_run.db_id, task_run_id)
# Create two agents to step through the task
architect = tracked_run.architect
self.assertIsInstance(architect, MockArchitect, "Must use mock in testing")
# Register a worker
mock_worker_name = "MOCK_WORKER"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id = workers[0].db_id
self.assertEqual(len(tracked_run.task_runner.running_assignments), 0)
# Register an agent
mock_agent_details = "FAKE_ASSIGNMENT"
architect.server.register_mock_agent(worker_id, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 1, "Agent was not created properly")
agent = agents[0]
self.assertIsNotNone(agent)
# Register another worker
mock_worker_name = "MOCK_WORKER_2"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id = workers[0].db_id
# Register an agent
mock_agent_details = "FAKE_ASSIGNMENT_2"
architect.server.register_mock_agent(worker_id, mock_agent_details)
        # Give both mock tasks up to TIMEOUT_TIME seconds to complete
start_time = time.time()
while time.time() - start_time < TIMEOUT_TIME:
if len(self.operator.get_running_task_runs()) == 0:
break
time.sleep(0.1)
self.assertLess(
time.time() - start_time, TIMEOUT_TIME, "Task not completed in time"
)
# Ensure the assignment is completed
task_run = tracked_run.task_run
assignment = task_run.get_assignments()[0]
self.assertEqual(assignment.get_status(), AssignmentState.COMPLETED)
def test_run_jobs_with_restrictions(self):
"""Ensure allowed_concurrent and maximum_units_per_worker work"""
self.operator = Operator(self.db)
provider_args = MockProviderArgs(requester_name=self.requester_name)
architect_args = MockArchitectArgs(should_run_server=True)
config = MephistoConfig(
blueprint=MockBlueprintArgs(num_assignments=3, is_concurrent=True),
provider=provider_args,
architect=architect_args,
task=TaskConfigArgs(
task_title="title",
task_description="This is a description",
task_reward="0.3",
task_tags="1,2,3",
maximum_units_per_worker=2,
allowed_concurrent=1,
task_name="max-unit-test",
),
)
self.operator.validate_and_run_config(OmegaConf.structured(config))
tracked_runs = self.operator.get_running_task_runs()
self.assertEqual(len(tracked_runs), 1, "Run not launched")
task_run_id, tracked_run = list(tracked_runs.items())[0]
self.assertIsNotNone(tracked_run)
self.assertIsNotNone(tracked_run.task_launcher)
self.assertIsNotNone(tracked_run.task_runner)
self.assertIsNotNone(tracked_run.architect)
self.assertIsNotNone(tracked_run.task_run)
self.assertEqual(tracked_run.task_run.db_id, task_run_id)
self.await_server_start(tracked_run.architect)
# Create two agents to step through the task
architect = tracked_run.architect
self.assertIsInstance(architect, MockArchitect, "Must use mock in testing")
# Register a worker
mock_worker_name = "MOCK_WORKER"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id_1 = workers[0].db_id
self.assertEqual(len(tracked_run.task_runner.running_assignments), 0)
# Register an agent
mock_agent_details = "FAKE_ASSIGNMENT"
architect.server.register_mock_agent(worker_id_1, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 1, "Agent was not created properly")
agent = agents[0]
self.assertIsNotNone(agent)
# Try to register a second agent, which should fail due to concurrency
mock_agent_details = "FAKE_ASSIGNMENT_2"
architect.server.register_mock_agent(worker_id_1, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 1, "Second agent was created")
# Register another worker
mock_worker_name = "MOCK_WORKER_2"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id_2 = workers[0].db_id
# Register an agent
mock_agent_details = "FAKE_ASSIGNMENT_2"
architect.server.register_mock_agent(worker_id_2, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 2, "Second agent was not created")
# wait for task to pass
self.wait_for_complete_assignment(agents[1].get_unit().get_assignment(), 3)
# Pass a second task as well
mock_agent_details = "FAKE_ASSIGNMENT_3"
architect.server.register_mock_agent(worker_id_1, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 3, "Agent was not created properly")
mock_agent_details = "FAKE_ASSIGNMENT_4"
architect.server.register_mock_agent(worker_id_2, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 4, "Fourth agent was not created")
# wait for task to pass
self.wait_for_complete_assignment(agents[3].get_unit().get_assignment(), 3)
# Both workers should have saturated their tasks, and not be granted agents
mock_agent_details = "FAKE_ASSIGNMENT_5"
architect.server.register_mock_agent(worker_id_1, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 4, "Additional agent was created")
architect.server.register_mock_agent(worker_id_2, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 4, "Additional agent was created")
# new workers should be able to work on these just fine though
mock_worker_name = "MOCK_WORKER_3"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id_3 = workers[0].db_id
mock_worker_name = "MOCK_WORKER_4"
architect.server.register_mock_worker(mock_worker_name)
workers = self.db.find_workers(worker_name=mock_worker_name)
worker_id_4 = workers[0].db_id
# Register agents from new workers
mock_agent_details = "FAKE_ASSIGNMENT_5"
architect.server.register_mock_agent(worker_id_3, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 5, "Additional agent was not created")
mock_agent_details = "FAKE_ASSIGNMENT_6"
architect.server.register_mock_agent(worker_id_4, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 6, "Additional agent was not created")
# wait for task to pass
self.wait_for_complete_assignment(agents[5].get_unit().get_assignment(), 3)
# Give up to 5 seconds for whole mock task to complete
start_time = time.time()
while time.time() - start_time < TIMEOUT_TIME:
if len(self.operator.get_running_task_runs()) == 0:
break
time.sleep(0.1)
self.assertLess(
time.time() - start_time, TIMEOUT_TIME, "Task not completed in time"
)
# Ensure all assignments are completed
task_run = tracked_run.task_run
assignments = task_run.get_assignments()
for assignment in assignments:
self.assertEqual(assignment.get_status(), AssignmentState.COMPLETED)
# Create a new task
config = MephistoConfig(
blueprint=MockBlueprintArgs(num_assignments=1, is_concurrent=True),
provider=MockProviderArgs(requester_name=self.requester_name),
architect=MockArchitectArgs(should_run_server=True),
task=TaskConfigArgs(
task_title="title",
task_description="This is a description",
task_reward="0.3",
task_tags="1,2,3",
maximum_units_per_worker=2,
allowed_concurrent=1,
task_name="max-unit-test",
),
)
self.operator.validate_and_run_config(OmegaConf.structured(config))
tracked_runs = self.operator.get_running_task_runs()
self.assertEqual(len(tracked_runs), 1, "Run not launched")
task_run_id, tracked_run = list(tracked_runs.items())[0]
self.await_server_start(tracked_run.architect)
architect = tracked_run.architect
# Workers one and two still shouldn't be able to make agents
mock_agent_details = "FAKE_ASSIGNMENT_7"
architect.server.register_mock_agent(worker_id_1, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(
len(agents),
6,
"Additional agent was created for worker exceeding max units",
)
mock_agent_details = "FAKE_ASSIGNMENT_7"
architect.server.register_mock_agent(worker_id_2, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(
len(agents),
6,
"Additional agent was created for worker exceeding max units",
)
# Three and four should though
mock_agent_details = "FAKE_ASSIGNMENT_7"
architect.server.register_mock_agent(worker_id_3, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 7, "Additional agent was not created")
mock_agent_details = "FAKE_ASSIGNMENT_8"
architect.server.register_mock_agent(worker_id_4, mock_agent_details)
agents = self.db.find_agents()
self.assertEqual(len(agents), 8, "Additional agent was not created")
# Ensure the task run completed and that all assignments are done
start_time = time.time()
while time.time() - start_time < TIMEOUT_TIME:
if len(self.operator.get_running_task_runs()) == 0:
break
time.sleep(0.1)
self.assertLess(
time.time() - start_time, TIMEOUT_TIME, "Task not completed in time"
)
task_run = tracked_run.task_run
assignments = task_run.get_assignments()
for assignment in assignments:
self.assertEqual(assignment.get_status(), AssignmentState.COMPLETED)
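# Illustrative note (not part of the original test): the restriction settings above interact
# as follows, given the MockArchitect registration helpers used in this file:
#   allowed_concurrent=1        -> a worker may hold only one live agent at a time, so the
#                                  second register_mock_agent() call for worker_id_1 while its
#                                  first assignment is still running is rejected.
#   maximum_units_per_worker=2  -> after completing two units under task_name "max-unit-test",
#                                  workers 1 and 2 are refused further agents, even in the
#                                  second task run that reuses the same task_name, while the
#                                  fresh workers 3 and 4 are still granted agents.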
| 42.564165 | 84 | 0.680073 |
eeb2319cc3449e13e65880ac53113114937ff6e5 | 1,328 | py | Python | picmodels/migrations/0004_auto_20170117_1047.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | ["MIT"] | null | null | null | picmodels/migrations/0004_auto_20170117_1047.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | ["MIT"] | null | null | null | picmodels/migrations/0004_auto_20170117_1047.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-17 10:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('picmodels', '0003_auto_20170117_0843'),
]
operations = [
migrations.AddField(
model_name='consumercpsinfoentry',
name='apt_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='consumercpsinfoentry',
name='case_mgmt_status',
field=models.CharField(blank=True, choices=[('Open', 'Open'), ('Closed', 'Silver'), ('Not Available', 'Not Available')], default='Not Available', max_length=1000, null=True),
),
migrations.AddField(
model_name='consumercpsinfoentry',
name='case_mgmt_type',
field=models.CharField(blank=True, max_length=1000, null=True),
),
migrations.AddField(
model_name='consumercpsinfoentry',
name='phone_apt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='consumercpsinfoentry',
name='target_list',
field=models.BooleanField(default=False),
),
]
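# Illustrative sketch (assumption, not generated by Django): the AddField operations above
# correspond roughly to model fields on ConsumerCPSInfoEntry such as
#
#   apt_date = models.DateField(blank=True, null=True)
#   case_mgmt_status = models.CharField(blank=True, choices=..., default='Not Available',
#                                       max_length=1000, null=True)
#   phone_apt = models.BooleanField(default=False)
#
# Running `python manage.py makemigrations` against such a model is what produces a file
# like this one, and `python manage.py migrate` applies it to the database.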
| 32.390244 | 186 | 0.601657 |
6347ff10ab5e4a2b24c7a484b4913e466a351dcc | 2,318 | py | Python | jython-protolib/src/test/resources/com/streamsets/pipeline/stage/origin/jython/MultiGeneratorOriginScript.py | klaxman/datacollector | 6ebf079dcf7cadc55ded87fe0c9bc2e6b527a319 | ["Apache-2.0"] | null | null | null | jython-protolib/src/test/resources/com/streamsets/pipeline/stage/origin/jython/MultiGeneratorOriginScript.py | klaxman/datacollector | 6ebf079dcf7cadc55ded87fe0c9bc2e6b527a319 | ["Apache-2.0"] | null | null | null | jython-protolib/src/test/resources/com/streamsets/pipeline/stage/origin/jython/MultiGeneratorOriginScript.py | klaxman/datacollector | 6ebf079dcf7cadc55ded87fe0c9bc2e6b527a319 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sample Jython code - Multithreaded Data Generator
sdc.importLock()
from threading import Thread
sdc.importUnlock()
prefix = sdc.userParams.get('recordPrefix', '')
verbose = sdc.userParams.get('verbose', False)
def produce(entityName, entityOffset):
if verbose:
print ('starting jython script ' + entityName
+ ' with offset ' + entityOffset)
offset = int(entityOffset)
cur_batch = sdc.createBatch()
record = sdc.createRecord('generated data')
hasNext = True
while hasNext:
try:
offset = offset + 1
record = sdc.createRecord('generated data')
value = prefix + entityName + ':' + str(offset)
record.value = value
cur_batch.add(record)
# if the batch is full, process it and start a new one
if cur_batch.size() >= sdc.batchSize:
# blocks until all records are written to all destinations
# (or failure) and updates offset
# in accordance with delivery guarantee
cur_batch.process(entityName, str(offset))
cur_batch = sdc.createBatch()
if sdc.isStopped():
hasNext = False
except Exception as e:
sdc.error.write(record, str(e))
hasNext = False
threads = []
for t in range(sdc.numThreads):
if str(t) in sdc.lastOffsets:
offset = sdc.lastOffsets[str(t)]
else:
offset = '0'
thread = Thread(
target=produce,
args=(
str(t),
str(offset)
))
threads.append(thread)
thread.start()
# wait for every thread to finish
for thread in threads:
thread.join()
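# Illustrative note (not part of the original script): as used above, sdc.lastOffsets is a
# map of entity name (here, the thread index) to the last committed offset string, e.g.
# {"0": "1500", "1": "980"}. Each produce() call therefore resumes counting from where its
# thread's previous pipeline run stopped, and cur_batch.process(entityName, str(offset)) is
# the call that records the new offset together with the delivered batch.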
| 30.5 | 74 | 0.622519 |
87bebce15b5181922e5dfbcf2df04ea5c835a797 | 7,845 | py | Python | openpype/hosts/nuke/plugins/load/load_gizmo_ip.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | ["MIT"] | null | null | null | openpype/hosts/nuke/plugins/load/load_gizmo_ip.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | ["MIT"] | null | null | null | openpype/hosts/nuke/plugins/load/load_gizmo_ip.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | ["MIT"] | null | null | null |
import nuke
from avalon import io
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.nuke.api.lib import (
maintained_selection,
create_backdrop,
get_avalon_knob_data,
set_avalon_knob_data
)
from openpype.hosts.nuke.api import (
containerise,
update_container,
viewer_update_and_undo_stop
)
class LoadGizmoInputProcess(load.LoaderPlugin):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["gizmo"]
families = ["gizmo"]
label = "Load Gizmo - Input Process"
order = 0
icon = "eye"
color = "#cc0000"
node_color = "0x7533c1ff"
def load(self, context, name, namespace, data):
"""
Loading function to get Gizmo as Input Process on viewer
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
with maintained_selection():
# add group from nk
nuke.nodePaste(file)
GN = nuke.selectedNode()
GN["name"].setValue(object_name)
# try to place it under Viewer1
if not self.connect_active_viewer(GN):
nuke.delete(GN)
return
return containerise(
node=GN,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
with maintained_selection():
xpos = GN.xpos()
ypos = GN.ypos()
avalon_data = get_avalon_knob_data(GN)
nuke.delete(GN)
# add group from nk
nuke.nodePaste(file)
GN = nuke.selectedNode()
set_avalon_knob_data(GN, avalon_data)
GN.setXYpos(xpos, ypos)
GN["name"].setValue(object_name)
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd88467ff", 16))
else:
GN["tile_color"].setValue(int(self.node_color, 16))
self.log.info("updated to version: {}".format(version.get("name")))
return update_container(GN, data_imprint)
def connect_active_viewer(self, group_node):
"""
        Finds the active viewer, places the node under it,
        and adds the name of the group to the Input Process
        knob of the viewer.
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
msg = str("Please create Viewer node before you "
"run this action again")
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
create_backdrop(
label="Input Process",
layer=2,
nodes=[viewer, group_node],
color="0x7c7faaff"
)
return True
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
        Converts unicode strings to byte strings,
        recursing through nested dictionaries and lists.
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)
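# Illustrative summary (not part of the original plugin): update() swaps the gizmo in place
# rather than re-wiring it. It remembers the node position and Avalon knob data, deletes the
# old group, pastes the new file, restores the metadata, and then colours the node:
# self.node_color when the latest version is loaded, orange (0xd88467ff) for an older one.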
| 30.406977 | 79 | 0.562141 |
d41aaa2c6144636a4e08b6fd88d0f754ca26ef74 | 30,633 | py | Python | emscripten.py | jslhs/emscripten | 8408257cd66435af849f493c10c7f0e8d1d5fa3b | ["MIT"] | 1 | 2015-11-08T06:57:01.000Z | 2015-11-08T06:57:01.000Z | emscripten.py | jslhs/emscripten | 8408257cd66435af849f493c10c7f0e8d1d5fa3b | ["MIT"] | null | null | null | emscripten.py | jslhs/emscripten | 8408257cd66435af849f493c10c7f0e8d1d5fa3b | ["MIT"] | null | null | null |
#!/usr/bin/env python2
'''
You should normally never use this! Use emcc instead.
This is a small wrapper script around the core JS compiler. This calls that
compiler with the settings given to it. It can also read data from C/C++
header files (so that the JS compiler can see the constants in those
headers, for the libc implementation in JS).
'''
import os, sys, json, optparse, subprocess, re, time, multiprocessing, functools
from tools import jsrun, cache as cache_module, tempfiles
__rootpath__ = os.path.abspath(os.path.dirname(__file__))
def path_from_root(*pathelems):
"""Returns the absolute path for which the given path elements are
relative to the emscripten root.
"""
return os.path.join(__rootpath__, *pathelems)
def scan(ll, settings):
# blockaddress(@main, %23)
blockaddrs = []
for blockaddr in re.findall('blockaddress\([^)]*\)', ll):
b = blockaddr.split('(')[1][:-1].split(', ')
blockaddrs.append(b)
if len(blockaddrs) > 0:
settings['NECESSARY_BLOCKADDRS'] = blockaddrs
NUM_CHUNKS_PER_CORE = 1.25
MIN_CHUNK_SIZE = 1024*1024
MAX_CHUNK_SIZE = float(os.environ.get('EMSCRIPT_MAX_CHUNK_SIZE') or 'inf') # configuring this is just for debugging purposes
def process_funcs((i, funcs, meta, settings_file, compiler, forwarded_file, libraries, compiler_engine, temp_files, DEBUG)):
funcs_file = temp_files.get('.func_%d.ll' % i).name
f = open(funcs_file, 'w')
f.write(funcs)
funcs = None
f.write('\n')
f.write(meta)
f.close()
out = jsrun.run_js(
compiler,
engine=compiler_engine,
args=[settings_file, funcs_file, 'funcs', forwarded_file] + libraries,
stdout=subprocess.PIPE,
cwd=path_from_root('src'))
tempfiles.try_delete(funcs_file)
if DEBUG: print >> sys.stderr, '.'
return out
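# Illustrative note (not part of the original source): process_funcs is driven later in
# emscript() via multiprocessing.Pool.map, one tuple per chunk, e.g.
#   commands = [(i, chunk, meta, settings_file, compiler, forwarded_file,
#                libraries, compiler_engine, temp_files, DEBUG) for i, chunk in enumerate(chunks)]
#   outputs = pool.map(process_funcs, commands, chunksize=1)
# Each invocation writes its chunk to a temporary .ll file and runs compiler.js on it in
# 'funcs' mode, which is what lets the per-function work spread across cores.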
def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
"""Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible
Args:
infile: The path to the input LLVM assembly file.
settings: JSON-formatted settings that override the values
defined in src/settings.js.
outfile: The file where the output is written.
"""
compiler = path_from_root('src', 'compiler.js')
# Parallelization: We run 3 phases:
# 1 aka 'pre' : Process types and metadata and so forth, and generate the preamble.
# 2 aka 'funcs': Process functions. We can parallelize this, working on each function independently.
# 3 aka 'post' : Process globals, generate postamble and finishing touches.
if DEBUG: print >> sys.stderr, 'emscript: ll=>js'
if jcache: jcache.ensure()
# Pre-scan ll and alter settings as necessary
if DEBUG: t = time.time()
ll = open(infile).read()
scan(ll, settings)
total_ll_size = len(ll)
ll = None # allow collection
if DEBUG: print >> sys.stderr, ' emscript: scan took %s seconds' % (time.time() - t)
# Split input into the relevant parts for each phase
pre = []
funcs = [] # split up functions here, for parallelism later
func_idents = []
meta = [] # needed by each function XXX
if DEBUG: t = time.time()
in_func = False
ll_lines = open(infile).readlines()
for line in ll_lines:
if in_func:
funcs[-1][1].append(line)
if line.startswith('}'):
in_func = False
funcs[-1] = (funcs[-1][0], ''.join(funcs[-1][1]))
        pre.append(line) # pre needs it too, so we know about all implemented functions
else:
if line.startswith(';'): continue
if line.startswith('define '):
in_func = True
funcs.append((line, [line])) # use the entire line as the identifier
        pre.append(line) # pre needs it too, so we know about all implemented functions
elif line.find(' = type { ') > 0:
pre.append(line) # type
elif line.startswith('!'):
if line.startswith('!llvm.module'): continue # we can ignore that
meta.append(line) # metadata
else:
pre.append(line) # pre needs it so we know about globals in pre and funcs. So emit globals there
ll_lines = None
meta = ''.join(meta)
if DEBUG and len(meta) > 1024*1024: print >> sys.stderr, 'emscript warning: large amounts of metadata, will slow things down'
if DEBUG: print >> sys.stderr, ' emscript: split took %s seconds' % (time.time() - t)
#if DEBUG:
# print >> sys.stderr, '========= pre ================\n'
# print >> sys.stderr, ''.join(pre)
# print >> sys.stderr, '========== funcs ===============\n'
# for func in funcs:
# print >> sys.stderr, '\n// ===\n\n', ''.join(func)
# print >> sys.stderr, '=========================\n'
# Save settings to a file to work around v8 issue 1579
settings_file = temp_files.get('.txt').name
def save_settings():
global settings_text
settings_text = json.dumps(settings, sort_keys=True)
s = open(settings_file, 'w')
s.write(settings_text)
s.close()
save_settings()
# Phase 1 - pre
if DEBUG: t = time.time()
pre_file = temp_files.get('.pre.ll').name
pre_input = ''.join(pre) + '\n' + meta
out = None
if jcache:
keys = [pre_input, settings_text, ','.join(libraries)]
shortkey = jcache.get_shortkey(keys)
if DEBUG_CACHE: print >>sys.stderr, 'shortkey', shortkey
out = jcache.get(shortkey, keys)
if DEBUG_CACHE and not out:
dfpath = os.path.join(get_configuration().TEMP_DIR, "ems_" + shortkey)
dfp = open(dfpath, 'w')
dfp.write(pre_input);
dfp.write("\n\n========================== settings_text\n\n");
dfp.write(settings_text);
dfp.write("\n\n========================== libraries\n\n");
dfp.write("\n".join(libraries))
dfp.close()
print >>sys.stderr, ' cache miss, key data dumped to %s' % dfpath
if out and DEBUG: print >> sys.stderr, ' loading pre from jcache'
if not out:
open(pre_file, 'w').write(pre_input)
out = jsrun.run_js(compiler, compiler_engine, [settings_file, pre_file, 'pre'] + libraries, stdout=subprocess.PIPE,
cwd=path_from_root('src'))
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
if jcache:
if DEBUG: print >> sys.stderr, ' saving pre to jcache'
jcache.set(shortkey, keys, out)
pre, forwarded_data = out.split('//FORWARDED_DATA:')
forwarded_file = temp_files.get('.json').name
open(forwarded_file, 'w').write(forwarded_data)
if DEBUG: print >> sys.stderr, ' emscript: phase 1 took %s seconds' % (time.time() - t)
# Phase 2 - func
cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
assert cores >= 1
if cores > 1:
intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
chunk_size = max(MIN_CHUNK_SIZE, total_ll_size / intended_num_chunks)
chunk_size += 3*len(meta) + len(forwarded_data)/3 # keep ratio of lots of function code to meta (expensive to process, and done in each parallel task) and forwarded data (less expensive but potentially significant)
chunk_size = min(MAX_CHUNK_SIZE, chunk_size)
else:
chunk_size = MAX_CHUNK_SIZE # if 1 core, just use the max chunk size
if DEBUG: t = time.time()
forwarded_json = json.loads(forwarded_data)
indexed_functions = set()
if settings.get('ASM_JS'):
settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
save_settings()
chunks = cache_module.chunkify(
funcs, chunk_size,
jcache.get_cachename('emscript_files') if jcache else None)
funcs = None
if jcache:
# load chunks from cache where we can # TODO: ignore small chunks
cached_outputs = []
def load_from_cache(chunk):
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys) # TODO: share shortkeys with later code
out = jcache.get(shortkey, keys) # this is relatively expensive (pickling?)
if out:
cached_outputs.append(out)
return False
return True
chunks = filter(load_from_cache, chunks)
if len(cached_outputs) > 0:
if out and DEBUG: print >> sys.stderr, ' loading %d funcchunks from jcache' % len(cached_outputs)
else:
cached_outputs = []
# TODO: minimize size of forwarded data from funcs to what we actually need
if cores == 1 and total_ll_size < MAX_CHUNK_SIZE:
assert len(chunks) == 1, 'no point in splitting up without multiple cores'
if len(chunks) > 0:
if DEBUG: print >> sys.stderr, ' emscript: phase 2 working on %d chunks %s (intended chunk size: %.2f MB, meta: %.2f MB, forwarded: %.2f MB, total: %.2f MB)' % (len(chunks), ('using %d cores' % cores) if len(chunks) > 1 else '', chunk_size/(1024*1024.), len(meta)/(1024*1024.), len(forwarded_data)/(1024*1024.), total_ll_size/(1024*1024.))
commands = [
(i, chunk, meta, settings_file, compiler, forwarded_file, libraries, compiler_engine, temp_files, DEBUG)
for i, chunk in enumerate(chunks)
]
if len(chunks) > 1:
pool = multiprocessing.Pool(processes=cores)
outputs = pool.map(process_funcs, commands, chunksize=1)
elif len(chunks) == 1:
outputs = [process_funcs(commands[0])]
commands = None
else:
outputs = []
if jcache:
# save chunks to cache
for i in range(len(chunks)):
chunk = chunks[i]
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys)
jcache.set(shortkey, keys, outputs[i])
if out and DEBUG and len(chunks) > 0: print >> sys.stderr, ' saving %d funcchunks to jcache' % len(chunks)
chunks = None
if jcache: outputs += cached_outputs # TODO: preserve order
outputs = [output.split('//FORWARDED_DATA:') for output in outputs]
for output in outputs:
assert len(output) == 2, 'Did not receive forwarded data in an output - process failed? We only got: ' + output[0][-3000:]
if DEBUG: print >> sys.stderr, ' emscript: phase 2 took %s seconds' % (time.time() - t)
if DEBUG: t = time.time()
# merge forwarded data
if settings.get('ASM_JS'):
all_exported_functions = set(settings['EXPORTED_FUNCTIONS']) # both asm.js and otherwise
for additional_export in settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE']: # additional functions to export from asm, if they are implemented
all_exported_functions.add('_' + additional_export)
exported_implemented_functions = set()
for func_js, curr_forwarded_data in outputs:
curr_forwarded_json = json.loads(curr_forwarded_data)
forwarded_json['Types']['preciseI64MathUsed'] = forwarded_json['Types']['preciseI64MathUsed'] or curr_forwarded_json['Types']['preciseI64MathUsed']
for key, value in curr_forwarded_json['Functions']['blockAddresses'].iteritems():
forwarded_json['Functions']['blockAddresses'][key] = value
for key in curr_forwarded_json['Functions']['indexedFunctions'].iterkeys():
indexed_functions.add(key)
if settings.get('ASM_JS'):
export_bindings = settings['EXPORT_BINDINGS']
for key in curr_forwarded_json['Functions']['implementedFunctions'].iterkeys():
if key in all_exported_functions or (export_bindings and key.startswith('_emscripten_bind')):
exported_implemented_functions.add(key)
for key, value in curr_forwarded_json['Functions']['unimplementedFunctions'].iteritems():
forwarded_json['Functions']['unimplementedFunctions'][key] = value
if settings.get('ASM_JS'):
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
outputs.append([parts[1]])
funcs_js = [output[0] for output in outputs]
outputs = None
if DEBUG: print >> sys.stderr, ' emscript: phase 2b took %s seconds' % (time.time() - t)
if DEBUG: t = time.time()
# calculations on merged forwarded data
forwarded_json['Functions']['indexedFunctions'] = {}
i = 2
for indexed in indexed_functions:
#print >> sys.stderr, 'indaxx', indexed, i
forwarded_json['Functions']['indexedFunctions'][indexed] = i # make sure not to modify this python object later - we use it in indexize
i += 2
forwarded_json['Functions']['nextIndex'] = i
indexing = forwarded_json['Functions']['indexedFunctions']
def indexize(js):
return re.sub(r"'{{ FI_([\w\d_$]+) }}'", lambda m: str(indexing.get(m.groups(0)[0]) or 0), js)
blockaddrs = forwarded_json['Functions']['blockAddresses']
def blockaddrsize(js):
return re.sub(r'{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}', lambda m: str(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)
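  # Illustrative examples (not part of the original source) of the two template
  # substitutions above, assuming indexing == {'_foo': 2}:
  #   indexize("table['{{ FI__foo }}']")  -> "table[2]"   (names missing from indexing fall back to 0)
  #   blockaddrsize("{{{ BA_main|23 }}}") -> the numeric label stored in
  #                                          forwarded_json['Functions']['blockAddresses']['main']['23']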
#if DEBUG: outfile.write('// pre\n')
outfile.write(blockaddrsize(indexize(pre)))
pre = None
#if DEBUG: outfile.write('// funcs\n')
# forward
forwarded_data = json.dumps(forwarded_json)
forwarded_file = temp_files.get('.2.json').name
open(forwarded_file, 'w').write(indexize(forwarded_data))
if DEBUG: print >> sys.stderr, ' emscript: phase 2c took %s seconds' % (time.time() - t)
# Phase 3 - post
if DEBUG: t = time.time()
post_file = temp_files.get('.post.ll').name
open(post_file, 'w').write('\n') # no input, just processing of forwarded data
out = jsrun.run_js(compiler, compiler_engine, [settings_file, post_file, 'post', forwarded_file] + libraries, stdout=subprocess.PIPE,
cwd=path_from_root('src'))
post, last_forwarded_data = out.split('//FORWARDED_DATA:') # if this fails, perhaps the process failed prior to printing forwarded data?
last_forwarded_json = json.loads(last_forwarded_data)
if settings.get('ASM_JS'):
post_funcs, post_rest = post.split('// EMSCRIPTEN_END_FUNCS\n')
post = post_rest
funcs_js += ['\n' + post_funcs + '// EMSCRIPTEN_END_FUNCS\n']
simple = os.environ.get('EMCC_SIMPLE_ASM')
class Counter:
i = 0
pre_tables = last_forwarded_json['Functions']['tables']['pre']
del last_forwarded_json['Functions']['tables']['pre']
# Find function table calls without function tables generated for them
for funcs_js_item in funcs_js:
for use in set(re.findall(r'{{{ FTM_[\w\d_$]+ }}}', funcs_js_item)):
sig = use[8:len(use)-4]
if sig not in last_forwarded_json['Functions']['tables']:
if DEBUG: print >> sys.stderr, 'add empty function table', sig
last_forwarded_json['Functions']['tables'][sig] = 'var FUNCTION_TABLE_' + sig + ' = [0,0];\n'
def make_table(sig, raw):
i = Counter.i
Counter.i += 1
bad = 'b' + str(i)
params = ','.join(['p%d' % p for p in range(len(sig)-1)])
coercions = ';'.join(['p%d = %sp%d%s' % (p, '+' if sig[p+1] != 'i' else '', p, '' if sig[p+1] != 'i' else '|0') for p in range(len(sig)-1)]) + ';'
ret = '' if sig[0] == 'v' else ('return %s0' % ('+' if sig[0] != 'i' else ''))
return ('function %s(%s) { %s abort(%d); %s }' % (bad, params, coercions, i, ret), raw.replace('[0,', '[' + bad + ',').replace(',0,', ',' + bad + ',').replace(',0,', ',' + bad + ',').replace(',0]', ',' + bad + ']').replace(',0]', ',' + bad + ']').replace(',0\n', ',' + bad + '\n'))
infos = [make_table(sig, raw) for sig, raw in last_forwarded_json['Functions']['tables'].iteritems()]
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n// EMSCRIPTEN_END_FUNCS\n' + '\n'.join([info[1] for info in infos])
asm_setup = ''
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul']]
fundamentals = ['Math', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array']
math_envs = ['Math.min'] # TODO: move min to maths
asm_setup += '\n'.join(['var %s = %s;' % (f.replace('.', '_'), f) for f in math_envs])
basic_funcs = ['abort', 'assert', 'asmPrintInt', 'asmPrintFloat', 'copyTempDouble', 'copyTempFloat'] + [m.replace('.', '_') for m in math_envs]
if settings['SAFE_HEAP']: basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_STORE', 'SAFE_HEAP_CLEAR']
if settings['CHECK_HEAP_ALIGN']: basic_funcs += ['CHECK_ALIGN_2', 'CHECK_ALIGN_4', 'CHECK_ALIGN_8']
basic_vars = ['STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
basic_float_vars = ['NaN', 'Infinity']
if forwarded_json['Types']['preciseI64MathUsed']:
basic_funcs += ['i64Math_' + op for op in ['add', 'subtract', 'multiply', 'divide', 'modulo']]
asm_setup += '''
var i64Math_add = function(a, b, c, d) { i64Math.add(a, b, c, d) };
var i64Math_subtract = function(a, b, c, d) { i64Math.subtract(a, b, c, d) };
var i64Math_multiply = function(a, b, c, d) { i64Math.multiply(a, b, c, d) };
var i64Math_divide = function(a, b, c, d, e) { i64Math.divide(a, b, c, d, e) };
var i64Math_modulo = function(a, b, c, d, e) { i64Math.modulo(a, b, c, d, e) };
'''
asm_runtime_funcs = ['stackAlloc', 'stackSave', 'stackRestore', 'setThrew'] + ['setTempRet%d' % i for i in range(10)]
# function tables
def asm_coerce(value, sig):
if sig == 'v': return value
return ('+' if sig != 'i' else '') + value + ('|0' if sig == 'i' else '')
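  # Illustrative examples (not part of the original source) of the coercions produced above:
  #   asm_coerce('a1', 'i')  -> 'a1|0'   (force to 32-bit int, as asm.js requires)
  #   asm_coerce('a1', 'd')  -> '+a1'    (force to double)
  #   asm_coerce('ret', 'v') -> 'ret'    (void signature: no coercion)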
function_tables = ['dynCall_' + table for table in last_forwarded_json['Functions']['tables']]
function_tables_impls = []
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + asm_coerce('a' + str(i), sig[i]) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([asm_coerce('a' + str(i), sig[i]) for i in range(1, len(sig))])
ret = ('return ' if sig[0] != 'v' else '') + asm_coerce('FUNCTION_TABLE_%s[index&{{{ FTM_%s }}}](%s)' % (sig, sig, coerced_args), sig[0])
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
# calculate exports
exported_implemented_functions = list(exported_implemented_functions)
exports = []
if not simple:
for export in exported_implemented_functions + asm_runtime_funcs + function_tables:
exports.append("%s: %s" % (export, export))
exports = '{ ' + ', '.join(exports) + ' }'
else:
exports = '_main'
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except:
pass
# If no named globals, only need externals
global_vars = map(lambda g: g['name'], filter(lambda g: settings['NAMED_GLOBALS'] or g.get('external') or g.get('unIndexable'), forwarded_json['Variables']['globals'].values()))
global_funcs = ['_' + x for x in forwarded_json['Functions']['libraryFunctions'].keys()]
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1];
asm_global_funcs = ''.join([' var ' + g.replace('.', '_') + '=global.' + g + ';\n' for g in maths]) + \
''.join([' var ' + g + '=env.' + math_fix(g) + ';\n' for g in basic_funcs + global_funcs])
asm_global_vars = ''.join([' var ' + g + '=env.' + g + '|0;\n' for g in basic_vars + global_vars]) + \
''.join([' var ' + g + '=+env.' + g + ';\n' for g in basic_float_vars])
# sent data
the_global = '{ ' + ', '.join([math_fix(s) + ': ' + s for s in fundamentals]) + ' }'
sending = '{ ' + ', '.join([math_fix(s) + ': ' + s for s in basic_funcs + global_funcs + basic_vars + basic_float_vars + global_vars]) + ' }'
# received
if not simple:
receiving = ';\n'.join(['var ' + s + ' = Module["' + s + '"] = asm.' + s for s in exported_implemented_functions + function_tables])
else:
receiving = 'var _main = Module["_main"] = asm;'
# finalize
if DEBUG: print >> sys.stderr, 'asm text sizes', map(len, funcs_js), len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables), len('\n'.join(function_tables_impls)), len(function_tables_defs.replace('\n', '\n ')), len(exports), len(the_global), len(sending), len(receiving)
funcs_js = ['''
%s
function asmPrintInt(x, y) {
Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
}
function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
var asm = (function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
var HEAP32 = new global.Int32Array(buffer);
var HEAPU8 = new global.Uint8Array(buffer);
var HEAPU16 = new global.Uint16Array(buffer);
var HEAPU32 = new global.Uint32Array(buffer);
var HEAPF32 = new global.Float32Array(buffer);
var HEAPF64 = new global.Float64Array(buffer);
''' % (asm_setup,) + '\n' + asm_global_vars + '''
var __THREW__ = 0;
var undef = 0;
var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
''' + ''.join(['''
var tempRet%d = 0;''' % i for i in range(10)]) + '\n' + asm_global_funcs + '''
// EMSCRIPTEN_START_FUNCS
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
STACKTOP = ((STACKTOP + 3)>>2)<<2;
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
function setThrew(threw) {
threw = threw|0;
__THREW__ = threw;
}
''' + ''.join(['''
function setTempRet%d(value) {
value = value|0;
tempRet%d = value;
}
''' % (i, i) for i in range(10)])] + funcs_js + ['''
%s
return %s;
})
// EMSCRIPTEN_END_ASM
(%s, %s, buffer);
%s;
Runtime.stackAlloc = function(size) { return asm.stackAlloc(size) };
Runtime.stackSave = function() { return asm.stackSave() };
Runtime.stackRestore = function(top) { asm.stackRestore(top) };
''' % (pre_tables + '\n'.join(function_tables_impls) + '\n' + function_tables_defs.replace('\n', '\n '), exports, the_global, sending, receiving)]
# Set function table masks
def function_table_maskize(js):
masks = {}
default = None
for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
masks[sig] = str(table.count(','))
default = sig
def fix(m):
sig = m.groups(0)[0]
return masks[sig]
return re.sub(r'{{{ FTM_([\w\d_$]+) }}}', lambda m: fix(m), js) # masks[m.groups(0)[0]]
funcs_js = map(function_table_maskize, funcs_js)
else:
function_tables_defs = '\n'.join([table for table in last_forwarded_json['Functions']['tables'].itervalues()])
outfile.write(function_tables_defs)
funcs_js = ['''
// EMSCRIPTEN_START_FUNCS
'''] + funcs_js + ['''
// EMSCRIPTEN_END_FUNCS
''']
for funcs_js_item in funcs_js: # do this loop carefully to save memory
funcs_js_item = indexize(funcs_js_item)
funcs_js_item = blockaddrsize(funcs_js_item)
outfile.write(funcs_js_item)
funcs_js = None
outfile.write(indexize(post))
if DEBUG: print >> sys.stderr, ' emscript: phase 3 took %s seconds' % (time.time() - t)
outfile.close()
def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG, DEBUG_CACHE):
# Prepare settings for serialization to JSON.
settings = {}
for setting in args.settings:
name, value = setting.strip().split('=', 1)
settings[name] = json.loads(value)
# Add header defines to settings
defines = {}
include_root = path_from_root('system', 'include')
headers = args.headers[0].split(',') if len(args.headers) > 0 else []
seen_headers = set()
while len(headers) > 0:
header = headers.pop(0)
if not os.path.isabs(header):
header = os.path.join(include_root, header)
seen_headers.add(header)
for line in open(header, 'r'):
line = line.replace('\t', ' ')
m = re.match('^ *# *define +(?P<name>[-\w_.]+) +\(?(?P<value>[-\w_.|]+)\)?.*', line)
if not m:
# Catch enum defines of a very limited sort
m = re.match('^ +(?P<name>[A-Z_\d]+) += +(?P<value>\d+).*', line)
if m:
if m.group('name') != m.group('value'):
defines[m.group('name')] = m.group('value')
#else:
# print 'Warning: %s #defined to itself' % m.group('name') # XXX this can happen if we are set to be equal to an enum (with the same name)
m = re.match('^ *# *include *["<](?P<name>[\w_.-/]+)[">].*', line)
if m:
# Find this file
found = False
for w in [w for w in os.walk(include_root)]:
for f in w[2]:
curr = os.path.join(w[0], f)
if curr.endswith(m.group('name')) and curr not in seen_headers:
headers.append(curr)
found = True
break
if found: break
#assert found, 'Could not find header: ' + m.group('name')
if len(defines) > 0:
def lookup(value):
try:
while not unicode(value).isnumeric():
value = defines[value]
return value
except:
pass
try: # 0x300 etc.
value = eval(value)
return value
except:
pass
try: # CONST1|CONST2
parts = map(lookup, value.split('|'))
value = reduce(lambda a, b: a|b, map(eval, parts))
return value
except:
pass
return None
for key, value in defines.items():
value = lookup(value)
if value is not None:
defines[key] = str(value)
else:
del defines[key]
#print >> sys.stderr, 'new defs:', str(defines).replace(',', ',\n '), '\n\n'
settings.setdefault('C_DEFINES', {}).update(defines)
# libraries
libraries = args.libraries[0].split(',') if len(args.libraries) > 0 else []
# Compile the assembly to Javascript.
if settings.get('RELOOP'):
if not relooper:
relooper = cache.get_path('relooper.js')
settings.setdefault('RELOOPER', relooper)
if not os.path.exists(relooper):
from tools import shared
shared.Building.ensure_relooper(relooper)
emscript(args.infile, settings, args.outfile, libraries, compiler_engine=compiler_engine,
jcache=jcache, temp_files=temp_files, DEBUG=DEBUG, DEBUG_CACHE=DEBUG_CACHE)
def _main(environ):
parser = optparse.OptionParser(
usage='usage: %prog [-h] [-H HEADERS] [-o OUTFILE] [-c COMPILER_ENGINE] [-s FOO=BAR]* infile',
description=('You should normally never use this! Use emcc instead. '
'This is a wrapper around the JS compiler, converting .ll to .js.'),
epilog='')
parser.add_option('-H', '--headers',
default=[],
action='append',
help='System headers (comma separated) whose #defines should be exposed to the compiled code.')
parser.add_option('-L', '--libraries',
default=[],
action='append',
help='Library files (comma separated) to use in addition to those in emscripten src/library_*.')
parser.add_option('-o', '--outfile',
default=sys.stdout,
help='Where to write the output; defaults to stdout.')
parser.add_option('-c', '--compiler',
default=None,
help='Which JS engine to use to run the compiler; defaults to the one in ~/.emscripten.')
parser.add_option('--relooper',
default=None,
help='Which relooper file to use if RELOOP is enabled.')
parser.add_option('-s', '--setting',
dest='settings',
default=[],
action='append',
metavar='FOO=BAR',
help=('Overrides for settings defined in settings.js. '
'May occur multiple times.'))
parser.add_option('-j', '--jcache',
action='store_true',
default=False,
help=('Enable jcache (ccache-like caching of compilation results, for faster incremental builds).'))
parser.add_option('-T', '--temp-dir',
default=None,
help=('Where to create temporary files.'))
parser.add_option('-v', '--verbose',
action='store_true',
dest='verbose',
help='Displays debug output')
parser.add_option('-q', '--quiet',
action='store_false',
dest='verbose',
help='Hides debug output')
parser.add_option('--suppressUsageWarning',
action='store_true',
default=environ.get('EMSCRIPTEN_SUPPRESS_USAGE_WARNING'),
help=('Suppress usage warning'))
# Convert to the same format that argparse would have produced.
keywords, positional = parser.parse_args()
if not keywords.suppressUsageWarning:
print >> sys.stderr, '''
==============================================================
WARNING: You should normally never use this! Use emcc instead.
==============================================================
'''
if len(positional) != 1:
raise RuntimeError('Must provide exactly one positional argument.')
keywords.infile = os.path.abspath(positional[0])
if isinstance(keywords.outfile, basestring):
keywords.outfile = open(keywords.outfile, 'w')
if keywords.relooper:
relooper = os.path.abspath(keywords.relooper)
else:
relooper = None # use the cache
def get_configuration():
if hasattr(get_configuration, 'configuration'):
return get_configuration.configuration
from tools import shared
configuration = shared.Configuration(environ=os.environ)
get_configuration.configuration = configuration
return configuration
if keywords.temp_dir is None:
temp_files = get_configuration().get_temp_files()
else:
temp_dir = os.path.abspath(keywords.temp_dir)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
temp_files = tempfiles.TempFiles(temp_dir)
if keywords.compiler is None:
from tools import shared
keywords.compiler = shared.COMPILER_ENGINE
if keywords.verbose is None:
DEBUG = get_configuration().DEBUG
DEBUG_CACHE = get_configuration().DEBUG_CACHE
else:
DEBUG = keywords.verbose
DEBUG_CACHE = keywords.verbose
cache = cache_module.Cache()
temp_files.run_and_clean(lambda: main(
keywords,
compiler_engine=keywords.compiler,
cache=cache,
jcache=cache_module.JCache(cache) if keywords.jcache else None,
relooper=relooper,
temp_files=temp_files,
DEBUG=DEBUG,
DEBUG_CACHE=DEBUG_CACHE,
))
if __name__ == '__main__':
_main(environ=os.environ)
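# Illustrative invocation (assumption, normally done for you by emcc):
#   python2 emscripten.py -s ASM_JS=1 -s RELOOP=1 -o out.js input.ll
# -s settings mirror src/settings.js, -o selects the output file, and the single positional
# argument is the LLVM assembly produced by the LLVM toolchain.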
| 42.078297 | 344 | 0.629746 |
814ff6688754eec0216e342ebfb43af3b2e1bd8d | 730 | py | Python | app/main/form.py | Ryan-GM/Pitchy | ca90741c251b98434ccdaf21593beb964d83e937 | ["MIT"] | null | null | null | app/main/form.py | Ryan-GM/Pitchy | ca90741c251b98434ccdaf21593beb964d83e937 | ["MIT"] | null | null | null | app/main/form.py | Ryan-GM/Pitchy | ca90741c251b98434ccdaf21593beb964d83e937 | ["MIT"] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField,SelectField
from wtforms.validators import Required
class PitchyForm(FlaskForm):
title = StringField('Title',validators=[Required()])
post = TextAreaField('Pitch it', validators=[Required()])
    category = SelectField('Category',choices=[('Cheesy','Cheesy'),('Life','Life'),('Funny','Funny')],validators=[Required()])
submit = SubmitField('Pitchy')
class CommentForm(FlaskForm):
comment = TextAreaField('Do leave a comment',validators=[Required()])
submit = SubmitField('Comment')
class UpdateProfile(FlaskForm):
bio = TextAreaField('A brief one about you',validators=[Required()])
submit = SubmitField('Save')
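# Illustrative usage sketch (assumption, not part of this module): in a Flask view the
# form would typically be handled along these lines:
#
#   form = PitchyForm()
#   if form.validate_on_submit():
#       title = form.title.data
#       post = form.post.data
#       category = form.category.data
#       # ... persist the pitch, then redirect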
| 42.941176 | 124 | 0.735616 |
61efd941e81bae4c638b7f184ab519b978403736 | 1,797 | py | Python | streaming_server.py | BenevolentPista/NP-Assignment | 2217986e71332912b6b64f138f7e5c7234877389 | ["MIT"] | null | null | null | streaming_server.py | BenevolentPista/NP-Assignment | 2217986e71332912b6b64f138f7e5c7234877389 | ["MIT"] | null | null | null | streaming_server.py | BenevolentPista/NP-Assignment | 2217986e71332912b6b64f138f7e5c7234877389 | ["MIT"] | null | null | null |
import socket
import tqdm
import os
import ffmpeg
# device's IP address
SERVER_HOST = "192.169.1.104"
SERVER_PORT = 5001
width = 10
height = 10
# receive 4096 bytes each time
BUFFER_SIZE = 4096
SEPARATOR = "<SEPARATOR>"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((SERVER_HOST, SERVER_PORT))
sock.listen(1)
print('Listening at', sock.getsockname())
while True:
sc, sockname = sock.accept()
print('We have accepted a connection from', sockname)
print(' Socket name:', sc.getsockname())
print(' Socket peer:', sc.getpeername())
# receive the file infos
# receive using client socket, not server socket
received = sc.recv(BUFFER_SIZE).decode()
filename, filesize = received.split(SEPARATOR)
# remove absolute path if there is
filename = os.path.basename(filename)
# convert to integer
filesize = int(filesize)
progress = tqdm.tqdm(range(filesize), f"Receiving {filename}", unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
for _ in progress:
bytes = sc.recv(BUFFER_SIZE)
if not bytes:
break
f.write(bytes)
'''
process = ffmpeg.input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
process = ffmpeg.output('output.mp4', pix_fmt='yuv420p')
process = ffmpeg.overwrite_output()
process = ffmpeg.run_async(pipe_stdin=True)
process.communicate(input=bytes)
'''
# update the progress bar
progress.update(len(bytes))
print("File successfully delivered")
print(os.path.dirname(os.path.realpath(__file__)))
sc.close()
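    # Illustrative client-side counterpart (assumption, not part of this server): the sender
    # is expected to announce "<filename><SEPARATOR><filesize>" first and then stream the raw
    # bytes, e.g.
    #
    #   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #   s.connect((SERVER_HOST, SERVER_PORT))
    #   s.send(f"{filename}{SEPARATOR}{os.path.getsize(filename)}".encode())
    #   with open(filename, "rb") as f:
    #       while chunk := f.read(BUFFER_SIZE):
    #           s.send(chunk)
    #   s.close()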
| 28.078125 | 112 | 0.648303 |
2623bd30c65c418836c75689ca29c625a8ef19a6 | 6,271 | py | Python | straxen/bokeh_utils.py | FaroutYLq/straxen | 4741b8e61895da85b593903ea55dacdaeae65b2f | ["BSD-3-Clause"] | null | null | null | straxen/bokeh_utils.py | FaroutYLq/straxen | 4741b8e61895da85b593903ea55dacdaeae65b2f | ["BSD-3-Clause"] | 2 | 2021-12-20T08:10:59.000Z | 2022-03-14T08:08:00.000Z | straxen/bokeh_utils.py | ershockley/straxen | 881c52571dba28aa2130587f39a6ced45059fde9 | ["BSD-3-Clause"] | null | null | null |
import bokeh.plotting as bklt
import bokeh
import numpy as np
import strax
export, __all__ = strax.exporter()
@export
def bokeh_to_wiki(fig, outputfile=None):
"""
    Function which converts bokeh HTML code to wiki-readable code.
    :param fig: Figure to be converted.
    :param outputfile: String of absolute file path. If specified, the output
        is written to the file; otherwise it is printed to the notebook and
        can simply be copied into the wiki.
"""
# convert plot to wiki format:
html = bokeh.embed.file_html(fig, bokeh.resources.CDN)
html = '\n'.join((['<html>'] + html.split('\n')[6:]))
if outputfile:
with open(outputfile, mode='w') as file:
file.write(html)
else:
print(html)
def get_peaks_source(peaks, relative_start=0, time_scaler=1, keep_amplitude_per_sample=True):
"""
Computes bokeh.plotting.ColumnDataSource for given peaks.
:param peaks: Peaks to be plotted.
:param relative_start: t0 from which on the peaks should be plotted.
:param time_scaler: Factor to rescale the time from ns to other scale.
E.g. =1000 scales to µs.
    :param keep_amplitude_per_sample: Boolean; if True the amplitude of the
        plotted peaks is in pe/sample, if False in pe/ns.
:return: bokeh.plotting.ColumnDataSource instance which can be used
to plot peaks.
"""
if not (np.all(peaks['type'] == peaks[0]['type'])):
raise ValueError('All peaks must be of the same type (S1, S2 or Unknown)!')
x_p = []
y_p = []
for ind, p in enumerate(peaks):
x, y = _patches_x_y(p, keep_amplitude_per_sample=keep_amplitude_per_sample)
x -= relative_start # relative to first peak
x = x / time_scaler
x_p.append(x)
y_p.append(y)
if peaks[0]['type'] == 2:
scaler = 10**-3
else:
scaler = 1
source = bklt.ColumnDataSource(data={'xs': x_p, # Coordinates for peak patches
'ys': y_p,
'x': peaks['x'], # XY-Pos in PMZ Hitpattern
'y': peaks['y'],
'dt': peaks['dt'],
'time': peaks['time'],
'center_time': peaks['center_time'],
'endtime': strax.endtime(peaks),
'width_50': peaks['range_50p_area'] * scaler,
'width_90': peaks['range_90p_area'] * scaler,
'rise': peaks['rise_time'] * scaler,
'rel_center_time': (peaks['center_time'] - peaks['time']) * scaler,
'area': peaks['area'],
'aft': peaks['area_fraction_top'],
'nhits': peaks['n_hits'],
}
)
return source
def _patches_x_y(peak, keep_amplitude_per_sample=False):
"""
Creates x,y coordinates needed to draw peaks via
bokeh.models.patches.
:param peak: Peak for which we need the x/y samples
:param keep_amplitude_per_sample: If y-data should be in units of "per sample"
or "per ns".
:returns: Tuple of x, y
"""
if keep_amplitude_per_sample:
dt_a = 1
else:
dt_a = peak['dt']
x = np.arange(peak["length"])
xx = np.zeros(2 * len(x), dtype=x.dtype)
mx = 0.5 * (x[1::] + x[:-1])
xx[1:-1:2] = mx
xx[2::2] = mx
xx[0] = 1.5 * x[0] - 0.5 * x[1]
xx[-1] = 1.5 * x[-1] - 0.5 * x[-2]
xx = np.array(xx) * peak['dt'] + peak['time']
y = peak['data'][:peak['length']]
yy = np.zeros(2 * len(x))
yy[0::2] = y
yy[1::2] = y
yy = np.array(yy) / dt_a
# baseline since we'll fill underneath
xx = np.concatenate([[xx[0]], xx, [xx[-1]]])
yy = np.concatenate([[0], yy, [0]])
return xx, yy
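# Illustrative example (not part of the original module): for a peak with length=3, dt=10,
# time=0, data[:3]=[2, 4, 6] and keep_amplitude_per_sample=True, the function returns the
# closed step outline
#   xx = [-5, -5,  5,  5, 15, 15, 25, 25]
#   yy = [ 0,  2,  2,  4,  4,  6,  6,  0]
# i.e. every sample becomes a flat step of width dt, and the first/last points drop to the
# baseline so bokeh patches can fill the area under the waveform.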
def peak_tool_tip(peak_type):
"""
Default mouseover tooltip for peaks.
:param peak_type: If 2, all time variables are in µs else in ns.
:return: dictionary of tooltips. Can be converted to a list for
bokeh.models.HoverTool.
"""
# Add static time parameters:
tool_tip = {"time_static": ("time [ns]", "@time"),
"center_time": ("center_time [ns]", "@center_time"),
"endtime": ("endtime [ns]", "@endtime"),
}
# Now ns/µs parameters for S1 and S2
tool_tip['dt'] = ("dt [ns/sample]", "@dt")
tool_tip['time_dynamic'] = ("time [ns]", "$x")
tool_tip['rel_center_time'] = ('center time [ns]', '@rel_center_time')
tool_tip['range_50p_width'] = ('50% width [ns]', '@width_50')
tool_tip['range_90p_width'] = ('90% width [ns]', '@width_90')
tool_tip['rise_time'] = ('rise time [ns]', '@rise')
# Add non-time parameters (results in an ordered tooltip)
tool_tip['amplitude'] = ("Amplitude [pe/ns]", "$y")
tool_tip["area"] = ('area [pe]', '@area')
tool_tip["aft"] = ('AFT', '@aft')
tool_tip["nhits"] = ('nhits', '@nhits')
if peak_type == 2:
for k, i in tool_tip.items():
if k not in ["time_static", "center_time", "endtime"]:
tool_tip[k] = (i[0].replace('[ns]', '[µs]'), i[1])
return tool_tip
def default_fig(width=400, height=400, title='', **kwargs):
"""
Helper function which returns a bokeh.plotting.figure instance
with sizing_mode set to 'scale_both' and an aspect ratio set
according to the specified width and height.
:param width: Plot width in pixels
    :param height: Plot height in pixels.
:param title: Title of the plot.
Also allows for additional kwargs accepted by bokeh.plotting.
:returns: bokeh.plotting.figure instance.
"""
fig = bklt.figure(plot_width=width,
plot_height=height,
sizing_mode='scale_both',
aspect_ratio=width / height,
title=title,
tools="pan,box_zoom,reset,save",
**kwargs
)
return fig
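# Illustrative usage sketch (assumption, not part of the original module): the helpers above
# are meant to be combined roughly like
#
#   fig = default_fig(width=600, height=300, title='Peaks')
#   source = get_peaks_source(s2_peaks, relative_start=s2_peaks[0]['time'], time_scaler=1000)
#   fig.patches('xs', 'ys', source=source, fill_alpha=0.2)
#   fig.add_tools(bokeh.models.HoverTool(tooltips=list(peak_tool_tip(2).values())))
#   bklt.show(fig)
#
# where s2_peaks is a strax peaks array of a single type (here type 2, so widths and times
# in the tooltip are shown in µs).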
| 35.630682 | 108 | 0.538032 |
0a3c64cfec651e85acd25be509f609a94ef5b23a | 10,183 | py | Python | tests/test_thermostat.py | swingerman/ha-dual-climate | 3e863eb1ad57021ba4d93decc3720d9cae91f607 | ["Apache-2.0", "MIT"] | 1 | 2020-09-15T15:32:12.000Z | 2020-09-15T15:32:12.000Z | tests/test_thermostat.py | swingerman/ha-dual-climate | 3e863eb1ad57021ba4d93decc3720d9cae91f607 | ["Apache-2.0", "MIT"] | null | null | null | tests/test_thermostat.py | swingerman/ha-dual-climate | 3e863eb1ad57021ba4d93decc3720d9cae91f607 | ["Apache-2.0", "MIT"] | null | null | null |
"""The tests for the dual_smart_thermostat."""
from datetime import datetime, timezone, date
import logging
import pytest
from typing import Final
from homeassistant.core import HomeAssistant
from homeassistant.components import input_boolean, input_number
from homeassistant.util import dt
from homeassistant.components.climate.const import (
DOMAIN as CLIMATE,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.core import DOMAIN as HASS_DOMAIN, CoreState, State, callback
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from pytest_homeassistant_custom_component.common import (
AsyncMock,
Mock,
MockConfigEntry,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from custom_components.dual_smart_thermostat.const import *
from custom_components.dual_smart_thermostat import (
DOMAIN as DUAL_SMART_THERMOSTAT,
)
ENT_SWITCH = "switch.test"
HEAT_ENTITY = "climate.test_heat"
COOL_ENTITY = "climate.test_cool"
ATTR_AWAY_MODE = "away_mode"
MIN_TEMP = 3.0
MAX_TEMP = 65.0
TARGET_TEMP = 42.0
COLD_TOLERANCE = 0.5
HOT_TOLERANCE = 0.5
TARGET_TEMP_STEP = 0.5
SERVICE_SET_TEMPERATURE = "set_temperature"
SERVICE_SET_HVAC_MODE = "set_hvac_mode"
INPUT_SET_VALUE = "set_value"
ENTITY_MATCH_ALL: Final = "all"
ATTR_AUX_HEAT = "aux_heat"
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_CURRENT_TEMPERATURE = "current_temperature"
ATTR_FAN_MODES = "fan_modes"
ATTR_FAN_MODE = "fan_mode"
ATTR_PRESET_MODE = "preset_mode"
ATTR_PRESET_MODES = "preset_modes"
ATTR_HUMIDITY = "humidity"
ATTR_MAX_HUMIDITY = "max_humidity"
ATTR_MIN_HUMIDITY = "min_humidity"
ATTR_MAX_TEMP = "max_temp"
ATTR_MIN_TEMP = "min_temp"
ATTR_HVAC_ACTION = "hvac_action"
ATTR_HVAC_MODES = "hvac_modes"
ATTR_HVAC_MODE = "hvac_mode"
ATTR_SWING_MODES = "swing_modes"
ATTR_SWING_MODE = "swing_mode"
ATTR_TARGET_TEMP_HIGH = "target_temp_high"
ATTR_TARGET_TEMP_LOW = "target_temp_low"
ATTR_TARGET_TEMP_STEP = "target_temp_step"
_LOGGER = logging.getLogger(__name__)
async def test_valid_conf(hass):
"""Test set up dual_smart_thermostat with valid config values."""
assert await async_setup_component(
hass,
CLIMATE,
{
"climate": {
"platform": DUAL_SMART_THERMOSTAT,
"name": "test",
"heater": CONF_HEATER,
"target_sensor": CONF_SENSOR,
}
},
)
@pytest.fixture
async def setup_comp_1(hass):
"""Initialize components."""
hass.config.units = METRIC_SYSTEM
assert await async_setup_component(hass, HASS_DOMAIN, {})
await hass.async_block_till_done()
async def test_heater_mode(hass, setup_comp_1):
"""Test thermostat heater switch in heating mode."""
heater_switch = "input_boolean.test"
assert await async_setup_component(
hass, input_boolean.DOMAIN, {"input_boolean": {"test": None}}
)
temp_input = "input_number.temp"
assert await async_setup_component(
hass,
input_number.DOMAIN,
{
"input_number": {
"temp": {"name": "test", "initial": 10, "min": 0, "max": 40, "step": 1}
}
},
)
assert await async_setup_component(
hass,
CLIMATE,
{
"climate": {
"platform": DUAL_SMART_THERMOSTAT,
"name": "test",
"heater": heater_switch,
"target_sensor": temp_input,
"initial_hvac_mode": HVACMode.HEAT,
}
},
)
await hass.async_block_till_done()
assert hass.states.get(heater_switch).state == STATE_OFF
_setup_sensor(hass, temp_input, 18)
await hass.async_block_till_done()
await async_set_temperature(hass, 23)
assert hass.states.get(heater_switch).state == STATE_ON
async def test_cooler_mode(hass, setup_comp_1):
"""Test thermostat cooler switch in cooling mode."""
cooler_switch = "input_boolean.test"
assert await async_setup_component(
hass, input_boolean.DOMAIN, {"input_boolean": {"test": None}}
)
temp_input = "input_number.temp"
assert await async_setup_component(
hass,
input_number.DOMAIN,
{
"input_number": {
"temp": {"name": "test", "initial": 10, "min": 0, "max": 40, "step": 1}
}
},
)
assert await async_setup_component(
hass,
CLIMATE,
{
"climate": {
"platform": DUAL_SMART_THERMOSTAT,
"name": "test",
"heater": cooler_switch,
"ac_mode": "true",
"target_sensor": temp_input,
"initial_hvac_mode": HVACMode.COOL,
}
},
)
await hass.async_block_till_done()
assert hass.states.get(cooler_switch).state == STATE_OFF
_setup_sensor(hass, temp_input, 23)
await hass.async_block_till_done()
await async_set_temperature(hass, 18)
assert hass.states.get(cooler_switch).state == STATE_ON
async def test_heater_cooler_mode(hass, setup_comp_1):
"""Test thermostat heater and cooler switch in heat/cool mode."""
heater_switch = "input_boolean.heater"
cooler_switch = "input_boolean.cooler"
assert await async_setup_component(
hass,
input_boolean.DOMAIN,
{"input_boolean": {"heater": None, "cooler": None}},
)
temp_input = "input_number.temp"
assert await async_setup_component(
hass,
input_number.DOMAIN,
{
"input_number": {
"temp": {"name": "test", "initial": 10, "min": 0, "max": 40, "step": 1}
}
},
)
assert await async_setup_component(
hass,
CLIMATE,
{
"climate": {
"platform": DUAL_SMART_THERMOSTAT,
"name": "test",
"cooler": cooler_switch,
"heater": heater_switch,
"target_sensor": temp_input,
"initial_hvac_mode": HVACMode.HEAT_COOL,
}
},
)
await hass.async_block_till_done()
assert hass.states.get(heater_switch).state == STATE_OFF
assert hass.states.get(cooler_switch).state == STATE_OFF
_setup_sensor(hass, temp_input, 26)
await hass.async_block_till_done()
await async_set_temperature(hass, 18, "all", 25, 22)
assert hass.states.get(heater_switch).state == STATE_OFF
assert hass.states.get(cooler_switch).state == STATE_ON
_setup_sensor(hass, temp_input, 24)
await hass.async_block_till_done()
await async_set_temperature(hass, 18, "all", 25, 22)
assert hass.states.get(heater_switch).state == STATE_OFF
assert hass.states.get(cooler_switch).state == STATE_OFF
_setup_sensor(hass, temp_input, 18)
await hass.async_block_till_done()
await async_set_temperature(hass, 18, "all", 25, 22)
assert hass.states.get(heater_switch).state == STATE_ON
assert hass.states.get(cooler_switch).state == STATE_OFF
async def test_heater_cooler_switch_hvac_modes(hass, setup_comp_1):
"""Test thermostat heater and cooler switch to heater only mode."""
heater_switch = "input_boolean.heater"
cooler_switch = "input_boolean.cooler"
assert await async_setup_component(
hass,
input_boolean.DOMAIN,
{"input_boolean": {"heater": None, "cooler": None}},
)
temp_input = "input_number.temp"
assert await async_setup_component(
hass,
input_number.DOMAIN,
{
"input_number": {
"temp": {"name": "test", "initial": 10, "min": 0, "max": 40, "step": 1}
}
},
)
assert await async_setup_component(
hass,
CLIMATE,
{
"climate": {
"platform": DUAL_SMART_THERMOSTAT,
"name": "test",
"cooler": cooler_switch,
"heater": heater_switch,
"target_sensor": temp_input,
"initial_hvac_mode": HVACMode.HEAT_COOL,
}
},
)
await hass.async_block_till_done()
assert hass.states.get(heater_switch).state == STATE_OFF
assert hass.states.get(cooler_switch).state == STATE_OFF
_setup_sensor(hass, temp_input, 26)
await hass.async_block_till_done()
await async_set_hvac_mode(hass, "all", HVACMode.HEAT)
assert hass.states.get("climate.test").state == HVAC_MODE_HEAT
await async_set_hvac_mode(hass, "all", HVACMode.COOL)
assert hass.states.get("climate.test").state == HVAC_MODE_COOL
def _setup_sensor(hass, sensor, temp):
"""Set up the test sensor."""
hass.states.async_set(sensor, temp)
async def async_set_temperature(
hass,
temperature=None,
entity_id="all",
target_temp_high=None,
target_temp_low=None,
hvac_mode=None,
):
"""Set new target temperature."""
kwargs = {
key: value
for key, value in [
(ATTR_TEMPERATURE, temperature),
(ATTR_TARGET_TEMP_HIGH, target_temp_high),
(ATTR_TARGET_TEMP_LOW, target_temp_low),
(ATTR_ENTITY_ID, entity_id),
(ATTR_HVAC_MODE, hvac_mode),
]
if value is not None
}
_LOGGER.debug("set_temperature start data=%s", kwargs)
await hass.services.async_call(
CLIMATE, SERVICE_SET_TEMPERATURE, kwargs, blocking=True
)
async def async_set_hvac_mode(
hass,
entity_id="all",
hvac_mode=HVACMode.OFF,
):
"""Set new HVAC mode."""
kwargs = {
key: value
for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_HVAC_MODE, hvac_mode),
]
if value is not None
}
_LOGGER.debug("%s start data=%s", SERVICE_SET_HVAC_MODE, kwargs)
await hass.services.async_call(
CLIMATE, SERVICE_SET_HVAC_MODE, kwargs, blocking=True
)
| 28.207756
| 87
| 0.646077
|
08d6bcea50bd0845304c14dcceacc0c8d341627c
| 3,225
|
py
|
Python
|
gpytorch/kernels/product_structure_kernel.py
|
pierocor/gpytorch
|
c58dc3a77c7be7ee4434484e359a0bbc1c2d27f6
|
[
"MIT"
] | null | null | null |
gpytorch/kernels/product_structure_kernel.py
|
pierocor/gpytorch
|
c58dc3a77c7be7ee4434484e359a0bbc1c2d27f6
|
[
"MIT"
] | null | null | null |
gpytorch/kernels/product_structure_kernel.py
|
pierocor/gpytorch
|
c58dc3a77c7be7ee4434484e359a0bbc1c2d27f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from typing import Optional, Tuple
from ..lazy import lazify
from .kernel import Kernel
class ProductStructureKernel(Kernel):
r"""
A Kernel decorator for kernels with product structure. If a kernel decomposes
multiplicatively, then this module will be much more computationally efficient.
A kernel function `k` has product structure if it can be written as
.. math::
\begin{equation*}
k(\mathbf{x_1}, \mathbf{x_2}) = k'(x_1^{(1)}, x_2^{(1)}) * \ldots * k'(x_1^{(d)}, x_2^{(d)})
\end{equation*}
for some kernel :math:`k'` that operates on each dimension.
Given a `b x n x d` input, `ProductStructureKernel` computes `d` one-dimensional kernels
(using the supplied base_kernel), and then multiplies the component kernels together.
Unlike :class:`~gpytorch.kernels.ProductKernel`, `ProductStructureKernel` computes each
of the product terms in batch, making it very fast.
See `Product Kernel Interpolation for Scalable Gaussian Processes`_ for more detail.
Args:
- :attr:`base_kernel` (Kernel):
The kernel to approximate with KISS-GP
- :attr:`num_dims` (int):
The dimension of the input data.
- :attr:`active_dims` (tuple of ints, optional):
Passed down to the `base_kernel`.
.. _Product Kernel Interpolation for Scalable Gaussian Processes:
https://arxiv.org/pdf/1802.08903
"""
@property
def is_stationary(self) -> bool:
"""
Kernel is stationary if the base kernel is stationary.
"""
return self.base_kernel.is_stationary
def __init__(
self, base_kernel: Kernel, num_dims: int, active_dims: Optional[Tuple[int, ...]] = None,
):
super(ProductStructureKernel, self).__init__(active_dims=active_dims)
self.base_kernel = base_kernel
self.num_dims = num_dims
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, **params):
if last_dim_is_batch:
raise RuntimeError("ProductStructureKernel does not accept the last_dim_is_batch argument.")
res = self.base_kernel(x1, x2, diag=diag, last_dim_is_batch=True, **params)
res = res.prod(-2 if diag else -3)
return res
def num_outputs_per_input(self, x1, x2):
return self.base_kernel.num_outputs_per_input(x1, x2)
def __call__(self, x1_, x2_=None, diag=False, last_dim_is_batch=False, **params):
"""
We cannot lazily evaluate actual kernel calls when using SKIP, because we
cannot root decompose rectangular matrices.
        Because we slice into the kernel during prediction to get the test x train
covar before calling evaluate_kernel, the order of operations would mean we
would get a MulLazyTensor representing a rectangular matrix, which we
cannot matmul with because we cannot root decompose it. Thus, SKIP actually
*requires* that we work with the full (train + test) x (train + test)
kernel matrix.
"""
res = super().__call__(x1_, x2_, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
res = lazify(res).evaluate_kernel()
return res
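# A minimal usage sketch (for illustration only; assumes torch is available and
# uses gpytorch's standard RBFKernel as the base kernel; the random input `x`
# is purely illustrative):
def _product_structure_kernel_example():
    import torch
    from gpytorch.kernels import RBFKernel

    base = RBFKernel()
    kernel = ProductStructureKernel(base_kernel=base, num_dims=4)
    x = torch.randn(10, 4)
    covar = kernel(x, x)      # lazily-evaluated 10 x 10 covariance
    return covar.evaluate()   # materialize as a dense tensor if needed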
| 38.855422
| 104
| 0.672248
|
aebf2ebd8fe9408e72ffa8037a467570120bf4e8
| 6,330
|
py
|
Python
|
securetea/lib/log_monitor/system_log/failed_login.py
|
neerajv18/SecureTea-Project
|
e999cbe7c8e497c69b76b4c886de0d063169ea03
|
[
"MIT"
] | 257
|
2018-03-28T12:43:20.000Z
|
2022-03-29T07:07:23.000Z
|
securetea/lib/log_monitor/system_log/failed_login.py
|
neerajv18/SecureTea-Project
|
e999cbe7c8e497c69b76b4c886de0d063169ea03
|
[
"MIT"
] | 155
|
2018-03-31T14:57:46.000Z
|
2022-03-17T18:12:41.000Z
|
securetea/lib/log_monitor/system_log/failed_login.py
|
neerajv18/SecureTea-Project
|
e999cbe7c8e497c69b76b4c886de0d063169ea03
|
[
"MIT"
] | 132
|
2018-03-27T06:25:20.000Z
|
2022-03-28T11:32:45.000Z
|
# -*- coding: utf-8 -*-
u"""Failed login module for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jun 1 2019
Version: 1.3
Module: SecureTea
"""
import re
import time
from securetea.lib.log_monitor.system_log import utils
from securetea import logger
class FailedLogin(object):
"""FailedLogin Class."""
def __init__(self, debug=False):
"""
Initialize FailedLogin.
Args:
debug (bool): Log on terminal or not
Raises:
None
Returns:
None
Working:
- Detect login attempts
- Detect password brute-force
"""
# Initialize logger
self.logger = logger.SecureTeaLogger(
__name__,
debug=debug
)
# OS name to auth-log path map
self.system_log_map = {
"debian": "/var/log/auth.log"
}
os_name = utils.categorize_os()
self.log_file = None
if os_name:
try:
self.log_file = self.system_log_map[os_name]
except KeyError:
self.logger.log(
"Could not find path for the auth-log file",
logtype="error"
)
return
else:
return
        # Salt (separator) used to build per-user, per-day keys
self.SALT = "<!@?>"
# Regex to extract details
self.AUTH_FAILURE = r'^[a-zA-Z]+.*authentication failure.*\s'
self.USERNAME = r'(user=)([a-zA-Z0-9]+)'
self.MESSAGE_REPEAT = r'message repeated\s([0-9]+)'
# Initialize user to login attempt count dict
self.user_to_count = dict()
# Set threshold to 5 attempts per second to detect brute-force
        self.THRESHOLD = 5  # attempts per second, i.e. an average gap of 0.2 s or less
def parse_log_file(self):
"""
Parse the log file to extract
authentication failure / login attempts.
Args:
None
Raises:
None
Returns:
None
"""
# Open the log file
log_data = utils.open_file(self.log_file)
for line in log_data:
found = re.findall(self.AUTH_FAILURE, line)
if (found is not None and found != []):
username = re.findall(self.USERNAME, found[0])[0][1]
data_in_list = found[0].split(" ")
if data_in_list[1] != "": # if double digit day
month = data_in_list[0]
day = data_in_list[1]
last_time = data_in_list[2]
date = month + " " + day
else: # if single digit day
month = data_in_list[0]
day = data_in_list[2]
last_time = data_in_list[3]
date = month + " " + day
# convert date, time to epoch time
epoch_time = utils.get_epoch_time(month, day, last_time)
count = 1 # number of attempts (by default is 1)
message_repeated = re.findall(self.MESSAGE_REPEAT, found[0])
if message_repeated != []:
count = int(message_repeated[0])
# update user_to_count dict
self.update_user_dict(username, date, epoch_time, count)
def update_user_dict(self, username, date, epoch_time, count):
"""
Update username to attempts dict with
the new number of failure attempts.
Args:
username (str): Name of the user
date (str): Date (eg. Jun 1)
epoch_time (int): Time during the attempt in epoch format
count (int): Number of attempts made
Raises:
None
Returns:
None
"""
        # Build the per-user, per-day key using the salt separator
hashed_username = username + self.SALT + date
if self.user_to_count.get(hashed_username) is None:
# if user not in dict, add to dict
self.user_to_count[hashed_username] = {
"date": date,
"last_time": epoch_time,
"count": count
}
else:
# if user in dict, update user attempt
previous_count = self.user_to_count[hashed_username]["count"]
new_count = previous_count + count
self.user_to_count[hashed_username]["count"] = new_count
self.user_to_count[hashed_username]["last_time"] = epoch_time
def check_brute_force(self):
"""
Detect login attempts & password brute-force
by comparing ratio with the set threshold.
Args:
None
Raises:
None
Returns:
None
"""
for username in self.user_to_count.keys():
last_time = self.user_to_count[username]["last_time"]
count = self.user_to_count[username]["count"]
current_time = int(time.time())
delta_time = int(current_time - last_time)
try:
calc_threshold = count / delta_time
except ZeroDivisionError:
calc_threshold = count
if calc_threshold > self.THRESHOLD: # Brute-force detected
user = username.split(self.SALT)[0]
day = username.split(self.SALT)[1]
msg = "Too much failed login attempts: " + user + " on: " + \
day + " failed attempts: " + str(count)
self.logger.log(
msg,
logtype="warning"
)
def run(self):
"""
Start monitoring the authentication-log file
for login attempts & possible password brute-force.
Args:
None
Raises:
None
Returns:
None
"""
if self.log_file: # if path of auth-log file is valid
# Rotate & parse the log file
self.parse_log_file()
# Analyze the log for brute-force
self.check_brute_force()
# Empty the dict to rotate the log-file
self.user_to_count.clear()
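# A minimal usage sketch (for illustration only; assumes a Debian-style host
# where /var/log/auth.log exists and is readable by the current process).
# With THRESHOLD = 5, e.g. 30 failures whose last entry is 2 seconds old give
# 30 / 2 = 15 > 5, so a brute-force warning is logged.
def _failed_login_monitor_example():
    monitor = FailedLogin(debug=True)
    while True:
        monitor.run()    # parse auth.log and check for brute-force
        time.sleep(60)   # re-scan the log every minute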
| 29.44186
| 77
| 0.514534
|
952c6c77ce81e631b2c50cd2e43405411512a0c3
| 99,778
|
py
|
Python
|
keras/layers/recurrent.py
|
bklebel/keras
|
af804d0a5db9a8f20fbb083b48655b2687ce89d9
|
[
"MIT"
] | 95
|
2018-04-13T03:34:51.000Z
|
2022-03-30T10:10:28.000Z
|
_bak/v0.5/StudyTensroFlow/keras/keras/layers/recurrent.py
|
Dlaiven/DeepLearning_Wavelet-LSTM
|
1606c16005a5338333b4943f782f57311c6b5e49
|
[
"MIT"
] | 3
|
2019-07-18T11:19:53.000Z
|
2020-12-28T05:45:19.000Z
|
_bak/v0.5/StudyTensroFlow/keras/keras/layers/recurrent.py
|
Dlaiven/DeepLearning_Wavelet-LSTM
|
1606c16005a5338333b4943f782f57311c6b5e49
|
[
"MIT"
] | 35
|
2018-07-27T09:21:18.000Z
|
2021-11-30T02:13:01.000Z
|
# -*- coding: utf-8 -*-
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import warnings
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from ..utils.generic_utils import has_arg
# Legacy support.
from ..legacy.layers import Recurrent
from ..legacy import interfaces
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
# Arguments
cells: List of RNN cell instances.
# Examples
```python
cells = [
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
]
inputs = keras.Input((timesteps, input_dim))
x = keras.layers.RNN(cells)(inputs)
```
"""
def __init__(self, cells, **kwargs):
for cell in cells:
if not hasattr(cell, 'call'):
raise ValueError('All cells must have a `call` method. '
'received cells:', cells)
if not hasattr(cell, 'state_size'):
raise ValueError('All cells must have a '
'`state_size` attribute. '
'received cells:', cells)
self.cells = cells
super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
# States are a flat list
# in reverse order of the cell stack.
        # This allows us to preserve the requirement
# `stack.state_size[0] == output_dim`.
# e.g. states of a 2-layer LSTM would be
# `[h2, c2, h1, c1]`
# (assuming one LSTM has states [h, c])
state_size = []
for cell in self.cells[::-1]:
if hasattr(cell.state_size, '__len__'):
state_size += list(cell.state_size)
else:
state_size.append(cell.state_size)
return tuple(state_size)
def call(self, inputs, states, constants=None, **kwargs):
# Recover per-cell states.
nested_states = []
for cell in self.cells[::-1]:
if hasattr(cell.state_size, '__len__'):
nested_states.append(states[:len(cell.state_size)])
states = states[len(cell.state_size):]
else:
nested_states.append([states[0]])
states = states[1:]
nested_states = nested_states[::-1]
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
if has_arg(cell.call, 'constants'):
inputs, states = cell.call(inputs, states,
constants=constants,
**kwargs)
else:
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
# Format the new states as a flat list
# in reverse cell order.
states = []
for cell_states in new_nested_states[::-1]:
states += cell_states
return inputs, states
def build(self, input_shape):
if isinstance(input_shape, list):
constants_shape = input_shape[1:]
input_shape = input_shape[0]
for cell in self.cells:
if isinstance(cell, Layer):
if has_arg(cell.call, 'constants'):
cell.build([input_shape] + constants_shape)
else:
cell.build(input_shape)
if hasattr(cell.state_size, '__len__'):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = (input_shape[0], output_dim)
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({'class_name': cell.__class__.__name__,
'config': cell.get_config()})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from . import deserialize as deserialize_layer
cells = []
for cell_config in config.pop('cells'):
cells.append(deserialize_layer(cell_config,
custom_objects=custom_objects))
return cls(cells, **config)
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.non_trainable_weights
if not self.trainable:
trainable_weights = []
for cell in self.cells:
if isinstance(cell, Layer):
trainable_weights += cell.trainable_weights
return trainable_weights + weights
return weights
def get_weights(self):
"""Retrieves the weights of the model.
# Returns
A flat list of Numpy arrays.
"""
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.weights
return K.batch_get_value(weights)
def set_weights(self, weights):
"""Sets the weights of the model.
# Arguments
weights: A list of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
tuples = []
for cell in self.cells:
if isinstance(cell, Layer):
num_param = len(cell.weights)
weights = weights[:num_param]
for sw, w in zip(cell.weights, weights):
tuples.append((sw, w))
weights = weights[num_param:]
K.batch_set_value(tuples)
@property
def losses(self):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
cell_losses = cell.losses
losses += cell_losses
return losses
def get_losses_for(self, inputs=None):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
cell_losses = cell.get_losses_for(inputs)
losses += cell_losses
return losses
class RNN(Layer):
"""Base class for recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
            in which case the cells get stacked one after the other in the RNN,
implementing an efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
            Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(batch_size, timesteps, input_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
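        A minimal sketch of the stateful setup described above (for
        illustration only; `x_train` / `y_train` are placeholder arrays whose
        first dimension is a multiple of the batch size):
        ```python
            model = keras.models.Sequential()
            model.add(keras.layers.LSTM(8, stateful=True,
                                        batch_input_shape=(32, 10, 16)))
            model.add(keras.layers.Dense(1))
            model.compile(optimizer='rmsprop', loss='mse')
            model.fit(x_train, y_train, batch_size=32, shuffle=False)
            model.reset_states()
        ```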
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
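        A minimal sketch of passing a symbolic initial state (for illustration
        only; `decoder_inputs` and `encoder_states` stand for Keras tensors of
        compatible shapes):
        ```python
            decoder = keras.layers.LSTM(32, return_sequences=True)
            decoder_outputs = decoder(decoder_inputs,
                                      initial_state=encoder_states)
        ```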
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
# Examples
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(self, cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if not hasattr(cell, 'call'):
raise ValueError('`cell` should have a `call` method. '
'The RNN was passed:', cell)
if not hasattr(cell, 'state_size'):
raise ValueError('The RNN cell should have '
'an attribute `state_size` '
'(tuple of integers, '
'one integer per RNN state).')
super(RNN, self).__init__(**kwargs)
self.cell = cell
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = None
@property
def states(self):
if self._states is None:
if isinstance(self.cell.state_size, int):
num_states = 1
else:
num_states = len(self.cell.state_size)
return [None for _ in range(num_states)]
return self._states
@states.setter
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
if hasattr(self.cell.state_size, '__len__'):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
output_dim = state_size[0]
if self.return_sequences:
output_shape = (input_shape[0], input_shape[1], output_dim)
else:
output_shape = (input_shape[0], output_dim)
if self.return_state:
state_shape = [(input_shape[0], dim) for dim in state_size]
return [output_shape] + state_shape
else:
return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:]
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
input_dim = input_shape[-1]
self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if [spec.shape[-1] for spec in self.state_spec] != state_size:
raise ValueError(
'An `initial_state` was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'however `cell.state_size` is '
'{}'.format(self.state_spec, self.cell.state_size))
else:
self.state_spec = [InputSpec(shape=(None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
if hasattr(self.cell.state_size, '__len__'):
return [K.tile(initial_state, [1, dim])
for dim in self.cell.state_size]
else:
return [K.tile(initial_state, [1, self.cell.state_size])]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = self._standardize_args(
inputs, initial_state, constants)
if initial_state is None and constants is None:
return super(RNN, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
kwargs['initial_state'] = initial_state
additional_inputs += initial_state
self.state_spec = [InputSpec(shape=K.int_shape(state))
for state in initial_state]
additional_specs += self.state_spec
if constants is not None:
kwargs['constants'] = constants
additional_inputs += constants
self.constants_spec = [InputSpec(shape=K.int_shape(constant))
for constant in constants]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
for tensor in additional_inputs:
if K.is_keras_tensor(tensor) != is_keras_tensor:
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors'
' (a "Keras tensor" is a tensor that was'
' returned by a Keras layer, or by `Input`)')
if is_keras_tensor:
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
full_input_spec = self.input_spec + additional_specs
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(RNN, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' +
str(len(initial_state)) +
' initial states.')
input_shape = K.int_shape(inputs)
timesteps = input_shape[1]
if self.unroll and timesteps in [None, 1]:
raise ValueError('Cannot unroll a RNN if the '
'time dimension is undefined or equal to 1. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
kwargs = {}
if has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:]
states = states[:-self._num_constants]
return self.cell.call(inputs, states, constants=constants,
**kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append((self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_sequences:
output = outputs
else:
output = last_output
# Properly set learning phase
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
for state in states:
state._uses_learning_phase = True
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def _standardize_args(self, inputs, initial_state, constants):
"""Standardize `__call__` to a single list of tensor inputs.
When running a model loaded from file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
# Arguments
inputs: tensor or list/tuple of tensors
initial_state: tensor or list of tensors or None
constants: tensor or list of tensors or None
# Returns
inputs: tensor
initial_state: list of tensors or None
constants: list of tensors or None
"""
if isinstance(inputs, list):
assert initial_state is None and constants is None
if self._num_constants is not None:
constants = inputs[-self._num_constants:]
inputs = inputs[:-self._num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
batch_size = self.input_spec[0].shape[0]
if not batch_size:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [K.zeros((batch_size, dim))
for dim in self.cell.state_size]
else:
self.states = [K.zeros((batch_size, self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state, np.zeros((batch_size, dim)))
else:
K.set_value(self.states[0],
np.zeros((batch_size, self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' +
str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != (batch_size, dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str((batch_size, dim)) +
', found shape=' + str(value.shape))
# TODO: consider batch calls to `set_value`.
K.set_value(state, value)
def get_config(self):
config = {'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll}
if self._num_constants is not None:
config['num_constants'] = self._num_constants
cell_config = self.cell.get_config()
config['cell'] = {'class_name': self.cell.__class__.__name__,
'config': cell_config}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from . import deserialize as deserialize_layer
cell = deserialize_layer(config.pop('cell'),
custom_objects=custom_objects)
num_constants = config.pop('num_constants', None)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@property
def trainable_weights(self):
if not self.trainable:
return []
if isinstance(self.cell, Layer):
return self.cell.trainable_weights
return []
@property
def non_trainable_weights(self):
if isinstance(self.cell, Layer):
if not self.trainable:
return self.cell.weights
return self.cell.non_trainable_weights
return []
@property
def losses(self):
layer_losses = super(RNN, self).losses
if isinstance(self.cell, Layer):
return self.cell.losses + layer_losses
return layer_losses
def get_losses_for(self, inputs=None):
if isinstance(self.cell, Layer):
cell_losses = self.cell.get_losses_for(inputs)
return cell_losses + super(RNN, self).get_losses_for(inputs)
return super(RNN, self).get_losses_for(inputs)
class SimpleRNNCell(Layer):
"""Cell class for SimpleRNN.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
"""
def __init__(self, units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0]
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
if dp_mask is not None:
h = K.dot(inputs * dp_mask, self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output *= rec_dp_mask
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
# Properly set learning phase on output tensor.
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
output._uses_learning_phase = True
return output, [output]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
            Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
warnings.warn('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
if K.backend() == 'theano' and (dropout or recurrent_dropout):
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = SimpleRNNCell(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(SimpleRNN, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(SimpleRNN, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(SimpleRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
class GRUCell(Layer):
"""Cell class for the GRU layer.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
"""
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
reset_after=False,
**kwargs):
super(GRUCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.reset_after = reset_after
self.state_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU biases
# `(2 * 3 * self.units,)`, so that we can distinguish the classes
# when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(shape=bias_shape,
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
if not self.reset_after:
self.input_bias, self.recurrent_bias = self.bias, None
else:
# NOTE: need to flatten, since slicing in CNTK gives 2D array
self.input_bias = K.flatten(self.bias[0])
self.recurrent_bias = K.flatten(self.bias[1])
else:
self.bias = None
# update gate
self.kernel_z = self.kernel[:, :self.units]
self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
# reset gate
self.kernel_r = self.kernel[:, self.units: self.units * 2]
self.recurrent_kernel_r = self.recurrent_kernel[:,
self.units:
self.units * 2]
# new gate
self.kernel_h = self.kernel[:, self.units * 2:]
self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
if self.use_bias:
# bias for inputs
self.input_bias_z = self.input_bias[:self.units]
self.input_bias_r = self.input_bias[self.units: self.units * 2]
self.input_bias_h = self.input_bias[self.units * 2:]
# bias for hidden state - just for compatibility with CuDNN
if self.reset_after:
self.recurrent_bias_z = self.recurrent_bias[:self.units]
self.recurrent_bias_r = self.recurrent_bias[self.units: self.units * 2]
self.recurrent_bias_h = self.recurrent_bias[self.units * 2:]
else:
self.input_bias_z = None
self.input_bias_r = None
self.input_bias_h = None
if self.reset_after:
self.recurrent_bias_z = None
self.recurrent_bias_r = None
self.recurrent_bias_h = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=3)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=3)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel_z)
x_r = K.dot(inputs_r, self.kernel_r)
x_h = K.dot(inputs_h, self.kernel_h)
if self.use_bias:
x_z = K.bias_add(x_z, self.input_bias_z)
x_r = K.bias_add(x_r, self.input_bias_r)
x_h = K.bias_add(x_h, self.input_bias_h)
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel_z)
recurrent_r = K.dot(h_tm1_r, self.recurrent_kernel_r)
if self.reset_after and self.use_bias:
recurrent_z = K.bias_add(recurrent_z, self.recurrent_bias_z)
recurrent_r = K.bias_add(recurrent_r, self.recurrent_bias_r)
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel_h)
if self.use_bias:
recurrent_h = K.bias_add(recurrent_h, self.recurrent_bias_h)
recurrent_h = r * recurrent_h
else:
recurrent_h = K.dot(r * h_tm1_h, self.recurrent_kernel_h)
hh = self.activation(x_h + recurrent_h)
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x = K.bias_add(matrix_x, self.input_bias)
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
x_h = matrix_x[:, 2 * self.units:]
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner = K.bias_add(matrix_inner, self.recurrent_bias)
else:
# hidden state projected separately for update/reset and new
matrix_inner = K.dot(h_tm1,
self.recurrent_kernel[:, :2 * self.units])
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units: 2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * matrix_inner[:, 2 * self.units:]
else:
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation,
'reset_after': self.reset_after}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
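# A minimal usage sketch for GRUCell (illustrative only, not part of the
# library; kept as comments so that importing this module is unaffected).
# It assumes the standard Keras functional API:
#
#   from keras.layers import Input, RNN
#   from keras.models import Model
#
#   cell = GRUCell(32)
#   x = Input(shape=(10, 8))     # 10 timesteps, 8 features
#   y = RNN(cell)(x)             # wrap the cell in the generic RNN layer
#   model = Model(x, y)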
class GRU(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
    There are two variants. The default one is based on 1406.1078v3 and
    applies the reset gate to the hidden state before the matrix
    multiplication. The other one is based on the original 1406.1078v1 and
    has the order reversed.
    The second variant is compatible with CuDNNGRU (GPU-only) and allows
    inference on CPU. It therefore has separate biases for `kernel` and
    `recurrent_kernel`. To use this variant, pass `reset_after=True` and
    `recurrent_activation='sigmoid'` (a commented sketch follows this
    docstring).
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
            Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
# References
- [Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation](https://arxiv.org/abs/1406.1078)
- [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=False,
**kwargs):
if implementation == 0:
warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
'Please update your layer call.')
if K.backend() == 'theano' and (dropout or recurrent_dropout):
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = GRUCell(units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after)
super(GRU, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(GRU, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation,
'reset_after': self.reset_after}
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
class LSTMCell(Layer):
"""Cell class for the LSTM layer.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).x
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
"""
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = (self.units, self.units)
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
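                # Jozefowicz et al. trick: build the bias as
                # [input gate | forget gate = 1 | cell and output gates]
                # so that the forget gate starts open.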
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
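        # Slice the fused kernels into per-gate views; the column blocks are
        # ordered as input (i), forget (f), cell (c), output (o).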
self.kernel_i = self.kernel[:, :self.units]
self.kernel_f = self.kernel[:, self.units: self.units * 2]
self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
self.kernel_o = self.kernel[:, self.units * 3:]
self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.units]
self.bias_f = self.bias[self.units: self.units * 2]
self.bias_c = self.bias[self.units * 2: self.units * 3]
self.bias_o = self.bias[self.units * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=4)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=4)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
x_i = K.dot(inputs_i, self.kernel_i)
x_f = K.dot(inputs_f, self.kernel_f)
x_c = K.dot(inputs_c, self.kernel_c)
x_o = K.dot(inputs_o, self.kernel_o)
if self.use_bias:
x_i = K.bias_add(x_i, self.bias_i)
x_f = K.bias_add(x_f, self.bias_f)
x_c = K.bias_add(x_c, self.bias_c)
x_o = K.bias_add(x_o, self.bias_o)
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
self.recurrent_kernel_i))
f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
self.recurrent_kernel_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
self.recurrent_kernel_c))
o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
self.recurrent_kernel_o))
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
z = K.dot(inputs, self.kernel)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
z += K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units: 2 * self.units]
z2 = z[:, 2 * self.units: 3 * self.units]
z3 = z[:, 3 * self.units:]
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h, c]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class LSTM(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
            Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
# References
- [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
- [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
"""
@interfaces.legacy_recurrent_support
def __init__(self, units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
'Please update your layer call.')
if K.backend() == 'theano' and (dropout or recurrent_dropout):
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
cell = LSTMCell(units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(LSTM, self).__init__(cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(LSTM, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
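# A minimal usage sketch for the LSTM layer (illustrative only, not part of
# the library; kept as comments so that importing this module is unaffected).
# It assumes the standard Sequential API:
#
#   from keras.models import Sequential
#
#   model = Sequential()
#   model.add(LSTM(32, input_shape=(10, 8)))    # 10 timesteps, 8 features
#   model.compile(optimizer='adam', loss='mse')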
def _generate_dropout_ones(inputs, dims):
# Currently, CNTK can't instantiate `ones` with symbolic shapes.
# Will update workaround once CNTK supports it.
if K.backend() == 'cntk':
ones = K.ones_like(K.reshape(inputs[:, 0], (-1, 1)))
return K.tile(ones, (1, dims))
else:
return K.ones((K.shape(inputs)[0], dims))
def _generate_dropout_mask(ones, rate, training=None, count=1):
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [K.in_train_phase(
dropped_inputs,
ones,
training=training) for _ in range(count)]
return K.in_train_phase(
dropped_inputs,
ones,
training=training)
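# Hedged note on the two helpers above: the cells request `count` masks at
# once so that each gate sees its own dropout pattern, e.g. (illustrative):
#
#   ones = _generate_dropout_ones(inputs, units)
#   masks = _generate_dropout_mask(ones, 0.2, training=training, count=3)
#
# masks[0], masks[1] and masks[2] are then applied to the z, r and h paths in
# GRUCell.call; LSTMCell.call uses count=4 for the i, f, c and o gates.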
| 43.858462
| 138
| 0.584888
|
600351c607f78535763f781810fa2fc29740ea79
| 5,532
|
py
|
Python
|
docs/conf.py
|
edaniszewski/sanic-healthcheck
|
25acc8c7fe71a9802ad83f95c5ed01354fc868fb
|
[
"MIT"
] | 3
|
2020-07-17T05:19:17.000Z
|
2022-02-20T04:16:48.000Z
|
docs/conf.py
|
edaniszewski/sanic-healthcheck
|
25acc8c7fe71a9802ad83f95c5ed01354fc868fb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
edaniszewski/sanic-healthcheck
|
25acc8c7fe71a9802ad83f95c5ed01354fc868fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from sanic_healthcheck import __version__
import datetime
# -- Project information -----------------------------------------------------
project = 'sanic-healthcheck'
year = datetime.datetime.now().year
copyright = '{}, Erick Daniszewski'.format(year)
author = 'Erick Daniszewski'
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sanic-healthcheckdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sanic-healthcheck.tex', 'sanic-healthcheck Documentation',
'Erick Daniszewski', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sanic-healthcheck', 'sanic-healthcheck Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sanic-healthcheck', 'sanic-healthcheck Documentation',
author, 'sanic-healthcheck', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
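# A hedged usage note (not part of the original config): with this layout the
# HTML documentation is typically built from the docs/ directory with, e.g.:
#
#   sphinx-build -b html . _build/html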
| 30.065217
| 79
| 0.656182
|
34fb8851360b5329fca319a7e14fa92489db13e8
| 3,458
|
py
|
Python
|
Algorithm.Framework/Alphas/InterestReleaseAlphaModel.py
|
QilongChan/Lean
|
9950634cff67c415045219db4d2f7db5e1ca1e1e
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Framework/Alphas/InterestReleaseAlphaModel.py
|
QilongChan/Lean
|
9950634cff67c415045219db4d2f7db5e1ca1e1e
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Framework/Alphas/InterestReleaseAlphaModel.py
|
QilongChan/Lean
|
9950634cff67c415045219db4d2f7db5e1ca1e1e
|
[
"Apache-2.0"
] | null | null | null |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Data.Custom.TradingEconomics import *
class InterestReleaseAlphaModel(AlphaModel):
    '''Alpha model that uses the interest rate released by the Fed to create insights'''
def __init__(self, algorithm, period = 30, resolution = Resolution.Daily):
'''
Initializes a new instance of the InterestReleaseAlphaModel class
        Args:
            algorithm: The algorithm instance
            period: The prediction period
            resolution: The data resolution
'''
self.predictionInterval = Time.Multiply(Extensions.ToTimeSpan(resolution), period)
self.pairs = [] # forex universe
self.calendar = algorithm.AddData(TradingEconomicsCalendar, TradingEconomics.Calendar.UnitedStates.InterestRate).Symbol
resolutionString = Extensions.GetEnumString(resolution, Resolution)
self.Name = f'{self.__class__.__name__}({period},{resolutionString})'
def Update(self, algorithm, data):
'''
Updates this alpha model with the latest data from the algorithm.
This is called each time the algorithm receives data for subscribed securities
Args:
algorithm: The algorithm instance
data: The new data available
Returns:
The new insights generated
'''
if not data.ContainsKey(self.calendar):
return []
insights = []
fore_IR = data[self.calendar].Forecast # Forecast Interest Rate
prev_IR = data[self.calendar].Previous # Previous released actual Interest Rate
usdValueUp = fore_IR >= prev_IR
for pair in self.pairs:
direction = InsightDirection.Down
if (pair.Value.startswith("USD") and usdValueUp) or (pair.Value.endswith("USD") and not usdValueUp):
direction = InsightDirection.Up
insights.append(Insight.Price(pair, self.predictionInterval, direction))
return insights
def OnSecuritiesChanged(self, algorithm, changes):
'''
        Event fired each time we add securities from the data feed
Args:
algorithm: The algorithm instance that experienced the change in securities
changes: The security additions and removals from the algorithm
'''
self.pairs = [ x.Symbol for x in changes.AddedSecurities ]
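# A hedged wiring sketch (illustrative only, not part of this file; the
# algorithm class name is hypothetical). Inside a Lean algorithm the model is
# typically attached through the framework hooks, e.g.:
#
#   class InterestReleaseDemoAlgorithm(QCAlgorithm):
#       def Initialize(self):
#           self.SetStartDate(2019, 1, 1)
#           self.AddForex("EURUSD", Resolution.Daily)
#           self.SetAlpha(InterestReleaseAlphaModel(self))
#
# The forex symbols added to the universe reach the model through
# OnSecuritiesChanged, and Update then emits an up or down insight for each
# pair whenever a new US interest-rate calendar event arrives.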
| 42.170732
| 127
| 0.686813
|
f9e033c8ded1c547a6e004156845774f11740277
| 7,427
|
py
|
Python
|
test/test_places.py
|
lamantin/google-maps-services-python
|
396e03ce3ffc7d1d98634c9932408272cfc20c18
|
[
"Apache-2.0"
] | 1
|
2021-08-18T18:01:14.000Z
|
2021-08-18T18:01:14.000Z
|
test/test_places.py
|
lamantin/google-maps-services-python
|
396e03ce3ffc7d1d98634c9932408272cfc20c18
|
[
"Apache-2.0"
] | null | null | null |
test/test_places.py
|
lamantin/google-maps-services-python
|
396e03ce3ffc7d1d98634c9932408272cfc20c18
|
[
"Apache-2.0"
] | null | null | null |
# This Python file uses the following encoding: utf-8
#
# Copyright 2016 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Tests for the places module."""
from types import GeneratorType
import responses
import test as _test
import googlemaps
class PlacesTest(_test.TestCase):
def setUp(self):
self.key = 'AIzaasdf'
self.client = googlemaps.Client(self.key)
self.location = (-33.86746, 151.207090)
self.type = 'liquor_store'
self.language = 'en-AU'
self.region = 'AU'
self.radius = 100
@responses.activate
def test_places_text_search(self):
url = 'https://maps.googleapis.com/maps/api/place/textsearch/json'
responses.add(responses.GET, url,
body='{"status": "OK", "results": [], "html_attributions": []}',
status=200, content_type='application/json')
self.client.places('restaurant', location=self.location,
radius=self.radius, region=self.region, language=self.language,
min_price=1, max_price=4, open_now=True,
type=self.type)
self.assertEqual(1, len(responses.calls))
self.assertURLEqual('%s?language=en-AU&location=-33.86746%%2C151.20709&'
'maxprice=4&minprice=1&opennow=true&query=restaurant&'
                            'radius=100&region=AU&type=liquor_store&key=%s'
% (url, self.key), responses.calls[0].request.url)
@responses.activate
def test_places_nearby_search(self):
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json'
responses.add(responses.GET, url,
body='{"status": "OK", "results": [], "html_attributions": []}',
status=200, content_type='application/json')
self.client.places_nearby(self.location, keyword='foo',
language=self.language, min_price=1,
max_price=4, name='bar', open_now=True,
rank_by='distance', type=self.type)
self.assertEqual(1, len(responses.calls))
self.assertURLEqual('%s?keyword=foo&language=en-AU&location=-33.86746%%2C151.20709&'
'maxprice=4&minprice=1&name=bar&opennow=true&rankby=distance&'
'type=liquor_store&key=%s'
% (url, self.key), responses.calls[0].request.url)
with self.assertRaises(ValueError):
self.client.places_nearby(self.location, rank_by="distance")
with self.assertRaises(ValueError):
self.client.places_nearby(self.location, rank_by="distance",
keyword='foo', radius=self.radius)
@responses.activate
def test_places_radar_search(self):
url = 'https://maps.googleapis.com/maps/api/place/radarsearch/json'
responses.add(responses.GET, url,
body='{"status": "OK", "results": [], "html_attributions": []}',
status=200, content_type='application/json')
self.client.places_radar(self.location, self.radius, keyword='foo',
min_price=1, max_price=4, name='bar',
open_now=True, type=self.type)
self.assertEqual(1, len(responses.calls))
self.assertURLEqual('%s?keyword=foo&location=-33.86746%%2C151.20709&'
'maxprice=4&minprice=1&name=bar&opennow=true&radius=100&'
'type=liquor_store&key=%s'
% (url, self.key), responses.calls[0].request.url)
with self.assertRaises(ValueError):
self.client.places_radar(self.location, self.radius)
@responses.activate
def test_place_detail(self):
url = 'https://maps.googleapis.com/maps/api/place/details/json'
responses.add(responses.GET, url,
body='{"status": "OK", "result": {}, "html_attributions": []}',
status=200, content_type='application/json')
self.client.place('ChIJN1t_tDeuEmsRUsoyG83frY4', language=self.language)
self.assertEqual(1, len(responses.calls))
self.assertURLEqual('%s?language=en-AU&placeid=ChIJN1t_tDeuEmsRUsoyG83frY4&key=%s'
% (url, self.key), responses.calls[0].request.url)
@responses.activate
def test_photo(self):
url = 'https://maps.googleapis.com/maps/api/place/photo'
responses.add(responses.GET, url, status=200)
ref = 'CnRvAAAAwMpdHeWlXl-lH0vp7lez4znKPIWSWvgvZFISdKx45AwJVP1Qp37YOrH7sqHMJ8C-vBDC546decipPHchJhHZL94RcTUfPa1jWzo-rSHaTlbNtjh-N68RkcToUCuY9v2HNpo5mziqkir37WU8FJEqVBIQ4k938TI3e7bf8xq-uwDZcxoUbO_ZJzPxremiQurAYzCTwRhE_V0'
response = self.client.places_photo(ref, max_width=100)
self.assertTrue(isinstance(response, GeneratorType))
self.assertEqual(1, len(responses.calls))
self.assertURLEqual('%s?maxwidth=100&photoreference=%s&key=%s'
% (url, ref, self.key), responses.calls[0].request.url)
@responses.activate
def test_autocomplete(self):
url = 'https://maps.googleapis.com/maps/api/place/autocomplete/json'
responses.add(responses.GET, url,
body='{"status": "OK", "predictions": []}',
status=200, content_type='application/json')
self.client.places_autocomplete('Google', offset=3,
location=self.location,
radius=self.radius,
language=self.language,
types='geocode',
components={'country': 'au'},
strict_bounds=True)
self.assertEqual(1, len(responses.calls))
self.assertURLEqual('%s?components=country%%3Aau&input=Google&language=en-AU&'
'location=-33.86746%%2C151.20709&offset=3&radius=100&'
'strictbounds=true&types=geocode&key=%s' %
(url, self.key), responses.calls[0].request.url)
@responses.activate
def test_autocomplete_query(self):
url = 'https://maps.googleapis.com/maps/api/place/queryautocomplete/json'
responses.add(responses.GET, url,
body='{"status": "OK", "predictions": []}',
status=200, content_type='application/json')
self.client.places_autocomplete_query('pizza near New York')
self.assertEqual(1, len(responses.calls))
self.assertURLEqual('%s?input=pizza+near+New+York&key=%s' %
(url, self.key), responses.calls[0].request.url)
| 45.845679
| 227
| 0.593106
|
b846261f696abb46328f6acabd0392792442d62a
| 57,373
|
py
|
Python
|
vyper/functions/functions.py
|
erdnaag/vyper
|
22bef3a4b4161db18c7831041e20b917984cff83
|
[
"Apache-2.0"
] | 1
|
2020-07-04T01:47:26.000Z
|
2020-07-04T01:47:26.000Z
|
vyper/functions/functions.py
|
erdnaag/vyper
|
22bef3a4b4161db18c7831041e20b917984cff83
|
[
"Apache-2.0"
] | null | null | null |
vyper/functions/functions.py
|
erdnaag/vyper
|
22bef3a4b4161db18c7831041e20b917984cff83
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
import math
import operator
from decimal import Decimal
from vyper import ast as vy_ast
from vyper.ast.validation import validate_call_args
from vyper.context.types.abstract import (
ArrayValueAbstractType,
BytesAbstractType,
IntegerAbstractType,
NumericAbstractType,
)
from vyper.context.types.bases import DataLocation, ValueTypeDefinition
from vyper.context.types.indexable.sequence import ArrayDefinition
from vyper.context.types.utils import get_type_from_annotation
from vyper.context.types.value.address import AddressDefinition
from vyper.context.types.value.array_value import (
BytesArrayDefinition,
BytesArrayPrimitive,
StringDefinition,
StringPrimitive,
)
from vyper.context.types.value.bytes_fixed import Bytes32Definition
from vyper.context.types.value.numeric import (
DecimalDefinition,
Int128Definition,
Uint256Definition,
)
from vyper.context.validation.utils import (
get_common_types,
get_possible_types_from_node,
validate_expected_type,
)
from vyper.exceptions import (
ArgumentException,
CompilerPanic,
InvalidLiteral,
InvalidType,
StateAccessViolation,
StructureException,
TypeMismatch,
UnfoldableNode,
VyperException,
ZeroDivisionException,
)
from vyper.functions.convert import convert
from vyper.opcodes import version_check
from vyper.parser.expr import Expr
from vyper.parser.keccak256_helper import keccak256_helper
from vyper.parser.parser_utils import (
LLLnode,
add_variable_offset,
get_length,
get_number_as_fraction,
getpos,
make_byte_array_copier,
make_byte_slice_copier,
unwrap_location,
)
from vyper.signatures.function_signature import VariableRecord
from vyper.types import BaseType, ByteArrayLike, ByteArrayType, ListType
from vyper.types import StringType as OldStringType
from vyper.types import is_base_type
from vyper.utils import (
DECIMAL_DIVISOR,
MemoryPositions,
SizeLimits,
bytes_to_int,
fourbytes_to_int,
keccak256,
)
from .signatures import Optional, validate_inputs
SHA256_ADDRESS = 2
SHA256_BASE_GAS = 60
SHA256_PER_WORD_GAS = 12
class _SimpleBuiltinFunction:
def fetch_call_return(self, node):
validate_call_args(node, len(self._inputs), getattr(self, "_kwargs", []))
for arg, (_, expected) in zip(node.args, self._inputs):
validate_expected_type(arg, expected)
if self._return_type:
return self._return_type
class Floor(_SimpleBuiltinFunction):
_id = "floor"
_inputs = [("value", DecimalDefinition())]
_return_type = Int128Definition()
def evaluate(self, node):
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Decimal):
raise UnfoldableNode
value = math.floor(node.args[0].value)
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(
[
"if",
["slt", args[0], 0],
["sdiv", ["sub", args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],
["sdiv", args[0], DECIMAL_DIVISOR],
],
typ=BaseType("int128"),
pos=getpos(expr),
)
class Ceil(_SimpleBuiltinFunction):
_id = "ceil"
_inputs = [("value", DecimalDefinition())]
_return_type = Int128Definition()
def evaluate(self, node):
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Decimal):
raise UnfoldableNode
value = math.ceil(node.args[0].value)
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(
[
"if",
["slt", args[0], 0],
["sdiv", args[0], DECIMAL_DIVISOR],
["sdiv", ["add", args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],
],
typ=BaseType("int128"),
pos=getpos(expr),
)
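# Hedged worked example for the two classes above: `evaluate` folds literal
# arguments at compile time, so `floor(-2.5)` becomes the Int node -3 and
# `ceil(-2.5)` becomes -2, while non-literal arguments raise UnfoldableNode
# and fall through to the LLL code emitted by `build_LLL`.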
class Convert:
# TODO this is just a wireframe, expand it with complete functionality
# https://github.com/vyperlang/vyper/issues/1093
_id = "convert"
def fetch_call_return(self, node):
validate_call_args(node, 2)
target_type = get_type_from_annotation(node.args[1], DataLocation.MEMORY)
validate_expected_type(node.args[0], ValueTypeDefinition())
try:
validate_expected_type(node.args[0], target_type)
except VyperException:
pass
else:
# TODO remove this once it's possible in parser
if not isinstance(target_type, Uint256Definition):
raise InvalidType(f"Value and target type are both '{target_type}'", node)
# TODO!
# try:
# validation_fn = getattr(self, f"validate_to_{target_type._id}")
# except AttributeError:
# raise InvalidType(
# f"Unsupported destination type '{target_type}'", node.args[1]
# ) from None
# validation_fn(initial_type)
return target_type
def validate_to_bool(self, initial_type):
pass
def validate_to_decimal(self, initial_type):
pass
def validate_to_int128(self, initial_type):
pass
def validate_to_uint256(self, initial_type):
pass
def validate_to_bytes32(self, initial_type):
pass
def validate_to_string(self, initial_type):
pass
def validate_to_bytes(self, initial_type):
pass
def validate_to_address(self, initial_type):
pass
def build_LLL(self, expr, context):
return convert(expr, context)
class Slice:
_id = "slice"
_inputs = [("b", ("bytes", "bytes32", "string")), ("start", "uint256"), ("length", "uint256")]
_return_type = None
def fetch_call_return(self, node):
validate_call_args(node, 3)
for arg in node.args[1:]:
validate_expected_type(arg, Uint256Definition())
if isinstance(node.args[2], vy_ast.Int) and node.args[2].value < 1:
raise ArgumentException("Length cannot be less than 1", node.args[2])
validate_expected_type(node.args[0], (BytesAbstractType(), StringPrimitive()))
type_list = get_possible_types_from_node(node.args[0])
try:
validate_expected_type(node.args[0], StringPrimitive())
return_type = StringDefinition()
except VyperException:
return_type = BytesArrayDefinition()
if isinstance(node.args[2], vy_ast.Int):
return_type.set_length(node.args[2].value)
else:
return_type.set_min_length(type_list[0].length)
return return_type
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
sub, start, length = args
if is_base_type(sub.typ, "bytes32"):
if (start.typ.is_literal and length.typ.is_literal) and not (
0 <= start.value + length.value <= 32
):
raise InvalidLiteral(
"Invalid start / length values needs to be between 0 and 32.", expr,
)
sub_typ_maxlen = 32
else:
sub_typ_maxlen = sub.typ.maxlen
# Get returntype string or bytes
if isinstance(args[0].typ, ByteArrayType) or is_base_type(sub.typ, "bytes32"):
ReturnType = ByteArrayType
else:
ReturnType = OldStringType
# Node representing the position of the output in memory
np = context.new_placeholder(ReturnType(maxlen=sub_typ_maxlen + 32))
placeholder_node = LLLnode.from_list(np, typ=sub.typ, location="memory")
placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location="memory")
# Copies over bytearray data
if sub.location == "storage":
adj_sub = LLLnode.from_list(
["add", ["sha3_32", sub], ["add", ["div", "_start", 32], 1]],
typ=sub.typ,
location=sub.location,
)
else:
adj_sub = LLLnode.from_list(
["add", sub, ["add", ["sub", "_start", ["mod", "_start", 32]], 32]],
typ=sub.typ,
location=sub.location,
)
if is_base_type(sub.typ, "bytes32"):
adj_sub = LLLnode.from_list(sub.args[0], typ=sub.typ, location="memory")
copier = make_byte_slice_copier(
placeholder_plus_32_node,
adj_sub,
["add", "_length", 32],
sub_typ_maxlen,
pos=getpos(expr),
)
# New maximum length in the type of the result
newmaxlen = length.value if not len(length.args) else sub_typ_maxlen
if is_base_type(sub.typ, "bytes32"):
maxlen = 32
else:
maxlen = ["mload", Expr(sub, context=context).lll_node] # Retrieve length of the bytes.
out = [
"with",
"_start",
start,
[
"with",
"_length",
length,
[
"with",
"_opos",
["add", placeholder_node, ["mod", "_start", 32]],
[
"seq",
["assert", ["le", ["add", "_start", "_length"], maxlen]],
copier,
["mstore", "_opos", "_length"],
"_opos",
],
],
],
]
return LLLnode.from_list(
out, typ=ReturnType(newmaxlen), location="memory", pos=getpos(expr)
)
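# Hedged example of the type rules above: `slice(b"vyper example", 2, 5)` has
# a literal length, so `fetch_call_return` yields a bytes type of length 5;
# with a non-literal length the result's maximum length falls back to that of
# the sliced value.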
class Len(_SimpleBuiltinFunction):
_id = "len"
_inputs = [("b", ArrayValueAbstractType())]
_return_type = Uint256Definition()
def evaluate(self, node):
validate_call_args(node, 1)
arg = node.args[0]
if isinstance(arg, (vy_ast.Str, vy_ast.Bytes)):
length = len(arg.value)
elif isinstance(arg, vy_ast.Hex):
# 2 characters represent 1 byte and we subtract 1 to ignore the leading `0x`
length = len(arg.value) // 2 - 1
else:
raise UnfoldableNode
return vy_ast.Int.from_node(node, value=length)
def build_LLL(self, node, context):
arg = Expr(node.args[0], context).lll_node
return get_length(arg)
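# Editor's note (hedged example): Len.evaluate folds literal arguments at compile
# time, e.g. len(b"vyper") and len("vyper") both fold to 5, while a hex literal such
# as 0x61626364 counts bytes, not characters: (len("0x61626364") // 2) - 1 == 4.
# Non-literal arguments fall through to build_LLL, which reads the stored length word.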
class Concat:
_id = "concat"
def fetch_call_return(self, node):
if len(node.args) < 2:
raise ArgumentException("Invalid argument count: expected at least 2", node)
if node.keywords:
raise ArgumentException("Keyword arguments are not accepted here", node.keywords[0])
type_ = None
for expected in (BytesAbstractType(), StringPrimitive()):
try:
validate_expected_type(node.args[0], expected)
type_ = expected
except (InvalidType, TypeMismatch):
pass
if type_ is None:
raise TypeMismatch("Concat values must be bytes or string", node.args[0])
        for arg in node.args[1:]:
            validate_expected_type(arg, type_)
        length = 0
        for arg in node.args:
            length += get_possible_types_from_node(arg).pop().length
if isinstance(type_, BytesAbstractType):
return_type = BytesArrayDefinition()
else:
return_type = StringDefinition()
return_type.set_length(length)
return return_type
def build_LLL(self, expr, context):
args = [Expr(arg, context).lll_node for arg in expr.args]
if len(args) < 2:
raise StructureException("Concat expects at least two arguments", expr)
prev_type = ""
        for expr_arg, arg in zip(expr.args, args):
if not isinstance(arg.typ, ByteArrayLike) and not is_base_type(arg.typ, "bytes32"):
raise TypeMismatch("Concat expects string, bytes or bytes32 objects", expr_arg)
current_type = (
"bytes"
if isinstance(arg.typ, ByteArrayType) or is_base_type(arg.typ, "bytes32")
else "string"
)
if prev_type and current_type != prev_type:
raise TypeMismatch(
(
"Concat expects consistant use of string or byte types, "
"user either bytes or string."
),
expr_arg,
)
prev_type = current_type
if current_type == "string":
ReturnType = OldStringType
else:
ReturnType = ByteArrayType
# Maximum length of the output
total_maxlen = sum(
[arg.typ.maxlen if isinstance(arg.typ, ByteArrayLike) else 32 for arg in args]
)
# Node representing the position of the output in memory
placeholder = context.new_placeholder(ReturnType(total_maxlen))
# Object representing the output
seq = []
# For each argument we are concatenating...
for arg in args:
            # Start pasting into a position that starts at zero and keeps
            # incrementing as we concatenate arguments
placeholder_node = LLLnode.from_list(
["add", placeholder, "_poz"], typ=ReturnType(total_maxlen), location="memory",
)
placeholder_node_plus_32 = LLLnode.from_list(
["add", ["add", placeholder, "_poz"], 32],
typ=ReturnType(total_maxlen),
location="memory",
)
if isinstance(arg.typ, ReturnType):
# Ignore empty strings
if arg.typ.maxlen == 0:
continue
# Get the length of the current argument
if arg.location == "memory":
length = LLLnode.from_list(["mload", "_arg"], typ=BaseType("int128"))
argstart = LLLnode.from_list(
["add", "_arg", 32], typ=arg.typ, location=arg.location,
)
elif arg.location == "storage":
length = LLLnode.from_list(
["sload", ["sha3_32", "_arg"]], typ=BaseType("int128")
)
argstart = LLLnode.from_list(
["add", ["sha3_32", "_arg"], 1], typ=arg.typ, location=arg.location,
)
# Make a copier to copy over data from that argument
seq.append(
[
"with",
"_arg",
arg,
[
"seq",
make_byte_slice_copier(
placeholder_node_plus_32,
argstart,
length,
arg.typ.maxlen,
pos=getpos(expr),
),
# Change the position to start at the correct
# place to paste the next value
["set", "_poz", ["add", "_poz", length]],
],
]
)
else:
seq.append(
[
"seq",
["mstore", ["add", placeholder_node, 32], unwrap_location(arg)],
["set", "_poz", ["add", "_poz", 32]],
]
)
        # The position, after all arguments are processed, equals the total
# length. Paste this in to make the output a proper bytearray
seq.append(["mstore", placeholder, "_poz"])
# Memory location of the output
seq.append(placeholder)
return LLLnode.from_list(
["with", "_poz", 0, ["seq"] + seq],
typ=ReturnType(total_maxlen),
location="memory",
pos=getpos(expr),
annotation="concat",
)
class Keccak256(_SimpleBuiltinFunction):
_id = "keccak256"
_inputs = [("value", (Bytes32Definition(), BytesArrayPrimitive(), StringPrimitive()))]
_return_type = Bytes32Definition()
def evaluate(self, node):
validate_call_args(node, 1)
if isinstance(node.args[0], vy_ast.Bytes):
value = node.args[0].value
elif isinstance(node.args[0], vy_ast.Str):
value = node.args[0].value.encode()
elif isinstance(node.args[0], vy_ast.Hex):
length = len(node.args[0].value) // 2 - 1
value = int(node.args[0].value, 16).to_bytes(length, "big")
else:
raise UnfoldableNode
hash_ = f"0x{keccak256(value).hex()}"
return vy_ast.Hex.from_node(node, value=hash_)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return keccak256_helper(expr, args, kwargs, context)
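# Editor's note (hedged example): like Len, Keccak256.evaluate folds literal input at
# compile time using the same keccak256 helper the compiler uses elsewhere.  For
# example, keccak256(b"") should fold to the well-known empty-input digest
# 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470.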
def _make_sha256_call(inp_start, inp_len, out_start, out_len):
return [
"assert",
[
"staticcall",
["gas"], # gas
SHA256_ADDRESS, # address
inp_start,
inp_len,
out_start,
out_len,
],
]
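# Editor's note (assumption): SHA256_ADDRESS is expected to be 2, the address of the
# EVM SHA-256 precompile, so _make_sha256_call emits a STATICCALL of the form
#   staticcall(gas, 0x02, inp_start, inp_len, out_start, 32)
# wrapped in an "assert" so that a failed precompile call reverts instead of silently
# leaving stale data in the output slot.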
class Sha256(_SimpleBuiltinFunction):
_id = "sha256"
_inputs = [("value", (Bytes32Definition(), BytesArrayPrimitive(), StringPrimitive()))]
_return_type = Bytes32Definition()
def evaluate(self, node):
validate_call_args(node, 1)
if isinstance(node.args[0], vy_ast.Bytes):
value = node.args[0].value
elif isinstance(node.args[0], vy_ast.Str):
value = node.args[0].value.encode()
elif isinstance(node.args[0], vy_ast.Hex):
length = len(node.args[0].value) // 2 - 1
value = int(node.args[0].value, 16).to_bytes(length, "big")
else:
raise UnfoldableNode
hash_ = f"0x{hashlib.sha256(value).hexdigest()}"
return vy_ast.Hex.from_node(node, value=hash_)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
sub = args[0]
# bytes32 input
if is_base_type(sub.typ, "bytes32"):
return LLLnode.from_list(
[
"seq",
["mstore", MemoryPositions.FREE_VAR_SPACE, sub],
_make_sha256_call(
inp_start=MemoryPositions.FREE_VAR_SPACE,
inp_len=32,
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32,
),
["mload", MemoryPositions.FREE_VAR_SPACE], # push value onto stack
],
typ=BaseType("bytes32"),
pos=getpos(expr),
add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS,
)
        # bytearray-like input
if sub.location == "storage":
# Copy storage to memory
placeholder = context.new_placeholder(sub.typ)
placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location="memory")
copier = make_byte_array_copier(
placeholder_node, LLLnode.from_list("_sub", typ=sub.typ, location=sub.location),
)
return LLLnode.from_list(
[
"with",
"_sub",
sub,
[
"seq",
copier,
_make_sha256_call(
inp_start=["add", placeholder, 32],
inp_len=["mload", placeholder],
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32,
),
["mload", MemoryPositions.FREE_VAR_SPACE],
],
],
typ=BaseType("bytes32"),
pos=getpos(expr),
add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS,
)
elif sub.location == "memory":
return LLLnode.from_list(
[
"with",
"_sub",
sub,
[
"seq",
_make_sha256_call(
inp_start=["add", "_sub", 32],
inp_len=["mload", "_sub"],
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32,
),
["mload", MemoryPositions.FREE_VAR_SPACE],
],
],
typ=BaseType("bytes32"),
pos=getpos(expr),
add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS,
)
else:
# This should never happen, but just left here for future compiler-writers.
raise Exception(f"Unsupported location: {sub.location}") # pragma: no test
class MethodID:
_id = "method_id"
def evaluate(self, node):
validate_call_args(node, 1, ["output_type"])
args = node.args
if not isinstance(args[0], vy_ast.Str):
raise InvalidType("method id must be given as a literal string", args[0])
if " " in args[0].value:
raise InvalidLiteral("Invalid function signature - no spaces allowed.")
if node.keywords:
return_type = get_type_from_annotation(node.keywords[0].value, DataLocation.UNSET)
if isinstance(return_type, Bytes32Definition):
length = 32
elif isinstance(return_type, BytesArrayDefinition) and return_type.length == 4:
length = 4
else:
raise ArgumentException("output_type must be bytes[4] or bytes32", node.keywords[0])
else:
# if `output_type` is not given, default to `bytes[4]`
length = 4
method_id = fourbytes_to_int(keccak256(args[0].value.encode())[:4])
value = method_id.to_bytes(length, "big")
if length == 32:
return vy_ast.Hex.from_node(node, value=f"0x{value.hex()}")
elif length == 4:
return vy_ast.Bytes.from_node(node, value=value)
else:
raise CompilerPanic
def fetch_call_return(self, node):
raise CompilerPanic("method_id should always be folded")
def build_LLL(self, *args, **kwargs):
raise CompilerPanic("method_id should always be folded")
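# Editor's note (hedged example): method_id("transfer(address,uint256)") folds to the
# familiar 4-byte ERC-20 selector b"\xa9\x05\x9c\xbb" (the first four bytes of the
# keccak-256 of the signature string).  With output_type=bytes32 the same selector is
# left-padded to 32 bytes via method_id.to_bytes(32, "big"), i.e. 0x00...00a9059cbb.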
class ECRecover(_SimpleBuiltinFunction):
_id = "ecrecover"
_inputs = [
("hash", Bytes32Definition()),
("v", Uint256Definition()),
("r", Uint256Definition()),
("s", Uint256Definition()),
]
_return_type = AddressDefinition()
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location="memory"
)
return LLLnode.from_list(
[
"seq",
["mstore", placeholder_node, args[0]],
["mstore", ["add", placeholder_node, 32], args[1]],
["mstore", ["add", placeholder_node, 64], args[2]],
["mstore", ["add", placeholder_node, 96], args[3]],
[
"pop",
[
"staticcall",
["gas"],
1,
placeholder_node,
128,
MemoryPositions.FREE_VAR_SPACE,
32,
],
],
["mload", MemoryPositions.FREE_VAR_SPACE],
],
typ=BaseType("address"),
pos=getpos(expr),
)
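# Editor's note: address 1 in the staticcall above is the ecrecover precompile.  The
# 128-byte input is the packed (hash, v, r, s) words written into the placeholder, and
# the 32-byte output word holds the recovered address, right-aligned.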
def avo(arg, ind, pos):
return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, "int128"), pos=pos))
class ECAdd(_SimpleBuiltinFunction):
_id = "ecadd"
_inputs = [
("a", ArrayDefinition(Uint256Definition(), 2)),
("b", ArrayDefinition(Uint256Definition(), 2)),
]
_return_type = ArrayDefinition(Uint256Definition(), 2)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location="memory"
)
pos = getpos(expr)
o = LLLnode.from_list(
[
"seq",
["mstore", placeholder_node, avo(args[0], 0, pos)],
["mstore", ["add", placeholder_node, 32], avo(args[0], 1, pos)],
["mstore", ["add", placeholder_node, 64], avo(args[1], 0, pos)],
["mstore", ["add", placeholder_node, 96], avo(args[1], 1, pos)],
["assert", ["staticcall", ["gas"], 6, placeholder_node, 128, placeholder_node, 64]],
placeholder_node,
],
typ=ListType(BaseType("uint256"), 2),
pos=getpos(expr),
location="memory",
)
return o
class ECMul(_SimpleBuiltinFunction):
_id = "ecmul"
_inputs = [("point", ArrayDefinition(Uint256Definition(), 2)), ("scalar", Uint256Definition())]
_return_type = ArrayDefinition(Uint256Definition(), 2)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location="memory"
)
pos = getpos(expr)
o = LLLnode.from_list(
[
"seq",
["mstore", placeholder_node, avo(args[0], 0, pos)],
["mstore", ["add", placeholder_node, 32], avo(args[0], 1, pos)],
["mstore", ["add", placeholder_node, 64], args[1]],
["assert", ["staticcall", ["gas"], 7, placeholder_node, 96, placeholder_node, 64]],
placeholder_node,
],
typ=ListType(BaseType("uint256"), 2),
pos=pos,
location="memory",
)
return o
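# Editor's note: addresses 6 and 7 used above are the alt_bn128 precompiles from
# EIP-196 (point addition and scalar multiplication, respectively).  Each curve point
# is encoded as two consecutive 32-byte words (x, y), which is why ecadd writes 128
# bytes of input and ecmul writes 96 (one point plus one scalar), both reading back a
# 64-byte point.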
def _memory_element_getter(index):
return LLLnode.from_list(
["mload", ["add", "_sub", ["add", 32, ["mul", 32, index]]]], typ=BaseType("int128"),
)
def _storage_element_getter(index):
return LLLnode.from_list(
["sload", ["add", ["sha3_32", "_sub"], ["add", 1, index]]], typ=BaseType("int128"),
)
class Extract32(_SimpleBuiltinFunction):
_id = "extract32"
_inputs = [("b", BytesArrayPrimitive()), ("start", Int128Definition())]
_kwargs = {"output_type": Optional("name_literal", "bytes32")}
_return_type = None
def fetch_call_return(self, node):
super().fetch_call_return(node)
if node.keywords:
return_type = get_type_from_annotation(node.keywords[0].value, DataLocation.MEMORY)
if not isinstance(
return_type, (AddressDefinition, Bytes32Definition, IntegerAbstractType)
):
                raise ArgumentException(
                    "output_type must be an integer, bytes32 or address", node.keywords[0]
                )
else:
return_type = Bytes32Definition()
return return_type
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
sub, index = args
ret_type = kwargs["output_type"]
# Get length and specific element
if sub.location == "memory":
lengetter = LLLnode.from_list(["mload", "_sub"], typ=BaseType("int128"))
elementgetter = _memory_element_getter
elif sub.location == "storage":
lengetter = LLLnode.from_list(["sload", ["sha3_32", "_sub"]], typ=BaseType("int128"))
elementgetter = _storage_element_getter
# TODO: unclosed if/elif clause. Undefined behavior if `sub.location`
# isn't one of `memory`/`storage`
# Special case: index known to be a multiple of 32
if isinstance(index.value, int) and not index.value % 32:
o = LLLnode.from_list(
[
"with",
"_sub",
sub,
elementgetter(["div", ["clamp", 0, index, ["sub", lengetter, 32]], 32]),
],
typ=BaseType(ret_type),
annotation="extracting 32 bytes",
)
# General case
else:
o = LLLnode.from_list(
[
"with",
"_sub",
sub,
[
"with",
"_len",
lengetter,
[
"with",
"_index",
["clamp", 0, index, ["sub", "_len", 32]],
[
"with",
"_mi32",
["mod", "_index", 32],
[
"with",
"_di32",
["div", "_index", 32],
[
"if",
"_mi32",
[
"add",
["mul", elementgetter("_di32"), ["exp", 256, "_mi32"]],
[
"div",
elementgetter(["add", "_di32", 1]),
["exp", 256, ["sub", 32, "_mi32"]],
],
],
elementgetter("_di32"),
],
],
],
],
],
],
typ=BaseType(ret_type),
pos=getpos(expr),
annotation="extracting 32 bytes",
)
if ret_type == "int128":
return LLLnode.from_list(
["clamp", ["mload", MemoryPositions.MINNUM], o, ["mload", MemoryPositions.MAXNUM]],
typ=BaseType("int128"),
pos=getpos(expr),
)
elif ret_type == "address":
return LLLnode.from_list(
["uclamplt", o, ["mload", MemoryPositions.ADDRSIZE]],
typ=BaseType(ret_type),
pos=getpos(expr),
)
else:
return o
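# Editor's note: a minimal pure-Python reference for what extract32 computes, added
# for illustration only (it is not part of the compiler).  `start` is clamped to
# [0, len(data) - 32] by the generated LLL; for unaligned reads the LLL combines two
# adjacent 32-byte words (mul by 256**(start % 32) on the first, div by
# 256**(32 - start % 32) on the second), which is equivalent to the slice below.
def _extract32_reference(data: bytes, start: int) -> int:
    """Illustrative sketch: big-endian integer of the 32 bytes of `data` at `start`."""
    start = max(0, min(start, len(data) - 32))
    return int.from_bytes(data[start:start + 32], "big")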
class AsWeiValue:
_id = "as_wei_value"
_inputs = [("value", ("int128", "uint256", "decimal")), ("unit", "str_literal")]
_return_type = Uint256Definition()
wei_denoms = {
("wei",): 1,
("femtoether", "kwei", "babbage"): 10 ** 3,
("picoether", "mwei", "lovelace"): 10 ** 6,
("nanoether", "gwei", "shannon"): 10 ** 9,
("microether", "szabo",): 10 ** 12,
("milliether", "finney",): 10 ** 15,
("ether",): 10 ** 18,
("kether", "grand"): 10 ** 21,
}
def evaluate(self, node):
validate_call_args(node, 2)
if not isinstance(node.args[1], vy_ast.Str):
raise ArgumentException(
"Wei denomination must be given as a literal string", node.args[1]
)
try:
denom = next(v for k, v in self.wei_denoms.items() if node.args[1].value in k)
except StopIteration:
raise ArgumentException(
f"Unknown denomination: {node.args[1].value}", node.args[1]
) from None
if not isinstance(node.args[0], (vy_ast.Decimal, vy_ast.Int)):
raise UnfoldableNode
value = node.args[0].value
if value < 0:
raise InvalidLiteral("Negative wei value not allowed", node.args[0])
if isinstance(value, int) and value >= 2 ** 256:
raise InvalidLiteral("Value out of range for uint256", node.args[0])
if isinstance(value, Decimal) and value >= 2 ** 127:
raise InvalidLiteral("Value out of range for decimal", node.args[0])
return vy_ast.Int.from_node(node, value=int(value * denom))
def fetch_call_return(self, node):
validate_expected_type(node.args[0], NumericAbstractType())
return self._return_type
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
value, denom_name = args[0], args[1].decode()
denom_divisor = next((v for k, v in self.wei_denoms.items() if denom_name in k), False)
if not denom_divisor:
raise InvalidLiteral(
f"Invalid denomination: {denom_name}, valid denominations are: "
f"{','.join(x[0] for x in self.wei_denoms)}",
expr.args[1],
)
# Compute the amount of wei and return that value
if isinstance(value, (int, Decimal)):
expr_args_0 = expr.args[0]
            # For a constant reference, fetch the value node of the constant assignment.
if context.constants.ast_is_constant(expr.args[0]):
expr_args_0 = context.constants._constants_ast[expr.args[0].id]
numstring, num, den = get_number_as_fraction(expr_args_0, context)
if denom_divisor % den:
max_len = len(str(denom_divisor)) - 1
raise InvalidLiteral(
f"Wei value of denomination '{denom_name}' has max {max_len} decimal places",
expr.args[0],
)
sub = num * denom_divisor // den
elif value.typ.is_literal:
if value.value <= 0:
raise InvalidLiteral("Negative wei value not allowed", expr)
sub = ["mul", value.value, denom_divisor]
elif value.typ.typ == "uint256":
sub = ["mul", value, denom_divisor]
else:
sub = ["div", ["mul", value, denom_divisor], DECIMAL_DIVISOR]
return LLLnode.from_list(sub, typ=BaseType("uint256"), location=None, pos=getpos(expr),)
zero_value = LLLnode.from_list(0, typ=BaseType("uint256"))
false_value = LLLnode.from_list(0, typ=BaseType("bool", is_literal=True))
class RawCall(_SimpleBuiltinFunction):
_id = "raw_call"
_inputs = [("to", AddressDefinition()), ("data", BytesArrayPrimitive())]
_kwargs = {
"max_outsize": Optional("num_literal", 0),
"gas": Optional("uint256", "gas"),
"value": Optional("uint256", zero_value),
"is_delegate_call": Optional("bool", false_value),
"is_static_call": Optional("bool", false_value),
}
_return_type = None
def fetch_call_return(self, node):
super().fetch_call_return(node)
outsize = next((i.value for i in node.keywords if i.arg == "max_outsize"), None)
if outsize is None:
return None
if not isinstance(outsize, vy_ast.Int) or outsize.value < 0:
            raise ArgumentException("max_outsize must be a non-negative integer literal", outsize)
if outsize.value:
return_type = BytesArrayDefinition()
return_type.set_min_length(outsize.value)
return return_type
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
to, data = args
gas, value, outsize, delegate_call, static_call = (
kwargs["gas"],
kwargs["value"],
kwargs["max_outsize"],
kwargs["is_delegate_call"],
kwargs["is_static_call"],
)
for key in ("is_delegate_call", "is_static_call"):
if kwargs[key].typ.is_literal is False:
raise TypeMismatch(
f"The `{key}` parameter must be a static/literal boolean value", expr
)
if delegate_call.value and static_call.value:
raise ArgumentException(
"Call may use one of `is_delegate_call` or `is_static_call`, not both", expr
)
if not static_call.value and context.is_constant():
raise StateAccessViolation(
f"Cannot make modifying calls from {context.pp_constancy()},"
" use `is_static_call=True` to perform this action",
expr,
)
placeholder = context.new_placeholder(data.typ)
placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location="memory")
copier = make_byte_array_copier(placeholder_node, data, pos=getpos(expr))
output_placeholder = context.new_placeholder(ByteArrayType(outsize))
output_node = LLLnode.from_list(
output_placeholder, typ=ByteArrayType(outsize), location="memory",
)
# build LLL for call or delegatecall
common_call_lll = [
["add", placeholder_node, 32],
["mload", placeholder_node],
# if there is no return value, the return offset can be 0
["add", output_node, 32] if outsize else 0,
outsize,
]
if delegate_call.value == 1:
call_lll = ["delegatecall", gas, to] + common_call_lll
elif static_call.value == 1:
call_lll = ["staticcall", gas, to] + common_call_lll
else:
call_lll = ["call", gas, to, value] + common_call_lll
# build sequence LLL
if outsize:
# return minimum of outsize and returndatasize
size = [
"with",
"_l",
outsize,
["with", "_r", "returndatasize", ["if", ["gt", "_l", "_r"], "_r", "_l"]],
]
seq = ["seq", copier, ["assert", call_lll], ["mstore", output_node, size], output_node]
typ = ByteArrayType(outsize)
else:
seq = ["seq", copier, ["assert", call_lll]]
typ = None
return LLLnode.from_list(seq, typ=typ, location="memory", pos=getpos(expr))
class Send(_SimpleBuiltinFunction):
_id = "send"
_inputs = [("to", AddressDefinition()), ("value", Uint256Definition())]
_return_type = None
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
to, value = args
if context.is_constant():
raise StateAccessViolation(
f"Cannot send ether inside {context.pp_constancy()}!", expr,
)
return LLLnode.from_list(
["assert", ["call", 0, to, value, 0, 0, 0, 0]], typ=None, pos=getpos(expr),
)
class SelfDestruct(_SimpleBuiltinFunction):
_id = "selfdestruct"
_inputs = [("to", AddressDefinition())]
_return_type = None
_is_terminus = True
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
if context.is_constant():
raise StateAccessViolation(
f"Cannot {expr.func.id} inside {context.pp_constancy()}!", expr.func,
)
return LLLnode.from_list(["selfdestruct", args[0]], typ=None, pos=getpos(expr))
class BlockHash(_SimpleBuiltinFunction):
_id = "blockhash"
_inputs = [("block_num", Uint256Definition())]
_return_type = Bytes32Definition()
@validate_inputs
    def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(
["blockhash", ["uclamplt", ["clampge", args[0], ["sub", ["number"], 256]], "number"]],
typ=BaseType("bytes32"),
pos=getpos(expr),
)
class RawLog:
_id = "raw_log"
_inputs = [("topics", "*"), ("data", ("bytes32", "bytes"))]
def fetch_call_return(self, node):
validate_call_args(node, 2)
if not isinstance(node.args[0], vy_ast.List) or len(node.args[0].elements) > 4:
raise InvalidType("Expecting a list of 0-4 topics as first argument", node.args[0])
if node.args[0].elements:
validate_expected_type(
node.args[0], ArrayDefinition(Bytes32Definition(), len(node.args[0].elements))
)
validate_expected_type(node.args[1], BytesAbstractType())
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
if not isinstance(args[0], vy_ast.List) or len(args[0].elements) > 4:
raise StructureException("Expecting a list of 0-4 topics as first argument", args[0])
topics = []
for elt in args[0].elements:
arg = Expr.parse_value_expr(elt, context)
if not is_base_type(arg.typ, "bytes32"):
raise TypeMismatch("Expecting a bytes32 argument as topic", elt)
topics.append(arg)
if args[1].typ == BaseType("bytes32"):
placeholder = context.new_placeholder(BaseType("bytes32"))
return LLLnode.from_list(
[
"seq",
["mstore", placeholder, unwrap_location(args[1])],
["log" + str(len(topics)), placeholder, 32] + topics,
],
typ=None,
pos=getpos(expr),
)
if args[1].location == "memory":
return LLLnode.from_list(
[
"with",
"_arr",
args[1],
["log" + str(len(topics)), ["add", "_arr", 32], ["mload", "_arr"]] + topics,
],
typ=None,
pos=getpos(expr),
)
placeholder = context.new_placeholder(args[1].typ)
placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location="memory")
copier = make_byte_array_copier(
placeholder_node,
LLLnode.from_list("_sub", typ=args[1].typ, location=args[1].location),
pos=getpos(expr),
)
return LLLnode.from_list(
[
"with",
"_sub",
args[1],
[
"seq",
copier,
[
"log" + str(len(topics)),
["add", placeholder_node, 32],
["mload", placeholder_node],
]
+ topics,
],
],
typ=None,
pos=getpos(expr),
)
class BitwiseAnd(_SimpleBuiltinFunction):
_id = "bitwise_and"
_inputs = [("x", Uint256Definition()), ("y", Uint256Definition())]
_return_type = Uint256Definition()
def evaluate(self, node):
validate_call_args(node, 2)
for arg in node.args:
if not isinstance(arg, vy_ast.Num):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2 ** 256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = node.args[0].value & node.args[1].value
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(
["and", args[0], args[1]], typ=BaseType("uint256"), pos=getpos(expr)
)
class BitwiseOr(_SimpleBuiltinFunction):
_id = "bitwise_or"
_inputs = [("x", Uint256Definition()), ("y", Uint256Definition())]
_return_type = Uint256Definition()
def evaluate(self, node):
validate_call_args(node, 2)
for arg in node.args:
if not isinstance(arg, vy_ast.Num):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2 ** 256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = node.args[0].value | node.args[1].value
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(
["or", args[0], args[1]], typ=BaseType("uint256"), pos=getpos(expr)
)
class BitwiseXor(_SimpleBuiltinFunction):
_id = "bitwise_xor"
_inputs = [("x", Uint256Definition()), ("y", Uint256Definition())]
_return_type = Uint256Definition()
def evaluate(self, node):
validate_call_args(node, 2)
for arg in node.args:
if not isinstance(arg, vy_ast.Num):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2 ** 256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = node.args[0].value ^ node.args[1].value
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(
["xor", args[0], args[1]], typ=BaseType("uint256"), pos=getpos(expr)
)
class BitwiseNot(_SimpleBuiltinFunction):
_id = "bitwise_not"
_inputs = [("x", Uint256Definition())]
_return_type = Uint256Definition()
def evaluate(self, node):
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Num):
raise UnfoldableNode
value = node.args[0].value
if value < 0 or value >= 2 ** 256:
raise InvalidLiteral("Value out of range for uint256", node.args[0])
value = (2 ** 256 - 1) - value
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(["not", args[0]], typ=BaseType("uint256"), pos=getpos(expr))
class Shift(_SimpleBuiltinFunction):
_id = "shift"
_inputs = [("x", Uint256Definition()), ("_shift", Int128Definition())]
_return_type = Uint256Definition()
def evaluate(self, node):
validate_call_args(node, 2)
        if any(not isinstance(i, vy_ast.Num) for i in node.args):
raise UnfoldableNode
value, shift = [i.value for i in node.args]
if value < 0 or value >= 2 ** 256:
raise InvalidLiteral("Value out of range for uint256", node.args[0])
if shift < -(2 ** 127) or shift >= 2 ** 127:
raise InvalidLiteral("Value out of range for int128", node.args[1])
if shift < 0:
value = value >> -shift
else:
value = (value << shift) % (2 ** 256)
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
if args[1].typ.is_literal:
shift_abs = abs(args[1].value)
else:
shift_abs = ["sub", 0, "_s"]
if version_check(begin="constantinople"):
left_shift = ["shl", "_s", "_v"]
right_shift = ["shr", shift_abs, "_v"]
else:
            # If the second argument is positive, left-shift, i.e. multiply by a power of two;
            # if it is negative, divide by a power of two.
            # Note that if the abs of the second argument >= 256, then in the EVM
            # 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0
left_shift = ["mul", "_v", ["exp", 2, "_s"]]
right_shift = ["div", "_v", ["exp", 2, shift_abs]]
if not args[1].typ.is_literal:
node_list = ["if", ["slt", "_s", 0], right_shift, left_shift]
elif args[1].value >= 0:
node_list = left_shift
else:
node_list = right_shift
return LLLnode.from_list(
["with", "_v", args[0], ["with", "_s", args[1], node_list]],
typ=BaseType("uint256"),
pos=getpos(expr),
)
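# Editor's note: an illustrative pure-Python mirror of the folding rule in
# Shift.evaluate (not part of the compiler); e.g. _shift_reference(1, 255) == 2**255,
# _shift_reference(2**255, 1) == 0 (wraps mod 2**256) and _shift_reference(7, -1) == 3.
def _shift_reference(value: int, shift: int) -> int:
    """Illustrative sketch of shift() semantics on uint256 values."""
    return (value << shift) % 2 ** 256 if shift >= 0 else value >> -shift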
class _AddMulMod(_SimpleBuiltinFunction):
_inputs = [("a", Uint256Definition()), ("b", Uint256Definition()), ("c", Uint256Definition())]
_return_type = Uint256Definition()
def evaluate(self, node):
validate_call_args(node, 3)
if isinstance(node.args[2], vy_ast.Num) and node.args[2].value == 0:
raise ZeroDivisionException("Modulo by 0", node.args[2])
for arg in node.args:
if not isinstance(arg, vy_ast.Num):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2 ** 256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = self._eval_fn(node.args[0].value, node.args[1].value) % node.args[2].value
return vy_ast.Int.from_node(node, value=value)
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
return LLLnode.from_list(
["seq", ["assert", args[2]], [self._opcode, args[0], args[1], args[2]]],
typ=BaseType("uint256"),
pos=getpos(expr),
)
class AddMod(_AddMulMod):
_id = "uint256_addmod"
_eval_fn = operator.add
_opcode = "addmod"
class MulMod(_AddMulMod):
_id = "uint256_mulmod"
_eval_fn = operator.mul
_opcode = "mulmod"
def get_create_forwarder_to_bytecode():
from vyper.compile_lll import assembly_to_evm, num_to_bytearray
code_a = [
"PUSH1",
0x33,
"PUSH1",
0x0C,
"PUSH1",
0x00,
"CODECOPY",
"PUSH1",
0x33,
"PUSH1",
0x00,
"RETURN",
"CALLDATASIZE",
"PUSH1",
0x00,
"PUSH1",
0x00,
"CALLDATACOPY",
"PUSH2",
num_to_bytearray(0x1000),
"PUSH1",
0x00,
"CALLDATASIZE",
"PUSH1",
0x00,
"PUSH20", # [address to delegate to]
]
code_b = [
"GAS",
"DELEGATECALL",
"PUSH1",
0x2C, # jumpdest of whole program.
"JUMPI",
"PUSH1",
0x0,
"DUP1",
"REVERT",
"JUMPDEST",
"PUSH2",
num_to_bytearray(0x1000),
"PUSH1",
0x00,
"RETURN",
]
return assembly_to_evm(code_a)[0] + (b"\x00" * 20) + assembly_to_evm(code_b)[0]
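# Editor's note (high-level reading of the assembly above, hedged): code_a is the
# constructor, which CODECOPYs and returns the 0x33-byte runtime; the runtime
# CALLDATACOPYs its input, DELEGATECALLs the 20-byte target address spliced in between
# code_a and code_b, and either returns the output or reverts.  In other words,
# create_forwarder_to deploys a minimal proxy that forwards every call to the target.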
class CreateForwarderTo(_SimpleBuiltinFunction):
_id = "create_forwarder_to"
_inputs = [("target", AddressDefinition())]
_kwargs = {"value": Optional("uint256", zero_value)}
_return_type = AddressDefinition()
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
value = kwargs["value"]
if context.is_constant():
raise StateAccessViolation(
f"Cannot make calls from {context.pp_constancy()}", expr,
)
placeholder = context.new_placeholder(ByteArrayType(96))
kode = get_create_forwarder_to_bytecode()
high = bytes_to_int(kode[:32])
low = bytes_to_int((kode + b"\x00" * 32)[47:79])
return LLLnode.from_list(
[
"seq",
["mstore", placeholder, high],
["mstore", ["add", placeholder, 27], ["mul", args[0], 2 ** 96]],
["mstore", ["add", placeholder, 47], low],
["clamp_nonzero", ["create", value, placeholder, 96]],
],
typ=BaseType("address"),
pos=getpos(expr),
add_gas_estimate=11000,
)
class _MinMax:
_inputs = [("a", ("int128", "decimal", "uint256")), ("b", ("int128", "decimal", "uint256"))]
def evaluate(self, node):
validate_call_args(node, 2)
if not isinstance(node.args[0], type(node.args[1])):
raise UnfoldableNode
if not isinstance(node.args[0], (vy_ast.Decimal, vy_ast.Int)):
raise UnfoldableNode
left, right = (i.value for i in node.args)
if isinstance(left, Decimal) and (
min(left, right) < -(2 ** 127) or max(left, right) >= 2 ** 127
):
raise InvalidType("Decimal value is outside of allowable range", node)
if isinstance(left, int) and (min(left, right) < 0 and max(left, right) >= 2 ** 127):
raise TypeMismatch("Cannot perform action between dislike numeric types", node)
value = self._eval_fn(left, right)
return type(node.args[0]).from_node(node, value=value)
def fetch_call_return(self, node):
validate_call_args(node, 2)
types_list = get_common_types(
*node.args, filter_fn=lambda x: isinstance(x, NumericAbstractType)
)
if not types_list:
raise TypeMismatch
return types_list.pop()
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
def _can_compare_with_uint256(operand):
if operand.typ.typ == "uint256":
return True
elif (
operand.typ.typ == "int128"
and operand.typ.is_literal
and SizeLimits.in_bounds("uint256", operand.value)
): # noqa: E501
return True
return False
comparator = self._opcode
left, right = args[0], args[1]
if left.typ.typ == right.typ.typ:
if left.typ.typ != "uint256":
# if comparing like types that are not uint256, use SLT or SGT
comparator = f"s{comparator}"
o = ["if", [comparator, "_l", "_r"], "_r", "_l"]
otyp = left.typ
otyp.is_literal = False
elif _can_compare_with_uint256(left) and _can_compare_with_uint256(right):
o = ["if", [comparator, "_l", "_r"], "_r", "_l"]
if right.typ.typ == "uint256":
otyp = right.typ
else:
otyp = left.typ
otyp.is_literal = False
else:
raise TypeMismatch(f"Minmax types incompatible: {left.typ.typ} {right.typ.typ}")
return LLLnode.from_list(
["with", "_l", left, ["with", "_r", right, o]], typ=otyp, pos=getpos(expr),
)
class Min(_MinMax):
_id = "min"
_eval_fn = min
_opcode = "gt"
class Max(_MinMax):
_id = "max"
_eval_fn = max
_opcode = "lt"
class Sqrt(_SimpleBuiltinFunction):
_id = "sqrt"
_inputs = [("d", DecimalDefinition())]
_return_type = DecimalDefinition()
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
from vyper.functions.utils import generate_inline_function
arg = args[0]
sqrt_code = """
assert x >= 0.0
z: decimal = 0.0
if x == 0.0:
z = 0.0
else:
z = x / 2.0 + 0.5
y: decimal = x
for i in range(256):
if z == y:
break
y = z
z = (x / z + z) / 2.0
"""
x_type = BaseType("decimal")
placeholder_copy = ["pass"]
# Steal current position if variable is already allocated.
if arg.value == "mload":
new_var_pos = arg.args[0]
# Other locations need to be copied.
else:
new_var_pos = context.new_placeholder(x_type)
placeholder_copy = ["mstore", new_var_pos, arg]
# Create input variables.
variables = {"x": VariableRecord(name="x", pos=new_var_pos, typ=x_type, mutable=False)}
# Generate inline LLL.
new_ctx, sqrt_lll = generate_inline_function(
code=sqrt_code, variables=variables, memory_allocator=context.memory_allocator
)
return LLLnode.from_list(
[
"seq_unchecked",
placeholder_copy, # load x variable
sqrt_lll,
["mload", new_ctx.vars["z"].pos], # unload z variable into the stack,
],
typ=BaseType("decimal"),
pos=getpos(expr),
)
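# Editor's note (hedged example): sqrt_code above is the Babylonian / Newton iteration
# z <- (x / z + z) / 2 with initial guess x / 2 + 0.5.  For x = 4.0 the iterates run
# roughly 2.5 -> 2.05 -> 2.0006 -> ... and converge to 2.0 well within the
# 256-iteration bound.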
class Empty:
_id = "empty"
_inputs = [("typename", "*")]
def fetch_call_return(self, node):
validate_call_args(node, 1)
type_ = get_type_from_annotation(node.args[0], DataLocation.MEMORY)
return type_
@validate_inputs
def build_LLL(self, expr, args, kwargs, context):
output_type = context.parse_type(expr.args[0], expr.args[0])
return LLLnode(None, typ=output_type, pos=getpos(expr))
DISPATCH_TABLE = {
"floor": Floor(),
"ceil": Ceil(),
"convert": Convert(),
"slice": Slice(),
"len": Len(),
"concat": Concat(),
"sha256": Sha256(),
"method_id": MethodID(),
"keccak256": Keccak256(),
"ecrecover": ECRecover(),
"ecadd": ECAdd(),
"ecmul": ECMul(),
"extract32": Extract32(),
"as_wei_value": AsWeiValue(),
"raw_call": RawCall(),
"blockhash": BlockHash(),
"bitwise_and": BitwiseAnd(),
"bitwise_or": BitwiseOr(),
"bitwise_xor": BitwiseXor(),
"bitwise_not": BitwiseNot(),
"uint256_addmod": AddMod(),
"uint256_mulmod": MulMod(),
"sqrt": Sqrt(),
"shift": Shift(),
"create_forwarder_to": CreateForwarderTo(),
"min": Min(),
"max": Max(),
"empty": Empty(),
}
STMT_DISPATCH_TABLE = {
"send": Send(),
"selfdestruct": SelfDestruct(),
"raw_call": RawCall(),
"raw_log": RawLog(),
"create_forwarder_to": CreateForwarderTo(),
}
BUILTIN_FUNCTIONS = {**STMT_DISPATCH_TABLE, **DISPATCH_TABLE}.keys()
def get_builtin_functions():
return {**STMT_DISPATCH_TABLE, **DISPATCH_TABLE}
| 34.252537
| 100
| 0.541771
|
cf4f63b23ffc80a6b4338ddef351edf4708ef429
| 6,272
|
py
|
Python
|
deepxde/geometry/geometry_nd.py
|
Anilith/deepxde
|
d3a411a8023dba176b1489567ebab549ac6e7ea2
|
[
"Apache-2.0"
] | null | null | null |
deepxde/geometry/geometry_nd.py
|
Anilith/deepxde
|
d3a411a8023dba176b1489567ebab549ac6e7ea2
|
[
"Apache-2.0"
] | null | null | null |
deepxde/geometry/geometry_nd.py
|
Anilith/deepxde
|
d3a411a8023dba176b1489567ebab549ac6e7ea2
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from scipy import stats
from sklearn import preprocessing
from .geometry import Geometry
from .sampler import sample
from .. import config
class Hypercube(Geometry):
def __init__(self, xmin, xmax):
if len(xmin) != len(xmax):
raise ValueError("Dimensions of xmin and xmax do not match.")
if np.any(np.array(xmin) >= np.array(xmax)):
raise ValueError("xmin >= xmax")
self.xmin = np.array(xmin, dtype=config.real(np))
self.xmax = np.array(xmax, dtype=config.real(np))
self.side_length = self.xmax - self.xmin
super(Hypercube, self).__init__(
len(xmin), (self.xmin, self.xmax), np.linalg.norm(self.side_length)
)
self.volume = np.prod(self.side_length)
def inside(self, x):
return np.logical_and(
np.all(x >= self.xmin, axis=-1), np.all(x <= self.xmax, axis=-1)
)
def on_boundary(self, x):
_on_boundary = np.logical_or(
np.any(np.isclose(x, self.xmin), axis=-1),
np.any(np.isclose(x, self.xmax), axis=-1),
)
return np.logical_and(self.inside(x), _on_boundary)
def boundary_normal(self, x):
_n = -np.isclose(x, self.xmin).astype(config.real(np)) + np.isclose(
x, self.xmax
)
# For vertices, the normal is averaged for all directions
idx = np.count_nonzero(_n, axis=-1) > 1
if np.any(idx):
print(
f"Warning: {self.__class__.__name__} boundary_normal called on vertices. "
"You may use PDE(..., exclusions=...) to exclude the vertices."
)
l = np.linalg.norm(_n[idx], axis=-1, keepdims=True)
_n[idx] /= l
return _n
def uniform_points(self, n, boundary=True):
dx = (self.volume / n) ** (1 / self.dim)
xi = []
for i in range(self.dim):
ni = int(np.ceil(self.side_length[i] / dx))
if boundary:
xi.append(
np.linspace(
self.xmin[i], self.xmax[i], num=ni, dtype=config.real(np)
)
)
else:
xi.append(
np.linspace(
self.xmin[i],
self.xmax[i],
num=ni + 1,
endpoint=False,
dtype=config.real(np),
)[1:]
)
x = np.array(list(itertools.product(*xi)))
if n != len(x):
print(
"Warning: {} points required, but {} points sampled.".format(n, len(x))
)
return x
def random_points(self, n, random="pseudo"):
x = sample(n, self.dim, random)
return (self.xmax - self.xmin) * x + self.xmin
def random_boundary_points(self, n, random="pseudo"):
x = sample(n, self.dim, random)
# Randomly pick a dimension
rng = np.random.default_rng()
rand_dim = rng.integers(self.dim, size=n)
# Replace value of the randomly picked dimension with the nearest boundary value (0 or 1)
x[np.arange(n), rand_dim] = np.round(x[np.arange(n), rand_dim])
return (self.xmax - self.xmin) * x + self.xmin
def periodic_point(self, x, component):
y = np.copy(x)
_on_xmin = np.isclose(y[:, component], self.xmin[component])
_on_xmax = np.isclose(y[:, component], self.xmax[component])
y[:, component][_on_xmin] = self.xmax[component]
y[:, component][_on_xmax] = self.xmin[component]
return y
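# Editor's note (hedged usage example): Hypercube(xmin=[0, 0], xmax=[1, 2]) describes
# the rectangle [0, 1] x [0, 2] with side_length [1, 2], volume 2 and diameter
# sqrt(5); inside(np.array([[0.5, 1.0]])) would return [True], and on_boundary is True
# only for points that are inside and touch at least one face.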
class Hypersphere(Geometry):
def __init__(self, center, radius):
self.center = np.array(center, dtype=config.real(np))
self.radius = radius
super(Hypersphere, self).__init__(
len(center), (self.center - radius, self.center + radius), 2 * radius
)
self._r2 = radius ** 2
def inside(self, x):
return np.linalg.norm(x - self.center, axis=-1) <= self.radius
def on_boundary(self, x):
return np.isclose(np.linalg.norm(x - self.center, axis=-1), self.radius)
def distance2boundary_unitdirn(self, x, dirn):
"""https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection"""
xc = x - self.center
ad = np.dot(xc, dirn)
return -ad + (ad ** 2 - np.sum(xc * xc, axis=-1) + self._r2) ** 0.5
def distance2boundary(self, x, dirn):
return self.distance2boundary_unitdirn(x, dirn / np.linalg.norm(dirn))
def mindist2boundary(self, x):
return np.amin(self.radius - np.linalg.norm(x - self.center, axis=-1))
def boundary_normal(self, x):
_n = x - self.center
l = np.linalg.norm(_n, axis=-1, keepdims=True)
_n = _n / l * np.isclose(l, self.radius)
return _n
def random_points(self, n, random="pseudo"):
"""https://math.stackexchange.com/questions/87230/picking-random-points-in-the-volume-of-sphere-with-uniform-probability"""
if random == "pseudo":
U = np.random.rand(n, 1)
X = np.random.normal(size=(n, self.dim))
else:
rng = sample(n, self.dim + 1, random)
U, X = rng[:, 0:1], rng[:, 1:]
X = stats.norm.ppf(X)
X = preprocessing.normalize(X)
X = U ** (1 / self.dim) * X
return self.radius * X + self.center
def random_boundary_points(self, n, random="pseudo"):
"""http://mathworld.wolfram.com/HyperspherePointPicking.html"""
if random == "pseudo":
X = np.random.normal(size=(n, self.dim)).astype(config.real(np))
else:
U = sample(n, self.dim, random)
X = stats.norm.ppf(U)
X = preprocessing.normalize(X)
return self.radius * X + self.center
def background_points(self, x, dirn, dist2npt, shift):
dirn = dirn / np.linalg.norm(dirn)
dx = self.distance2boundary_unitdirn(x, -dirn)
n = max(dist2npt(dx), 1)
h = dx / n
pts = x - np.arange(-shift, n - shift + 1)[:, None] * h * dirn
return pts
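# Editor's note: random_points above draws gaussian directions, normalizes them onto
# the unit sphere and scales by U ** (1 / dim) * radius, the standard recipe for
# sampling uniformly inside a ball (see the linked math.stackexchange answer);
# random_boundary_points drops the radial scaling to sample uniformly on the sphere
# surface, per the linked MathWorld page.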
| 36.465116
| 131
| 0.558992
|
e45558d613fb6b83c513fbc22974928656422990
| 1,748
|
py
|
Python
|
backend/home/models.py
|
crowdbotics-apps/jonathan-kim-teenge-33637
|
be6c1dabd7534d41d7f0b776fff0b12d66924955
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/models.py
|
crowdbotics-apps/jonathan-kim-teenge-33637
|
be6c1dabd7534d41d7f0b776fff0b12d66924955
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/models.py
|
crowdbotics-apps/jonathan-kim-teenge-33637
|
be6c1dabd7534d41d7f0b776fff0b12d66924955
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from users.models import User
class BaseModel(models.Model):
created = models.DateTimeField(auto_now_add=True, help_text='Date Created')
class Meta:
abstract = True
ordering = ('-created',)
class Wish(BaseModel):
user = models.ForeignKey(User, on_delete=models.CASCADE, default=None)
location = models.CharField(_("Location"), null=True, max_length=255)
golfers = models.IntegerField(_("Number of Golfers"), default=0)
from_date = models.DateTimeField(_('From Date'), default=timezone.now)
to_date = models.DateTimeField(_('To Date'), default=False)
is_before_selected = models.BooleanField(default=False)
# def __str__(self):
# return self.title
class Course(BaseModel):
location = models.CharField(_("Golf course Location"), blank=True, null=True, max_length=255)
tee_datetime = models.DateTimeField(_('Date and time'), default=timezone.now)
no_of_selected_players = models.IntegerField(_("Number of selected players"), default=0)
no_of_slots_available = models.IntegerField(_("All slots available for selected parameters"), default=0)
no_of_max_players = models.IntegerField(_("Number of maximum players for opened slot"), default=0)
website_link = models.CharField(_("Website Link"), blank=True, null=True, max_length=255)
class Alert(BaseModel):
wish = models.ForeignKey('Wish', on_delete=models.CASCADE, default=None)
course = models.ForeignKey('Course', on_delete=models.CASCADE, default=None)
user = models.ForeignKey(User, on_delete=models.CASCADE, default=None)
is_read = models.BooleanField(default=False)
| 40.651163
| 108
| 0.739703
|
cef4c833ffa86075777c45e23730d2ec13ae240f
| 1,886
|
py
|
Python
|
ormar/exceptions.py
|
naterenegar/ormar
|
ef3edd4594867569a2fd72971fde9666cff42395
|
[
"MIT"
] | null | null | null |
ormar/exceptions.py
|
naterenegar/ormar
|
ef3edd4594867569a2fd72971fde9666cff42395
|
[
"MIT"
] | null | null | null |
ormar/exceptions.py
|
naterenegar/ormar
|
ef3edd4594867569a2fd72971fde9666cff42395
|
[
"MIT"
] | null | null | null |
"""
Gathers all exceptions thrown by ormar.
"""
class AsyncOrmException(Exception):
"""
Base ormar Exception
"""
pass
class ModelDefinitionError(AsyncOrmException):
"""
Raised for errors related to the model definition itself:
* setting @property_field on method with arguments other than func(self)
* defining a Field without required parameters
* defining a model with more than one primary_key
* defining a model without primary_key
* setting primary_key column as pydantic_only
"""
pass
class ModelError(AsyncOrmException):
"""
Raised for initialization of model with non-existing field keyword.
"""
pass
class NoMatch(AsyncOrmException):
"""
Raised for database queries that has no matching result (empty result).
"""
pass
class MultipleMatches(AsyncOrmException):
"""
Raised for database queries that should return one row (i.e. get, first etc.)
but has multiple matching results in response.
"""
pass
class QueryDefinitionError(AsyncOrmException):
"""
Raised for errors in query definition:
* using contains or icontains filter with instance of the Model
* using Queryset.update() without filter and setting each flag to True
* using Queryset.delete() without filter and setting each flag to True
"""
pass
class RelationshipInstanceError(AsyncOrmException):
pass
class ModelPersistenceError(AsyncOrmException):
"""
Raised for update of models without primary_key set (cannot retrieve from db)
or for saving a model with relation to unsaved model (cannot extract fk value).
"""
pass
class SignalDefinitionError(AsyncOrmException):
"""
Raised when non callable receiver is passed as signal callback.
"""
pass
| 22.452381
| 87
| 0.678155
|
f4ee59fe1d8c16455def9c810e17ea17559a11d2
| 383
|
py
|
Python
|
polls/migrations/0002_question_author.py
|
codermahiuddin/Azure
|
6f2c7d695807e9f9ee51cd297a744350148b0987
|
[
"MIT"
] | null | null | null |
polls/migrations/0002_question_author.py
|
codermahiuddin/Azure
|
6f2c7d695807e9f9ee51cd297a744350148b0987
|
[
"MIT"
] | null | null | null |
polls/migrations/0002_question_author.py
|
codermahiuddin/Azure
|
6f2c7d695807e9f9ee51cd297a744350148b0987
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-06-19 02:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='question',
name='author',
field=models.CharField(default='', max_length=50),
),
]
| 20.157895
| 62
| 0.584856
|
2888107e369e410e4ac04c657ce44a44991aae62
| 870
|
py
|
Python
|
sdks/python/apache_beam/version.py
|
h4rr21/beam
|
e5bb291f95bd8cbb7942ed2a3b007df70eaf71be
|
[
"Apache-2.0"
] | 1
|
2018-08-11T14:06:57.000Z
|
2018-08-11T14:06:57.000Z
|
sdks/python/apache_beam/version.py
|
h4rr21/beam
|
e5bb291f95bd8cbb7942ed2a3b007df70eaf71be
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/version.py
|
h4rr21/beam
|
e5bb291f95bd8cbb7942ed2a3b007df70eaf71be
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK version information and utilities."""
__version__ = '2.7.0.dev'
| 39.545455
| 74
| 0.766667
|
687b8c32e10697c1c06bc9c1d20a0f64723c6890
| 485
|
py
|
Python
|
Exercise/ExerciseTools.py
|
Mihai925/EduCoding-Legacy
|
7c6de105deb186c3442f8d7f9f1b9f99708f8fb6
|
[
"MIT"
] | null | null | null |
Exercise/ExerciseTools.py
|
Mihai925/EduCoding-Legacy
|
7c6de105deb186c3442f8d7f9f1b9f99708f8fb6
|
[
"MIT"
] | null | null | null |
Exercise/ExerciseTools.py
|
Mihai925/EduCoding-Legacy
|
7c6de105deb186c3442f8d7f9f1b9f99708f8fb6
|
[
"MIT"
] | null | null | null |
from models import Exercise
class ExerciseTools():
def __init__(self):
pass
def submit_exercise(self, title, description, content):
exercise = Exercise(title=title, description=description, content=content)
exercise.save()
return True
def get_exercise_by_id(self, ex_id):
exercise = None
try:
exercise = Exercise.objects.get(ex_id=ex_id)
        except Exception:
            pass
return exercise
| 25.526316
| 82
| 0.63299
|
171587089721140d33c4364e96d05171bd9cdac5
| 388
|
py
|
Python
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/farm/FarmFactory.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 1
|
2020-10-05T08:53:57.000Z
|
2020-10-05T08:53:57.000Z
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/farm/FarmFactory.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 17
|
2019-11-14T08:41:37.000Z
|
2020-05-27T09:23:51.000Z
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/farm/FarmFactory.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
from Jumpscale import j
JSBASE = j.baseclasses.object
from .Farm import Farm
class FarmFactory(JSBASE):
__jslocation__ = "j.sal_zos.farm"
def get(self, farmer_iyo_org):
"""
Get sal for farm
Arguments:
farmer_iyo_org: the farmer iyo organisation
Returns:
the sal layer
"""
return Farm(farmer_iyo_org)
| 16.869565
| 55
| 0.608247
|
d1004ac49b05b1a76c802adad90b763487fc89b0
| 2,884
|
py
|
Python
|
is_core/forms/generic.py
|
zzuzzy/django-is-core
|
3f87ec56a814738683c732dce5f07e0328c2300d
|
[
"BSD-3-Clause"
] | null | null | null |
is_core/forms/generic.py
|
zzuzzy/django-is-core
|
3f87ec56a814738683c732dce5f07e0328c2300d
|
[
"BSD-3-Clause"
] | null | null | null |
is_core/forms/generic.py
|
zzuzzy/django-is-core
|
3f87ec56a814738683c732dce5f07e0328c2300d
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.forms.models import ModelForm
from django.contrib.contenttypes.forms import BaseGenericInlineFormSet as OriginBaseGenericInlineFormSet
from is_core.forms.models import smartmodelformset_factory
from is_core.forms.formsets import BaseFormSetMixin
class BaseGenericInlineFormSet(BaseFormSetMixin, OriginBaseGenericInlineFormSet):
pass
def smart_generic_inlineformset_factory(model, request, form=ModelForm, formset=BaseGenericInlineFormSet,
ct_field='content_type', fk_field='object_id', fields=None, exclude=None,
extra=3, can_order=False, can_delete=True, min_num=None, max_num=None,
formfield_callback=None, widgets=None, validate_min=False, validate_max=False,
localized_fields=None, labels=None, help_texts=None, error_messages=None,
formreadonlyfield_callback=None, readonly_fields=None, for_concrete_model=True,
readonly=False):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.related_model != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'max_num': max_num,
'min_num': min_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'formreadonlyfield_callback': formreadonlyfield_callback,
'readonly_fields': readonly_fields,
'readonly': readonly,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
}
FormSet = smartmodelformset_factory(model, request, **kwargs)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
FormSet.for_concrete_model = for_concrete_model
return FormSet
| 43.044776
| 119
| 0.664702
|
263a0c363f242ed08103117d68a53920b69d6be1
| 1,878
|
py
|
Python
|
electrum/plot.py
|
padenmoss/electrum-ftc
|
dfa3f33350225590db8afb391cd9005bf70253e4
|
[
"MIT"
] | 11
|
2018-03-27T06:35:30.000Z
|
2021-05-14T19:32:36.000Z
|
electrum/plot.py
|
padenmoss/electrum-ftc
|
dfa3f33350225590db8afb391cd9005bf70253e4
|
[
"MIT"
] | 108
|
2018-03-13T10:43:26.000Z
|
2021-05-14T21:26:06.000Z
|
electrum/plot.py
|
padenmoss/electrum-ftc
|
dfa3f33350225590db8afb391cd9005bf70253e4
|
[
"MIT"
] | 15
|
2018-02-24T18:37:17.000Z
|
2021-08-23T18:17:23.000Z
|
import datetime
from collections import defaultdict
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as md
from .i18n import _
from .bitcoin import COIN
class NothingToPlotException(Exception):
def __str__(self):
return _("Nothing to plot.")
def plot_history(history):
if len(history) == 0:
raise NothingToPlotException()
hist_in = defaultdict(int)
hist_out = defaultdict(int)
for item in history:
if not item['confirmations']:
continue
if item['timestamp'] is None:
continue
value = item['value'].value/COIN
date = item['date']
datenum = int(md.date2num(datetime.date(date.year, date.month, 1)))
if value > 0:
hist_in[datenum] += value
else:
hist_out[datenum] -= value
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax = plt.gca()
plt.ylabel('FTC')
plt.xlabel('Month')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].set_title('Monthly Volume')
xfmt = md.DateFormatter('%Y-%m')
ax.xaxis.set_major_formatter(xfmt)
width = 20
r1 = None
r2 = None
dates_values = list(zip(*sorted(hist_in.items())))
if dates_values and len(dates_values) == 2:
dates, values = dates_values
r1 = axarr[0].bar(dates, values, width, label='incoming')
axarr[0].legend(loc='upper left')
dates_values = list(zip(*sorted(hist_out.items())))
if dates_values and len(dates_values) == 2:
dates, values = dates_values
r2 = axarr[1].bar(dates, values, width, color='r', label='outgoing')
axarr[1].legend(loc='upper left')
if r1 is None and r2 is None:
raise NothingToPlotException()
return plt
| 29.34375
| 76
| 0.633653
|
d4b3e525c750bc489eddc2ac60f9cb79b13e8ffc
| 332
|
py
|
Python
|
tests/test_docs.py
|
LaggAt/hovercraft
|
617bd6102ef5e6fc8ed2942bbd4d514e15715fdf
|
[
"MIT"
] | 1
|
2017-12-20T07:20:11.000Z
|
2017-12-20T07:20:11.000Z
|
tests/test_docs.py
|
LaggAt/hovercraft
|
617bd6102ef5e6fc8ed2942bbd4d514e15715fdf
|
[
"MIT"
] | 1
|
2020-07-11T01:05:08.000Z
|
2020-07-11T01:05:08.000Z
|
tests/test_docs.py
|
wearypossum4770/hovercraft
|
d9f63bfdfe1519c4d7a81697ee066e49dc26a30b
|
[
"MIT"
] | null | null | null |
import manuel.doctest
import manuel.codeblock
import manuel.testing
import unittest
def additional_tests():
m = manuel.doctest.Manuel()
m += manuel.codeblock.Manuel()
return manuel.testing.TestSuite(m, r'../docs/examples/tutorial.rst')
if __name__ == '__main__':
unittest.TextTestRunner().run(additional_tests())
| 23.714286
| 72
| 0.740964
|
c9ca0e175b53adb9d441b53580e90c0f2f11e2b6
| 100,313
|
py
|
Python
|
pyNastran/bdf/cards/test/test_aero.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 1
|
2021-08-02T09:49:24.000Z
|
2021-08-02T09:49:24.000Z
|
pyNastran/bdf/cards/test/test_aero.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/bdf/cards/test/test_aero.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# pylint: disable=R0914
"""tests aero cards"""
import os
from collections import defaultdict
import unittest
from io import StringIO
from typing import Tuple, Optional, Any
import numpy as np
from cpylog import SimpleLogger
import pyNastran
from pyNastran.bdf.bdf import BDF, CORD2R, BDFCard, SET1, read_bdf
from pyNastran.bdf.test.test_bdf import run_bdf
from pyNastran.bdf.cards.aero.aero import (
AEFACT, AELIST, AEPARM,
CAERO1, CAERO2, CAERO3, CAERO4, #CAERO5,
PAERO1, PAERO2, PAERO4, #PAERO3, PAERO5,
AESURF, AESURFS,
AELINK, AECOMP,
SPLINE1, SPLINE2, #, SPLINE3, SPLINE4, SPLINE5
build_caero_paneling
)
from pyNastran.bdf.cards.aero.dynamic_loads import AERO, FLFACT, FLUTTER, GUST, MKAERO1, MKAERO2
from pyNastran.bdf.cards.aero.static_loads import AESTAT, AEROS, CSSCHD, TRIM, TRIM2, DIVERG
from pyNastran.bdf.cards.test.utils import save_load_deck
IS_MATPLOTLIB = False
if IS_MATPLOTLIB:
import matplotlib.pyplot as plt
ROOTPATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(ROOTPATH, '..', 'models')
#test_path = os.path.join(ROOTPATH, 'bdf', 'cards', 'test')
COMMENT_BAD = 'this is a bad comment'
COMMENT_GOOD = 'this is a good comment\n'
class TestAero(unittest.TestCase):
"""
The Aero cards are:
* AEFACT
* AELINK
* AELIST
* AEPARM
* AESTAT
* AESURF / AESURFS
* AERO / AEROS
* CSSCHD
* CAERO1 / CAERO2 / CAERO3 / CAERO4 / CAERO5
* FLFACT
* FLUTTER
* GUST
* MKAERO1 / MKAERO2
* PAERO1 / PAERO2 / PAERO3
* SPLINE1 / SPLINE2 / SPLINE4 / SPLINE5
"""
def test_aestat_1(self):
log = SimpleLogger(level='warning')
model = BDF(log=log)
lines = ['AESTAT 502 PITCH']
card = model._process_card(lines)
card = BDFCard(card)
size = 8
card = AESTAT.add_card(card)
card.write_card(size, 'dummy')
card.raw_fields()
def test_aecomp_1(self):
"""checks the AECOMP card"""
#sid = 10
#aesid = 0
#lalpha = None
#lmach = None
#lschd = None
#sid = 5
#aesid = 50
#lalpha = 12
#lmach = 15
name = 'WING'
list_type = 'AELIST' # or SET1, CAEROx
aelist_ids = [75, 76]
card = ['AECOMP', name, list_type] + aelist_ids
bdf_card = BDFCard(card, has_none=True)
aecomp1 = AECOMP.add_card(bdf_card, comment='aecomp card')
aecomp1.validate()
aecomp1.write_card()
#label = 'ELEV'
#cid1 = 0
#alid1 = 37
#aesurf = AESURF(aesid, label, cid1, alid1)
#aefact_sid = alid1
#Di = [0., 0.5, 1.]
#aefact_elev = AEFACT(aefact_sid, Di)
#aefact_sid = lalpha
#Di = [0., 5., 10.]
#aefact_alpha = AEFACT(aefact_sid, Di)
#aefact_sid = lmach
#Di = [0., 0.7, 0.8]
#aefact_mach = AEFACT(aefact_sid, Di)
#aefact_sid = lschd
#Di = [0., 15., 30., 45.]
#aefact_delta = AEFACT(aefact_sid, Di)
log = SimpleLogger(level='warning')
model = BDF(log=log)
data = ['AELIST', 75, 1001, 'THRU', 1075, 1101, 'THRU', 1109, 1201, 1202]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
data = ['AELIST', 76, 2000, 'THRU', 2010]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
#model.add_aesurf(aesurf)
#model.add_aefact(aefact_elev)
#model.add_aefact(aefact_alpha)
#model.add_aefact(aefact_mach)
#model.add_aefact(aefact_delta)
aecomp1.safe_cross_reference(model)
aecomp1.uncross_reference()
aecomp1.cross_reference(model)
aecomp1.write_card()
aecomp1.uncross_reference()
aecomp1.write_card()
model.validate()
save_load_deck(model)
#-----------
aecomp2 = AECOMP(name, list_type, aelist_ids, comment='cssch card')
aecomp2.validate()
aecomp2.write_card()
list_type = 'INVALID'
aecomp3 = AECOMP(name, list_type, aelist_ids, comment='cssch card')
with self.assertRaises(RuntimeError):
aecomp3.validate()
name = 'MYCOMP'
list_type = 'AELIST'
lists = 10
model.add_aecomp(name, list_type, lists)
lists = 42.0
with self.assertRaises(TypeError):
AECOMP(name, list_type, lists)
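    # AEFACT stores a plain list of real numbers (chord/span divisions, body
    # stations, ...) that CAEROi/PAEROi and other aero cards reference by id.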
def test_aefact_1(self):
"""checks the AEFACT card"""
data = ['AEFACT', 97, .3, 0.7, 1.0]
log = SimpleLogger(level='warning')
model = BDF(log=log)
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
data = ['AEFACT', 97, .3, 0.7, 1.0]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
data = ['AEFACT', '98', '.3', '0.7', '1.0']
model.add_card(data, data[0], COMMENT_GOOD, is_list=True)
msg = '$this is a bad comment\nAEFACT 97 .3 .7 1.\n'
aefact97 = model.aefacts[97]
aefact98 = model.aefacts[98]
self.assertTrue(all(aefact97.fractions == [.3, .7, 1.0]))
self.assertTrue(all(aefact98.fractions == [.3, .7, 1.0]))
out = aefact97.write_card(8, None)
self.assertEqual(msg, out)
msg = '$this is a good comment\nAEFACT 98 .3 .7 1.\n'
out = aefact98.write_card(8, None)
self.assertEqual(msg, out)
#data = ['AEFACT', 99, .3, 0.7, 1.0, None, 'cat']
#with self.assertRaises(SyntaxError):
#model.add_card(data, data[0], comment_good, is_list=True)
#data = ['AEFACT', 100, .3, 0.7, 1.0, 'cat']
#with self.assertRaises(SyntaxError):
#model.add_card(data, data[0], comment_good, is_list=True)
#data = ['AEFACT', 101, .3, 0.7, 1.0, 2]
#with self.assertRaises(SyntaxError):
#model.add_card(data, data[0], comment_good, is_list=True)
fractions = [1., 2., 3.]
aefact = AEFACT(200, fractions, comment='')
aefact.validate()
aefact.write_card()
#model = BDF()
#aefact.cross_reference(model)
#aefact.write_card()
#aefact.uncross_reference()
#aefact.write_card()
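    # AELINK makes a dependent control variable a linear combination of
    # independent AESTAT/AESURF labels via linking coefficients.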
    def test_aelink_1(self):
        """checks the AELINK card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
idi = 10
label = 'CS'
independent_labels = ['A', 'B', 'C']
linking_coefficients = [1.0, 2.0]
aelink = AELINK(idi, label, independent_labels, linking_coefficients, comment='')
assert aelink.aelink_id == idi
with self.assertRaises(RuntimeError):
aelink.validate()
str(aelink)
aelink.write_card()
card = ['AELINK', idi, label, independent_labels[0], linking_coefficients[0],
independent_labels[1], linking_coefficients[1], independent_labels[2]]
with self.assertRaises(AssertionError):
model.add_card(card, 'AELINK')
card = ['AELINK', idi, label, independent_labels[0], linking_coefficients[0],
independent_labels[1], linking_coefficients[1]]
model.add_card(card, 'AELINK', comment='cat')
#print(model.aelinks[idi])
assert model.aelinks[idi][0].comment == '$cat\n', 'comment=%r' % str(model.aelinks[idi][0].comment)
#-------------------------------
idi = 11
label = 'LABEL'
independent_labels = ['pig', 'frog', 'dog']
linking_coefficients = []
aelink2 = model.add_aelink(idi, label, independent_labels, linking_coefficients)
with self.assertRaises(RuntimeError):
model.validate()
aelink2.linking_coefficients = [1.0, 2.0, 3.0]
assert aelink2.linking_coefficients == [1., 2., 3.]
#-------------------------------
idi = 'ALWAYS'
label = 'LABEL'
independent_labels = ['pig', 'frog', 'dog']
linking_coefficients = [1.0, 2.0, 3.0]
model.add_aelink(idi, label, independent_labels, linking_coefficients)
model.validate()
model.cross_reference()
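    # AELIST collects aerodynamic box ids (with THRU expansion) for use by
    # AESURF control surfaces and splines.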
def test_aelist_1(self):
"""checks the AELIST card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
data = ['AELIST', 75, 1001, 'THRU', 1075, 1101, 'THRU', 1109, 1201, 1202]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
elements = list(range(1001, 1076)) + list(range(1101, 1110)) + [1201, 1202]
aelist = AELIST(74, elements)
aelist.validate()
aelist.write_card()
aelist75 = model.aelists[75]
#print(aelist.elements)
#print(elements)
self.assertTrue(elements == aelist75.elements)
elements = list(range(1001, 1076)) + list(range(1101, 1110)) + [1108, 1202]
data = ['AELIST', 76, 1001, 'THRU', 1075, 1101, 'THRU', 1109, 1108, 1202]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
aelist76 = model.aelists[76]
#print(aelist76 .elements)
#print(elements)
self.assertFalse(elements == aelist76.elements)
elements = list(set(elements))
elements.sort()
self.assertTrue(elements == aelist76.elements)
elements = [1000, 1000, 1000, 2000, 1000, 2000]
aelist = AELIST(75, elements)
aelist.clean_ids()
str(aelist.write_card())
elements = 42
AELIST(76, elements)
elements = 42.0
with self.assertRaises(TypeError):
AELIST(77, elements)
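    # AEPARM defines a general aerodynamic controller (label plus units),
    # e.g. a thrust parameter, as an extra trim degree of freedom.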
def test_aeparm_1(self):
"""checks the AEPARM card"""
aeparm_id = 100
aeparm = AEPARM.add_card(BDFCard(['AEPARM', aeparm_id, 'THRUST', 'lb']),
comment='aeparm_comment')
model = BDF(debug=False)
aeparm = model.add_aeparm(aeparm_id, 'THRUST', 'lb', comment='aeparm_comment')
assert aeparm.aeparm_id == aeparm_id
aeparm.validate()
aeparm.cross_reference(None)
aeparm.uncross_reference()
aeparm.safe_cross_reference(None)
aeparm.write_card()
save_load_deck(model)
# def test_aestat_1(self):
# def test_aesurf_1(self):
def test_aesurfs_1(self):
"""checks the AESURFS cards"""
aesid = 6001
label = 'ELEV'
list1 = 6002
list2 = 6003
card = ['AESURFS', aesid, label, None, list1, None, list2]
bdf_card = BDFCard(card, has_none=True)
log = SimpleLogger(level='warning')
model = BDF(log=log)
model.add_card(bdf_card, 'AESURFS', comment='aesurfs',
is_list=True, has_none=True)
aesurfs = AESURFS(aesid, label, list1, list2, comment='aesurfs')
str(aesurfs)
aesurfs.write_card()
model.add_set1(6002, [1, 2, 3])
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
model.validate()
save_load_deck(model)
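    # AERO holds the reference velocity/chord/density and symmetry flags for
    # unsteady (flutter/gust) aerodynamics; AEROS is the static-aeroelastic
    # counterpart with cref/bref/sref reference geometry.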
def test_aero_1(self):
"""checks the AERO card"""
acsid = 0.
velocity = None
cref = 1.0
rho_ref = 1.0
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0, sym_xy=0,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
assert aero.is_symmetric_xy is False
assert aero.is_symmetric_xz is False
assert aero.is_anti_symmetric_xy is False
assert aero.is_anti_symmetric_xz is False
#aero.set_ground_effect(True)
#assert aero.is_symmetric_xy is False
#assert aero.is_symmetric_xz is False
#assert aero.is_anti_symmetric_xy is True
#assert aero.is_anti_symmetric_xz is False
#aero.set_ground_effect(False)
#assert aero.is_symmetric_xy is False
#assert aero.is_symmetric_xz is False
#assert aero.is_anti_symmetric_xy is False
#assert aero.is_anti_symmetric_xz is False
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=1, sym_xy=1,
comment='aero card')
assert aero.is_symmetric_xy is True
assert aero.is_symmetric_xz is True
assert aero.is_anti_symmetric_xy is False
assert aero.is_anti_symmetric_xz is False
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=-1, sym_xy=-1,
comment='aero card')
assert aero.is_symmetric_xy is False
assert aero.is_symmetric_xz is False
assert aero.is_anti_symmetric_xy is True
assert aero.is_anti_symmetric_xz is True
aero.set_ground_effect(True)
def test_aero_2(self):
"""checks the AERO card"""
acsid = 0
velocity = None
cref = 1.0
rho_ref = 1.0
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0., sym_xy=0,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0, sym_xy=0.,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0, sym_xy=0.,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
aero = AERO(velocity, cref, rho_ref, acsid=None, sym_xz=0, sym_xy=0,
comment='aero card')
aero.validate()
aero.write_card()
aero.raw_fields()
model = BDF()
aero.cross_reference(model)
aero.write_card()
aero.raw_fields()
aero.uncross_reference()
aero.write_card()
aero.raw_fields()
def test_aeros_1(self):
"""checks the AEROS card"""
#acsid = 0.
#velocity = None
cref = 1.0
bref = 2.0
sref = 100.
acsid = 0
rcsid = 0
aeros = AEROS.add_card(BDFCard(['AERO', acsid, rcsid, cref, bref, sref]))
aeros = AEROS(cref, bref, sref, acsid, rcsid, sym_xz=0, sym_xy=0,
comment='aeros card')
aeros.validate()
aeros.write_card()
aeros.raw_fields()
acsid = None
rcsid = None
sym_xz = None
sym_xy = None
aeros = AEROS(cref, bref, sref, acsid, rcsid, sym_xz=sym_xz, sym_xy=sym_xy,
comment='aeros card')
aeros.validate()
aeros.write_card()
aeros.raw_fields()
cref = 1
bref = 2
sref = 3
acsid = 42.
rcsid = 43.
sym_xz = 44.
sym_xy = 45.
aeros = AEROS(cref, bref, sref, acsid, rcsid, sym_xz=sym_xz, sym_xy=sym_xy)
with self.assertRaises(TypeError):
aeros.validate()
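    # CAERO1 is a trapezoidal doublet-lattice macro panel defined by two
    # leading-edge points (p1, p4) and side chords (x12, x43); NSPAN/NCHORD
    # give uniform box counts while LSPAN/LCHORD reference AEFACT fractions.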
def test_caero1_paneling_nspan_nchord_1(self):
"""checks the CAERO1/PAERO1/AEFACT card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 10000000
caero = model.add_caero1(eid, pid, igroup, p1, x12, p4, x43,
cp=0, nspan=3, lspan=0, nchord=2, lchord=0, comment='')
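        # nchord=2 boxes chordwise x nspan=3 boxes spanwise ->
        # (2+1)*(3+1) = 12 corner points and 2*3 = 6 boxes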
npoints, nelements = caero.get_npanel_points_elements()
npoints_expected = 12 # 4*3
nelements_expected = 6 # 2*3
x, y = caero.xy
chord_expected = np.array([0., 0.5, 1.])
span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
assert np.allclose(x, chord_expected)
assert np.allclose(y, span_expected)
assert npoints_expected == npoints
assert nelements_expected == nelements
def test_caero1_paneling_nspan_lchord(self):
"""checks the CAERO1/PAERO1/AEFACT card"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 10000000
chord_aefact_id = 10000
model.add_aefact(chord_aefact_id, [0., 0.5, 1.0])
caero = model.add_caero1(eid, pid, igroup, p1, x12, p4, x43,
cp=0,
nspan=3, lspan=0,
nchord=0, lchord=chord_aefact_id, comment='')
model.cross_reference()
npoints, nelements = caero.get_npanel_points_elements()
npoints_expected = 12 # 4*3
nelements_expected = 6 # 2*3
assert npoints_expected == npoints
assert nelements_expected == nelements
del model.caeros[eid]
del model.aefacts[chord_aefact_id]
points, elements = caero.panel_points_elements()
x, y = caero.xy
chord_expected = np.array([0., 0.5, 1.])
span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
assert np.allclose(x, chord_expected)
assert np.allclose(y, span_expected)
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
    def test_caero1_paneling_transpose(self):
        """checks the CAERO1 paneling and box numbering"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
#['CAERO1', '2000', '2000', '0', '15', '10', '1', '0', None, '7.314386', '0.', '-0.18288', '1.463854', '8.222755', '1.573341', '-0.18288', '0.365963']
#card_lines = [
#'CAERO1,2000,2000,0,15,10,1,0,1',
#'+,7.314386,0.,-0.18288,1.463854,8.222755,1.573341,-0.18288,0.365963',
#]
#model.add_card(card_lines, 'CAERO1', comment='', ifile=None, is_list=False, has_none=True)
eid = 2000
#caero = model.caeros[eid]
#print(caero.get_stats())
pid = 1
igroup = 1
p1 = [7.3, 0., 0.]
p4 = [8.2, 1.6, 0.]
x12 = 1.4
x43 = 0.3
model.add_paero1(pid, caero_body_ids=None, comment='')
caero = model.add_caero1(
eid, pid, igroup, p1, x12, p4, x43,
cp=0, nspan=5, lspan=0, nchord=2, lchord=0, comment='')
caero.validate()
x, y = caero.xy
x_expected = np.array([0., 0.5, 1.])
y_expected = np.array([0., 0.2, 0.4, 0.6, 0.8, 1.])
assert np.allclose(x, x_expected)
assert np.allclose(y, y_expected)
#print(caero.get_stats())
caero.cross_reference(model)
all_control_surface_name, caero_control_surfaces, out = build_caero_paneling(model)
box_id_to_caero_element_map_expected = {
2000: np.array([0, 3, 4, 1]),
2001: np.array([1, 4, 5, 2]),
2002: np.array([3, 6, 7, 4]),
2003: np.array([4, 7, 8, 5]),
2004: np.array([ 6, 9, 10, 7]),
2005: np.array([ 7, 10, 11, 8]),
2006: np.array([ 9, 12, 13, 10]),
2007: np.array([10, 13, 14, 11]),
2008: np.array([12, 15, 16, 13]),
2009: np.array([13, 16, 17, 14]),
}
for key, data in out.box_id_to_caero_element_map.items():
assert np.array_equal(data, box_id_to_caero_element_map_expected[key])
all_control_surface_name, caero_control_surfaces, out = build_caero_paneling(model)
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
#x = 1
def test_caero1_paneling_multi(self):
"""checks the CAERO1/PAERO1/AEFACT card"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 1000
chord_aefact_id = 10000
model.add_aefact(chord_aefact_id, [0., 0.5, 1.0])
caero1a = model.add_caero1(eid, pid, igroup, p1, x12, p4, x43,
cp=0,
nspan=3, lspan=0,
nchord=0, lchord=chord_aefact_id, comment='')
eid = 2000
p1 = [1., 16., 0.]
p4 = [1., 30., 0.]
x12 = 1.
x43 = 1.
caero1b = model.add_caero1(eid, pid, igroup, p1, x12, p4, x43,
cp=0,
nspan=3, lspan=0,
nchord=2, lchord=0, comment='')
model.cross_reference()
npoints, nelements = caero1a.get_npanel_points_elements()
npoints_expected = 12 # 4*3
nelements_expected = 6 # 2*3
assert npoints_expected == npoints
assert nelements_expected == nelements
del model.caeros[eid]
del model.aefacts[chord_aefact_id]
#points, elements = caero.panel_points_elements()
#x, y = caero.xy
#chord_expected = np.array([0., 0.5, 1.])
#span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
#assert np.allclose(x, chord_expected)
#assert np.allclose(y, span_expected)
if IS_MATPLOTLIB:
caero1a.plot(ax)
caero1b.plot(ax)
fig.show()
x = 1
def test_caero1_paneling_nspan_nchord_2(self):
"""checks the CAERO1/PAERO1/AEFACT card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
# basic
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
fig, ax = _setup_aero_plot(fig_id=None)
unused_paero = model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 1000
aelist_id = 10
aesurf_id = 10
caero = model.add_caero1(eid, pid, igroup, p1, x12, p4, x43,
cp=0, nspan=3, lspan=0, nchord=1, lchord=0, comment='')
x, y = caero.xy
chord_expected = np.array([0., 1.])
span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
assert np.allclose(x, chord_expected)
assert np.allclose(y, span_expected)
elements = [1001, 1003, 1005]
unused_aelist = model.add_aelist(aelist_id, elements)
label = 'FLAP'
cid1 = 0
alid1 = aelist_id
unused_aesurf = model.add_aesurf(
aesurf_id, label, cid1, alid1, cid2=None, alid2=None,
eff=1.0, ldw='LDW', crefc=1.0, crefs=1.0, pllim=-np.pi/2., pulim=np.pi/2.,
hmllim=None, hmulim=None, tqllim=None, tqulim=None, comment='')
model.cross_reference()
points, elements = caero.panel_points_elements()
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
def test_caero3_paneling(self):
"""checks the CAERO3/PAERO1/AEFACT card"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
#model.add_paero1(pid, caero_body_ids=None, comment='')
ncontrol_surfaces = 0
nbox = 7
x = []
y = []
model.add_paero3(pid, nbox, ncontrol_surfaces, x, y, comment='')
eid = 1000
list_w = None
caero = model.add_caero3(eid, pid, list_w, p1, x12, p4, x43,
cp=0, list_c1=None, list_c2=None, comment='')
caero.validate()
caero.cross_reference(model)
npoints, nelements = caero.get_npanel_points_elements()
npoints_expected = 24 # (2+1)*(7+1)
nelements_expected = 14 # 2*7
# hardcoded
nchord_elements = 2
nchord_points = nchord_elements + 1
x2, y2 = caero.xy
#chord_expected = np.array([0., 0.5, 1.])
#span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
#assert np.allclose(x, chord_expected)
#assert np.allclose(y, span_expected)
assert npoints_expected == npoints
assert nelements_expected == nelements
points, elements = caero.panel_points_elements()
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
x = 1
def test_caero4_paneling(self):
"""checks the CAERO4/PAERO4 card"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
#model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 100
caero = model.add_caero4(eid, pid, p1, x12, p4, x43,
cp=0, nspan=2, lspan=0, comment='')
docs = []
caocs = []
gapocs = []
paero = model.add_paero4(pid, docs, caocs, gapocs,
cla=0, lcla=0, circ=0, lcirc=0, comment='')
paero.validate()
caero.validate()
caero.cross_reference(model)
npoints, nelements = caero.get_npanel_points_elements()
        npoints_expected = 6  # (nspan+1)*2 = 3*2
        nelements_expected = 2  # nspan*1
x, y = caero.xy
#chord_expected = np.array([0., 0.5, 1.])
#span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
#assert np.allclose(x, chord_expected)
#assert np.allclose(y, span_expected)
assert npoints_expected == npoints
assert nelements_expected == nelements
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
x = 1
def test_caero5_paneling(self):
"""checks the CAERO4/PAERO4 card"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
#model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 100
nspan = 10
caero = model.add_caero5(eid, pid, p1, x12, p4, x43, cp=0,
nspan=nspan, lspan=0, ntheory=0, nthick=0, comment='')
caoci = []
paero = model.add_paero5(pid, caoci, nalpha=0, lalpha=0,
nxis=0, lxis=0, ntaus=0, ltaus=0, comment='')
paero.validate()
caero.validate()
caero.cross_reference(model)
npoints, nelements = caero.get_npanel_points_elements()
npoints_expected = (nspan + 1) * 2
nelements_expected = nspan # 2*1
assert npoints_expected == npoints
assert nelements_expected == nelements
#x, y = caero.xy
#chord_expected = np.array([0., 0.5, 1.])
#span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
#assert np.allclose(x, chord_expected)
#assert np.allclose(y, span_expected)
all_control_surface_name, caero_control_surfaces, out = build_caero_paneling(model)
box_id_to_caero_element_map_expected = {
100: np.array([0, 2, 3, 1]),
101: np.array([2, 4, 5, 3]),
102: np.array([4, 6, 7, 5]),
103: np.array([6, 8, 9, 7]),
104: np.array([ 8, 10, 11, 9]),
105: np.array([10, 12, 13, 11]),
106: np.array([12, 14, 15, 13]),
107: np.array([14, 16, 17, 15]),
108: np.array([16, 18, 19, 17]),
109: np.array([18, 20, 21, 19]),
}
assert len(box_id_to_caero_element_map_expected) == len(out.box_id_to_caero_element_map)
for key, data in out.box_id_to_caero_element_map.items():
expected_data = box_id_to_caero_element_map_expected[key]
assert np.array_equal(data, expected_data)
points, elements = caero.panel_points_elements()
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
x = 1
def test_caero7_paneling(self):
"""checks the CAERO7/PAERO7T card"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
#model.add_paero7
eid = 100
chord_aefact_id = 10000
model.add_aefact(chord_aefact_id, [0., 0.5, 1.0])
label = 'panel'
nspan = 2
nchord = 4
caero = model.add_caero7(eid, label, p1, x12, p4, x43, cp=0,
nspan=nspan, nchord=nchord, lspan=0,
p_airfoil=None, ztaic=None, comment='')
model.cross_reference()
npoints, nelements = caero.get_npanel_points_elements()
npoints_expected = (nspan + 1) * (nchord + 1)
nelements_expected = nspan * nchord
#npoints_expected = 15 # 4*3
#nelements_expected = 8 # 2*3
assert npoints_expected == npoints
assert nelements_expected == nelements
points, elements = caero.panel_points_elements()
x, y = caero.xy
chord_expected = np.array([0., 0.25, 0.5, 0.75, 1.])
span_expected = np.array([0., 0.5, 1.])
assert np.allclose(x, chord_expected)
assert np.allclose(y, span_expected)
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
all_control_surface_name, caero_control_surfaces, out = build_caero_paneling(model)
box_id_to_caero_element_map_expected = {
100: np.array([0, 5, 6, 1]),
101: np.array([1, 6, 7, 2]),
102: np.array([2, 7, 8, 3]),
103: np.array([3, 8, 9, 4]),
104: np.array([5, 10, 11, 6]),
105: np.array([6, 11, 12, 7]),
106: np.array([7, 12, 13, 8]),
107: np.array([8, 13, 14, 9]),
}
assert len(box_id_to_caero_element_map_expected) == len(out.box_id_to_caero_element_map)
for box_id, data in out.box_id_to_caero_element_map.items():
expected_data = box_id_to_caero_element_map_expected[box_id]
assert np.array_equal(data, expected_data)
x = 1
def test_caero1_1(self):
"""checks the CAERO1/PAERO1/AEROS/AEFACT card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
model.set_error_storage(nparse_errors=0, stop_on_parsing_error=True,
nxref_errors=0, stop_on_xref_error=True)
eid = 1
pid = 10
cp = 4
nspan = None
lspan = 3
nchord = None
lchord = 4
igid = 0
p1 = [0., 0., 0.]
x12 = 5.
p4 = [2., 3., 4.]
x43 = 1.
caero1a = CAERO1.add_card(BDFCard(['CAERO1', eid, pid, cp, nspan, nchord, lspan, lchord,
igid, ] + p1 + [x12] + p4 + [x43]))
caero1a.validate()
eid = 2
caero1b = CAERO1.add_card(BDFCard(['CAERO1', eid, pid, None, nspan, nchord, lspan, lchord,
igid, ] + p1 + [x12] + p4 + [x43]))
caero1b.validate()
eid = 1
caero1c = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
caero1c.raw_fields()
caero1c.validate()
caero1c.write_card()
model.caeros[eid] = caero1c
eid = 4
p1 = [0., 0., 0.]
p2 = [1., 0., 0.]
p3 = [0.2, 1., 0.]
p4 = [0.1, 1., 0.]
nspan = 5
nchord = 10
igid = -1
caero1d = CAERO1.add_quad(eid, pid, nspan, nchord, igid, p1, p2, p3, p4,
cp=cp, spanwise='y', comment='')
caero1d.validate()
eid = 5
span = 0.1
chord = 0.05
igid = -1
caero1e = CAERO1.add_quad(eid, pid, span, chord, igid, p1, p2, p3, p4,
cp=cp, spanwise='y', comment='')
caero1e.validate()
eid = 6
p1 = [0., 0., 0.]
p2 = [1., 0., 0.]
p3 = [0.2, 0., 1.]
p4 = [0.1, 0., 1.]
span = 0.1
chord = 0.05
igid = -1
caero1f = CAERO1.add_quad(eid, pid, span, chord, igid, p1, p2, p3, p4,
cp=cp, spanwise='z', comment='')
caero1f.validate()
caero1f.flip_normal()
coord = CORD2R(cp, rid=0, origin=None, zaxis=None, xzplane=None,
comment='')
coord.validate()
model.coords[cp] = coord
eid = 7
p1 = [0., 0., 0.]
p2 = [1., 0., 0.]
p3 = [0.2, 0., 1.]
p4 = [0.1, 0., 1.]
span = 0.1
chord = 0.05
igid = -1
cp = None
caero1_no_coord = CAERO1.add_quad(eid, pid, span, chord, igid,
p1, p2, p3, p4,
cp=cp, spanwise='z', comment='')
caero1_no_coord.get_points()
# caero1c is set as eid=1
model.validate()
# ------------------------------------------------
eid = 1000
igroup = 1
lspan_lchord = 1
fractions = np.linspace(0., 1., num=11)
model.add_aefact(lspan_lchord, fractions, comment='')
model.add_caero1(eid, pid, igroup, p1, x12, p4, x43, cp=0,
nspan=0, lspan=lspan_lchord,
nchord=0, lchord=lspan_lchord, comment='')
paero = PAERO1(pid, caero_body_ids=None, comment='')
paero.validate()
paero.write_card()
model.paeros[pid] = paero
#acsid = 0.
#velocity = None
cref = 1.0
bref = 2.0
sref = 100.
aeros = model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0,
sym_xy=0, comment='aeros')
aeros.validate()
aeros.write_card()
model.aeros = aeros
aefact = AEFACT(lspan, [0., 1., 2., 3., 4., 5.])
aefact.validate()
model.aefacts[lspan] = aefact
aefact = AEFACT(lchord, [2., 3., 4., 5., 6., 7.])
aefact.validate()
model.aefacts[lchord] = aefact
paero.cross_reference(model)
caero1c.cross_reference(model)
caero1c.get_npanel_points_elements()
caero1c.get_points()
caero1c.panel_points_elements()
caero1c.write_card()
model.uncross_reference()
model.cross_reference()
model.uncross_reference()
#model.safe_cross_reference()
xref_errors = defaultdict(list)
caero1c.safe_cross_reference(model, xref_errors)
caero1c.panel_points_elements()
caero1c.raw_fields()
min_max_eid = caero1c.min_max_eid
self.assertEqual(min_max_eid, [1, 26])
#print('min_eid, max_eid', min_eid, max_eid)
points = [
[0., 0., 0.], # p1
[10., 0., 0.],
[10., 20., 0.],
[5., 20., 0.],
]
caero1c.set_points(points)
caero1c.get_points()
str(caero1c.write_card())
nspan = None
lspan = None
caero1h = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=None,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
with self.assertRaises(ValueError):
caero1h.validate()
nspan = 5
lspan = 5
caero1i = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
with self.assertRaises(ValueError):
caero1i.validate()
nspan = 5
nchord = None
lchord = None
caero1j = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
with self.assertRaises(ValueError):
caero1j.validate()
nchord = 10
lchord = 10
caero1k = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
with self.assertRaises(ValueError):
caero1k.validate()
lspan = None
lchord = None
nspan = 10
nchord = 10
p1 = [0., 0., 0., 0.]
caero1l = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
with self.assertRaises(AssertionError):
caero1l.validate()
p1 = [0., 0., 0.]
p4 = [1., 2., 3., 4.]
caero1m = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
with self.assertRaises(AssertionError):
caero1m.validate()
p4 = [1., 2., 3.]
eid = 8
nspan = 1
nchord = 1
lchord = None
lspan = None
caero1_1by1 = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
caero1_1by1.validate()
assert caero1_1by1.shape == (1, 1)
caero1_1by1.get_points()
p1 = [0., 0., 0.]
p4 = [0., 10., 0.]
x12 = 1.
x43 = 1.
eid = 1
nspan = 3
nchord = 2
lchord = None
lspan = None
caero1_2x3 = CAERO1(eid, pid, igid, p1, x12, p4, x43, cp=cp,
nspan=nspan, lspan=lspan, nchord=nchord, lchord=lchord,
comment='caero1')
caero1_2x3.validate()
assert caero1_2x3.shape == (2, 3), caero1_2x3.shape
caero1_2x3._init_ids()
points = caero1_2x3.get_points()
assert len(points) == 4
save_load_deck(model)
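    # The SPLINEx cards tie aerodynamic boxes to structural grids (SET1):
    # SPLINE1/SPLINE4 are surface splines, SPLINE2/SPLINE5 are linear (beam)
    # splines, and SPLINE3 is a user-defined constraint relation.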
def test_spline1(self):
"""checks the SPLINE1 card"""
eid = 1
caero_id = 100
box1 = 1
box2 = 10
setg = 42
spline = SPLINE1(eid, caero_id, box1, box2, setg, dz=0., method='IPS',
usage='BOTH', nelements=10,
melements=10, comment='$ spline1')
spline.validate()
spline.write_card(size=8, is_double=False)
spline.raw_fields()
log = SimpleLogger(level='warning')
model = BDF(log=log)
model.splines[eid] = spline
pid = 10
igid = 1
p1 = [0., 0., 0.]
p4 = [0., 10., 0.]
x12 = 4.
x43 = 3.
cid = 1
caero1 = model.add_caero1(caero_id, pid, igid, p1, x12, p4, x43,
cp=cid, nspan=5,
lspan=0, nchord=6, lchord=0,
comment='')
caero_body_ids = [3]
unused_paero = model.add_paero1(pid, caero_body_ids=caero_body_ids, comment='')
origin = None
zaxis = None
xzplane = None
model.add_cord2r(cid, origin, zaxis, xzplane, rid=0, comment='')
velocity = 0.0
cref = 1.0
rho_ref = 1.225
model.add_aero(velocity, cref, rho_ref, acsid=0, sym_xz=0, sym_xy=0,
comment='')
setg = 42
ids = [100, 101, 102]
model.add_set1(setg, ids, is_skin=False, comment='')
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [0., 0., 0.])
model.add_grid(102, [0., 0., 0.])
#------------------
# CAERO2
eid = 3
caero = 3
id1 = 21
id2 = 35
setg = 43
spline2 = model.add_spline2(eid, caero, id1, id2, setg, dz=0.0, dtor=1.0, cid=1,
dthx=None, dthy=None, usage='BOTH', comment='')
spline2.validate()
pid = 3
caero2 = model.add_caero2(caero, pid, igid, p1, x12, cp=1, nsb=4,
nint=4, lsb=0, lint=0, comment='')
caero2.validate()
orient = 'ZY'
width = 1.0
AR = 2.0
thi = []
thn = []
paero2 = model.add_paero2(pid, orient, width, AR, thi, thn,
lrsb=10, lrib=None, lth=None, comment='')
paero2.validate()
sid = 10
Di = [0., 1.0, 2.0, 3.0, 0.]
aefact = model.add_aefact(sid, Di, comment='')
aefact.validate()
model.add_set1(setg, ids, is_skin=False, comment='')
model.cross_reference(model)
caero1.panel_points_elements()
caero2.get_points_elements_3d()
save_load_deck(model)
def test_spline2(self):
"""checks the SPLINE2 card"""
#+---------+------+-------+-------+-------+------+----+------+-----+
#| SPLINE2 | EID | CAERO | ID1 | ID2 | SETG | DZ | DTOR | CID |
#| | DTHX | DTHY | None | USAGE | | | | |
#+---------+------+-------+-------+-------+------+----+------+-----+
#| SPLINE2 | 5 | 8 | 12 | 24 | 60 | 0. | 1.0 | 3 |
#| | 1. | | | | | | | |
#+---------+------+-------+-------+-------+------+----+------+-----+
cid = 3
origin = [0., 0., 0.]
xaxis = [1., 0., 0.]
xyplane = [0., 1., 0.]
coord = CORD2R.add_axes(cid, rid=0, origin=origin,
xaxis=xaxis, yaxis=None, zaxis=None,
xyplane=xyplane, yzplane=None, xzplane=None,
comment='comment')
eid = 8
pid = 10
cp = 0
nsb = 4
nint = 2
lsb = None
lint = None
p1 = [0., 0., 0.]
x12 = 42.
igid = None
caero2 = CAERO2(eid, pid, igid, p1, x12,
cp=cp, nsb=nsb, nint=nint, lsb=lsb, lint=lint,
comment='this is a caero')
#caero = CAERO2(eid, pid, cp, nsb, nint, lsb, lint, igid, p1, x12)
sid = 60
ids = [7, 13]
set_obj = SET1(sid, ids, is_skin=False, comment='set card')
log = SimpleLogger(level='warning')
model = BDF(log=log)
model._add_coord_object(coord)
model._add_caero_object(caero2)
model._add_set_object(set_obj)
model.add_grid(7, [7., 0., 0.], cp=0, cd=0, ps='', seid=0, comment='')
model.add_grid(13, [13., 0., 0.], cp=0, cd=0, ps='', seid=0, comment='')
#model._add_node_object(grid7)
#model._add_node_object(grid13)
eid = 5
caero = 8
id1 = 12
id2 = 24
setg = 60
dz = 0.
dtor = 1.0
cid = 3
dthx = 1.
dthy = None
usage = None
card = ['SPLINE2', eid, caero, id1, id2, setg, dz, dtor, cid,
dthx, dthy, None, usage]
bdf_card = BDFCard(card, has_none=True)
spline_a = SPLINE2.add_card(bdf_card, comment='spline2_a')
spline_a.write_card()
spline_a.raw_fields()
spline_b = SPLINE2(eid, caero, id1, id2, setg, dz, dtor, cid, dthx,
dthy, usage, comment='spline2_b')
spline_b.validate()
spline_b.write_card()
spline_b.cross_reference(model)
spline_b.write_card()
#model.cross_reference()
#model.uncross_reference()
#model.safe_cross_reference()
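    # CAERO2 models a slender body (fuselage/store) for the doublet-lattice
    # method with body interference; PAERO2 supplies its cross-section and
    # interference data.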
def test_caero2_1(self):
"""checks the CAERO2/PAERO2/AERO/AEFACT card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
eid = 1
pid = 10
cp = 4
nsb = 0
nint = 0
lsb = 3
lint = 6
igid = 0
p1 = [0., 1., 2.]
x12 = 10.
CAERO2.add_card(BDFCard(['CAERO2', eid, pid, cp, nsb, nint,
lsb, lint, igid, ] + p1 + [x12]))
#---------------
# nsb=lsb=None=0
caero2b = CAERO2(eid, pid, igid, p1, x12,
cp=cp, nsb=None, nint=None, lsb=None, lint=6,
comment='this is a caero')
with self.assertRaises(ValueError):
caero2b.validate()
# nint=lint=None=0
caero2c = CAERO2(eid, pid, igid, p1, x12,
cp=cp, nsb=3, nint=None, lsb=3, lint=None,
comment='this is a caero')
with self.assertRaises(ValueError):
caero2c.validate()
# they're all bad?
caero2e = CAERO2(eid, pid, igid, p1, x12,
cp=cp, nsb=0, nint=0, lsb=0, lint=0,
comment='this is a caero')
with self.assertRaises(ValueError):
caero2e.validate()
#---------------
caero2f = model.add_caero2(eid, pid, igid, p1, x12, cp=4, nsb=0, nint=0,
lsb=3, lint=6, comment='this is a caero')
eid = 200
caero2g = model.add_caero2(eid, pid, igid, p1, x12, cp=4, nsb=10, nint=7,
lsb=0, lint=0, comment='this is a caero')
caero2f.validate()
caero2g.validate()
#str(caero2f.write_card())
aefact = AEFACT.add_card(BDFCard(['AEFACT', lint, 0., 1., 2., 3., 4., 5.]))
aefact = AEFACT(lint, [0., 1., 2., 3., 4., 5.])
aefact.validate()
aefact.write_card()
model.aefacts[lint] = aefact
orient = 'Z'
width = 10.
AR = 2.
lrsb = 0
lrib = 3
lth1 = 0
lth2 = 0
lth = [lth1, lth2]
thi = [0]
thn = [0]
paero2a = PAERO2.add_card(BDFCard(['PAERO2', pid, orient, width, AR,
lrsb, lrib] + lth + thi + thn),
comment='paero2')
paero2a.validate()
paero2b = model.add_paero2(pid, orient, width, AR, thi, thn,
lrsb=0, lrib=3, lth=lth, comment='paero2')
pid = 42
paero2c = model.add_paero2(pid, orient, width, AR, thi, thn,
lrsb=None, lrib=None, lth=None, comment='')
paero2b.validate()
paero2c.validate()
paero2b.write_card()
#model.paeros[pid] = paero
coord = CORD2R.add_card(BDFCard(['CORD2R', cp, 0,
0., 0., 0.,
0., 0., 1.,
1., 0., 0.]))
coord = CORD2R(cp, rid=0, origin=None, zaxis=None, xzplane=None,
comment='')
coord.validate()
model.coords[cp] = coord
aefact = AEFACT(lrib, [0., 1., 2., 3., 4., 5.])
aefact.validate()
model.aefacts[lrib] = aefact
acsid = 0
velocity = None
cref = 1.0
rho_ref = 1.0
aero = AERO.add_card(BDFCard(['AERO', acsid, velocity, cref, rho_ref]))
aero = AERO(velocity, cref, rho_ref, acsid=acsid,
comment='aero')
aero.validate()
aero.write_card()
model.aero = aero
model.cross_reference()
model.write_bdf('aero.temp1')
paero2b.raw_fields()
caero2f.raw_fields()
model.uncross_reference()
model.write_bdf('aero.temp2')
model.cross_reference()
model.write_bdf('aero.temp3')
caero2f.raw_fields()
caero2f.get_points_elements_3d()
caero2f.get_points()
unused_xyz, unused_elems = caero2f.get_points_elements_3d()
caero2g.get_points()
caero2g.get_points_elements_3d()
unused_xyz, unused_elems = caero2g.get_points_elements_3d()
model.uncross_reference()
model.safe_cross_reference()
model.uncross_reference()
model.write_bdf('aero.temp4')
model.cross_reference()
model.write_bdf('aero.temp5')
os.remove('aero.temp1')
os.remove('aero.temp2')
os.remove('aero.temp3')
os.remove('aero.temp4')
os.remove('aero.temp5')
nsb = 4
nint = 2
lsb = None
lint = None
caero2 = CAERO2(eid, pid, igid, p1, x12,
cp=cp, nsb=nsb, nint=nint, lsb=lsb, lint=lint,
comment='this is a caero')
caero2.validate()
caero2.cross_reference(model)
caero2.write_card()
#model.cross_reference()
model.uncross_reference()
model.safe_cross_reference()
caero2_set_points = CAERO2(eid, pid, igid, p1, x12,
cp=cp, nsb=nsb, nint=nint, lsb=lsb, lint=lint)
p1 = [0., 0., 0.]
p2 = [1., 2., 3.]
caero2_set_points.set_points([p1, p2])
assert np.allclose(caero2_set_points.x12, 1.), caero2_set_points.x12
save_load_deck(model)
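    # CAERO3 is a Mach-box aerodynamic panel, CAERO4 a strip-theory surface
    # and CAERO5 a piston-theory surface; each pairs with its PAEROi property.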
def test_caero3_1(self):
"""checks the CAERO3/PAERO3"""
eid = 100
pid = 200
cp = 4
list_w = 5
list_c1 = 6
list_c2 = 7
p1 = [0., 0., 0.]
x12 = 10.
p4 = [5., 10., 0.]
x43 = 3.
nbox = 10
ncontrol_surfaces = 0
x = []
y = []
log = SimpleLogger(level='warning')
model = BDF(log=log)
coord = CORD2R.add_card(BDFCard(['CORD2R', cp, 0,
0., 0., 0.,
0., 0., 1.,
1., 0., 0.]))
origin = None
zaxis = None
xzplane = None
model.add_cord2r(cp, origin, zaxis, xzplane, rid=0, comment='cord2r')
coord.validate()
model.coords[cp] = coord
paero3 = model.add_paero3(pid, nbox, ncontrol_surfaces, x, y,
comment='paero3')
paero3.validate()
paero3.raw_fields()
card = ['CAERO3', 2000, 20001, 0, 22, 33, None, None, None,
1.0, 0.0, 0., 100., 17., 130., 0., 100.]
bdf_card = BDFCard(card, has_none=True)
caero3a = CAERO3.add_card(bdf_card, comment='msg')
caero3a.validate()
caero3a.write_card()
caero3a.raw_fields()
caero3b = model.add_caero3(eid, pid, list_w,
p1, x12, p4, x43,
cp=cp, list_c1=list_c1, list_c2=list_c2,
comment='caero3')
caero3b.validate()
aefact_sid = list_w
Di = [0., 0.5, 1.]
model.add_aefact(aefact_sid, Di, comment='aefact')
aefact_sid = list_c1
model.add_aefact(aefact_sid, Di, comment='aefact2')
aefact_sid = list_c2
model.add_aefact(aefact_sid, Di, comment='aefact2')
velocity = 100.
cref = 1.0
rho_ref = 1.0
model.add_aero(velocity, cref, rho_ref)
model.validate()
caero3b.write_card()
caero3b.cross_reference(model)
caero3b.write_card()
caero3a.raw_fields()
caero3b.uncross_reference()
caero3b.write_card()
caero3a.raw_fields()
xref_errors = defaultdict(list)
caero3b.safe_cross_reference(model, xref_errors)
npoints_expected = 33
nelements_expected = 20
npoints, nelements = caero3b.get_npanel_points_elements()
assert npoints == npoints_expected
assert nelements == nelements_expected
caero3b.get_points()
caero3b.panel_points_elements()
if IS_MATPLOTLIB:
fig, ax = _setup_aero_plot()
caero3b.plot(ax)
fig.show()
model.get_bdf_stats()
save_load_deck(model, run_convert=True, run_mirror=False)
def test_paero3(self):
"""checks the PAERO3"""
# +--------+------+------+-------+------+-----+------+------+------+
# | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
# +========+======+======+=======+======+=====+======+======+======+
# | PAERO3 | PID | NBOX | NCTRL | | X5 | Y5 | X6 | Y6 |
# +--------+------+------+-------+------+-----+------+------+------+
# | | X7 | Y7 | X8 | Y8 | X9 | Y9 | X10 | Y10 |
# +--------+------+------+-------+------+-----+------+------+------+
# | | X11 | Y11 | X12 | Y12 | | | | |
# +--------+------+------+-------+------+-----+------+------+------+
# | PAERO3 | 2001 | 15 | 1 | | 0. | 65. | | |
# +--------+------+------+-------+------+-----+------+------+------+
# | | 78. | 65. | 108. | 65. | 82. | 97.5 | 112. | 97.5 |
# +--------+------+------+-------+------+-----+------+------+------+
# | | 86. | 130. | 116. | 130. | | | | |
# +--------+------+------+-------+------+-----+------+------+------+
fields = ['PAERO3', 2001, 15, 1, None, 0., 65., None, None,
78., 65., 108., 65., 82., 97.5, 112., 97.5,
86., 130., 116., 130.]
log = SimpleLogger(level='warning')
model = BDF(log=log)
model.add_card(fields, fields[0])
paero = model.paeros[2001]
assert paero.npoints == 8, paero.npoints
paero.raw_fields()
def test_paero4(self):
"""checks the PAERO4"""
# +--------+------+-------+--------+-------+-------+--------+--------+--------+
# | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
# +========+======+=======+========+=======+=======+========+========+========+
# | PAERO4 | PID | CLA | LCLA | CIRC | LCIRC | DOC1 | CAOC1 | GAPOC1 |
# +--------+------+-------+--------+-------+-------+--------+--------+--------+
# | | DOC2 | CAOC2 | GAPOC2 | DOC3 | CAOC3 | GAPOC3 | etc. | |
# +--------+------+-------+--------+-------+-------+--------+--------+--------+
# | PAERO4 | 6001 | 1 | 501 | 0 | 0 | 0.0 | 0.0 | 0.0 |
# +--------+------+-------+--------+-------+-------+--------+--------+--------+
# | | 0.50 | 0.25 | 0.02 | 0.53 | 0.24 | 0.0 | | |
# +--------+------+-------+--------+-------+-------+--------+--------+--------+
pid = 6001
cla = 1
lcla = 501
circ = 0
lcirc = 0
dcg1 = [0., 0., 0.]
dcg2 = [0.5, 0.25, 0.02]
dcg3 = [0.53, 0.24, 0.]
card = ['PAERO4', pid, cla, lcla, circ, lcirc] + dcg1 + dcg2 + dcg3
bdf_card = BDFCard(card, has_none=True)
paero4 = PAERO4.add_card(bdf_card, comment='msg')
str(paero4)
paero4.cross_reference(None)
def test_caero4_1(self):
"""checks the CAERO4/PAERO4"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
pid = 1001
docs = []
caocs = []
gapocs = []
paero4 = model.add_paero4(pid, docs, caocs, gapocs,
cla=0, lcla=0, circ=0, lcirc=0,
comment='paero4')
paero4.validate()
paero4.raw_fields()
x1 = 0.
y1 = 0.
z1 = 0.
x12 = 100.
x4 = 50.
y4 = 0.
z4 = 0.
x43 = 10.
eid = 1000
nspan = 4 # number of stations
lspan = 0 # AEFACT
cp = 0
card = ['CAERO4', eid, pid, cp, nspan, lspan, None, None, None,
x1, y1, z1, x12, x4, y4, z4, x43]
bdf_card = BDFCard(card, has_none=True)
caero4a = CAERO4.add_card(bdf_card, comment='msg')
caero4a.validate()
npoints, nelements = caero4a.get_npanel_points_elements()
assert npoints == 10, npoints
assert nelements == 4, nelements
caero4a.write_card()
caero4a.raw_fields()
#caero4a.cross_reference(model)
#points, elements = caero4a.panel_points_elements()
#del points, elements
p1 = [x1, y1, z1]
p4 = [x4, y4, z4]
caero4b = model.add_caero4(eid, pid, p1, x12, p4, x43,
cp=cp, nspan=nspan, lspan=lspan,
comment='caero4b')
caero4b.validate()
caero4b.write_card()
caero4b.raw_fields()
caero4b.cross_reference(model)
caero4b.write_card()
caero4b.raw_fields()
points, elements = caero4b.panel_points_elements()
del points, elements
p1, unused_p2, unused_p3, p4 = caero4b.get_points()
caero4c = CAERO4(eid, pid, p1, x12, p4, x43,
cp=0, nspan=0, lspan=0,
comment='caero4c')
with self.assertRaises(RuntimeError):
# nspan=lspan=0
caero4c.validate()
#model.cross_reference()
model.uncross_reference()
#model.safe_cross_reference()
bdf_filename = StringIO()
model.write_bdf(bdf_filename, close=False)
bdf_filename.seek(0)
model2 = read_bdf(bdf_filename, xref=False, punch=True, debug=False)
model.safe_cross_reference()
model2.safe_cross_reference()
def test_caero5_1(self):
"""checks the CAERO5/PAERO5"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
pid = 6001
caoci = [0., 0.5, 1.0]
paero5 = model.add_paero5(pid, caoci,
nalpha=0, lalpha=0, nxis=0, lxis=0,
ntaus=0, ltaus=0, comment='paero5')
paero5.validate()
#| PAERO5 | PID | NALPHA | LALPHA | NXIS | LXIS | NTAUS | LTAUS |
#+--------+-------+--------+--------+---------+-------+-------+-------+
#| | CAOC1 | CAOC2 | CAOC3 | CAOC4 | CAOC5 | | |
nalpha = 0
lalpha = 0
nxis = 0
lxis = 0
ntaus = 0
ltaus = 0
card = ['PAERO5', pid, nalpha, lalpha, nxis, lxis, ntaus, ltaus, ] + caoci
model = BDF(debug=False)
model.add_card(card, card[0], comment='', is_list=True,
has_none=True)
paero5 = model.paeros[pid]
paero5.raw_fields()
model = BDF(debug=None)
paero5 = model.add_paero5(pid, caoci, nalpha=0, lalpha=0, nxis=0, lxis=0,
ntaus=0, ltaus=0, comment='paero5')
paero5.validate()
eid = 6000
x1 = 0.
y1 = 0.
z1 = 0.
x12 = 1.
x4 = 0.2
y4 = 1.
z4 = 0.
x43 = 0.8
p1 = [x1, y1, z1]
p4 = [x4, y4, z4]
caero5 = model.add_caero5(eid, pid, p1, x12, p4, x43,
cp=0, nspan=5, lspan=0,
ntheory=0, nthick=0,
comment='msg')
model.validate()
lxis = 43
paero5.lxis = lxis
aefact_sid = lxis
Di = [0., 0.5, 1.]
model.add_aefact(aefact_sid, Di, comment='aefact')
ltaus = 44
paero5.ltaus = ltaus
aefact_sid = ltaus
Di = [0., 0.5, 1.]
unused_aefact = model.add_aefact(aefact_sid, Di, comment='aefact2')
#caero5.cross_reference(model)
model.cross_reference()
unused_npoints, unused_nelements = caero5.get_npanel_points_elements()
unused_points, unused_elements = caero5.panel_points_elements()
caero5.write_card()
#caero5.raw_fields()
model.uncross_reference()
caero5.write_card()
model.cross_reference()
model.uncross_reference()
#model.safe_cross_reference()
bdf_filename = StringIO()
model.write_bdf(bdf_filename, close=False)
bdf_filename.seek(0)
read_bdf(bdf_filename, xref=False, punch=True, debug=False)
model.safe_cross_reference()
save_load_deck(model, run_renumber=False, run_test_bdf=False)
#caero5.raw_fields()
# def test_paero1_1(self):
# def test_paero2_1(self):
# def test_paero3_1(self):
# def test_paero4_1(self):
# def test_paero5_1(self):
# def test_spline1_1(self):
# def test_spline2_1(self):
def test_spline3(self):
"""checks the SPLINE3 card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
eid = 100
pid = 10
igid = 1
p1 = [0., 0., 0.]
x12 = x43 = 3.
p4 = [1., 11., 1.]
caero = eid
box_id = 42
components = 3
nids = 5
displacement_components = 3
coeffs = 1.0
model.add_caero1(eid, pid, igid, p1, x12, p4, x43,
cp=0,
nspan=5, lspan=0,
nchord=5, lchord=0, comment='')
model.add_paero1(pid, caero_body_ids=None, comment='')
model.add_grid(5, [0., 0., 0.])
spline_id = 101
spline3 = model.add_spline3(
spline_id, caero, box_id, components, nids,
displacement_components, coeffs, usage='BOTH', comment='spline3')
spline3.validate()
spline3.write_card()
spline3.raw_fields()
spline_id = 102
nids = [5, 6, 7]
displacement_components = [3, 6]
coeffs = [1.0, 2.0]
spline3b = model.add_spline3(
spline_id, caero, box_id, components, nids,
displacement_components, coeffs, usage='failed', comment='spline3')
cref = bref = sref = 1.0
model.add_aeros(cref, bref, sref)
with self.assertRaises(RuntimeError):
spline3b.validate()
spline3b.usage = 'BOTH'
spline3b.displacement_components.append(1)
spline3b.coeffs.append(0.1)
spline3b.validate()
del model.splines[spline_id]
model.validate()
#spline3.cross_reference(model)
model.cross_reference()
spline3.write_card()
spline3.raw_fields()
save_load_deck(model, run_renumber=False)
spline3b.eid = 1000
spline3b.nodes.append(42)
spline3b.displacement_components.append(4)
spline3b.coeffs.append(0.5)
spline3b.validate()
spline3b.comment = ''
lines = spline3b.rstrip().split('\n')
model.add_card(lines, 'SPLINE3', is_list=False)
spline = model.splines[1000]
assert spline.node_ids == [5, 6, 7, 42], spline.node_ids
#spline3.raw_fields()
def test_spline4(self):
"""checks the SPLINE4 card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
eid = 1
caero = 10
aelist = 11
setg = 12
dz = 0.
method = 'TPS'
usage = 'FORCE'
nelements = 4
melements = 5
spline = model.add_spline4(eid, caero, aelist, setg, dz, method, usage,
nelements, melements, comment='spline4')
spline.raw_fields()
elements = [1, 2, 3, 4, 5, 6, 7, 8, 9]
model.add_aelist(aelist, elements)
paero = 20
igid = 42
p1 = [0., 0., 0.]
x12 = 10.
p4 = [0., 10., 0.]
x43 = 3.
model.add_caero1(caero, paero, igid, p1, x12, p4, x43, cp=0, nspan=5,
lspan=0, nchord=10, lchord=0,
comment='')
model.add_paero1(paero)
velocity = None
cref = 1.0
rho_ref = 1.0
model.add_aero(velocity, cref, rho_ref,
comment='')
model.add_set1(setg, [1, 2, 3])
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
eid = 2
setg = 13
ids = [1, 2, 3]
model.add_set1(setg, ids)
model.add_spline4(eid, caero, aelist, setg, dz, method, usage,
nelements, melements, comment='spline4')
spline = model.splines[eid]
del model.splines[eid]
spline.cross_reference(model)
model.pop_parse_errors()
model.pop_xref_errors()
model.validate()
save_load_deck(model)
def test_spline5(self):
"""checks the SPLINE5 card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
eid = 1
caero = 10
aelist = 11
setg = 12
thx = 7.
thy = 8.
#dz = 0.
#method = 'cat'
#usage = 'dog'
#nelements = 4
#melements = 5
#dtor = 47
spline = model.add_spline5(eid, caero, aelist, setg, thx, thy, dz=0., dtor=1.0,
cid=0, usage='BOTH', method='BEAM', ftype='WF2',
rcore=None, comment='spline5')
spline.raw_fields()
elements = [1, 2, 3, 4, 5, 6, 7, 8, 9]
model.add_aelist(aelist, elements)
paero = 20
igid = 42
p1 = [0., 0., 0.]
x12 = 10.
p4 = [0., 10., 0.]
x43 = 3.
model.add_caero1(caero, paero, igid, p1, x12, p4, x43, cp=0, nspan=5,
lspan=0, nchord=10, lchord=0,
comment='')
model.add_paero1(paero)
velocity = None
cref = 1.0
rho_ref = 1.0
model.add_aero(velocity, cref, rho_ref,
comment='')
model.add_set1(setg, [1, 2, 3])
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
model.pop_parse_errors()
model.pop_xref_errors()
model.validate()
model.cross_reference()
model.uncross_reference()
model.safe_cross_reference()
save_load_deck(model)
def test_aesurf_1(self):
"""checks the AESURF/AELIST cards"""
aesid = 10
label = 'FLAP'
cid1 = 0
aelist_id1 = 10
cid2 = None
alid2 = None
aesurf1 = AESURF(aesid, label, cid1, aelist_id1, cid2, alid2,
#eff, ldw,
#crefc, crefs, pllim, pulim,
#hmllim, hmulim, tqllim, tqulim,
comment='aesurf comment')
aesurf2 = AESURF.add_card(BDFCard(
[
'AESURF', aesid, label, cid1, aelist_id1, cid2, alid2,
#eff, ldw,
#crefc, crefs, pllim, pulim,
#hmllim, hmulim, tqllim, tqulim,
]), comment='aesurf comment')
#assert aesurf1 == aesurf2
cid2 = 1
coord = CORD2R(cid2, rid=0, origin=[0., 0., 0.],
zaxis=[1., 0., 0.], xzplane=[0., 0., 1.], comment='')
aelist_id1 = 10
aelist_id2 = 20
aesurf2 = AESURF.add_card(BDFCard(
[
'AESURF', aesid, label, cid1, aelist_id1, cid2, aelist_id2,
#eff, ldw,
#crefc, crefs, pllim, pulim,
#hmllim, hmulim, tqllim, tqulim,
]), comment='aesurf comment')
aesurf1.validate()
aesurf2.validate()
log = SimpleLogger(level='warning')
model = BDF(log=log)
model._add_coord_object(coord)
model._add_aesurf_object(aesurf1)
elements = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
unused_aelist = model.add_aelist(aelist_id1, elements, comment='')
elements = [11, 22, 33, 44, 55, 66, 77, 88, 99]
unused_aelist = model.add_aelist(aelist_id2, elements, comment='')
aesid += 1
model.add_aesurf(
aesid, label, cid1, aelist_id2, cid2=None, alid2=None,
eff=1.0, ldw='LDW',
crefc=1.0, crefs=1.2,
pllim=-np.pi/2, pulim=np.pi/2.,
hmllim=-42., hmulim=42., # hinge moment limits in force/disp
tqllim=10, tqulim=11, # TABLEDi deflection limits vs. dynamic pressure
)
# lower
table_id = 10
x = np.linspace(0.1, 1.)
y = np.log(np.linspace(1.1, 2.))[::-1]
model.add_tabled1(table_id, x, y, xaxis='LINEAR', yaxis='LINEAR', extrap=0, comment='')
# upper
table_id = 11
y2 = -y
model.add_tabled1(table_id, x, y2, xaxis='LINEAR', yaxis='LINEAR', extrap=0, comment='')
aesurf1.cross_reference(model)
aesurf1.write_card()
aesurf1.raw_fields()
aesurf1.uncross_reference()
aesurf1.write_card()
aesurf1.cross_reference(model)
aesurf1.raw_fields()
aesurf2.cross_reference(model)
aesurf2.write_card()
aesurf2.raw_fields()
aesurf2.uncross_reference()
aesurf2.write_card()
aesurf2.cross_reference(model)
aesurf2.raw_fields()
model.cross_reference()
model.uncross_reference()
model.safe_cross_reference()
save_load_deck(model)
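    # FLFACT stores the density-ratio, Mach and velocity (or reduced-frequency)
    # lists that FLUTTER sweeps; the 'f1 THRU fnf nf fmid' form expands to nf
    # values biased toward fmid.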
def test_flfact(self):
"""checks the FLFACT card"""
        # factors = [f1, 'THRU', fnf, nf, fmid]
        #   f1   : float - first value
        #   THRU : str   - the literal word THRU
        #   fnf  : float - final value
        #   nf   : int   - number of values to generate
        #   fmid : float - midpoint used to bias the array;
        #                  default = (f1 + fnf) / 2.
sid = 42
factors = [0.200, 'THRU', 0.100, 11, 0.1333]
flfact = FLFACT(sid, factors)
assert len(flfact.factors) == 11
#print(flfact)
factors = [0.200, 'THRU', 0.100, 11]
flfact = FLFACT(sid, factors)
assert len(flfact.factors) == 11
def test_flutter(self):
"""checks the FLUTTER/FLFACT cards"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
sid = 75
method = 'PKNL'
idensity = 76
imach = 77
ivelocity = 78
# density, mach, velocity
flutter1 = model.add_flutter(sid, method, idensity, imach, ivelocity,
imethod='L', nvalue=None,
omax=None, epsilon=1.0e-3)
flutter2 = FLUTTER.add_card(BDFCard(['FLUTTER', sid, method, idensity, imach,
ivelocity]), comment='flutter card')
assert flutter2.headers == ['density', 'mach', 'velocity'], flutter2.headers
assert flutter1.get_field(1) == sid, flutter1.get_field(1)
assert flutter1.get_field(2) == 'PKNL', flutter1.get_field(2)
assert flutter1.get_field(3) == idensity, flutter1.get_field(3)
assert flutter1.get_field(4) == imach, flutter1.get_field(4)
assert flutter1.get_field(5) == ivelocity, flutter1.get_field(5)
assert flutter1.get_field(6) == 'L', flutter1.get_field(6)
assert flutter1.get_field(7) is None, flutter1.get_field(7)
assert flutter1.get_field(8) == 1.0e-3, flutter1.get_field(8)
with self.assertRaises(KeyError):
assert flutter1.get_field(9) == 1.0e-3, flutter1.get_field(9)
flutter1.validate()
flutter1.write_card()
flutter2.validate()
flutter2.write_card()
densities = np.linspace(0., 1.)
unused_density = model.add_flfact(idensity, densities)
machs = np.linspace(0.7, 0.8)
mach = FLFACT(imach, machs)
mach = FLFACT.add_card(BDFCard(['FLFACT', imach] + list(machs)), comment='flfact card')
mach2 = model.add_flfact(imach, machs, comment='flfact')
mach.write_card(size=16)
mach2.write_card(size=8)
velocities = np.linspace(3., 4.)
velocity = model.add_flfact(ivelocity, velocities)
velocity.validate()
velocity.write_card()
assert velocity.min() == 3., velocities
assert velocity.max() == 4., velocities
model.flfacts[ivelocity] = velocity
ikfreq = 79
kfreqs = np.linspace(0.1, 0.2)
card = ['FLFACT', ikfreq] + list(kfreqs)
model.add_card(card, card[0])
kfreq = model.FLFACT(ikfreq)
kfreq.validate()
kfreq.write_card()
assert kfreq.min() == 0.1, kfreqs
assert kfreq.max() == 0.2, kfreqs
model.flfacts[ikfreq] = kfreq
ikfreq2 = 80
card = ['FLFACT', ikfreq2, 10., 'THRU', 20., 11]
model.add_card(card, card[0])
kfreq = model.FLFACT(ikfreq2)
kfreq.validate()
kfreq.write_card()
assert kfreq.min() == 10., 'min=%s; card=%s factors=%s' % (kfreq.min(), card, kfreq.factors)
assert kfreq.max() == 20., 'max=%s; card=%s factors=%s' % (kfreq.max(), card, kfreq.factors)
model.flfacts[ikfreq] = kfreq
ikfreq3 = 81
factors = [10., 'THRU', 20., 10]
kfreq = FLFACT(ikfreq3, factors)
kfreq.validate()
kfreq.write_card()
assert kfreq.min() == 10., 'min=%s; factors=%s' % (kfreq.min(), factors)
assert kfreq.max() == 20., 'max=%s; factors=%s' % (kfreq.max(), factors)
model.flfacts[ikfreq] = kfreq
kfreq.validate()
ikfreq4 = 82
kfreq2 = model.add_flfact(ikfreq4, [])
with self.assertRaises(ValueError):
kfreq2.validate()
kfreq2.factors = [1.]
kfreq2.validate()
kfreq2.write_card()
# density, mach, rfreq
card = ['FLUTTER', 85, 'KE', idensity, imach, ikfreq]
model.add_card(card, card[0])
#model.pop_parse_errors()
model.cross_reference()
model.pop_xref_errors()
flutter = model.Flutter(85)
assert flutter.headers == ['density', 'mach', 'reduced_frequency'], flutter.headers
flutter.write_card()
flutter.raw_fields()
model.uncross_reference()
model.safe_cross_reference()
save_load_deck(model)
def test_flutter_2(self):
"""validates the FLUTTER card"""
method = 'TEST'
imethod = 'TEST2'
sid = 1
idensity = 10
imach = 20
ivelocity = 30
flutter = FLUTTER(sid, method, idensity, imach, ivelocity,
imethod=imethod, nvalue=None,
omax=None, epsilon=1.0e-3)
with self.assertRaises(ValueError):
flutter.validate()
def test_flutter_3(self):
"""tests the flutter sweeps"""
alts = np.linspace(-10000., 50000.)[::-1]
log = SimpleLogger(level='warning')
model = BDF(log=log)
sid = 70
method = 'PKNL'
density = 71
mach = 72
reduced_freq_velocity = 73
flutter = model.add_flutter(sid, method, density, mach, reduced_freq_velocity)
flutter.make_flfacts_alt_sweep(model, 0.7, alts, eas_limit=1000.0, alt_units=u'ft',
velocity_units=u'in/s', density_units=u'slinch/in^3',
eas_units=u'ft/s')
sid = 80
density = 81
mach = 82
reduced_freq_velocity = 83
flutter = model.add_flutter(sid, method, density, mach, reduced_freq_velocity)
alt = 10000.
machs = np.arange(0.1, 0.8)
flutter.make_flfacts_mach_sweep(model, alt, machs, eas_limit=1000., alt_units='m',
velocity_units='m/s',
density_units='kg/m^3',
eas_units='m/s')
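    # MKAERO1 tabulates the Mach numbers and reduced frequencies at which the
    # unsteady aerodynamic matrices are generated; one card holds at most
    # eight of each, so longer lists wrap onto additional MKAERO1 cards
    # (checked against the expected continuation output below).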
def test_mkaero1(self):
"""checks the MKAERO1 card"""
machs = [0.5, 0.75]
reduced_freqs = [0.1, 0.2, 0.3, 0.4]
mkaero = MKAERO1(machs, reduced_freqs, comment='mkaero')
mkaero.validate()
mkaero.write_card()
mkaero = MKAERO1.add_card(BDFCard(
['MKAERO', 0.5, 0.75, None, None, None, None, None, None,
0.1, 0.2, 0.3, 0.4],
))
machs = [0.5, 0.75]
reduced_freqs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1]
mkaero = MKAERO1(machs, reduced_freqs, comment='mkaero')
mkaero.validate()
msg = mkaero.write_card()
lines = msg.strip().split('\n')
expected = [
'$mkaero',
'MKAERO1 .5 .75',
' .1 .2 .3 .4 .5 .6 .7 .8',
'MKAERO1 .5 .75',
' .9 1. 1.1',
]
for line1, line2 in zip(lines, expected):
assert line1 == line2, '\nline=%r\nexpected=%r'% (line1, line2)
expected2 = [
'$mkaero1',
'MKAERO1 .1 .2 .3 .4 .5 .6 .7 .8',
' .01 .02 .03',
'MKAERO1 .9',
' .01 .02 .03',
]
# ----------------------------------------------------------------------
model = BDF(debug=False)
machs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
reduced_freqs = [0.01, 0.02, 0.03]
mkaero = model.add_mkaero1(machs, reduced_freqs, comment='mkaero1')
mkaero.raw_fields()
msg = mkaero.write_card()
lines = msg.strip().split('\n')
for line1, line2 in zip(lines, expected2):
msg = '\nline =%r\n' % str(line1)
msg += 'expected=%r\n%s' % (str(line2), msg)
assert line1 == line2, msg
mkaerob = MKAERO1([], reduced_freqs)
with self.assertRaises(ValueError):
mkaerob.validate()
with self.assertRaises(ValueError):
mkaerob.write_card()
mkaeroc = MKAERO1([0.1, 0.2], [])
with self.assertRaises(ValueError):
mkaeroc.validate()
with self.assertRaises(ValueError):
mkaeroc.write_card()
machs = [0.01, 0.02, 0.03]
reduced_freqs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
mkaero = model.add_mkaero1(machs, reduced_freqs, comment='mkaero1')
machs = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09]
reduced_freqs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
mkaero = model.add_mkaero1(machs, reduced_freqs, comment='mkaero1')
machs_spike = [0.01]
reduced_freqs_spike = [0.1, 0.2]
mkaero = model.add_mkaero1(machs_spike, reduced_freqs_spike, comment='mkaero1')
# TODO: this fails...because it's linked to the first card somehow
#mkaerod = model.add_mkaero1(machs, [])
#with self.assertRaises(ValueError):
#mkaerod.write_card()
save_load_deck(model)
def test_mkaero2(self):
"""checks the MKAERO2 card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
machs = [0.5, 0.75, 0.8]
reduced_freqs = [0.1, 0.2, 0.3]
mkaero = model.add_mkaero2(machs, reduced_freqs, comment='mkaero2')
mkaero.validate()
mkaero.write_card()
machs = [0.5, 0.75]
reduced_freqs = [0.1, 0.2]
mkaero = model.add_mkaero2(machs, reduced_freqs, comment='mkaero2')
mkaero.validate()
mkaero.write_card()
mkaero.raw_fields()
mkaero = MKAERO2.add_card(BDFCard(['MKAERO2'] + machs + reduced_freqs), comment='mkaero2')
mkaero.validate()
mkaero.write_card()
# at least one mach
machs = []
reduced_freqs = [42.]
mkaero = MKAERO2(machs, reduced_freqs)
with self.assertRaises(ValueError):
mkaero.validate()
# at least one rfreq
machs = [0.8]
reduced_freqs = []
mkaero = MKAERO2(machs, reduced_freqs)
with self.assertRaises(ValueError):
mkaero.validate()
# should be the same length
machs = [0.8]
reduced_freqs = [42., 43.]
mkaero = MKAERO2(machs, reduced_freqs)
with self.assertRaises(ValueError):
mkaero.validate()
# split the write card method
machs = [0.1, 0.2, 0.3, 0.4, 0.5]
reduced_freqs = [1., 2., 3., 4., 5.]
mkaero = MKAERO2(machs, reduced_freqs)
mkaero.validate()
mkaero.write_card()
mkaerob = model.add_mkaero2([], reduced_freqs)
with self.assertRaises(ValueError):
mkaerob.validate()
with self.assertRaises(ValueError):
mkaerob.write_card()
mkaeroc = model.add_mkaero2([0.1, 0.2], [])
with self.assertRaises(ValueError):
mkaeroc.validate()
with self.assertRaises(ValueError):
mkaeroc.write_card()
mkaeroc = model.add_mkaero2([], [])
with self.assertRaises(ValueError):
mkaeroc.validate()
with self.assertRaises(ValueError):
mkaeroc.write_card()
def test_diverg(self):
"""checks the DIVERG card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
sid = 100
nroots = 21
machs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
diverg = DIVERG(sid, nroots, machs, comment='divergence')
diverg.validate()
diverg.write_card()
diverg = model.add_card(['DIVERG', sid, nroots] + machs, 'DIVERG', comment='divergence')
model.validate()
save_load_deck(model)
#diverg.validate()
#diverg.write_card()
def test_trim_01(self):
"""checks the TRIM card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
#model.add_aecompl
sid = 100
mach = 0.75
q = 100.
labels = ['ALPHA', 'ALPHA']
uxs = [10., 20.]
trim1 = TRIM(sid, mach, q, labels, uxs)
trim2 = TRIM2(sid+1, mach, q, labels, uxs)
with self.assertRaises(RuntimeError):
trim1.validate()
with self.assertRaises(RuntimeError):
trim2.validate()
labels = ['ALPHA']
uxs = [10., 20.]
trim1 = TRIM(sid, mach, q, labels, uxs)
trim2 = TRIM2(sid, mach, q, labels, uxs)
with self.assertRaises(RuntimeError):
trim1.validate()
with self.assertRaises(RuntimeError):
trim2.validate()
labels = ['ALPHA', 'BETA']
uxs = [10., 20.]
trim1 = TRIM(sid, mach, q, labels, uxs)
trim1.validate()
trim1.write_card()
trim2 = TRIM2(sid, mach, q, labels, uxs)
trim2.validate()
trim2.write_card()
labels = ['ALPHA']
uxs = [10.]
trim1 = TRIM(sid, mach, q, labels, uxs, aeqr=3.0, comment='')
trim1.validate()
trim1.write_card()
trim2 = TRIM2(sid, mach, q, labels, uxs, aeqr=3.0, comment='')
trim2.validate()
trim2.write_card()
labels = ['ALPHA', 'BETA']
uxs = [10., 20.]
trim1 = TRIM(sid, mach, q, labels, uxs, aeqr=3.0, comment='')
trim1.validate()
trim1.write_card()
        trim2 = TRIM2(sid, mach, q, labels, uxs, aeqr=3.0, comment='')
trim2.validate()
trim2.write_card()
model.add_card(['TRIM', sid, mach, q, labels[0], uxs[0]], 'TRIM', comment='$ trim')
model.validate()
model._verify_bdf(xref=False)
save_load_deck(model)
def test_trim_02(self):
"""checks the TRIM card with a 2.5g pullup"""
model = BDF()
sid = 75
mach = 0.75
q = 100.
labels = ['NZ']
uxs = [2.5]
trim1 = model.add_trim(sid, mach, q, labels, uxs, aeqr=0.0, comment='')
trim1.validate()
trim2 = model.add_trim(sid+1, mach, q, labels, uxs, aeqr=0.0, trim_type=2, comment='')
trim2.validate()
def test_trim_03(self):
"""checks the TRIM card with a 2.5g pullup"""
model = BDF(debug=None)
sid = 75
mach = 0.75
q = 100.
labels = ['URDD3', 'PITCH']
uxs = [2.5, 0.0]
trim1a = model.add_trim(sid, mach, q, labels, uxs, aeqr=0.0,
trim_type=1, comment='') # 75
trim2a = model.add_trim(sid+1, mach, q, labels, uxs, aeqr=0.0,
trim_type=2, comment='') # 76
labels = ['URDD3', 'URDD5', 'PITCH']
uxs = [2.5, 0.0, 0.0]
# good
trim1b = model.add_trim(sid+2, mach, q, labels, uxs, aeqr=0.0,
trim_type=1, comment='trim') # 77
trim2b = model.add_trim(sid+3, mach, q, labels, uxs, aeqr=0.0,
trim_type=2, comment='trim') # 78
model.add_aestat(1, 'URDD3', comment='aestat')
model.add_aestat(2, 'URDD5', comment='aestat')
model.add_aestat(3, 'PITCH', comment='aestat')
model.add_aestat(4, 'ANGLEA', comment='aestat')
#+--------+---------+-----------------------------+
#| ANGLEA | ur (R2) | Angle of Attack |
#| YAW | ur (R3) | Yaw Rate |
#| SIDES | ur (R3) | Angle of Sideslip |
#+--------+---------+-----------------------------+
#| ROLL | ůr (R1) | Roll Rate |
#| PITCH | ůr (R2) | Pitch Rate |
#+--------+---------+-----------------------------+
#| URDD1 | ür (T1) | Longitudinal (See Remark 3) |
#| URDD2 | ür (T2) | Lateral |
#| URDD3 | ür (T3) | Vertical |
#| URDD4 | ür (R1) | Roll |
#| URDD5 | ür (R2) | Pitch |
#| URDD6 | ür (R3) | Yaw |
#+--------+---------+-----------------------------+
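        # The AESTAT entries above are the rigid-body trim variables; the TRIM
        # labels (URDD3, URDD5, PITCH, ...) must match them, and together with
        # the AESURF control surface and the SUPORT DOFs below they decide
        # whether the trim problem is determinate.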
cid1 = 0
label = 'DELTA'
aesid = 5
alid1 = 6
model.add_aesurf(aesid, label, cid1, alid1)
suport = model.add_suport([55, 66], ['3', '3'])
str(suport)
model.add_aelist(alid1, [100, 101, 102], comment='')
model.add_grid(55, [0., 0., 0.])
model.add_grid(66, [0., 0., 0.])
#model.add_cord2r(cid, origin, zaxis, xzplane, rid=0, comment='')
model.validate()
        # trim1a/trim2a define only 2 trim variables; with the 2 SUPORT DOFs
        # that gives 4 constraints for 5 unknowns (4 AESTATs + 1 AESURF), so
        # verify_trim() rejects the trim as not determinate:
with self.assertRaises(RuntimeError):
trim1a.verify_trim(model.suport, model.suport1, model.aestats, model.aeparams,
model.aelinks, model.aesurf, xref=True)
with self.assertRaises(RuntimeError):
trim2a.verify_trim(model.suport, model.suport1, model.aestats, model.aeparams,
model.aelinks, model.aesurf, xref=True)
trim1b.verify_trim(model.suport, model.suport1, model.aestats, model.aeparams,
model.aelinks, model.aesurf, xref=True)
trim2b.verify_trim(model.suport, model.suport1, model.aestats, model.aeparams,
model.aelinks, model.aesurf, xref=True)
model.write_bdf('trim.bdf')
model2 = read_bdf('trim.bdf', debug=None)
model2._verify_bdf(xref=True)
model2.uncross_reference()
model2._verify_bdf(xref=False)
model2.cross_reference()
model2._verify_bdf(xref=True)
os.remove('trim.bdf')
model2.uncross_reference()
model2.safe_cross_reference()
save_load_deck(model)
def test_gust(self):
"""checks the GUST card"""
sid = 100
dload = 200
wg = 50.
x0 = 3.
V = 42.
log = SimpleLogger(level='warning')
model = BDF(log=log)
gust = model.add_gust(sid, dload, wg, x0, V=V, comment='gust load')
gust.validate()
gust.write_card()
gust2 = GUST.add_card(BDFCard(['GUST', sid, dload, wg, x0, V]), comment='gust load')
gust2.validate()
gust2.write_card()
save_load_deck(model)
def test_csschd(self):
"""checks the CSSCHD card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
sid = 5
aesid = 50
lalpha = 12
lmach = 15
lschd = 25
card = ['CSSCHD', sid, aesid, lalpha, lmach, lschd]
bdf_card = BDFCard(card, has_none=True)
csshcd_bad = CSSCHD(sid, aesid, lschd, lalpha='lalpha', lmach=4,
comment='')
with self.assertRaises(TypeError):
csshcd_bad.validate()
csshcd_bad.lalpha = 4
csshcd_bad.lmach = 5.0
with self.assertRaises(TypeError):
csshcd_bad.validate()
csshcd_bad.lmach = 5
csshcd_bad.validate()
card = ['CSSCHD', sid, aesid, lalpha, lmach, lschd]
bdf_card = BDFCard(card, has_none=True)
csshcd1 = CSSCHD.add_card(bdf_card, comment='csschd card')
csshcd1.validate()
csshcd1.write_card()
sid = 6
csshcd2 = model.add_csschd(sid, aesid, lschd, lalpha=lalpha, lmach=lmach,
comment='csschd card')
label = 'ELEV'
cid1 = 0
alid1 = 37
unused_aesurf = model.add_aesurf(
aesid, label, cid1, alid1, cid2=None, alid2=None,
eff=1.0, ldw='LDW', crefc=1.0, crefs=1.0,
pllim=-np.pi/2., pulim=np.pi/2.,
hmllim=None, hmulim=None,
tqllim=None, tqulim=None, comment='aesurf')
unused_aelist = model.add_aelist(alid1, [1, 2, 3], comment='')
aefact_sid = alid1
fractions = [0., 0.5, 1.]
unused_aefact_elev = model.add_aefact(aefact_sid, fractions, comment='aefact')
aefact_sid = lalpha
fractions = [0., 5., 10.]
unused_aefact_alpha = model.add_aefact(aefact_sid, fractions, comment='aefact')
aefact_sid = lmach
fractions = [0., 0.7, 0.8]
unused_aefact_mach = model.add_aefact(aefact_sid, fractions, comment='aefact')
aefact_sid = lschd
fractions = [0., 15., 30., 45.]
unused_aefact_delta = model.add_aefact(aefact_sid, fractions, comment='aefact')
model.cross_reference()
csshcd2.write_card()
#csshcd1.write_card()
model.uncross_reference()
bdf_filename = StringIO()
model.write_bdf(bdf_filename, close=False)
model.safe_cross_reference()
model.validate()
save_load_deck(model)
bdf_filename.seek(0)
model2 = read_bdf(bdf_filename, punch=True, debug=False)
bdf_filename2 = StringIO()
model.write_bdf(bdf_filename2, size=16, close=False)
model2.write_bdf(bdf_filename2, size=16, close=False)
#-----------
csshcd3 = CSSCHD(sid, aesid, lschd, lalpha=None, lmach=None, comment='cssch card')
csshcd3.write_card()
with self.assertRaises(RuntimeError):
csshcd3.validate()
def test_monpnt(self):
log = SimpleLogger(level='warning')
model = BDF(log=log)
name = 'test'
label = 'test2'
axes = '123'
comp = 'WING'
xyz = [0., 0., 0.]
monpnt1 = model.add_monpnt1(name, label, axes, comp, xyz, cp=0,
cd=None, comment='monpnt1')
monpnt1.raw_fields()
monpnt1.validate()
Type = 'CQUAD4'
table = 'STRESS'
nddl_item = 42
eid = 17
monpnt2 = model.add_monpnt2(name, label, table, Type, nddl_item, eid,
comment='monpnt2')
monpnt2.raw_fields()
monpnt2.validate()
grid_set = 43
elem_set = 44
monpnt3 = model.add_monpnt3(name, label, axes, grid_set, elem_set,
xyz, cp=0, cd=None,
xflag=None, comment='monpnt3')
monpnt3.raw_fields()
monpnt3.validate()
model._verify_bdf(xref=False)
model.cross_reference()
model._verify_bdf(xref=True)
model.uncross_reference()
save_load_deck(model)
def test_bah_plane_bdf(self):
"""tests the bah_plane"""
bdf_filename = os.path.join(MODEL_PATH, 'aero', 'bah_plane', 'bah_plane.bdf')
folder = ''
run_bdf(folder, bdf_filename, debug=False, xref=True, check=True,
punch=False, mesh_form='combined',
is_folder=False, print_stats=False,
encoding=None, sum_load=True, size=8,
is_double=False, stop=False, nastran='',
post=-1, dynamic_vars=None, quiet=True,
dumplines=False, dictsort=False,
run_extract_bodies=True, nerrors=0, dev=True,
crash_cards=None, pickle_obj=True)
def test_rotord(self):
"""tests the ROTORD"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
sid = 42
rstart = 3.14
rstep = .314
numstep = 10
rids = [None]
rsets = [-31]
rcords = [10]
w3s = [13.]
w4s = [3.]
rforces = [14]
brgsets = [17, False]
rspeeds = [42.1]
rotord = model.add_rotord(
sid, rstart, rstep, numstep,
rids, rsets, rspeeds, rcords, w3s, w4s, rforces, brgsets,
refsys='ROT', cmout=0.0, runit='RPM', funit='RPM',
zstein='NO', orbeps=1.e-6, roprt=0, sync=1, etype=1,
eorder=1.0, threshold=0.02, maxiter=10, comment='rotord')
rotord.validate()
sid = 43
nids = [100, 101, 102]
rotorg = model.add_rotorg(
sid, nids, comment='rotorg'
)
rotorg.validate()
save_load_deck(model)
def test_zona_1(self):
"""zona explicit test"""
log = SimpleLogger(level='error', encoding='utf-8', log_func=None) # lots of zona errors
bdf_filename = os.path.join(MODEL_PATH, 'aero', 'f16_ma41.bdf')
model = read_bdf(bdf_filename, xref=False, debug=None, log=log)
model.safe_cross_reference()
save_load_deck(model, xref='safe',
run_renumber=False, run_convert=False, run_remove_unused=False,
run_save_load=False, run_save_load_hdf5=False, run_mass_properties=False,
run_test_bdf=False, run_op2_writer=False)
with self.assertRaises(NotImplementedError):
model.zona.convert_to_nastran()
def test_zona_2(self):
"""zona explicit test"""
log = SimpleLogger(level='error', encoding='utf-8', log_func=None) # lots of zona errors
bdf_filename = os.path.join(MODEL_PATH, 'aero', 'ztran.bdf')
model = read_bdf(bdf_filename, xref=False, debug=None, log=log)
model.safe_cross_reference()
save_load_deck(model, xref='safe',
run_renumber=False, run_convert=False, run_remove_unused=False,
run_save_load=False, run_save_load_hdf5=False, run_mass_properties=False,
run_test_bdf=False)
model.zona.convert_to_nastran()
def test_zona_3(self):
"""totally fake zona model"""
bdf_file = get_zona_model()
model = read_bdf(bdf_filename=bdf_file, validate=True, xref=True, punch=False,
skip_cards=None, read_cards=None, encoding=None,
log=None, debug=None, mode='zona')
#with self.assertRaises(AttributeError):
model.uncross_reference()
model.write_bdf('zona.bdf')
model.safe_cross_reference()
model.write_bdf('zona.bdf')
bdf_file.seek(0)
model.clear_attributes()
model2 = read_bdf('zona.bdf', debug=None)
os.remove('zona.bdf')
model2.zona.convert_to_nastran()
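# get_zona_model() builds a small, entirely fabricated ZONA bulk-data deck in
# memory (AEROZ, AESURFZ, BODY7, SEGMESH, MKAEROZ, TRIM, and SPLINE cards,
# among others); test_zona_3 above reads it back with read_bdf(..., mode='zona')
# to exercise the ZONA code path.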
def get_zona_model():
bdf_file = StringIO()
bdf_file.write(
'$ pyNastran: version=zona\n'
'CEND\n'
'BEGIN BULK\n'
#'$ acsid, rcsid, cref, bref, sref, symxz, symxy\n'
#'AEROZ, 10, 0, 1., 10., 100., YES\n'
'$AEROZ ACSID XZSYM FLIP FMMUNIT FMLUNIT REFC REFB REFS\n'
'$ REFX REFY REFZ\n'
'AEROZ, 0, YES, NO, SLIN, IN, 22.73,59.394,1175.8\n'
', 59.53,0.0, 0.0\n'
'$ label, type, cid, PANLST, setg, actid\n'
'AESURFZ,FLAP, ASYM, 1, 10, 20, 0\n'
#'AESURFZ,FLAP, SYM, 1, 10, 20, 0\n'
'CORD2R, 1,0, 0.,0.,0., 0.,0.,1.,\n'
',1.,0.,0.\n'
'$BODY7,ID,LABEL,IPBODY7, ACOORD, NSEG, IDMESH1\n'
'BODY7, 1, FUSE, , 2, , 1\n'
'PANLST3,10, FUSE, \n'
'$ id,naxial,nradial, \n'
'SEGMESH,1, 4, 3, \n'
# ITYPEi = 1 (Body of Revolution):
# Xi, CAMi, YRi
# ITYPEi = 2 (Elliptical Body):
# Xi, YRi, ZRi
# ITYPEi = 3 (Arbitrary Body):
# Xi, IDYi, IDZi
'$ itype, x1, cam, yr1, zr1, idy1, idz1 \n'
', 1, , 1., 1., ,\n'
', 2, 1., , 1., 2.,\n'
', 3, 2., , , , 13, 14 \n'
', 3, 3., , , , 13, 14 \n'
# y
'AEFACT,13, 1., 0., 0.,-1.\n'
'AEFACT,14, 0., 1., -1., 0.\n'
'$ MKAEROZ, ID, MACH, METHOD, IDFLT\n'
'MKAEROZ, 101, 0.8, -1, -1, \n'
'$ TRIM, ID, MKAEROZ, Q, LABEL1, UX1, CGX, CGY,\n'
'TRIM, 100, 101, 42., ALPHA, 5., 0., 0., 0.,\n'
'$CGZ, WEIGHT, Ixx, Ixy, Iyy, Ixz, Iyz, Izz\n'
',0., 1e4, 1e3, 1e3, 1e5, 1e3, 1e3, 1e4\n'
'$TRUE/G, NX, NY, NZ, P, Q, R, \n'
', TRUE, FREE, NONE, 32., FREE, NONE, 42., \n'
'$var, value\n'
',17, 1.0,\n'
'$\n'
'TRIMVAR,17,VAR\n'
'$\n'
'$trimlnk,id,sym, ?, ?\n'
'TRIMLNK,10,SYM, -1, 17\n'
'ACOORD, 2, 0.,0.,0., 1.0,0.\n'
'$ ID, MODEL, CP, PANLST, SETG, DZ, EPS\n'
'SPLINE1,100, , , 422, 423,\n'
'$, NELEM, MELEM\n'
'$, 10, 10\n'
'PANLST3,422, FUSE, \n'
'$ id,naxial,nradial, \n'
#'$ ID, MODEL, PANLST, SETG,\n'
#'SPLINE2,1000, , 422, 423,\n'
'$ ID, MODEL, CP, PANLST, SETG,\n'
'SPLINE3,1200, , , 422, 423,\n'
'SET1,423,10\n'
'GRID,10,,0.,0.,0.\n'
'GRID,11,,1.,0.,0.\n'
'CONROD,100, 10,11, 101,1.0\n'
'MAT1,101,3.0e7,,0.3\n'
)
bdf_file.seek(0)
return bdf_file
def _setup_aero_plot(fig_id: Optional[int]=None) -> Tuple[Any, Any]:
"""helper for plotting aero panels"""
fig = None
ax = None
if IS_MATPLOTLIB:
fig = plt.figure(fig_id)
ax = fig.gca()
ax.set_ylabel('Y')
ax.set_xlabel('X')
ax.grid()
return fig, ax
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 34.638467 | 158 | 0.511828 |

b2f10df34ec3b9e4e1fba234802b5bde5cc44087 | 3,523 | py | Python
| sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 3 | 2019-02-21T20:46:26.000Z | 2021-06-22T15:35:52.000Z
| sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z
| sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 15 | 2017-10-02T18:48:20.000Z | 2022-03-03T14:03:49.000Z |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from .._deserialize import (
process_storage_error)
from .._generated.models import (
StorageErrorException,
)
from .._shared.response_handlers import return_response_headers
from .._shared.uploads_async import (
upload_data_chunks,
DataLakeFileChunkUploader)
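# _any_conditions() reports whether the caller supplied any HTTP precondition
# (If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since).
# upload_datalake_file() uses it below: when overwrite is False and no
# precondition was given, If-None-Match '*' is injected so the flush fails
# rather than silently replacing an existing file.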
def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
return any([
modified_access_conditions.if_modified_since,
modified_access_conditions.if_unmodified_since,
modified_access_conditions.if_none_match,
modified_access_conditions.if_match
])
async def upload_datalake_file( # pylint: disable=unused-argument
client=None,
stream=None,
length=None,
overwrite=None,
validate_content=None,
max_concurrency=None,
**kwargs):
try:
if length == 0:
return {}
properties = kwargs.pop('properties', None)
umask = kwargs.pop('umask', None)
permissions = kwargs.pop('permissions', None)
path_http_headers = kwargs.pop('path_http_headers', None)
modified_access_conditions = kwargs.pop('modified_access_conditions', None)
if not overwrite:
# if customers didn't specify access conditions, they cannot flush data to existing file
if not _any_conditions(modified_access_conditions):
modified_access_conditions.if_none_match = '*'
if properties or umask or permissions:
raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
if overwrite:
response = await client.create(
resource='file',
path_http_headers=path_http_headers,
properties=properties,
modified_access_conditions=modified_access_conditions,
umask=umask,
permissions=permissions,
cls=return_response_headers,
**kwargs)
# this modified_access_conditions will be applied to flush_data to make sure
# no other flush between create and the current flush
modified_access_conditions.if_match = response['etag']
modified_access_conditions.if_none_match = None
modified_access_conditions.if_modified_since = None
modified_access_conditions.if_unmodified_since = None
await upload_data_chunks(
service=client,
uploader_class=DataLakeFileChunkUploader,
total_size=length,
chunk_size=100 * 1024 * 1024,
stream=stream,
max_concurrency=max_concurrency,
validate_content=validate_content,
**kwargs)
return await client.flush_data(position=length,
path_http_headers=path_http_headers,
modified_access_conditions=modified_access_conditions,
cls=return_response_headers,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
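# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  Application
# code normally reaches this helper through the public async client rather
# than by calling it directly; the account URL, credential, and path names
# below are placeholders.
#
#     from azure.storage.filedatalake.aio import DataLakeFileClient
#
#     async def example(data: bytes) -> None:
#         file_client = DataLakeFileClient(
#             "https://<account>.dfs.core.windows.net",
#             file_system_name="my-filesystem",
#             file_path="folder/example.txt",
#             credential="<account-key>")
#         async with file_client:
#             # upload_data() creates the file, uploads the chunks, and
#             # flushes them, mirroring the create/upload/flush flow above.
#             await file_client.upload_data(data, overwrite=True)
# ---------------------------------------------------------------------------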
| 40.494253 | 109 | 0.619075 |

38a340975f42f7845b9377275929364265bfb0f1 | 2,642 | py | Python
| oqtans_tools/GFFtools/0.2/gtf_to_gff.py | vipints/oqtans | af8119a73f6023deca57aedb9b090f7378a1a6c6 | ["BSD-3-Clause"] | 3 | 2015-11-20T19:40:29.000Z | 2019-07-25T15:34:24.000Z
| oqtans_tools/GFFtools/0.2/gtf_to_gff.py | vipints/oqtans | af8119a73f6023deca57aedb9b090f7378a1a6c6 | ["BSD-3-Clause"] | null | null | null
| oqtans_tools/GFFtools/0.2/gtf_to_gff.py | vipints/oqtans | af8119a73f6023deca57aedb9b090f7378a1a6c6 | ["BSD-3-Clause"] | 2 | 2018-09-20T08:28:36.000Z | 2019-03-06T06:26:52.000Z |
#!/usr/bin/env python
"""
Convert Gene Transfer Format [GTF] to Generic Feature Format Version 3 [GFF3].
Usage: python gtf_to_gff.py in.gtf > out.gff3
Requirement:
GFFParser.py: https://github.com/vipints/GFFtools-GX/blob/master/GFFParser.py
helper.py : https://github.com/vipints/GFFtools-GX/blob/master/helper.py
Copyright (C)
2009-2012 Friedrich Miescher Laboratory of the Max Planck Society, Tubingen, Germany.
2012-2014 Memorial Sloan Kettering Cancer Center New York City, USA.
"""
import re
import sys
import GFFParser
import helper
def GFFWriter(gtf_content):
"""
write the feature information to GFF format
@args gtf_content: Parsed object from gtf file
@type gtf_content: numpy array
"""
print '##gff-version 3'
for ent1 in gtf_content:
chr_name = ent1['chr']
strand = ent1['strand']
start = ent1['start']
stop = ent1['stop']
source = ent1['source']
ID = ent1['name']
Name = ent1['gene_info']['Name']
print '%s\t%s\tgene\t%d\t%d\t.\t%s\t.\tID=%s;Name=%s' % (chr_name, source, start, stop, strand, ID, Name)
for idx, tid in enumerate(ent1['transcripts']):
t_start = ent1['exons'][idx][0][0]
t_stop = ent1['exons'][idx][-1][-1]
t_type = ent1['transcript_type'][idx]
if ent1['exons'][idx].any() and ent1['cds_exons'][idx].any():
utr5_exons, utr3_exons = helper.buildUTR(ent1['cds_exons'][idx], ent1['exons'][idx], strand)
print '%s\t%s\t%s\t%d\t%d\t.\t%s\t.\tID=%s;Parent=%s' % (chr_name, source, t_type, t_start, t_stop, strand, tid[0], ID)
for ex_cod in utr5_exons:
print '%s\t%s\tfive_prime_UTR\t%d\t%d\t.\t%s\t.\tParent=%s' % (chr_name, source, ex_cod[0], ex_cod[1], strand, tid[0])
for ex_cod in ent1['cds_exons'][idx]:
print '%s\t%s\tCDS\t%d\t%d\t.\t%s\t%d\tParent=%s' % (chr_name, source, ex_cod[0], ex_cod[1], strand, ex_cod[2], tid[0])
for ex_cod in utr3_exons:
print '%s\t%s\tthree_prime_UTR\t%d\t%d\t.\t%s\t.\tParent=%s' % (chr_name, source, ex_cod[0], ex_cod[1], strand, tid[0])
for ex_cod in ent1['exons'][idx]:
print '%s\t%s\texon\t%d\t%d\t.\t%s\t.\tParent=%s' % (chr_name, source, ex_cod[0], ex_cod[1], strand, tid[0])
def __main__():
    try:
        gtf_fname = sys.argv[1]
    except IndexError:
        # no GTF file was supplied on the command line
        print __doc__
        sys.exit(-1)
gtf_file_content = GFFParser.Parse(gtf_fname)
GFFWriter(gtf_file_content)
if __name__ == "__main__":
__main__()
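# ---------------------------------------------------------------------------
# Illustrative example (not part of the original script; coordinates and
# identifiers are made up, and the transcript type is assumed to be reported
# as mRNA by GFFParser).  A GTF transcript such as
#
#   chr1  test_src  exon  1300  1500  .  +  .  gene_id "g1"; transcript_id "t1";
#
# would be written by GFFWriter in hierarchical GFF3 form roughly as:
#
#   chr1  test_src  gene  1300  1500  .  +  .  ID=g1;Name=g1
#   chr1  test_src  mRNA  1300  1500  .  +  .  ID=t1;Parent=g1
#   chr1  test_src  exon  1300  1500  .  +  .  Parent=t1
# ---------------------------------------------------------------------------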
| 32.617284 | 136 | 0.595004 |