Dataset schema (one record per source file; ranges are the observed min to max; fields marked nullable may be null):

- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1

hexsha: 358a6eb9c1501c587bc5bfcf16880734d69fa7cf | size: 932 | ext: py | lang: Python
max_stars: dexter_utils/vectorpixel.py @ rsbohn/Dexter_CircuitPython_Widgets (head 0ba1a559c64878bcdd1dba0f0481f1a9ab6b4521), licenses ["MIT"], stars: null
max_issues: dexter_utils/vectorpixel.py @ rsbohn/Dexter_CircuitPython_Widgets (head 0ba1a559c64878bcdd1dba0f0481f1a9ab6b4521), licenses ["MIT"], issues: 1 (2022-02-13T23:29:10.000Z to 2022-02-13T23:29:10.000Z)
max_forks: dexter_utils/vectorpixel.py @ rsbohn/Dexter_CircuitPython_Widgets (head 0ba1a559c64878bcdd1dba0f0481f1a9ab6b4521), licenses ["MIT"], forks: null
content:
# SPDX-FileCopyrightText: Copyright (c) 2022 Randall Bohn (dexter)
#
# SPDX-License-Identifier: MIT
"""
dexter_utils.vectorpixel
VectorIO shapes that act like NeoPixels.
"""
import displayio
class VectorPixel:
"""Turn a group of Vectorio shapes into a string of NeoPixel-like objects."""
def __init__(self, group: displayio.Group):
self.shaders = [item.pixel_shader for item in group]
def fill(self, color):
"""Fill all with color"""
for item in self.shaders:
item[0] = color
def show(self):
"""I do nothing"""
def __len__(self) -> int:
return len(self.shaders)
def __getitem__(self, key: int):
return self.shaders[key][0]
    def __setitem__(self, key, value):
        if isinstance(key, slice):
            # Write into the sliced positions rather than always starting at 0.
            for x, item in zip(range(*key.indices(len(self.shaders))), value):
                self.shaders[x][0] = item
        else:
            self.shaders[key][0] = value
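
A minimal usage sketch of VectorPixel, assuming CircuitPython's vectorio.Circle API (each shape is given its own one-color displayio.Palette as its pixel_shader):

    import displayio
    import vectorio

    group = displayio.Group()
    for i in range(8):
        palette = displayio.Palette(1)  # single-color palette: slot 0 is "the pixel"
        group.append(vectorio.Circle(pixel_shader=palette, radius=4, x=10 * i, y=8))

    pixels = VectorPixel(group)
    pixels.fill(0x000000)  # blank every shape
    pixels[0] = 0xFF0000   # light the first shape red, NeoPixel-style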
avg_line_length: 24.526316 | max_line_length: 81 | alphanum_fraction: 0.614807

hexsha: f786d85dd58402ea383d4a920a6ac44d1cab724d | size: 43 | ext: py | lang: Python
max_stars: cli/python/src/auth/version.py @ qweio/qwe-vpn (head 99f1deb0e156d9c4a6b3696cb031f6d6f4063a58), licenses ["Apache-2.0"], stars: 1 (2021-03-23T07:28:46.000Z to 2021-03-23T07:28:46.000Z)
max_issues: cli/python/src/auth/version.py @ qweio/qwe-vpn (head 99f1deb0e156d9c4a6b3696cb031f6d6f4063a58), licenses ["Apache-2.0"], issues: 38 (2021-05-12T06:12:51.000Z to 2022-03-15T20:12:18.000Z)
max_forks: cli/python/src/auth/version.py @ zero88/qwe-vpn (head 99f1deb0e156d9c4a6b3696cb031f6d6f4063a58), licenses ["Apache-2.0"], forks: null
content:
APP_VERSION = '0.0.1'
HASH_VERSION = 'dev'
avg_line_length: 14.333333 | max_line_length: 21 | alphanum_fraction: 0.674419

hexsha: bda3cde238bebbfe7a245da3498862d8458bb7bb | size: 9,108 | ext: py | lang: Python
max_stars: neo/test/coretest/test_spike.py @ tclose/python-neo (head 338d381b735a019f6be68ab7196366eed33815fe), licenses ["BSD-3-Clause"], stars: null
max_issues: neo/test/coretest/test_spike.py @ tclose/python-neo (head 338d381b735a019f6be68ab7196366eed33815fe), licenses ["BSD-3-Clause"], issues: null
max_forks: neo/test/coretest/test_spike.py @ tclose/python-neo (head 338d381b735a019f6be68ab7196366eed33815fe), licenses ["BSD-3-Clause"], forks: null
content:
# -*- coding: utf-8 -*-
"""
Tests of the neo.core.spike.Spike class
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import quantities as pq
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.spike import Spike
from neo.core import Segment, Unit
from neo.test.tools import assert_arrays_equal, assert_neo_object_is_compliant
from neo.test.generate_datasets import (get_fake_value, get_fake_values,
fake_neo, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = dict([(str(x), TEST_ANNOTATIONS[x]) for x in
range(len(TEST_ANNOTATIONS))])
def test__get_fake_values(self):
self.annotations['seed'] = 0
time = get_fake_value('time', pq.Quantity, seed=0, dim=0)
waveform = get_fake_value('waveform', pq.Quantity, seed=1, dim=2)
left_sweep = get_fake_value('left_sweep', pq.Quantity, seed=2, dim=0)
sampling_rate = get_fake_value('sampling_rate', pq.Quantity,
seed=3, dim=0)
name = get_fake_value('name', str, seed=4, obj=Spike)
description = get_fake_value('description', str, seed=5, obj='Spike')
file_origin = get_fake_value('file_origin', str)
attrs1 = {'name': name,
'description': description,
'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
res11 = get_fake_values(Spike, annotate=False, seed=0)
res12 = get_fake_values('Spike', annotate=False, seed=0)
res21 = get_fake_values(Spike, annotate=True, seed=0)
res22 = get_fake_values('Spike', annotate=True, seed=0)
assert_arrays_equal(res11.pop('time'), time)
assert_arrays_equal(res12.pop('time'), time)
assert_arrays_equal(res21.pop('time'), time)
assert_arrays_equal(res22.pop('time'), time)
assert_arrays_equal(res11.pop('waveform'), waveform)
assert_arrays_equal(res12.pop('waveform'), waveform)
assert_arrays_equal(res21.pop('waveform'), waveform)
assert_arrays_equal(res22.pop('waveform'), waveform)
assert_arrays_equal(res11.pop('left_sweep'), left_sweep)
assert_arrays_equal(res12.pop('left_sweep'), left_sweep)
assert_arrays_equal(res21.pop('left_sweep'), left_sweep)
assert_arrays_equal(res22.pop('left_sweep'), left_sweep)
assert_arrays_equal(res11.pop('sampling_rate'), sampling_rate)
assert_arrays_equal(res12.pop('sampling_rate'), sampling_rate)
assert_arrays_equal(res21.pop('sampling_rate'), sampling_rate)
assert_arrays_equal(res22.pop('sampling_rate'), sampling_rate)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
self.assertEqual(res21, attrs2)
self.assertEqual(res22, attrs2)
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = Spike
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, Spike))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = 'Spike'
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, Spike))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
class TestSpike(unittest.TestCase):
def setUp(self):
params = {'test2': 'y1', 'test3': True}
self.sampling_rate1 = .1*pq.Hz
self.left_sweep1 = 2.*pq.s
self.spike1 = Spike(1.5*pq.ms, waveform=[[1.1, 1.5, 1.7],
[2.2, 2.6, 2.8]]*pq.mV,
sampling_rate=self.sampling_rate1,
left_sweep=self.left_sweep1,
name='test', description='tester',
file_origin='test.file',
test1=1, **params)
self.spike1.annotate(test1=1.1, test0=[1, 2])
def test_spike_creation(self):
assert_neo_object_is_compliant(self.spike1)
self.assertEqual(self.spike1.time, 1.5*pq.ms)
assert_arrays_equal(self.spike1.waveform, [[1.1, 1.5, 1.7],
[2.2, 2.6, 2.8]]*pq.mV)
self.assertEqual(self.spike1.sampling_rate, .1*pq.Hz)
self.assertEqual(self.spike1.left_sweep, 2.*pq.s)
self.assertEqual(self.spike1.description, 'tester')
self.assertEqual(self.spike1.file_origin, 'test.file')
self.assertEqual(self.spike1.annotations['test0'], [1, 2])
self.assertEqual(self.spike1.annotations['test1'], 1.1)
self.assertEqual(self.spike1.annotations['test2'], 'y1')
self.assertTrue(self.spike1.annotations['test3'])
def test__duration(self):
result1 = self.spike1.duration
self.spike1.sampling_rate = None
assert_neo_object_is_compliant(self.spike1)
result2 = self.spike1.duration
self.spike1.sampling_rate = self.sampling_rate1
self.spike1.waveform = None
assert_neo_object_is_compliant(self.spike1)
result3 = self.spike1.duration
self.assertEqual(result1, 30./pq.Hz)
self.assertEqual(result1.units, 1./pq.Hz)
self.assertEqual(result2, None)
self.assertEqual(result3, None)
def test__sampling_period(self):
result1 = self.spike1.sampling_period
self.spike1.sampling_rate = None
assert_neo_object_is_compliant(self.spike1)
result2 = self.spike1.sampling_period
self.spike1.sampling_rate = self.sampling_rate1
self.spike1.sampling_period = 10.*pq.ms
assert_neo_object_is_compliant(self.spike1)
result3a = self.spike1.sampling_period
result3b = self.spike1.sampling_rate
self.spike1.sampling_period = None
result4a = self.spike1.sampling_period
result4b = self.spike1.sampling_rate
self.assertEqual(result1, 10./pq.Hz)
self.assertEqual(result1.units, 1./pq.Hz)
self.assertEqual(result2, None)
self.assertEqual(result3a, 10.*pq.ms)
self.assertEqual(result3a.units, 1.*pq.ms)
self.assertEqual(result3b, .1/pq.ms)
self.assertEqual(result3b.units, 1./pq.ms)
self.assertEqual(result4a, None)
self.assertEqual(result4b, None)
def test__right_sweep(self):
result1 = self.spike1.right_sweep
self.spike1.left_sweep = None
assert_neo_object_is_compliant(self.spike1)
result2 = self.spike1.right_sweep
self.spike1.left_sweep = self.left_sweep1
self.spike1.sampling_rate = None
assert_neo_object_is_compliant(self.spike1)
result3 = self.spike1.right_sweep
self.spike1.sampling_rate = self.sampling_rate1
self.spike1.waveform = None
assert_neo_object_is_compliant(self.spike1)
result4 = self.spike1.right_sweep
self.assertEqual(result1, 32.*pq.s)
self.assertEqual(result1.units, 1.*pq.s)
self.assertEqual(result2, None)
self.assertEqual(result3, None)
self.assertEqual(result4, None)
def test__children(self):
segment = Segment(name='seg1')
segment.spikes = [self.spike1]
segment.create_many_to_one_relationship()
unit = Unit(name='unit1')
unit.spikes = [self.spike1]
unit.create_many_to_one_relationship()
self.assertEqual(self.spike1._single_parent_objects,
('Segment', 'Unit'))
self.assertEqual(self.spike1._multi_parent_objects, ())
self.assertEqual(self.spike1._single_parent_containers,
('segment', 'unit'))
self.assertEqual(self.spike1._multi_parent_containers, ())
self.assertEqual(self.spike1._parent_objects,
('Segment', 'Unit'))
self.assertEqual(self.spike1._parent_containers,
('segment', 'unit'))
self.assertEqual(len(self.spike1.parents), 2)
self.assertEqual(self.spike1.parents[0].name, 'seg1')
self.assertEqual(self.spike1.parents[1].name, 'unit1')
assert_neo_object_is_compliant(self.spike1)
@unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
def test__pretty(self):
ann = {'targ0': self.spike1.annotations['test0']}
self.spike1.annotations = ann
res = pretty(self.spike1)
targ = ("Spike " +
"name: '%s' description: '%s' annotations: %s" %
(self.spike1.name, self.spike1.description, ann))
self.assertEqual(res, targ)
if __name__ == "__main__":
unittest.main()
avg_line_length: 38.268908 | max_line_length: 78 | alphanum_fraction: 0.639438

hexsha: c034c3234606ed7aa1cd3c2989d1e9f55ec11045 | size: 6,104 | ext: py | lang: Python
max_stars: pygame_menu/examples/other/scrollbar_area.py @ apuly/pygame-menu (head 77bf8f2c8913de5a24674ee0d0d2c7c9b816a58b), licenses ["MIT"], stars: null
max_issues: pygame_menu/examples/other/scrollbar_area.py @ apuly/pygame-menu (head 77bf8f2c8913de5a24674ee0d0d2c7c9b816a58b), licenses ["MIT"], issues: null
max_forks: pygame_menu/examples/other/scrollbar_area.py @ apuly/pygame-menu (head 77bf8f2c8913de5a24674ee0d0d2c7c9b816a58b), licenses ["MIT"], forks: null
content:
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - SCROLL AREA
Shows ScrollArea widget usage.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = ['main']
import pygame
from pygame_menu import locals
from pygame_menu.examples import create_example_window
from pygame_menu.scrollarea import ScrollArea
from pygame_menu.utils import make_surface
import itertools
from typing import Generator
FPS = 30
W_SIZE = 800 # Width of window size
H_SIZE = 600 # Height of window size
COLOR_BACKGROUND = (128, 230, 198)
LEGEND = 'Area {}x{}\nWorld {}x{}\nPress [ESC] to change'
WORLDS = {
'1': {'pos': (0, 0),
'win': (W_SIZE, H_SIZE),
'size': (W_SIZE * 2, H_SIZE * 3)},
'2': {'pos': (200, 100),
'win': (W_SIZE // 2, H_SIZE // 2),
'size': (W_SIZE * 2, H_SIZE * 3)},
'3': {'pos': (50, 250),
'win': (W_SIZE // 2, H_SIZE // 2),
'size': (200, 200)},
'4': {'pos': (350, 250),
'win': (W_SIZE // 2, H_SIZE // 2),
'size': (W_SIZE // 2, H_SIZE // 2)},
'5': {'pos': (200, 200),
'win': (W_SIZE // 2, H_SIZE // 2),
'size': (W_SIZE // 2, H_SIZE // 2 + 10)},
'6': {'pos': (10, 10),
'win': (W_SIZE - 300, H_SIZE // 2),
'size': (W_SIZE - 200, H_SIZE // 2 - 10)}
}
def make_world(width: int, height: int, text: str = '') -> 'pygame.Surface':
"""
Create a test surface.
:param width: Width in pixels
:param height: Height in pixels
:param text: Text to write
:return: World surface
"""
world = make_surface(width, height)
world.fill((210, 210, 210))
font = pygame.font.SysFont('arial', 20)
posy = 60
for line in text.splitlines():
text = font.render(str(line), True, (0, 0, 0))
world.blit(text, (60, posy))
posy += text.get_height() + 10
for x in range(0, width, 10):
if x % 100 == 0 and x != 0:
pygame.draw.line(world, (255, 0, 0), (x, 0), (x, 20))
pygame.draw.line(world, (180, 180, 180), (x, 80), (x, height))
tick = font.render(str(x), True, (255, 0, 0))
world.blit(tick, (x - tick.get_width() / 2, 25))
else:
pygame.draw.line(world, (255, 0, 0), (x, 0), (x, 10))
for y in range(0, height, 10):
if y % 100 == 0 and y != 0:
pygame.draw.line(world, (255, 0, 0), (0, y), (20, y))
pygame.draw.line(world, (180, 180, 180), (80, y), (width, y))
tick = font.render(str(y), True, (255, 0, 0))
world.blit(tick, (25, y - tick.get_height() / 2))
else:
pygame.draw.line(world, (255, 0, 0), (0, y), (10, y))
return world
# noinspection PyProtectedMember
def iter_world(area: 'ScrollArea') -> Generator:
"""
Iterate through worlds.
:param area: Scroll area
    :return: Generator yielding world parameters
"""
for name in itertools.cycle(WORLDS):
params = WORLDS[name]
area._rect.width = params['win'][0]
area._rect.height = params['win'][1]
text = LEGEND.format(params['win'][0], params['win'][1],
params['size'][0], params['size'][1])
area.set_world(make_world(params['size'][0],
params['size'][1],
text))
area.set_position(*params['pos'])
yield params
def main(test: bool = False) -> None:
"""
Main function.
    :param test: If True, run only a single loop iteration (used by tests)
:return: None
"""
screen = create_example_window('Example - Scrolling Area', (W_SIZE, H_SIZE))
clock = pygame.time.Clock()
area = ScrollArea(
W_SIZE, H_SIZE,
scrollbars=(
locals.POSITION_SOUTH,
locals.POSITION_EAST,
locals.POSITION_WEST,
locals.POSITION_NORTH
)
)
worlds = iter_world(area)
next(worlds)
# -------------------------------------------------------------------------
# Main loop
# -------------------------------------------------------------------------
while True:
# Tick
clock.tick(FPS)
# Paint background
screen.fill(COLOR_BACKGROUND)
pygame.draw.rect(
screen,
(20, 89, 20),
area.get_rect().inflate(20, 20) # Inflate to see area overflow in case of bug
)
# Application events
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
exit(0)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
next(worlds)
area.update(events)
area.draw(screen)
# Update surface
pygame.display.flip()
# At first loop returns
if test:
break
if __name__ == '__main__':
main()
avg_line_length: 31.626943 | max_line_length: 90 | alphanum_fraction: 0.549803

hexsha: 2ff2d706eadd273c7647a30c057650d9efd85fef | size: 347 | ext: py | lang: Python
max_stars: tests/test_circuit/test_inheritance.py @ leonardt/magma (head d3e8c9500ec3b167df8ed067e0c0305781c94ab6), licenses ["MIT"], stars: 167 (2017-10-08T00:59:22.000Z to 2022-02-08T00:14:39.000Z)
max_issues: tests/test_circuit/test_inheritance.py @ leonardt/magma (head d3e8c9500ec3b167df8ed067e0c0305781c94ab6), licenses ["MIT"], issues: 719 (2017-08-29T17:58:28.000Z to 2022-03-31T23:39:18.000Z)
max_forks: tests/test_circuit/test_inheritance.py @ leonardt/magma (head d3e8c9500ec3b167df8ed067e0c0305781c94ab6), licenses ["MIT"], forks: 14 (2017-09-01T03:25:16.000Z to 2021-11-05T13:30:24.000Z)
content:
import magma as m
def test_circuit_base_attr():
class _FooInterface(m.Circuit):
_circuit_base_ = True
class _FooBase(_FooInterface):
pass
class _FooImpl(_FooBase):
pass
assert _FooInterface._circuit_base_ == True
assert _FooBase._circuit_base_ == False
assert _FooImpl._circuit_base_ == False
avg_line_length: 19.277778 | max_line_length: 47 | alphanum_fraction: 0.694524

hexsha: 847f8f63f9dd16feeb616b239d18047ffe30dd4c | size: 2,750 | ext: py | lang: Python
max_stars: keithleygui/utils/led_indicator_widget.py @ quaeritis/keithleygui (head f5552aff6ed69a4fd2932c298bba662e9a21a96d), licenses ["MIT"], stars: null
max_issues: keithleygui/utils/led_indicator_widget.py @ quaeritis/keithleygui (head f5552aff6ed69a4fd2932c298bba662e9a21a96d), licenses ["MIT"], issues: null
max_forks: keithleygui/utils/led_indicator_widget.py @ quaeritis/keithleygui (head f5552aff6ed69a4fd2932c298bba662e9a21a96d), licenses ["MIT"], forks: null
content:
# -*- coding: utf-8 -*-
#
# Copyright © keithleygui Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
from __future__ import division, absolute_import, print_function
from qtpy import QtGui, QtCore, QtWidgets
class LedIndicator(QtWidgets.QAbstractButton):
scaledSize = 1000.0
def __init__(self, parent=None):
QtWidgets.QAbstractButton.__init__(self, parent)
self.setMinimumSize(12, 12)
self.setCheckable(True)
self.setDisabled(True) # Make the led non clickable
self._checked = False
# Green
self.on_color_1 = QtGui.QColor(0, 255, 0)
self.on_color_2 = QtGui.QColor(0, 192, 0)
# Red
self.off_color_1 = QtGui.QColor(255, 0, 0)
self.off_color_2 = QtGui.QColor(176, 0, 0)
def resizeEvent(self, QResizeEvent):
self.update()
def paintEvent(self, QPaintEvent):
real_size = min(self.width(), self.height())
painter = QtGui.QPainter(self)
pen = QtGui.QPen(QtCore.Qt.black)
pen.setWidth(1)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.translate(self.width()/2, self.height()/2)
painter.scale(real_size/self.scaledSize, real_size/self.scaledSize)
gradient = QtGui.QRadialGradient(QtCore.QPointF(-500, -500), 1500,
QtCore.QPointF(-500, -500))
gradient.setColorAt(0, QtGui.QColor(224, 224, 224))
gradient.setColorAt(1, QtGui.QColor(28, 28, 28))
painter.setPen(pen)
painter.setBrush(QtGui.QBrush(gradient))
painter.drawEllipse(QtCore.QPointF(0, 0), 500, 500)
gradient = QtGui.QRadialGradient(QtCore.QPointF(500, 500), 1500,
QtCore.QPointF(500, 500))
gradient.setColorAt(0, QtGui.QColor(224, 224, 224))
gradient.setColorAt(1, QtGui.QColor(28, 28, 28))
painter.setPen(pen)
painter.setBrush(QtGui.QBrush(gradient))
painter.drawEllipse(QtCore.QPointF(0, 0), 450, 450)
painter.setPen(pen)
if self.isChecked():
gradient = QtGui.QRadialGradient(QtCore.QPointF(-500, -500), 1500,
QtCore.QPointF(-500, -500))
gradient.setColorAt(0, self.on_color_1)
gradient.setColorAt(1, self.on_color_2)
else:
gradient = QtGui.QRadialGradient(QtCore.QPointF(500, 500), 1500,
QtCore.QPointF(500, 500))
gradient.setColorAt(0, self.off_color_1)
gradient.setColorAt(1, self.off_color_2)
painter.setBrush(gradient)
painter.drawEllipse(QtCore.QPointF(0, 0), 400, 400)
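
A minimal sketch showing the LED in a standalone window (illustrative only; assumes a working qtpy backend such as PyQt5 is installed):

    import sys
    from qtpy import QtWidgets

    app = QtWidgets.QApplication(sys.argv)
    led = LedIndicator()
    led.setChecked(True)  # isChecked() selects the green "on" gradient in paintEvent
    led.resize(48, 48)
    led.show()
    sys.exit(app.exec_())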
avg_line_length: 37.671233 | max_line_length: 78 | alphanum_fraction: 0.614909

hexsha: 2c309c24ebbc0604018a414b42b652cba5e729a4 | size: 36,103 | ext: py | lang: Python
max_stars: green/test/test_result.py @ jwaschkau/green (head 96409beaf27f2d132def1fee4d568ba042b91769), licenses ["MIT"], stars: null
max_issues: green/test/test_result.py @ jwaschkau/green (head 96409beaf27f2d132def1fee4d568ba042b91769), licenses ["MIT"], issues: null
max_forks: green/test/test_result.py @ jwaschkau/green (head 96409beaf27f2d132def1fee4d568ba042b91769), licenses ["MIT"], forks: null
content:
# encoding: utf-8
from __future__ import unicode_literals
import copy
# `from doctest import DocTestCase` causes crashes: DocTestCase is detected as
# a TestCase subclass, so unittest.TestLoader.loadTestsFromModule() (called
# from GreenTestLoader.loadTestsFromModule()) treats it as a test definition
# and tries to run it, producing very weird crashes.
import doctest
from io import StringIO
import sys
import os
import unittest
import tempfile
from green.config import default_args
from green.output import Colors, GreenStream
from green.result import (
GreenTestResult,
proto_test,
ProtoTest,
proto_error,
ProtoTestResult,
BaseTestResult,
)
try:
from unittest.mock import MagicMock, patch
except ImportError:
from mock import MagicMock, patch
from coverage import coverage, CoverageException
class MyProtoTest(ProtoTest, object):
"""
For quickly making a ProtoTest
"""
def __init__(self):
super(MyProtoTest, self).__init__()
self.module = "my_module"
self.class_name = "MyClass"
self.method_name = "myMethod"
self.docstr_part = "My docstring"
self.subtest_part = ""
class TestBaseTestResult(unittest.TestCase):
def test_stdoutOutput(self):
"""
recordStdout records output.
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
o = "some output"
btr.recordStdout(pt, o)
self.assertEqual(btr.stdout_output[pt], o)
def test_stdoutNoOutput(self):
"""
recordStdout ignores empty output sent to it
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
btr.recordStdout(pt, "")
self.assertEqual(btr.stdout_output, {})
def test_displayStdout(self):
"""
displayStdout displays captured stdout
"""
stream = StringIO()
noise = "blah blah blah"
btr = BaseTestResult(stream, Colors(False))
pt = ProtoTest()
btr.stdout_output[pt] = noise
btr.displayStdout(pt)
self.assertIn(noise, stream.getvalue())
def test_stderrErrput(self):
"""
recordStderr records errput.
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
o = "some errput"
btr.recordStderr(pt, o)
self.assertEqual(btr.stderr_errput[pt], o)
def test_stderrNoErrput(self):
"""
recordStderr ignores empty errput sent to it
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
btr.recordStderr(pt, "")
self.assertEqual(btr.stderr_errput, {})
def test_displayStderr(self):
"""
displayStderr displays captured stderr
"""
stream = StringIO()
noise = "blah blah blah"
btr = BaseTestResult(stream, Colors(False))
pt = ProtoTest()
btr.stderr_errput[pt] = noise
btr.displayStderr(pt)
self.assertIn(noise, stream.getvalue())
class TestProtoTestResult(unittest.TestCase):
def test_addSuccess(self):
"""
addSuccess adds a test correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
ptr.addSuccess(test)
self.assertEqual(test, ptr.passing[0])
def test_addError(self):
"""
addError adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addError(test, err)
self.assertEqual(test, ptr.errors[0][0])
self.assertEqual(err, ptr.errors[0][1])
def test_addFailure(self):
"""
addFailure adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addFailure(test, err)
self.assertEqual(test, ptr.failures[0][0])
self.assertEqual(err, ptr.failures[0][1])
def test_addSkip(self):
"""
addSkip adds a test and reason correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
reason = "some plausible reason"
ptr.addSkip(test, reason)
self.assertEqual(test, ptr.skipped[0][0])
self.assertEqual(reason, ptr.skipped[0][1])
def test_addExpectedFailure(self):
"""
addExpectedFailure adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addExpectedFailure(test, err)
self.assertEqual(test, ptr.expectedFailures[0][0])
self.assertEqual(err, ptr.expectedFailures[0][1])
def test_addUnexpectedSuccess(self):
"""
addUnexpectedSuccess adds a test correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
ptr.addUnexpectedSuccess(test)
self.assertEqual(test, ptr.unexpectedSuccesses[0])
@patch("green.result.ProtoTestResult.addError")
@patch("green.result.ProtoTestResult.addFailure")
def test_addSubTest_failure(self, mock_addFailure, mock_addError):
"""
addSubTest calls over to addFailure for failures
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
test.failureException = Exception
subtest = MagicMock()
err = [Exception]
ptr.addSubTest(test, subtest, err)
mock_addFailure.assert_called_with(subtest, err)
@patch("green.result.ProtoTestResult.addError")
@patch("green.result.ProtoTestResult.addFailure")
def test_addSubTest_error(self, mock_addFailure, mock_addError):
"""
addSubTest calls over to addError for errors
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
test.failureException = KeyError
subtest = MagicMock()
err = [Exception]
ptr.addSubTest(test, subtest, err)
mock_addError.assert_called_with(subtest, err)
class TestProtoError(unittest.TestCase):
def test_str(self):
"""
Running a ProtoError through str() should result in a traceback string
"""
test_str = "noetuaoe"
try:
raise Exception(test_str)
except:
err = sys.exc_info()
pe = proto_error(err)
self.assertIn(test_str, str(pe))
class TestProtoTest(unittest.TestCase):
def test_ProtoTestBlank(self):
"""
ProtoTest can be instantiated empty
"""
pt = ProtoTest()
for i in ["module", "class_name", "docstr_part", "method_name"]:
self.assertEqual("", getattr(pt, i, None))
def test_str(self):
"""
Running a ProtoTest through str() is the same as getting .dotted_name
"""
pt = ProtoTest()
pt.module = "aoeusnth"
self.assertEqual(str(pt), pt.dotted_name)
def test_ProtoTestFromTest(self):
"""
Passing a test into ProtoTest copies out the relevant info.
"""
module = "green.test.test_result"
class_name = "Small"
docstr_part = "stuff"
method_name = "test_method"
class Small(unittest.TestCase):
def test_method(self):
"stuff"
pt = ProtoTest(Small("test_method"))
for i in ["module", "class_name", "docstr_part", "method_name"]:
self.assertEqual(locals()[i], getattr(pt, i, None))
def test_getDescription(self):
"""
getDescription() returns what we expect for all verbose levels
"""
# With a docstring
class Fruit(unittest.TestCase):
def test_stuff(self):
"apple"
pass
t = proto_test(Fruit("test_stuff"))
self.assertEqual(t.getDescription(1), "")
self.assertEqual(t.getDescription(2), "test_stuff")
self.assertEqual(t.getDescription(3), "apple")
self.assertEqual(t.getDescription(4), "apple")
# Without a docstring
class Vegetable(unittest.TestCase):
def test_stuff(self):
pass
t = proto_test(Vegetable("test_stuff"))
self.assertEqual(t.getDescription(1), "")
self.assertEqual(t.getDescription(2), "test_stuff")
self.assertEqual(t.getDescription(3), "test_stuff")
self.assertEqual(t.getDescription(4), "test_stuff")
def test_newlineDocstring(self):
"""
Docstrings starting with a newline are properly handled.
"""
class MyTests(unittest.TestCase):
def test_stuff(self):
"""
tricky
"""
pass
test = proto_test(MyTests("test_stuff"))
self.assertIn("tricky", test.getDescription(3))
def test_multilineDocstring(self):
"""
The description includes all of docstring until the first blank line.
"""
class LongDocs(unittest.TestCase):
def test_long(self):
"""First line is
tricky!
garbage
"""
pass
test = proto_test(LongDocs("test_long"))
self.assertIn("tricky", test.getDescription(3))
self.assertNotIn("garbage", test.getDescription(3))
def test_doctest(self):
"""
If we parse a doctest, we get all the fields we need.
"""
test = """
>>> f()
42
"""
def f():
return 42
parser = doctest.DocTestParser()
dt = parser.get_doctest(test, {"f": f}, "doctest.name", "somefile.py", 20)
dt.__module__ = "somefile"
p = proto_test(doctest.DocTestCase(dt))
# short description
self.assertEqual(p.getDescription(2), "doctest.name")
# long description
description = p.getDescription(3)
self.assertIn("doctest.name", description)
self.assertIn("somefile.py", description)
self.assertIn("20", description)
# dotted name
self.assertEqual(p.dotted_name, "doctest.name")
def test_class_or_module_failure(self):
"""
If we parse an error from a class or module failure, we get the correct result.
"""
p = ProtoTest()
p.is_class_or_module_teardown_error = True
p.name = "the thing"
self.assertEqual(p.getDescription(1), "the thing")
self.assertEqual(p.getDescription(2), "the thing")
self.assertEqual(p.getDescription(3), "the thing")
class TestGreenTestResult(unittest.TestCase):
def setUp(self):
self.args = copy.deepcopy(default_args)
self.stream = StringIO()
def tearDown(self):
del self.stream
del self.args
@patch("green.result.GreenTestResult.printErrors")
def test_stopTestRun(self, mock_printErrors):
"""
We ignore coverage's error about not having anything to cover.
"""
self.args.cov = MagicMock()
self.args.cov.stop = MagicMock(
side_effect=CoverageException("Different Exception")
)
self.args.run_coverage = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
self.assertRaises(CoverageException, gtr.stopTestRun)
self.args.cov.stop = MagicMock(
side_effect=CoverageException("No data to report")
)
def test_tryRecordingStdoutStderr(self):
"""
Recording stdout and stderr works correctly.
"""
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.recordStdout = MagicMock()
gtr.recordStderr = MagicMock()
output = "apple"
test1 = MagicMock()
ptr1 = MagicMock()
ptr1.stdout_output = {test1: output}
ptr1.stderr_errput = {}
errput = "banana"
test2 = MagicMock()
ptr2 = MagicMock()
ptr2.stdout_output = {}
ptr2.stderr_errput = {test2: errput}
gtr.tryRecordingStdoutStderr(test1, ptr1)
gtr.recordStdout.assert_called_with(test1, output)
gtr.tryRecordingStdoutStderr(test2, ptr2)
gtr.recordStderr.assert_called_with(test2, errput)
def test_failfastAddError(self):
"""
addError triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
try:
raise Exception
except:
err = sys.exc_info()
self.assertEqual(gtr.shouldStop, False)
gtr.addError(MyProtoTest(), proto_error(err))
self.assertEqual(gtr.shouldStop, True)
def test_failfastAddFailure(self):
"""
addFailure triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
try:
raise Exception
except:
err = sys.exc_info()
self.assertEqual(gtr.shouldStop, False)
gtr.addFailure(MyProtoTest(), proto_error(err))
self.assertEqual(gtr.shouldStop, True)
def test_failfastAddUnexpectedSuccess(self):
"""
addUnexpectedSuccess no longer triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
self.assertEqual(gtr.shouldStop, False)
gtr.addUnexpectedSuccess(MyProtoTest())
self.assertEqual(gtr.shouldStop, False)
def _outputFromVerboseTest(self):
"""
Start a test with verbose = 2 and get its output.
"""
class FakeCase(unittest.TestCase):
def runTest(self):
pass
self.args.verbose = 2
gtr = GreenTestResult(self.args, GreenStream(self.stream))
tc = FakeCase()
gtr.startTest(tc)
output = self.stream.getvalue()
return output.split("\n")
def test_startTestVerboseTerminal(self):
"""
startTest() contains output we expect in verbose mode on a terminal
"""
self.stream.isatty = lambda: True
output_lines = self._outputFromVerboseTest()
# Output should look like (I'm not putting the termcolor formatting
# here)
# green.test.test_runner
# FakeCase
# test_it
self.assertEqual(len(output_lines), 3)
self.assertNotIn(" ", output_lines[0])
self.assertIn(" ", output_lines[1])
self.assertIn(" ", output_lines[2])
def test_startTestVerbosePipe(self):
"""
startTest() contains output we expect in verbose mode on a pipe
"""
self.stream.isatty = lambda: False
output_lines = self._outputFromVerboseTest()
# Output should look like (I'm not putting the termcolor formatting
# here)
# green.test.test_runner
# FakeCase
# test_it
self.assertEqual(len(output_lines), 3)
self.assertNotIn(" ", output_lines[0])
self.assertIn(" ", output_lines[1])
# No carriage return or extra lines printed
self.assertIn("", output_lines[2])
def test_reportOutcome(self):
"""
_reportOutcome contains output we expect.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr._reportOutcome(None, ".", lambda x: x)
self.assertIn(".", self.stream.getvalue())
@patch("green.result.proto_test")
def test_reportOutcomeCursorUp(self, mock_proto_test):
"""
_reportOutcome moves the cursor up when it needs to.
"""
mockProtoTest = MagicMock()
mockProtoTest.getDescription.return_value = "a description"
mock_proto_test.return_value = mockProtoTest
self.args.verbose = 2
def isatty():
return True
gs = GreenStream(self.stream)
gs.isatty = isatty
gtr = GreenTestResult(self.args, gs)
r = "a fake reason"
t = MagicMock()
t.__str__.return_value = "x" * 1000
gtr._reportOutcome(t, ".", lambda x: x, None, r)
self.assertIn(r, self.stream.getvalue())
self.assertLess(len(self.stream.getvalue()), 2000)
@patch("green.result.proto_test")
def test_reportOutcomeVerbose(self, mock_proto_test):
"""
_reportOutcome contains output we expect in verbose mode.
"""
mockProtoTest = MagicMock()
mockProtoTest.getDescription.return_value = "a description"
mock_proto_test.return_value = mockProtoTest
self.args.verbose = 2
def isatty():
return True
gs = GreenStream(self.stream)
gs.isatty = isatty
gtr = GreenTestResult(self.args, gs)
r = "a fake reason"
t = MagicMock()
t.__str__.return_value = "junk"
gtr._reportOutcome(t, ".", lambda x: x, None, r)
self.assertIn(r, self.stream.getvalue())
def test_printErrorsSkipreport(self):
"""
printErrors() prints the skip report.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
reason = "dog ate homework"
gtr.addSkip(pt, reason)
gtr.printErrors()
self.assertIn(reason, self.stream.getvalue())
def test_printErrorsStdout(self):
"""
printErrors() prints out the captured stdout.
"""
self.args.verbose = 1
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertIn(output, self.stream.getvalue())
def test_printErrorsStdoutQuietStdoutOnSuccess(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests.
"""
self.args.quiet_stdout = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should not spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertNotIn(output, self.stream.getvalue())
def test_printErrorsStdoutQuietStdoutOnError(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests, but here we are on a
failing test.
"""
self.args.quiet_stdout = True
try:
raise Exception
except:
err = sys.exc_info()
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addError(pt, proto_error(err))
gtr.printErrors()
self.assertIn(output, self.stream.getvalue())
def test_printErrorsStderrQuietStdoutOnSuccess(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests.
"""
self.args.quiet_stdout = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should not spit out to stdout"
gtr.recordStderr(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertNotIn(output, self.stream.getvalue())
def test_printErrorsNoTracebacks(self):
"""
printErrors() omits tracebacks for failures and errors when
no_tracebacks is True
"""
self.args.no_tracebacks = True
try:
raise Exception
except:
err = sys.exc_info()
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
gtr.addError(pt, proto_error(err))
gtr.printErrors()
self.assertNotIn("Exception", self.stream.getvalue())
def test_printErrorsDots(self):
"""
printErrors() looks correct in verbose=1 (dots) mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 1
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsDots", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose2(self):
"""
printErrors() looks correct in verbose=2 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 2
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose2", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose3(self):
"""
printErrors() looks correct in verbose=3 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 3
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose3", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose4(self):
"""
printErrors() looks correct in verbose=4 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 4
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), err)
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("(most recent call last)", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose4", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrors_Py2Unicode(self):
"""
printErrors() doesn't crash in Python 2 when tracebacks contain unicode
"""
try:
raise Exception("Das Böse ist immer und überall")
except:
err = sys.exc_info()
self.args.verbose = 1
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors() # We shouldn't hit an exception here
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
self.assertIn("Böse", self.stream.getvalue())
def test_addProtoTestResult(self):
"""
addProtoTestResult adds the correct things to the correct places.
"""
ptr = ProtoTestResult()
err_t = proto_test(MagicMock())
try:
raise Exception
except:
err_e = proto_error(sys.exc_info())
ptr.addError(err_t, err_e)
ef_t = proto_test(MagicMock())
try:
raise Exception
except:
ef_e = proto_error(sys.exc_info())
ptr.addExpectedFailure(ef_t, ef_e)
fail_t = proto_test(MagicMock())
try:
raise Exception
except:
fail_e = proto_error(sys.exc_info())
ptr.addFailure(fail_t, fail_e)
pass_t = proto_test(MagicMock())
ptr.addSuccess(pass_t)
skip_t = proto_test(MagicMock())
skip_r = proto_test(MagicMock())
ptr.addSkip(skip_t, skip_r)
us_t = proto_test(MagicMock())
ptr.addUnexpectedSuccess(us_t)
self.args.verbose = 0
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addProtoTestResult(ptr)
self.assertEqual(gtr.errors, [(err_t, err_e)])
self.assertEqual(gtr.expectedFailures, [(ef_t, ef_e)])
self.assertEqual(gtr.failures, [(fail_t, fail_e)])
self.assertEqual(gtr.passing, [pass_t])
self.assertEqual(gtr.skipped, [(skip_t, skip_r)])
self.assertEqual(gtr.unexpectedSuccesses, [us_t])
def test_stopTestRun_processes_message(self):
"""
StopTestRun adds number of processes used to summary
"""
self.args.processes = 4
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
gtr.stopTestRun()
self.assertIn("using 4 processes\n", self.stream.getvalue())
def test_stopTestRun_singular_process_message(self):
"""
StopTestRun adds correct summary when one process is used
"""
self.args.processes = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
gtr.stopTestRun()
self.assertIn("using 1 process\n", self.stream.getvalue())
class TestGreenTestResultAdds(unittest.TestCase):
def setUp(self):
self.stream = StringIO()
self.args = copy.deepcopy(default_args)
self.args.verbose = 0
self.gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.gtr._reportOutcome = MagicMock()
def tearDown(self):
del self.stream
del self.gtr
def test_addSuccess(self):
"""
addSuccess() makes the correct calls to other functions.
"""
test = MagicMock()
test.shortDescription.return_value = "a"
test.__str__.return_value = "b"
test = proto_test(test)
self.gtr.addSuccess(test)
self.gtr._reportOutcome.assert_called_with(test, ".", self.gtr.colors.passing)
def test_addSuccess_with_test_time(self):
"""
        addSuccess() sets test time to the correct value
"""
test = MagicMock()
test.shortDescription.return_value = "a"
test.__str__.return_value = "b"
test = proto_test(test)
self.gtr.addSuccess(test, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addError(self):
"""
addError() makes the correct calls to other functions.
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addError(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "E", self.gtr.colors.error, err
)
def test_addError_with_test_time(self):
"""
        addError() sets test time to the correct value
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addError(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addFailure(self):
"""
addFailure() makes the correct calls to other functions.
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "F", self.gtr.colors.failing, err
)
def test_addFailure_with_test_time(self):
"""
        addFailure() sets test time to the correct value
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addFailure(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addFailureTwistedSkip(self):
"""
Twisted's practice of calling addFailure() with their skips is detected
and redirected to addSkip()
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
reason = "Twisted is odd"
err = proto_error(err)
err.traceback_lines = ["UnsupportedTrialFeature: ('skip', '{}')".format(reason)]
self.gtr.addFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "s", self.gtr.colors.skipped, reason=reason
)
def test_addSkip(self):
"""
addSkip() makes the correct calls to other functions.
"""
test = proto_test(MagicMock())
reason = "skip reason"
self.gtr.addSkip(test, reason)
self.gtr._reportOutcome.assert_called_with(
test, "s", self.gtr.colors.skipped, reason=reason
)
def test_addSkip_with_test_time(self):
"""
        addSkip() sets test time to the correct value
"""
test = proto_test(MagicMock())
reason = "skip reason"
self.gtr.addSkip(test, reason, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addExpectedFailure(self):
"""
addExpectedFailure() makes the correct calls to other functions.
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addExpectedFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "x", self.gtr.colors.expectedFailure, err
)
    def test_addExpectedFailure_with_test_time(self):
        """
        addExpectedFailure() sets test time to the correct value
        """
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addExpectedFailure(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addUnexpectedSuccess(self):
"""
addUnexpectedSuccess() makes the correct calls to other functions.
"""
test = proto_test(MagicMock())
self.gtr.addUnexpectedSuccess(test)
self.gtr._reportOutcome.assert_called_with(
test, "u", self.gtr.colors.unexpectedSuccess
)
def test_addUnexpectedSuccess_with_test_time(self):
"""
        addUnexpectedSuccess() sets test time to the correct value
"""
test = proto_test(MagicMock())
self.gtr.addUnexpectedSuccess(test, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_wasSuccessful(self):
"""
wasSuccessful returns what we expect.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.wasSuccessful(), False)
gtr.passing.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
gtr.all_errors.append("anything")
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_expectedFailures(self):
"""
wasSuccessful returns what we expect when we only have expectedFailures
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.expectedFailures.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_passing(self):
"""
wasSuccessful returns what we expect when we only have passing tests
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.passing.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_skipped(self):
"""
wasSuccessful returns what we expect when we only have skipped tests
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.skipped.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_unexpectedSuccesses(self):
"""
wasSuccessful returns what we expect when we only have unexpectedSuccesses
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.unexpectedSuccesses.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_coverageFails(self):
"""
wasSuccessful fails if minimum coverage is not met
"""
self.args.minimum_coverage = 50
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.coverage_percent = 49
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_coverageSucceeds(self):
"""
        wasSuccessful succeeds if minimum coverage is met
"""
self.args.minimum_coverage = 50
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.passing.append("anything")
gtr.coverage_percent = 60
self.assertEqual(gtr.wasSuccessful(), True)
class TestGreenTestRunCoverage(unittest.TestCase):
def setUp(self):
self.args = copy.deepcopy(default_args)
cov_file = tempfile.NamedTemporaryFile(delete=False)
cov_file.close()
self.args.cov = coverage(
data_file=cov_file.name,
omit=self.args.omit_patterns,
include=self.args.include_patterns,
)
self.args.cov.start()
self.stream = StringIO()
def tearDown(self):
del self.stream
del self.args
def _outputFromTest(self, args):
class FakeCase(unittest.TestCase):
def runTest(self):
pass
gtr = GreenTestResult(args, GreenStream(self.stream))
gtr.startTestRun()
gtr.startTest(FakeCase())
gtr.stopTestRun()
output = self.stream.getvalue()
return output.split("\n")
def test_coverage(self):
self.args.run_coverage = True
output = self._outputFromTest(self.args)
self.assertIn("Stmts Miss Cover Missing", "\n".join(output))
def test_quiet_coverage(self):
self.args.run_coverage = True
self.args.quiet_coverage = True
output = self._outputFromTest(self.args)
self.assertNotIn("Stmts Miss Cover Missing", "\n".join(output))
avg_line_length: 32.583935 | max_line_length: 88 | alphanum_fraction: 0.605739

hexsha: 06000915a7d48e4d8a23145392b45b4e8475f58a | size: 1,207 | ext: py | lang: Python
max_stars: class9/collateral/napalm_get_config.py @ ksannedhi/pyplus_course (head fc3499f2dfef472dc49fe6caddf2e6e2be160f4b), licenses ["Apache-2.0"], stars: 39 (2019-03-03T18:16:55.000Z to 2022-02-17T17:05:18.000Z)
max_issues: class9/collateral/napalm_get_config.py @ ksannedhi/pyplus_course (head fc3499f2dfef472dc49fe6caddf2e6e2be160f4b), licenses ["Apache-2.0"], issues: 1 (2020-06-17T22:39:28.000Z to 2020-06-17T22:39:28.000Z)
max_forks: class9/collateral/napalm_get_config.py @ ksannedhi/pyplus_course (head fc3499f2dfef472dc49fe6caddf2e6e2be160f4b), licenses ["Apache-2.0"], forks: 77 (2019-01-25T10:41:23.000Z to 2022-03-14T21:35:59.000Z)
content:
#!/usr/bin/env python
from getpass import getpass
from napalm import get_network_driver
# Suppress SSL Certificate Warnings
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Device definitions
password = getpass()
cisco3 = dict(
hostname="cisco3.lasthop.io",
device_type="ios",
username="pyclass",
password=password,
optional_args={},
)
nxos1 = dict(
hostname="nxos1.lasthop.io",
device_type="nxos",
username="pyclass",
password=password,
optional_args={"port": 8443},
)
eos1 = dict(
hostname="arista1.lasthop.io",
device_type="eos",
username="pyclass",
password=password,
)
# Device we are testing
my_device = cisco3
# NAPALM Class Selection/Object Creation
device_type = my_device.pop("device_type")
driver = get_network_driver(device_type)
device = driver(**my_device)
# NAPALM Action
print()
print("\n\n>>>Test device open")
device.open()
print()
# output = device.get_facts()
output = device.get_config()["running"]
# output = device.get_lldp_neighbors()
print(output)
with open("cisco3.txt", "w") as f:
f.write(output)
print()
avg_line_length: 21.553571 | max_line_length: 71 | alphanum_fraction: 0.728252

hexsha: 34d94c4c9e119b9717235619d30db4078ac13b18 | size: 708 | ext: py | lang: Python
max_stars: djflow/apps/security/signals.py @ jasmanysanchez/djflow (head 44dea3221a9228dd8685184ab0ecc2108671a5e6), licenses ["MIT"], stars: 20 (2017-09-27T01:57:54.000Z to 2021-07-22T19:09:20.000Z)
max_issues: djflow/apps/security/signals.py @ yunica/djflow (head 244b301ed585fe46ba152e9d08f9b23622dcc697), licenses ["MIT"], issues: null
max_forks: djflow/apps/security/signals.py @ yunica/djflow (head 244b301ed585fe46ba152e9d08f9b23622dcc697), licenses ["MIT"], forks: 12 (2017-09-29T14:12:28.000Z to 2020-06-16T19:46:32.000Z)
content:
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.contrib.auth.models import User
from .models import UserProfile
@receiver(post_save, sender=User)
def new_user(sender, **kwargs):
"""
Creamos un UserProfile cuando se crea un nuevo User.
"""
if kwargs.get('created', False):
userprofile = UserProfile()
userprofile.user = kwargs.get("instance")
userprofile.save()
@receiver(post_delete, sender=UserProfile)
def delete_userprofile(sender, **kwargs):
"""
Eliminamos el usuario vinculado a un userprofile.
"""
userprofile = kwargs.get("instance")
user = userprofile.user
user.delete()
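
A hedged illustration of the signal flow (illustrative only; it assumes UserProfile.user references User as defined in the imported models):

    # post_save on User -> new_user creates the matching profile
    user = User.objects.create(username="alice")
    assert UserProfile.objects.filter(user=user).exists()

    # post_delete on UserProfile -> delete_userprofile removes the linked User
    UserProfile.objects.get(user=user).delete()
    assert not User.objects.filter(pk=user.pk).exists()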
avg_line_length: 27.230769 | max_line_length: 59 | alphanum_fraction: 0.706215

hexsha: 273059e86ab4c3d4c9c26880cbd0f33d2ff13567 | size: 4,858 | ext: py | lang: Python
max_stars: zip_0143.py @ paracetamolo/zcash-test-vectors (head 0db0553a93282768faf95fc995d38fab74ceb9f1), licenses ["Apache-2.0", "MIT"], stars: null
max_issues: zip_0143.py @ paracetamolo/zcash-test-vectors (head 0db0553a93282768faf95fc995d38fab74ceb9f1), licenses ["Apache-2.0", "MIT"], issues: null
max_forks: zip_0143.py @ paracetamolo/zcash-test-vectors (head 0db0553a93282768faf95fc995d38fab74ceb9f1), licenses ["Apache-2.0", "MIT"], forks: null
content:
#!/usr/bin/env python3
from pyblake2 import blake2b
import struct
from transaction import (
MAX_MONEY,
OVERWINTER_TX_VERSION,
Script,
Transaction,
)
from tv_output import render_args, render_tv, Some
from tv_rand import Rand
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
NOT_AN_INPUT = -1 # For portability of the test vectors; replaced with None for Rust
def getHashPrevouts(tx):
digest = blake2b(digest_size=32, person=b'ZcashPrevoutHash')
for x in tx.vin:
digest.update(bytes(x.prevout))
return digest.digest()
def getHashSequence(tx):
digest = blake2b(digest_size=32, person=b'ZcashSequencHash')
for x in tx.vin:
digest.update(struct.pack('<I', x.nSequence))
return digest.digest()
def getHashOutputs(tx):
digest = blake2b(digest_size=32, person=b'ZcashOutputsHash')
for x in tx.vout:
digest.update(bytes(x))
return digest.digest()
def getHashJoinSplits(tx):
digest = blake2b(digest_size=32, person=b'ZcashJSplitsHash')
for jsdesc in tx.vJoinSplit:
digest.update(bytes(jsdesc))
digest.update(tx.joinSplitPubKey)
return digest.digest()
def signature_hash(scriptCode, tx, nIn, nHashType, amount, consensusBranchId):
hashPrevouts = b'\x00'*32
hashSequence = b'\x00'*32
hashOutputs = b'\x00'*32
hashJoinSplits = b'\x00'*32
if not (nHashType & SIGHASH_ANYONECANPAY):
hashPrevouts = getHashPrevouts(tx)
if (not (nHashType & SIGHASH_ANYONECANPAY)) and \
(nHashType & 0x1f) != SIGHASH_SINGLE and \
(nHashType & 0x1f) != SIGHASH_NONE:
hashSequence = getHashSequence(tx)
if (nHashType & 0x1f) != SIGHASH_SINGLE and \
(nHashType & 0x1f) != SIGHASH_NONE:
hashOutputs = getHashOutputs(tx)
elif (nHashType & 0x1f) == SIGHASH_SINGLE and \
0 <= nIn and nIn < len(tx.vout):
digest = blake2b(digest_size=32, person=b'ZcashOutputsHash')
digest.update(bytes(tx.vout[nIn]))
hashOutputs = digest.digest()
if len(tx.vJoinSplit) > 0:
hashJoinSplits = getHashJoinSplits(tx)
digest = blake2b(
digest_size=32,
person=b'ZcashSigHash' + struct.pack('<I', consensusBranchId),
)
digest.update(struct.pack('<I', tx.header()))
digest.update(struct.pack('<I', tx.nVersionGroupId))
digest.update(hashPrevouts)
digest.update(hashSequence)
digest.update(hashOutputs)
digest.update(hashJoinSplits)
digest.update(struct.pack('<I', tx.nLockTime))
digest.update(struct.pack('<I', tx.nExpiryHeight))
digest.update(struct.pack('<I', nHashType))
if nIn != NOT_AN_INPUT:
digest.update(bytes(tx.vin[nIn].prevout))
digest.update(bytes(scriptCode))
digest.update(struct.pack('<Q', amount))
digest.update(struct.pack('<I', tx.vin[nIn].nSequence))
return digest.digest()
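# Illustrative aside (not from the original module): the personalized-BLAKE2b
# pattern used throughout this file also works with the standard library on
# Python 3.6+, e.g.:
#
#     import hashlib, struct
#     d = hashlib.blake2b(digest_size=32,
#                         person=b'ZcashSigHash' + struct.pack('<I', 0x5ba81b19))
#     d.update(b'...serialized transaction fields...')
#     sighash = d.digest()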
def main():
args = render_args()
from random import Random
rng = Random(0xabad533d)
def randbytes(l):
ret = []
while len(ret) < l:
ret.append(rng.randrange(0, 256))
return bytes(ret)
rand = Rand(randbytes)
consensusBranchId = 0x5ba81b19 # Overwinter
test_vectors = []
for i in range(10):
tx = Transaction(rand, OVERWINTER_TX_VERSION)
scriptCode = Script(rand)
nIn = rand.u8() % (len(tx.vin) + 1)
if nIn == len(tx.vin):
nIn = NOT_AN_INPUT
nHashType = SIGHASH_ALL if nIn == NOT_AN_INPUT else rand.a([
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ALL | SIGHASH_ANYONECANPAY,
SIGHASH_NONE | SIGHASH_ANYONECANPAY,
SIGHASH_SINGLE | SIGHASH_ANYONECANPAY,
])
amount = rand.u64() % (MAX_MONEY + 1)
sighash = signature_hash(
scriptCode,
tx,
nIn,
nHashType,
amount,
consensusBranchId,
)
test_vectors.append({
'tx': bytes(tx),
'script_code': scriptCode.raw(),
'transparent_input': nIn,
'hash_type': nHashType,
'amount': amount,
'consensus_branch_id': consensusBranchId,
'sighash': sighash,
})
render_tv(
args,
'zip_0143',
(
('tx', {'rust_type': 'Vec<u8>', 'bitcoin_flavoured': False}),
('script_code', 'Vec<u8>'),
('transparent_input', {
'rust_type': 'Option<u32>',
'rust_fmt': lambda x: None if x == -1 else Some(x),
}),
('hash_type', 'u32'),
('amount', 'i64'),
('consensus_branch_id', 'u32'),
('sighash', '[u8; 32]'),
),
test_vectors,
)
if __name__ == '__main__':
main()
avg_line_length: 28.745562 | max_line_length: 84 | alphanum_fraction: 0.602923

hexsha: 4d5d18dbd9172f9d9a5a61e4828c47d14d475ba8 | size: 14,425 | ext: py | lang: Python
max_stars: astroquery/utils/commons.py @ olyoberdorf/astroquery (head 7de90842fae95524c67173de4bae3f7d7a705aec), licenses ["BSD-3-Clause"], stars: 577 (2015-02-12T18:23:49.000Z to 2022-03-22T21:38:58.000Z)
max_issues: astroquery/utils/commons.py @ olyoberdorf/astroquery (head 7de90842fae95524c67173de4bae3f7d7a705aec), licenses ["BSD-3-Clause"], issues: 1,812 (2015-01-01T08:02:20.000Z to 2022-03-31T13:03:52.000Z)
max_forks: astroquery/utils/commons.py @ olyoberdorf/astroquery (head 7de90842fae95524c67173de4bae3f7d7a705aec), licenses ["BSD-3-Clause"], forks: 322 (2015-02-23T19:31:29.000Z to 2022-03-25T18:51:30.000Z)
content:
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Common functions and classes that are required by all query classes.
"""
import re
import warnings
import os
import shutil
import socket
from io import BytesIO, StringIO
from urllib.error import URLError
import requests
import astropy.units as u
from astropy import coordinates as coord
from collections import OrderedDict
from astropy.utils import deprecated, minversion
import astropy.utils.data as aud
from astropy.io import fits, votable
from astropy.coordinates import BaseCoordinateFrame
from ..exceptions import TimeoutError, InputWarning
from .. import version
def ICRSCoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='icrs', **kwargs)
def GalacticCoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='galactic', **kwargs)
def FK5CoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='fk5', **kwargs)
def FK4CoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='fk4', **kwargs)
ICRSCoord = coord.SkyCoord
CoordClasses = (coord.SkyCoord, BaseCoordinateFrame)
__all__ = ['send_request',
'parse_coordinates',
'TableList',
'suppress_vo_warnings',
'validate_email',
'ASTROPY_LT_4_1',
'ASTROPY_LT_4_3']
ASTROPY_LT_4_1 = not minversion('astropy', '4.1')
ASTROPY_LT_4_3 = not minversion('astropy', '4.3')
@deprecated('0.4.4', alternative='astroquery.query.BaseQuery._request')
def send_request(url, data, timeout, request_type='POST', headers=None,
                 **kwargs):
"""
    A utility function that posts HTTP requests to a remote server
    and returns the HTTP response.
Parameters
----------
url : str
The URL of the remote server
data : dict
A dictionary representing the payload to be posted via the HTTP request
timeout : int, quantity_like
        Time limit for establishing a successful connection with the remote server
request_type : str
options are 'POST' (default) and 'GET'. Determines whether to perform
an HTTP POST or an HTTP GET request
headers : dict
POST or GET headers. user-agent will be set to
astropy:astroquery.version
Returns
-------
response : `requests.Response`
Response object returned by the remote server
"""
    headers = dict(headers or {})  # avoid mutating a shared default dict
    headers['User-Agent'] = ('astropy:astroquery.{vers}'
                             .format(vers=version.version))
if hasattr(timeout, "unit"):
warnings.warn("Converting timeout to seconds and truncating "
"to integer.", InputWarning)
timeout = int(timeout.to(u.s).value)
try:
if request_type == 'GET':
response = requests.get(url, params=data, timeout=timeout,
headers=headers, **kwargs)
elif request_type == 'POST':
response = requests.post(url, data=data, timeout=timeout,
headers=headers, **kwargs)
else:
raise ValueError("request_type must be either 'GET' or 'POST'.")
response.raise_for_status()
return response
except requests.exceptions.Timeout:
raise TimeoutError("Query timed out, time elapsed {time}s".
format(time=timeout))
except requests.exceptions.RequestException as ex:
raise Exception("Query failed: {0}\n".format(ex))
def radius_to_unit(radius, unit='degree'):
"""
Helper function: Parse a radius, then return its value in degrees
Parameters
----------
radius : str or `~astropy.units.Quantity`
The radius of a region
Returns
-------
Floating point scalar value of radius in degrees
"""
rad = coord.Angle(radius)
if isinstance(unit, str):
if hasattr(rad, unit):
return getattr(rad, unit)
elif hasattr(rad, f"{unit}s"):
return getattr(rad, f"{unit}s")
return rad.to(unit).value
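# Example usage (illustrative, not part of the original module):
#   radius_to_unit('0.05 deg', 'arcsec')     # -> 180.0
#   radius_to_unit(30 * u.arcmin, 'degree')  # -> 0.5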
def parse_coordinates(coordinates):
"""
    Takes a string or an astropy.coordinates object. If given a string,
    checks whether it is parsable as an `astropy.coordinates` object or is
    a name resolvable via an online lookup. Otherwise verifies that the
    argument is already an astropy.coordinates object.
Parameters
----------
coordinates : str or `astropy.coordinates` object
Astronomical coordinate
Returns
-------
coordinates : a subclass of `astropy.coordinates.BaseCoordinateFrame`
Raises
------
astropy.units.UnitsError
TypeError
"""
if isinstance(coordinates, str):
try:
c = ICRSCoordGenerator(coordinates)
warnings.warn("Coordinate string is being interpreted as an "
"ICRS coordinate.", InputWarning)
except u.UnitsError:
warnings.warn("Only ICRS coordinates can be entered as "
"strings.\n For other systems please use the "
"appropriate astropy.coordinates object.", InputWarning)
raise u.UnitsError
except ValueError as err:
if isinstance(err.args[1], u.UnitsError):
try:
c = ICRSCoordGenerator(coordinates, unit='deg')
warnings.warn("Coordinate string is being interpreted as an "
"ICRS coordinate provided in degrees.", InputWarning)
except ValueError:
c = ICRSCoord.from_name(coordinates)
else:
c = ICRSCoord.from_name(coordinates)
elif isinstance(coordinates, CoordClasses):
if hasattr(coordinates, 'frame'):
c = coordinates
else:
# Convert the "frame" object into a SkyCoord
c = coord.SkyCoord(coordinates)
else:
raise TypeError("Argument cannot be parsed as a coordinate")
return c
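# Example usage (illustrative, not part of the original module):
#   parse_coordinates('10h20m30s +15d30m00s')  # parsed as an ICRS SkyCoord
#   parse_coordinates('M31')                   # falls back to a name lookup
#   parse_coordinates(coord.SkyCoord(10.68, 41.27, unit='deg'))  # passed through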
def coord_to_radec(coordinate):
"""
    Wrapper to turn any astropy coordinate into FK5 RA in hours and FK5 Dec
    in degrees.
    This is a hack / temporary wrapper to deal with the unstable astropy API
(it may be wise to remove this hack since it's not clear that the old
coordinate API can even do transforms)
"""
C = coordinate.transform_to('fk5')
if hasattr(C.ra, 'hour'):
ra = C.ra.hour
elif hasattr(C.ra, 'hourangle'):
ra = C.ra.hourangle
else:
raise ValueError("API Error: RA cannot be converted to hour "
"or hourangle.")
dec = C.dec.degree
return ra, dec
class TableList(list):
"""
A class that inherits from `list` but included some pretty printing methods
for an OrderedDict of `astropy.table.Table` objects.
HINT: To access the tables by # instead of by table ID:
>>> t = TableList([('a',1),('b',2)])
>>> t[1]
2
>>> t['b']
2
"""
def __init__(self, inp):
if not isinstance(inp, OrderedDict):
try:
inp = OrderedDict(inp)
except (TypeError, ValueError):
raise ValueError("Input to TableList must be an OrderedDict "
"or list of (k,v) pairs")
self._dict = inp
super(TableList, self).__init__(inp.values())
def __getitem__(self, key):
if isinstance(key, int):
# get the value in the (key,value) pair
return super(TableList, self).__getitem__(key)
elif key in self._dict:
return self._dict[key]
else:
raise TypeError("TableLists can only be indexed with the "
"named keys and integers.")
    def __setitem__(self, key, value):
raise TypeError("TableList is immutable.")
    def __getslice__(self, islice):
        return list(self.values())[islice]
def keys(self):
return list(self._dict.keys())
def values(self):
return list(self._dict.values())
def __repr__(self):
"""
Overrides the `OrderedDict.__repr__` method to return a simple summary
of the `TableList` object.
"""
return self.format_table_list()
def format_table_list(self):
"""
        Lists the names of all `astropy.table.Table` objects, with their
        respective numbers of rows and columns, contained in the
        `TableList` instance.
"""
ntables = len(list(self.keys()))
if ntables == 0:
return "Empty TableList"
header_str = "TableList with {keylen} tables:".format(keylen=ntables)
body_str = "\n".join(["\t'{t_number}:{t_name}' with {ncol} column(s) "
"and {nrow} row(s) "
.format(t_number=t_number, t_name=t_name,
nrow=len(self[t_number]),
ncol=len(self[t_number].colnames))
for t_number, t_name in enumerate(self.keys())])
return "\n".join([header_str, body_str])
def print_table_list(self):
print(self.format_table_list())
def pprint(self, **kwargs):
""" Helper function to make API more similar to astropy.Tables """
if kwargs != {}:
warnings.warn("TableList is a container of astropy.Tables.", InputWarning)
self.print_table_list()
def _is_coordinate(coordinates):
"""
Returns `True` if coordinates can be parsed via `astropy.coordinates`
and `False` otherwise.
Parameters
----------
coordinates : str or `astropy.coordinates` object
The target around which to search. It may be specified as a
string in which case it is resolved using online services or as
the appropriate `astropy.coordinates` object. ICRS coordinates
may also be entered as strings as specified in the
`astropy.coordinates` module.
Returns
-------
bool
"""
if hasattr(coordinates, 'fk5'):
# its coordinate-like enough
return True
try:
ICRSCoordGenerator(coordinates)
return True
except ValueError:
return False
def suppress_vo_warnings():
"""
Suppresses all warnings of the class
`astropy.io.votable.exceptions.VOWarning`.
"""
warnings.filterwarnings("ignore", category=votable.exceptions.VOWarning)
def validate_email(email):
"""
E-mail address validation. Uses validate_email if available, else a simple
regex that will let through some invalid e-mails but will catch the most
common violators.
"""
try:
import validate_email
return validate_email.validate_email(email)
except ImportError:
return bool(re.compile(r'^\S+@\S+\.\S+$').match(email))
class FileContainer:
"""
A File Object container, meant to offer lazy access to downloaded FITS
files.
"""
def __init__(self, target, **kwargs):
kwargs.setdefault('cache', True)
self._target = target
self._timeout = kwargs.get('remote_timeout', aud.conf.remote_timeout)
if (os.path.splitext(target)[1] == '.fits' and not
('encoding' in kwargs and kwargs['encoding'] == 'binary')):
warnings.warn("FITS files must be read as binaries; error is "
"likely.", InputWarning)
self._readable_object = get_readable_fileobj(target, **kwargs)
def get_fits(self):
"""
        Assuming the contained file is a FITS file, read it
        and return it parsed as a FITS HDUList
"""
filedata = self.get_string()
if len(filedata) == 0:
raise TypeError("The file retrieved was empty.")
self._fits = fits.HDUList.fromstring(filedata)
return self._fits
def save_fits(self, savepath, link_cache='hard'):
"""
Save a FITS file to savepath
Parameters
----------
savepath : str
The full path to a FITS filename, e.g. "file.fits", or
"/path/to/file.fits".
link_cache : 'hard', 'sym', or False
Try to create a hard or symbolic link to the astropy cached file?
If the system is unable to create a hardlink, the file will be
copied to the target location.
"""
self.get_fits()
target_key = str(self._target)
target = aud.download_file(target_key, cache=True, sources=[])
if link_cache == 'hard':
try:
os.link(target, savepath)
except (IOError, OSError, AttributeError):
shutil.copy(target, savepath)
elif link_cache == 'sym':
try:
os.symlink(target, savepath)
except AttributeError:
raise OSError('Creating symlinks is not possible on this OS.')
else:
shutil.copy(target, savepath)
def get_string(self):
"""
        Download the file content (bytes for binary sources, str for text)
"""
if not hasattr(self, '_string'):
try:
with self._readable_object as f:
data = f.read()
self._string = data
except URLError as e:
if isinstance(e.reason, socket.timeout):
raise TimeoutError("Query timed out, time elapsed {t}s".
format(t=self._timeout))
else:
raise e
return self._string
def get_stringio(self):
"""
        Return the file as an io.BytesIO object, falling back to io.StringIO
"""
s = self.get_string()
try:
return BytesIO(s)
except TypeError:
return StringIO(s)
def __repr__(self):
if hasattr(self, '_fits'):
return f"Downloaded FITS file: {self._fits!r}"
else:
return f"Downloaded object from URL {self._target} with ID {id(self._readable_object)}"
def get_readable_fileobj(*args, **kwargs):
"""
Overload astropy's get_readable_fileobj so that we can safely monkeypatch
it in astroquery without affecting astropy core functionality
"""
return aud.get_readable_fileobj(*args, **kwargs)
def parse_votable(content):
"""
    Parse a VOTable from its raw byte content
"""
tables = votable.parse(BytesIO(content), verify='warn')
return tables
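# Example usage (illustrative, not part of the original module): turning raw
# VOTable bytes, e.g. an HTTP response body, into an astropy table:
#   votable_file = parse_votable(raw_bytes)
#   table = votable_file.get_first_table().to_table()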
| 31.222944
| 99
| 0.6
|
77cf64443030ec0211626c452e237525b6a1bde6
| 2,598
|
py
|
Python
|
PyFlowPackages/PyFlowFreeCAD/FunctionLibraries/Rotation.py
|
madhusenthilvel/NodeEditor
|
e5612d917a24924a7961d196aafa85ca9b650dcf
|
[
"MIT"
] | 53
|
2019-07-17T17:42:13.000Z
|
2022-02-07T20:19:48.000Z
|
PyFlowPackages/PyFlowFreeCAD/FunctionLibraries/Rotation.py
|
madhusenthilvel/NodeEditor
|
e5612d917a24924a7961d196aafa85ca9b650dcf
|
[
"MIT"
] | 13
|
2019-07-10T11:15:34.000Z
|
2020-12-31T05:03:48.000Z
|
PyFlowPackages/PyFlowFreeCAD/FunctionLibraries/Rotation.py
|
madhusenthilvel/NodeEditor
|
e5612d917a24924a7961d196aafa85ca9b650dcf
|
[
"MIT"
] | 12
|
2019-07-10T11:03:39.000Z
|
2022-02-15T11:58:14.000Z
|
import FreeCAD
#from FreeCAD import Rotation as MRotation
from FreeCAD import Vector
from PyFlow.Core import(
FunctionLibraryBase,
IMPLEMENT_NODE
)
from PyFlow.Core.Common import *
from nodeeditor.say import *
class Rotation(FunctionLibraryBase):
    '''Function library exposing FreeCAD rotation operations as PyFlow nodes'''
def __init__(self,packageName):
super(Rotation, self).__init__(packageName)
@staticmethod
@IMPLEMENT_NODE(returns=('RotationPin', (0,0,0)), nodeType=NodeTypes.Pure, meta={'Category': 'Rotation', 'Keywords': ['Rotation', '+']})
def rotCreateEuler(Yaw=('FloatPin', 0), Pitch=('FloatPin', 0), Roll=('FloatPin', 0)):
'''create Rotation from Euler angles'''
say("rot create euler ",Yaw,Pitch,Roll)
return [Yaw,Pitch,Roll]
@staticmethod
@IMPLEMENT_NODE(returns=('RotationPin', [0,0,0]), nodeType=NodeTypes.Pure, meta={'Category': 'Rotation', 'Keywords': ['Rotation', '+']})
def rotCreate(Axis=('VectorPin', Vector(0,0,1)), Angle=('FloatPin', 10)):
'''create Rotation from axis and angle'''
a=FreeCAD.Rotation(Axis,Angle)
say("rot create",a.toEuler())
return list(a.toEuler())
@staticmethod
@IMPLEMENT_NODE(returns=('RotationPin', [0,0,0]), nodeType=NodeTypes.Pure, meta={'Category': 'Rotation', 'Keywords': ['Rotation', '+']})
def rotCreateBy2Vectors(From=('VectorPin', Vector(1,0,0)), To=('VectorPin', Vector(0,1,0))):
'''create Rotation between vectors From and To'''
        return list(FreeCAD.Rotation(From, To).toEuler())
@staticmethod
@IMPLEMENT_NODE(returns=('RotationPin', [0,0,0]), nodeType=NodeTypes.Pure, meta={'Category': 'Rotation', 'Keywords': ['Rotation', '+']})
def rotCreateBy3Vectors(X=('VectorPin', Vector(1,0,0)), Y=('VectorPin', Vector(0,1,0)),Z=('VectorPin', Vector(0,0,1)),mode=('StringPin','ZXY')):
        '''create Rotation from three vectors that define the rotated axes directions, plus an optional 3-character string of capital letters 'X', 'Y', 'Z' that sets the order of importance of the axes (e.g., 'ZXY' means the z direction is followed strictly, x is used but corrected if necessary, and y is ignored)'''
return list(FreeCAD.Rotation(X,Y,Z,mode).toEuler())
@staticmethod
@IMPLEMENT_NODE(returns=('RotationPin', [0,0,0]), nodeType=NodeTypes.Pure, meta={'Category': 'Rotation', 'Keywords': ['Rotation', '+']})
def rotMultiply(a=('RotationPin', [1,0,0]), b=('RotationPin', [0,1,0])):
'''concatenate Rotation a and b'''
say("rot mult a",a)
say("rot mult b",b)
return list(FreeCAD.Rotation(*a).multiply(FreeCAD.Rotation(*b)).toEuler())
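# Illustrative sketch (not part of the original node set): the Euler triples
# exchanged by these nodes compose through FreeCAD.Rotation, e.g.:
#   a = FreeCAD.Rotation(90.0, 0.0, 0.0)   # yaw 90 degrees
#   b = FreeCAD.Rotation(0.0, 90.0, 0.0)   # pitch 90 degrees
#   combined = list(a.multiply(b).toEuler())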
| 46.392857
| 282
| 0.655504
|
59c094462097e13ea6b755aeb81cfa0d863c0fa0
| 629
|
py
|
Python
|
test/cli_test/test_general.py
|
nprint/nPrintML
|
69e56036fd7ab6b050cbe81b31309c06f166f0f2
|
[
"Apache-2.0"
] | 13
|
2020-11-04T14:57:12.000Z
|
2021-11-18T08:50:00.000Z
|
test/cli_test/test_general.py
|
nprint/nPrintML
|
69e56036fd7ab6b050cbe81b31309c06f166f0f2
|
[
"Apache-2.0"
] | 60
|
2020-10-22T16:08:14.000Z
|
2021-12-14T23:00:36.000Z
|
test/cli_test/test_general.py
|
nprint/nprintml
|
69e56036fd7ab6b050cbe81b31309c06f166f0f2
|
[
"Apache-2.0"
] | 1
|
2021-12-16T01:10:18.000Z
|
2021-12-16T01:10:18.000Z
|
import nprintml
from .base import CLITestCase
class TestGeneral(CLITestCase):
def test_error(self):
result = self.try_execute('--yelp', raise_exc=False, stderr=True)
self.assertEqual(result.code, 2)
self.assertGreater(len(result.stderr), 0)
def test_help(self):
result = self.try_execute('--help', stdout=True)
self.assertGreater(len(result.stdout), 0)
def test_version(self):
result = self.try_execute('--version', stdout=True)
self.assertIn(f'nprintML {nprintml.__version__} | nPrint {nprintml.__nprint_version__}',
result.stdout)
| 29.952381
| 96
| 0.661367
|
cb5a8f04989eedffd10baf2023aac0b818a8db31
| 704
|
py
|
Python
|
mhkit_python_utils/pandas_dataframe.py
|
kmruehl/MHKiT-MATLAB
|
6d640144d3f48e5ddea46947e15256fc85828b01
|
[
"BSD-3-Clause"
] | 11
|
2020-03-30T07:49:23.000Z
|
2022-03-30T16:27:32.000Z
|
mhkit_python_utils/pandas_dataframe.py
|
kmruehl/MHKiT-MATLAB
|
6d640144d3f48e5ddea46947e15256fc85828b01
|
[
"BSD-3-Clause"
] | 45
|
2020-05-04T22:25:23.000Z
|
2022-03-31T20:52:36.000Z
|
mhkit_python_utils/pandas_dataframe.py
|
kmruehl/MHKiT-MATLAB
|
6d640144d3f48e5ddea46947e15256fc85828b01
|
[
"BSD-3-Clause"
] | 16
|
2020-04-06T22:21:30.000Z
|
2022-02-04T13:48:22.000Z
|
import pandas as pd
def timeseries_to_pandas(ts, ind, x):
    """Wrap time-series data in a float64 DataFrame indexed by time.
    When x > 1 there are multiple channels, so ts is transposed to give
    one column per channel."""
    if x > 1:
        ts = list(map(list, zip(*ts)))
    df = pd.DataFrame(data=ts, index=ind)
    return df.astype('float64')
def spectra_to_pandas(frequency, spectra, x, cols=None):
    """Wrap spectral data in a float64 DataFrame indexed by frequency.
    When x > 1 there are multiple spectra, so the data is transposed to
    give one column per spectrum."""
    if x > 1:
        spectra = list(map(list, zip(*spectra)))
    df = pd.DataFrame(data=spectra, index=frequency)
    df.index.name = '(Hz)'  # label the frequency index
    if cols is not None:
        df.columns = cols
    return df.astype('float64')
def lis(li, app):
    """Append app to li and return it (list helper callable from MATLAB)."""
    li.append(app)
    return li
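# Example usage (illustrative, not part of the original helpers): wrapping two
# signals sampled at the same three instants, one DataFrame column per signal:
#   index = [0.0, 0.1, 0.2]
#   data = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
#   df = timeseries_to_pandas(data, index, x=2)   # df.shape == (3, 2)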
| 22
| 53
| 0.558239
|
64fb2890d944fcdf377a771da30d9cc4a977ae03
| 471
|
py
|
Python
|
nsd1802/python/day06/mygui2.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1802/python/day06/mygui2.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1802/python/day06/mygui2.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
import tkinter
from functools import partial
def hello():
lb.config(text="Hello China!")
def welcome():
lb.config(text="Hello tedu!")
root = tkinter.Tk()
lb = tkinter.Label(text="Hello world!", font="Times 26")
MyBtn = partial(tkinter.Button, root, fg='white', bg='blue')
b1 = MyBtn(text='Button 1', command=hello)
b2 = MyBtn(text='Button 2', command=welcome)
b3 = MyBtn(text='quit', command=root.quit)
lb.pack()
b1.pack()
b2.pack()
b3.pack()
root.mainloop()
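# Illustrative note (not part of the original demo): functools.partial
# pre-binds the shared Button arguments above, so each MyBtn(...) call is
# equivalent to spelling them out in full, e.g.:
#   b1 = tkinter.Button(root, fg='white', bg='blue', text='Button 1', command=hello)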
| 21.409091
| 60
| 0.683652
|
067aa47c6f1e82c03e466d7df5f9240a877deffd
| 6,205
|
py
|
Python
|
functions/misc_functions_folder/gcf_misc_functions.py
|
Liodeus/GCP_Security_Check
|
370fe0b951bae1a382d03021c854600d638cad68
|
[
"MIT"
] | null | null | null |
functions/misc_functions_folder/gcf_misc_functions.py
|
Liodeus/GCP_Security_Check
|
370fe0b951bae1a382d03021c854600d638cad68
|
[
"MIT"
] | null | null | null |
functions/misc_functions_folder/gcf_misc_functions.py
|
Liodeus/GCP_Security_Check
|
370fe0b951bae1a382d03021c854600d638cad68
|
[
"MIT"
] | null | null | null |
from functions.misc_functions_folder.misc_functions import *
def get_secret_search():
"""
Return all the regex to search
"""
return {
'AWS Access Key ID Value': '(A3T[A-Z0-9]|AKIA|AGPA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}',
'AWS Access Key ID': '((\"|''|`)?((?i)aws)?_?((?i)access)_?((?i)key)?_?((?i)id)?(\"|''|`)?\\s{0,50}(:|=>|=)\\s{0,50}(\"|''|`)?(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}(\"|''|`)?)',
'AWS Account ID': '((\"|''|`)?((?i)aws)?_?((?i)account)_?((?i)id)?(\"|''|`)?\\s{0,50}(:|=>|=)\\s{0,50}(\"|''|`)?[0-9]{4}-?[0-9]{4}-?[0-9]{4}(\"|''|`)?)',
'AWS Secret Access Key': '((\"|''|`)?((?i)aws)?_?((?i)secret)_?((?i)access)?_?((?i)key)?_?((?i)id)?(\"|''|`)?\\s{0,50}(:|=>|=)\\s{0,50}(\"|''|`)?[A-Za-z0-9/+=]{40}(\"|''|`)?)',
'AWS Session Token': '((\"|''|`)?((?i)aws)?_?((?i)session)?_?((?i)token)?(\"|''|`)?\\s{0,50}(:|=>|=)\\s{0,50}(\"|''|`)?[A-Za-z0-9/+=]{16,}(\"|''|`)?)',
'Artifactory': '(?i)artifactory.{0,50}(\"|''|`)?[a-zA-Z0-9=]{112}(\"|''|`)?',
'CodeClimate': '(?i)codeclima.{0,50}(\"|''|`)?[0-9a-f]{64}(\"|''|`)?',
'Facebook access token': 'EAACEdEose0cBA[0-9A-Za-z]+',
'Google (GCM) Service account': '(("|''|`)?type(\"|''|`)?\\s{0,50}(:|=>|=)\\s{0,50}(\"|''|`)?service_account(\"|''|`)?,?)',
'Stripe API key': '(?:r|s)k_[live|test]_[0-9a-zA-Z]{24}',
'Google OAuth Key': '[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com',
'Google Cloud API Key': 'AIza[0-9A-Za-z\\-_]{35}',
'Google OAuth Access Token': 'ya29\\.[0-9A-Za-z\\-_]+',
'Picatic API key': 'sk_[live|test]_[0-9a-z]{32}',
'Square Access Token': 'sq0atp-[0-9A-Za-z\-_]{22}',
'Square OAuth Secret': 'sq0csp-[0-9A-Za-z\-_]{43}',
'PayPal/Braintree Access Token': 'access_token\$production\$[0-9a-z]{16}\$[0-9a-f]{32}',
'Amazon MWS Auth Token': 'amzn\.mws\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',
        'Twilio API Key': 'SK[0-9a-fA-F]{32}',
'SendGrid API Key': 'SG\.[0-9A-Za-z\-_]{22}\.[0-9A-Za-z\-_]{43}',
'MailGun API Key': 'key-[0-9a-zA-Z]{32}',
'MailChimp API Key': '[0-9a-f]{32}-us[0-9]{12}',
'SSH Password': 'sshpass -p.*[''|\"]',
'Outlook team': '(https://outlook.office.com/webhook/[0-9a-f-]{36}@)',
'Sauce Token': '(?i)sauce.{0,50}(\"|''|`)?[0-9a-f-]{36}(\"|''|`)?',
        'Slack Token (legacy)': '(xox[pboa]-[0-9]{12}-[0-9]{12}-[A-Za-z0-9]{24})',
        'Slack Token': '(xox[pboa]-[0-9]{12}-[0-9]{12}-[0-9]{12}-[a-z0-9]{32})',
'Slack Webhook': 'https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}',
'SonarQube Docs API Key': '(?i)sonar.{0,50}(\"|''|`)?[0-9a-f]{40}(\"|''|`)?',
'HockeyApp': '(?i)hockey.{0,50}(\"|''|`)?[0-9a-f]{32}(\"|''|`)?',
'Username and password in URI': '([\w+]{1,24})(://)([^$<]{1})([^\s";]{1,}):([^$<]{1})([^\s";/]{1,})@[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,24}([^\s]+)',
'NuGet API Key': 'oy2[a-z0-9]{43}',
'StackHawk API Key': 'hawk\.[0-9A-Za-z\-_]{20}\.[0-9A-Za-z\-_]{20}',
'Contains a private key': '-----BEGIN (EC |RSA |DSA |OPENSSH |PGP |)PRIVATE KEY',
'WP-Config': 'define(.{0,20})?(DB_CHARSET|NONCE_SALT|LOGGED_IN_SALT|AUTH_SALT|NONCE_KEY|DB_HOST|DB_PASSWORD|AUTH_KEY|SECURE_AUTH_KEY|LOGGED_IN_KEY|DB_NAME|DB_USER)(.{0,20})?[''|"].{10,120}[''|"]',
'AWS cred file info': '(?i)(aws_access_key_id|aws_secret_access_key)(.{0,20})?=.[0-9a-zA-Z\/+]{20,40}',
'Facebook Client ID': '(?i)(facebook|fb)(.{0,20})?[''\"][0-9]{13,17}[''\"]',
'Twitter Secret Key': '(?i)twitter(.{0,20})?[''\"][0-9a-z]{35,44}[''\"]',
'Twitter Client ID': '(?i)twitter(.{0,20})?[''\"][0-9a-z]{18,25}[''\"]',
'Heroku API key': '(?i)heroku(.{0,20})?[''"][0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}[''"]',
'LinkedIn Secret Key': '(?i)linkedin(.{0,20})?[''\"][0-9a-z]{16}[''\"]'
}
def get_languages():
"""
Return all the language accepted
"""
return {
"do": ["dotnet3"],
"go": ["go113"],
"ja": ["java11"],
"no": ["nodejs10", "nodejs12", "nodejs14"],
"py": ["python37", "python38", "python39"],
"ru": ["ruby26", "ruby27"]
}
def gcf_reduce(cmd_list, function_name, lock, project, mitigation_name, severity):
"""
    Shared helper for the Google Cloud Function checks: runs the listing
    command and reduces its output to a {name: location/region} mapping.
"""
datas = exec_cmd(cmd_list[0])
if "API [cloudfunctions.googleapis.com] not enabled" in datas:
pretty_print_error(lock, f"GCF {function_name}", "API [cloudfunctions.googleapis.com] not enabled", True, project, mitigation_name, severity)
if "The following regions were fully or partially" in datas:
pretty_print_error(lock, f"GCF {function_name}", "Missing permissions", True, project, mitigation_name, severity)
datas = datas.split('\n')[1:-1]
res = {}
for data in datas:
tmp = data.split()
name = tmp[0]
if function_name == "gcf_location":
location = tmp[4]
if "europe" not in location:
res[name] = location
else:
region = tmp[4]
res[name] = region
return res
def gfc_reduce_two(result, cmd_list, function_name):
"""
    Second-stage helper for the Google Cloud Function checks: inspects each
    function's description for runtime, environment-secret, or
    service-account issues.
"""
res = {}
for name, region in result.items():
cmd = f"{cmd_list[1]}{region} {name}"
yaml_result = yaml.load(exec_cmd(cmd), Loader=yaml.FullLoader)
if function_name == "gcf_runtime":
yaml_result_runtime_variable = yaml_result["runtime"]
languages = get_languages()
short = yaml_result_runtime_variable[:2]
if yaml_result_runtime_variable in languages[short]:
continue
else:
res[name] = yaml_result_runtime_variable
elif function_name == "gcf_env_secret":
try:
yaml_result_env_variable = yaml_result["environmentVariables"]
except KeyError:
continue
secret_search_list = get_secret_search()
for var, value in yaml_result_env_variable.items():
for secret_name, regex in secret_search_list.items():
regex_result = re.search(regex, value)
if regex_result:
val = {secret_name: regex_result.group()}
try:
res[name].append(val)
except KeyError:
res[name] = []
res[name].append(val)
elif function_name == "gcf_service_account":
yaml_result_name = yaml_result["name"].split('/')[1]
email = yaml_result["serviceAccountEmail"]
if re.match(f"{yaml_result_name}@appspot.gserviceaccount.com", email):
res[name] = email
return res
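# Illustrative sketch (not part of the original checks): scanning a blob of
# text (here the hypothetical variable `blob`) against every pattern from
# get_secret_search():
#   findings = {}
#   for secret_name, pattern in get_secret_search().items():
#       match = re.search(pattern, blob)
#       if match:
#           findings[secret_name] = match.group()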
| 42.793103
| 206
| 0.575181
|
9001c6012503b11400320d12d03a86e5873efcae
| 178
|
py
|
Python
|
posts/service/get_posts_list.py
|
gergerov/easy_django
|
98eea5d5c2be36c5b3ac6497d803d18d4a811ded
|
[
"MIT"
] | null | null | null |
posts/service/get_posts_list.py
|
gergerov/easy_django
|
98eea5d5c2be36c5b3ac6497d803d18d4a811ded
|
[
"MIT"
] | null | null | null |
posts/service/get_posts_list.py
|
gergerov/easy_django
|
98eea5d5c2be36c5b3ac6497d803d18d4a811ded
|
[
"MIT"
] | null | null | null |
from ..models import Posts
def get_post_list(quantity: int):
posts = Posts.objects.filter(
status="ACTIVE"
).order_by('-post_date')[:quantity]
return posts
| 19.777778
| 39
| 0.668539
|
808f173859824e2cbcc1d40600e04148ff4d09a5
| 6,485
|
py
|
Python
|
PaddleCV/Research/astar2019/score.py
|
XiaoguangHu01/models
|
a95d49323ed504e5a9164586f171f408954fd43a
|
[
"Apache-2.0"
] | 1,319
|
2020-02-14T10:42:07.000Z
|
2022-03-31T15:42:18.000Z
|
PaddleCV/Research/astar2019/score.py
|
XiaoguangHu01/models
|
a95d49323ed504e5a9164586f171f408954fd43a
|
[
"Apache-2.0"
] | 192
|
2020-02-14T02:53:34.000Z
|
2022-03-31T02:25:48.000Z
|
PaddleCV/Research/astar2019/score.py
|
XiaoguangHu01/models
|
a95d49323ed504e5a9164586f171f408954fd43a
|
[
"Apache-2.0"
] | 720
|
2020-02-14T02:12:38.000Z
|
2022-03-31T12:21:15.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# os.environ["FLAGS_fraction_of_gpu_memory_to_use"] = "0.3"
import sys
sys.path.insert(0, ".")
import argparse
import functools
import paddle.fluid as fluid
import reader
from utils import *
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import tempfile
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('data_dir', str, '', "The data root path.")
add_arg('test_list', str, '', "The testing data lists.")
add_arg('model_dir', str, '', "The model path.")
add_arg('nms_threshold', float, 0.45, "NMS threshold.")
add_arg('ap_version', str, 'cocoMAP', "cocoMAP.")
add_arg('mean_value_B', float, 127.5, "Mean value for B channel which will be subtracted.") #123.68
add_arg('mean_value_G', float, 127.5, "Mean value for G channel which will be subtracted.") #116.78
add_arg('mean_value_R', float, 127.5, "Mean value for R channel which will be subtracted.") #103.94
def use_coco_api_compute_mAP(data_args, test_list, num_classes, test_reader, exe, infer_program,
feeded_var_names, feeder, target_var, batch_size):
cocoGt = COCO(os.path.join(data_args.data_dir, test_list))
json_category_id_to_contiguous_id = {
v: i + 1
for i, v in enumerate(cocoGt.getCatIds())
}
contiguous_category_id_to_json_id = {
v: k
for k, v in json_category_id_to_contiguous_id.items()
}
dts_res = []
executor = fluid.Executor(fluid.CUDAPlace(0))
test_program = fluid.Program()
with fluid.program_guard(test_program):
boxes = fluid.layers.data(
name='boxes', shape=[-1, -1, 4], dtype='float32')
scores = fluid.layers.data(
name='scores', shape=[-1, num_classes, -1], dtype='float32')
pred_result = fluid.layers.multiclass_nms(
bboxes=boxes,
scores=scores,
score_threshold=0.01,
nms_top_k=-1,
nms_threshold=0.45,
keep_top_k=-1,
normalized=False)
executor.run(fluid.default_startup_program())
for batch_id, data in enumerate(test_reader()):
boxes_np, scores_np = exe.run(program=infer_program,
feed={feeded_var_names[0]: feeder.feed(data)['image']},
fetch_list=target_var)
nms_out = executor.run(
program=test_program,
feed={
'boxes': boxes_np,
'scores': scores_np
},
fetch_list=[pred_result], return_numpy=False)
if batch_id % 20 == 0:
print("Batch {0}".format(batch_id))
dts_res += get_batch_dt_res(nms_out, data, contiguous_category_id_to_json_id, batch_size)
_, tmp_file = tempfile.mkstemp()
with open(tmp_file, 'w') as outfile:
json.dump(dts_res, outfile)
print("start evaluate using coco api")
cocoDt = cocoGt.loadRes(tmp_file)
cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
mAP = cocoEval.stats[0]
return mAP
def compute_score(model_dir, data_dir, test_list='annotations/instances_val2017.json', batch_size=32, height=300, width=300, num_classes=81,
mean_value=[127.5, 127.5, 127.5]):
"""
    Compute the score, mAP and multiply-adds (MAdds) of a model.
    Args:
        model_dir (string): directory of the model
        data_dir (string): directory of the coco dataset, like '/your/path/to/coco', '/work/datasets/coco'
    Returns:
        tuple: score, mAP, MAdds.
"""
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=model_dir, executor=exe)
image_shape = [3, height, width]
data_args = reader.Settings(
dataset='coco2017',
data_dir=data_dir,
resize_h=height,
resize_w=width,
mean_value=mean_value,
apply_distort=False,
apply_expand=False,
ap_version='cocoMAP')
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
gt_box = fluid.layers.data(
name='gt_box', shape=[4], dtype='float32', lod_level=1)
gt_label = fluid.layers.data(
name='gt_label', shape=[1], dtype='int32', lod_level=1)
gt_iscrowd = fluid.layers.data(
name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)
gt_image_info = fluid.layers.data(
name='gt_image_id', shape=[3], dtype='int32')
test_reader = reader.test(data_args, test_list, batch_size)
feeder = fluid.DataFeeder(
place=place,
feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
mAP = use_coco_api_compute_mAP(data_args, test_list, num_classes, test_reader, exe, infer_program,
feeded_var_names, feeder, target_var, batch_size)
total_flops_params, is_quantize = summary(infer_program)
MAdds = np.sum(total_flops_params['flops']) / 2000000.0
if is_quantize:
MAdds /= 2.0
print('mAP:', mAP)
print('MAdds:', MAdds)
if MAdds < 160.0:
MAdds = 160.0
if MAdds > 1300.0:
score = 0.0
else:
score = mAP * 100 - (5.1249 * np.log(MAdds) - 14.499)
print('score:', score)
return score, mAP, MAdds
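# Worked example (illustrative, not part of the original script): with
# mAP = 0.25 and MAdds = 300, the penalty term is
# 5.1249 * ln(300) - 14.499 ~= 29.231 - 14.499 = 14.732,
# so score ~= 0.25 * 100 - 14.732 = 10.268.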
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
score, mAP, flops = compute_score(args.model_dir, args.data_dir, batch_size=args.batch_size)
| 36.22905
| 140
| 0.640555
|
ad2042ea19cf829c65e5003282a49b611a6fdbb4
| 4,233
|
py
|
Python
|
test/test_signal.py
|
danbar/YaFFT
|
ba913f33a1e542bc0bba9664e76b0a0d65e76497
|
[
"MIT"
] | null | null | null |
test/test_signal.py
|
danbar/YaFFT
|
ba913f33a1e542bc0bba9664e76b0a0d65e76497
|
[
"MIT"
] | null | null | null |
test/test_signal.py
|
danbar/YaFFT
|
ba913f33a1e542bc0bba9664e76b0a0d65e76497
|
[
"MIT"
] | null | null | null |
import sys
import os
import unittest
import numpy as np
import numpy.testing as npt
from oct2py import octave
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'build', 'debug'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'build', 'debug', 'swig'))
import yafft
def _create_sine(num_samples):
fs = 1000 # Hz, sampling frequency
n = np.arange(0., num_samples)
T = 1./fs # s, sampling period
t = n*T # s, time vector
x = np.sin(2.*np.pi*50.*t)
return x.astype(np.complex64)
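# Illustrative cross-check (not part of the original tests): the Octave
# reference spectra used below could equally be produced with NumPy, e.g.:
#   x = _create_sine(16)
#   ref = np.fft.fft(x)   # matches octave.fft(x, 16) up to float32 precision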
class Test16SamplesSine(unittest.TestCase):
def setUp(self):
octave.restart()
octave.addpath('./octave')
octave.addpath('./test/octave')
octave.eval('pkg load signal') # load signal package
# Test data
N = 16
self.in1 = _create_sine(N)
self.out1 = np.squeeze(octave.fft(self.in1, N))
def tearDown(self):
octave.exit()
def test_octave_sine(self):
res = octave.my_fft(self.in1)
res = np.squeeze(res)
npt.assert_almost_equal(res, self.out1, decimal=6)
def test_dit_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_TIME)
npt.assert_almost_equal(data, self.out1, decimal=6)
def test_dif_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_FREQUENCY)
npt.assert_almost_equal(data, self.out1, decimal=6)
class Test32SamplesSine(unittest.TestCase):
def setUp(self):
octave.restart()
octave.addpath('./octave')
octave.addpath('./test/octave')
octave.eval('pkg load signal') # load signal package
# Test data
N = 32
self.in1 = _create_sine(N)
self.out1 = np.squeeze(octave.fft(self.in1, N))
def tearDown(self):
octave.exit()
def test_octave_sine(self):
res = octave.my_fft(self.in1)
res = np.squeeze(res)
npt.assert_almost_equal(res, self.out1, decimal=6)
def test_dit_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_TIME)
npt.assert_almost_equal(data, self.out1, decimal=6)
def test_dif_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_FREQUENCY)
npt.assert_almost_equal(data, self.out1, decimal=6)
class Test64SamplesSine(unittest.TestCase):
def setUp(self):
octave.restart()
octave.addpath('./octave')
octave.addpath('./test/octave')
octave.eval('pkg load signal') # load signal package
# Test data
N = 64
self.in1 = _create_sine(N)
self.out1 = np.squeeze(octave.fft(self.in1, N))
def tearDown(self):
octave.exit()
def test_octave_sine(self):
res = octave.my_fft(self.in1)
res = np.squeeze(res)
npt.assert_almost_equal(res, self.out1, decimal=5)
def test_dit_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_TIME)
npt.assert_almost_equal(data, self.out1, decimal=5)
def test_dif_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_FREQUENCY)
npt.assert_almost_equal(data, self.out1, decimal=5)
class Test128SamplesSine(unittest.TestCase):
def setUp(self):
octave.restart()
octave.addpath('./octave')
octave.addpath('./test/octave')
octave.eval('pkg load signal') # load signal package
# Test data
N = 128
self.in1 = _create_sine(N)
self.out1 = np.squeeze(octave.fft(self.in1, N))
def tearDown(self):
octave.exit()
def test_octave_sine(self):
res = octave.my_fft(self.in1)
res = np.squeeze(res)
npt.assert_almost_equal(res, self.out1, decimal=5)
def test_dit_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_TIME)
npt.assert_almost_equal(data, self.out1, decimal=5)
def test_dif_sine(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_FREQUENCY)
npt.assert_almost_equal(data, self.out1, decimal=5)
if __name__ == '__main__':
unittest.main()
| 26.961783
| 88
| 0.631467
|
0da946fbbaf95f72bd8f2937c715f068d60f9a31
| 956
|
py
|
Python
|
genes.py
|
austinmdillow/natural-selection-simulator
|
01c7d3ba310a3629a04f1a7e67c04a1b87ee4f09
|
[
"MIT"
] | null | null | null |
genes.py
|
austinmdillow/natural-selection-simulator
|
01c7d3ba310a3629a04f1a7e67c04a1b87ee4f09
|
[
"MIT"
] | 1
|
2020-06-12T20:52:07.000Z
|
2020-06-12T20:52:07.000Z
|
genes.py
|
austinmdillow/natural-selection-simulator
|
01c7d3ba310a3629a04f1a7e67c04a1b87ee4f09
|
[
"MIT"
] | null | null | null |
import random
class Genes():
mutation = False
Animal = 0
Plant = 1
def __init__(self, entity_type):
self.size = 5 # default value
self.typeSpecificTraits(entity_type)
def typeSpecificTraits(self, entity_type):
if entity_type == Genes.Animal:
self.sense = 50
self.speed = 10
self.strength = 3
self.defense = 1
elif entity_type == Genes.Plant:
pass
def printDebug(self):
print("Sense = " + str(self.sense) + ", Speed = " + str(self.speed))
@staticmethod
def combineGenes(g1, g2):
g3 = Genes(Genes.Animal)
g3.sense = random.choice([g1.sense, g2.sense]) * random.uniform(.9,1.1)
g3.speed = random.choice([g1.speed, g2.speed]) * random.uniform(.9,1.1)
g3.strength = random.choice([g1.strength, g2.strength]) #* random.uniform(.9,1.1)
g3.defense = random.choice([g1.defense, g2.defense]) #* random.uniform(.9,1.1)
g1.printDebug()
g3.printDebug()
return g3
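# Worked example (illustrative, not part of the original module): crossing two
# default animals picks each trait from one parent, with a +/-10% mutation on
# the inherited sense and speed values:
#   p1, p2 = Genes(Genes.Animal), Genes(Genes.Animal)
#   child = Genes.combineGenes(p1, p2)
#   # child.sense lies in [0.9 * 50, 1.1 * 50] since both parents default to 50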
| 26.555556
| 85
| 0.640167
|
bbb8e4865cb2b7b764cb0dfa66d4a296497a7c5e
| 527
|
py
|
Python
|
maml/apps/bowsr/model/base.py
|
anooptp/maml
|
fdd95f3d60c9281d871d89b25b073e87b6ba4e52
|
[
"BSD-3-Clause"
] | 161
|
2020-01-26T08:24:41.000Z
|
2022-03-29T06:42:42.000Z
|
maml/apps/bowsr/model/base.py
|
anooptp/maml
|
fdd95f3d60c9281d871d89b25b073e87b6ba4e52
|
[
"BSD-3-Clause"
] | 195
|
2020-01-25T19:35:20.000Z
|
2022-03-28T13:14:30.000Z
|
maml/apps/bowsr/model/base.py
|
anooptp/maml
|
fdd95f3d60c9281d871d89b25b073e87b6ba4e52
|
[
"BSD-3-Clause"
] | 46
|
2020-03-30T12:56:39.000Z
|
2022-03-27T12:53:23.000Z
|
"""
Base class that expose a predict_energy method
"""
from pymatgen.core.structure import Structure
class EnergyModel:
"""
Base energy model class. For any model used in BOWSR, it has to have
a predict_energy method that returns a float
"""
def predict_energy(self, structure: Structure) -> float:
"""
Predict the energy of a structure
Args:
structure: Pymatgen Structure object.
Returns: (float) energy value.
"""
raise NotImplementedError
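# Illustrative sketch (not part of the original module): a minimal concrete
# EnergyModel, e.g. for exercising code that only needs predict_energy.
class ConstantEnergyModel(EnergyModel):
    """Toy model that assigns every structure the same energy."""
    def predict_energy(self, structure: Structure) -> float:
        return 0.0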
| 22.913043
| 72
| 0.648956
|
4e88d7083c43cfb02bfe88bdc07ad70620b0b451
| 10,879
|
py
|
Python
|
tests/components/ecobee/test_climate.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 6
|
2020-07-18T16:33:25.000Z
|
2021-09-26T09:52:04.000Z
|
tests/components/ecobee/test_climate.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 52
|
2020-07-14T14:12:26.000Z
|
2022-03-31T06:24:02.000Z
|
tests/components/ecobee/test_climate.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 5
|
2020-03-29T00:29:13.000Z
|
2021-09-06T20:58:40.000Z
|
"""The test for the Ecobee thermostat module."""
import unittest
from unittest import mock
from homeassistant.components.ecobee import climate as ecobee
import homeassistant.const as const
from homeassistant.const import STATE_OFF
class TestEcobee(unittest.TestCase):
"""Tests for Ecobee climate."""
def setUp(self):
"""Set up test variables."""
vals = {
"name": "Ecobee",
"program": {
"climates": [
{"name": "Climate1", "climateRef": "c1"},
{"name": "Climate2", "climateRef": "c2"},
],
"currentClimateRef": "c1",
},
"runtime": {
"actualTemperature": 300,
"actualHumidity": 15,
"desiredHeat": 400,
"desiredCool": 200,
"desiredFanMode": "on",
},
"settings": {
"hvacMode": "auto",
"heatStages": 1,
"coolStages": 1,
"fanMinOnTime": 10,
"heatCoolMinDelta": 50,
"holdAction": "nextTransition",
},
"equipmentStatus": "fan",
"events": [
{
"name": "Event1",
"running": True,
"type": "hold",
"holdClimateRef": "away",
"endDate": "2017-01-01 10:00:00",
"startDate": "2017-02-02 11:00:00",
}
],
}
self.ecobee = mock.Mock()
self.ecobee.__getitem__ = mock.Mock(side_effect=vals.__getitem__)
self.ecobee.__setitem__ = mock.Mock(side_effect=vals.__setitem__)
self.data = mock.Mock()
self.data.ecobee.get_thermostat.return_value = self.ecobee
self.thermostat = ecobee.Thermostat(self.data, 1)
def test_name(self):
"""Test name property."""
assert "Ecobee" == self.thermostat.name
def test_current_temperature(self):
"""Test current temperature."""
assert 30 == self.thermostat.current_temperature
self.ecobee["runtime"]["actualTemperature"] = const.HTTP_NOT_FOUND
assert 40.4 == self.thermostat.current_temperature
def test_target_temperature_low(self):
"""Test target low temperature."""
assert 40 == self.thermostat.target_temperature_low
self.ecobee["runtime"]["desiredHeat"] = 502
assert 50.2 == self.thermostat.target_temperature_low
def test_target_temperature_high(self):
"""Test target high temperature."""
assert 20 == self.thermostat.target_temperature_high
self.ecobee["runtime"]["desiredCool"] = 103
assert 10.3 == self.thermostat.target_temperature_high
def test_target_temperature(self):
"""Test target temperature."""
assert self.thermostat.target_temperature is None
self.ecobee["settings"]["hvacMode"] = "heat"
assert 40 == self.thermostat.target_temperature
self.ecobee["settings"]["hvacMode"] = "cool"
assert 20 == self.thermostat.target_temperature
self.ecobee["settings"]["hvacMode"] = "auxHeatOnly"
assert 40 == self.thermostat.target_temperature
self.ecobee["settings"]["hvacMode"] = "off"
assert self.thermostat.target_temperature is None
def test_desired_fan_mode(self):
"""Test desired fan mode property."""
assert "on" == self.thermostat.fan_mode
self.ecobee["runtime"]["desiredFanMode"] = "auto"
assert "auto" == self.thermostat.fan_mode
def test_fan(self):
"""Test fan property."""
assert const.STATE_ON == self.thermostat.fan
self.ecobee["equipmentStatus"] = ""
assert STATE_OFF == self.thermostat.fan
self.ecobee["equipmentStatus"] = "heatPump, heatPump2"
assert STATE_OFF == self.thermostat.fan
def test_hvac_mode(self):
"""Test current operation property."""
assert self.thermostat.hvac_mode == "heat_cool"
self.ecobee["settings"]["hvacMode"] = "heat"
assert self.thermostat.hvac_mode == "heat"
self.ecobee["settings"]["hvacMode"] = "cool"
assert self.thermostat.hvac_mode == "cool"
self.ecobee["settings"]["hvacMode"] = "auxHeatOnly"
assert self.thermostat.hvac_mode == "heat"
self.ecobee["settings"]["hvacMode"] = "off"
assert self.thermostat.hvac_mode == "off"
def test_hvac_modes(self):
"""Test operation list property."""
assert ["heat_cool", "heat", "cool", "off"] == self.thermostat.hvac_modes
def test_hvac_mode2(self):
"""Test operation mode property."""
assert self.thermostat.hvac_mode == "heat_cool"
self.ecobee["settings"]["hvacMode"] = "heat"
assert self.thermostat.hvac_mode == "heat"
def test_device_state_attributes(self):
"""Test device state attributes property."""
self.ecobee["equipmentStatus"] = "heatPump2"
assert {
"fan": "off",
"climate_mode": "Climate1",
"fan_min_on_time": 10,
"equipment_running": "heatPump2",
} == self.thermostat.device_state_attributes
self.ecobee["equipmentStatus"] = "auxHeat2"
assert {
"fan": "off",
"climate_mode": "Climate1",
"fan_min_on_time": 10,
"equipment_running": "auxHeat2",
} == self.thermostat.device_state_attributes
self.ecobee["equipmentStatus"] = "compCool1"
assert {
"fan": "off",
"climate_mode": "Climate1",
"fan_min_on_time": 10,
"equipment_running": "compCool1",
} == self.thermostat.device_state_attributes
self.ecobee["equipmentStatus"] = ""
assert {
"fan": "off",
"climate_mode": "Climate1",
"fan_min_on_time": 10,
"equipment_running": "",
} == self.thermostat.device_state_attributes
self.ecobee["equipmentStatus"] = "Unknown"
assert {
"fan": "off",
"climate_mode": "Climate1",
"fan_min_on_time": 10,
"equipment_running": "Unknown",
} == self.thermostat.device_state_attributes
self.ecobee["program"]["currentClimateRef"] = "c2"
assert {
"fan": "off",
"climate_mode": "Climate2",
"fan_min_on_time": 10,
"equipment_running": "Unknown",
} == self.thermostat.device_state_attributes
def test_is_aux_heat_on(self):
"""Test aux heat property."""
assert not self.thermostat.is_aux_heat
self.ecobee["equipmentStatus"] = "fan, auxHeat"
assert self.thermostat.is_aux_heat
def test_set_temperature(self):
"""Test set temperature."""
# Auto -> Auto
self.data.reset_mock()
self.thermostat.set_temperature(target_temp_low=20, target_temp_high=30)
self.data.ecobee.set_hold_temp.assert_has_calls(
[mock.call(1, 30, 20, "nextTransition")]
)
# Auto -> Hold
self.data.reset_mock()
self.thermostat.set_temperature(temperature=20)
self.data.ecobee.set_hold_temp.assert_has_calls(
[mock.call(1, 25, 15, "nextTransition")]
)
# Cool -> Hold
self.data.reset_mock()
self.ecobee["settings"]["hvacMode"] = "cool"
self.thermostat.set_temperature(temperature=20.5)
self.data.ecobee.set_hold_temp.assert_has_calls(
[mock.call(1, 20.5, 20.5, "nextTransition")]
)
# Heat -> Hold
self.data.reset_mock()
self.ecobee["settings"]["hvacMode"] = "heat"
self.thermostat.set_temperature(temperature=20)
self.data.ecobee.set_hold_temp.assert_has_calls(
[mock.call(1, 20, 20, "nextTransition")]
)
# Heat -> Auto
self.data.reset_mock()
self.ecobee["settings"]["hvacMode"] = "heat"
self.thermostat.set_temperature(target_temp_low=20, target_temp_high=30)
assert not self.data.ecobee.set_hold_temp.called
def test_set_hvac_mode(self):
"""Test operation mode setter."""
self.data.reset_mock()
self.thermostat.set_hvac_mode("heat_cool")
self.data.ecobee.set_hvac_mode.assert_has_calls([mock.call(1, "auto")])
self.data.reset_mock()
self.thermostat.set_hvac_mode("heat")
self.data.ecobee.set_hvac_mode.assert_has_calls([mock.call(1, "heat")])
def test_set_fan_min_on_time(self):
"""Test fan min on time setter."""
self.data.reset_mock()
self.thermostat.set_fan_min_on_time(15)
self.data.ecobee.set_fan_min_on_time.assert_has_calls([mock.call(1, 15)])
self.data.reset_mock()
self.thermostat.set_fan_min_on_time(20)
self.data.ecobee.set_fan_min_on_time.assert_has_calls([mock.call(1, 20)])
def test_resume_program(self):
"""Test resume program."""
# False
self.data.reset_mock()
self.thermostat.resume_program(False)
self.data.ecobee.resume_program.assert_has_calls([mock.call(1, "false")])
self.data.reset_mock()
self.thermostat.resume_program(None)
self.data.ecobee.resume_program.assert_has_calls([mock.call(1, "false")])
self.data.reset_mock()
self.thermostat.resume_program(0)
self.data.ecobee.resume_program.assert_has_calls([mock.call(1, "false")])
# True
self.data.reset_mock()
self.thermostat.resume_program(True)
self.data.ecobee.resume_program.assert_has_calls([mock.call(1, "true")])
self.data.reset_mock()
self.thermostat.resume_program(1)
self.data.ecobee.resume_program.assert_has_calls([mock.call(1, "true")])
def test_hold_preference(self):
"""Test hold preference."""
assert "nextTransition" == self.thermostat.hold_preference()
for action in [
"useEndTime4hour",
"useEndTime2hour",
"nextPeriod",
"indefinite",
"askMe",
]:
self.ecobee["settings"]["holdAction"] = action
assert "nextTransition" == self.thermostat.hold_preference()
def test_set_fan_mode_on(self):
"""Test set fan mode to on."""
self.data.reset_mock()
self.thermostat.set_fan_mode("on")
self.data.ecobee.set_fan_mode.assert_has_calls(
[mock.call(1, "on", 20, 40, "nextTransition")]
)
def test_set_fan_mode_auto(self):
"""Test set fan mode to auto."""
self.data.reset_mock()
self.thermostat.set_fan_mode("auto")
self.data.ecobee.set_fan_mode.assert_has_calls(
[mock.call(1, "auto", 20, 40, "nextTransition")]
)
| 37.643599
| 81
| 0.592702
|
c276c90544772ac5e62b8ba97b8e8b4a5ffac85b
| 459
|
py
|
Python
|
run_files/ppo_reg_adapt/run_ppo_reg_adapt_ipsu.py
|
YunjaeChoi/Gain-Risk_Framework_for_Stabilization_of_Deep_Policy_Gradient_Optimization
|
68d3f8fca6c6e6b356261f568f0d8562242fa649
|
[
"MIT"
] | null | null | null |
run_files/ppo_reg_adapt/run_ppo_reg_adapt_ipsu.py
|
YunjaeChoi/Gain-Risk_Framework_for_Stabilization_of_Deep_Policy_Gradient_Optimization
|
68d3f8fca6c6e6b356261f568f0d8562242fa649
|
[
"MIT"
] | null | null | null |
run_files/ppo_reg_adapt/run_ppo_reg_adapt_ipsu.py
|
YunjaeChoi/Gain-Risk_Framework_for_Stabilization_of_Deep_Policy_Gradient_Optimization
|
68d3f8fca6c6e6b356261f568f0d8562242fa649
|
[
"MIT"
] | null | null | null |
from ppo_reg_adapt import ppo_reg_adapt
from env_functions import *
from utils.run_utils import ExperimentGrid
if __name__ == '__main__':
exp_name = 'ipsu-ppo_reg_adapt'
eg = ExperimentGrid(name=exp_name)
eg.add('seed', [10*i for i in range(5)])
#pendulum
eg.add('env_fn', inverted_pendulum_swingup_env_fn, '', False)
eg.add('epochs', 200)
eg.add('target_gr_ratio', 2.0)
eg.add('save_freq', 100)
eg.run(ppo_reg_adapt)
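# Illustrative note (not part of the original script): each eg.add() call with
# a list of values adds a grid axis, so the five seeds above launch five runs
# of ppo_reg_adapt that share the remaining scalar settings.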
| 24.157895
| 65
| 0.694989
|
166b88a9fe6a51b81b2918d13ffc4464793d62e4
| 17,188
|
py
|
Python
|
lib-python/2.7/test/list_tests.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 333
|
2015-08-08T18:03:38.000Z
|
2022-03-22T18:13:12.000Z
|
lib-python/2.7/test/list_tests.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 7
|
2020-02-16T16:49:05.000Z
|
2021-11-26T09:00:56.000Z
|
lib-python/2.7/test/list_tests.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
"""
Tests common to list and UserList.UserList
"""
import sys
import os
from test import test_support, seq_tests
class CommonTest(seq_tests.CommonTest):
def test_init(self):
# Iterable arg is optional
self.assertEqual(self.type2test([]), self.type2test())
# Init clears previous values
a = self.type2test([1, 2, 3])
a.__init__()
self.assertEqual(a, self.type2test([]))
# Init overwrites previous values
a = self.type2test([1, 2, 3])
a.__init__([4, 5, 6])
self.assertEqual(a, self.type2test([4, 5, 6]))
# Mutables always return a new object
b = self.type2test(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_repr(self):
l0 = []
l2 = [0, 1, 2]
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), str(l0))
self.assertEqual(repr(a0), repr(l0))
self.assertEqual(repr(a2), repr(l2))
self.assertEqual(str(a2), "[0, 1, 2]")
self.assertEqual(repr(a2), "[0, 1, 2]")
a2.append(a2)
a2.append(3)
self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
def test_repr_deep(self):
if test_support.check_impl_detail():
depth = sys.getrecursionlimit() + 100
else:
depth = 1000 * 1000 # should be enough to exhaust the stack
a = self.type2test([])
for i in xrange(depth):
a = self.type2test([a])
self.assertRaises(RuntimeError, repr, a)
def test_print(self):
d = self.type2test(xrange(200))
d.append(d)
d.extend(xrange(200,400))
d.append(d)
d.append(400)
try:
with open(test_support.TESTFN, "wb") as fo:
print >> fo, d,
with open(test_support.TESTFN, "rb") as fo:
self.assertEqual(fo.read(), repr(d))
finally:
os.remove(test_support.TESTFN)
def test_set_subscript(self):
a = self.type2test(range(20))
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
self.assertRaises(TypeError, a.__getitem__, 'x', 1)
a[slice(2,10,3)] = [1,2,3]
self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19]))
def test_reversed(self):
a = self.type2test(range(20))
r = reversed(a)
self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
self.assertRaises(StopIteration, r.next)
self.assertEqual(list(reversed(self.type2test())),
self.type2test())
# Bug 3689: make sure list-reversed-iterator doesn't have __len__
self.assertRaises(TypeError, len, reversed([1,2,3]))
def test_setitem(self):
a = self.type2test([0, 1])
a[0] = 0
a[1] = 100
self.assertEqual(a, self.type2test([0, 100]))
a[-1] = 200
self.assertEqual(a, self.type2test([0, 200]))
a[-2] = 100
self.assertEqual(a, self.type2test([100, 200]))
self.assertRaises(IndexError, a.__setitem__, -3, 200)
self.assertRaises(IndexError, a.__setitem__, 2, 200)
a = self.type2test([])
self.assertRaises(IndexError, a.__setitem__, 0, 200)
self.assertRaises(IndexError, a.__setitem__, -1, 200)
self.assertRaises(TypeError, a.__setitem__)
a = self.type2test([0,1,2,3,4])
a[0L] = 1
a[1L] = 2
a[2L] = 3
self.assertEqual(a, self.type2test([1,2,3,3,4]))
a[0] = 5
a[1] = 6
a[2] = 7
self.assertEqual(a, self.type2test([5,6,7,3,4]))
a[-2L] = 88
a[-1L] = 99
self.assertEqual(a, self.type2test([5,6,7,88,99]))
a[-2] = 8
a[-1] = 9
self.assertEqual(a, self.type2test([5,6,7,8,9]))
def test_delitem(self):
a = self.type2test([0, 1])
del a[1]
self.assertEqual(a, [0])
del a[0]
self.assertEqual(a, [])
a = self.type2test([0, 1])
del a[-2]
self.assertEqual(a, [1])
del a[-1]
self.assertEqual(a, [])
a = self.type2test([0, 1])
self.assertRaises(IndexError, a.__delitem__, -3)
self.assertRaises(IndexError, a.__delitem__, 2)
a = self.type2test([])
self.assertRaises(IndexError, a.__delitem__, 0)
self.assertRaises(TypeError, a.__delitem__)
def test_setslice(self):
l = [0, 1]
a = self.type2test(l)
for i in range(-3, 4):
a[:i] = l[:i]
self.assertEqual(a, l)
a2 = a[:]
a2[:i] = a[:i]
self.assertEqual(a2, a)
a[i:] = l[i:]
self.assertEqual(a, l)
a2 = a[:]
a2[i:] = a[i:]
self.assertEqual(a2, a)
for j in range(-3, 4):
a[i:j] = l[i:j]
self.assertEqual(a, l)
a2 = a[:]
a2[i:j] = a[i:j]
self.assertEqual(a2, a)
aa2 = a2[:]
aa2[:0] = [-2, -1]
self.assertEqual(aa2, [-2, -1, 0, 1])
aa2[0:] = []
self.assertEqual(aa2, [])
a = self.type2test([1, 2, 3, 4, 5])
a[:-1] = a
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:-1] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
a = self.type2test([])
a[:] = tuple(range(10))
self.assertEqual(a, self.type2test(range(10)))
self.assertRaises(TypeError, a.__setslice__, 0, 1, 5)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5))
self.assertRaises(TypeError, a.__setslice__)
self.assertRaises(TypeError, a.__setitem__)
def test_delslice(self):
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1L:2L]
del a[0L:1L]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[-2L:-1L]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1L:]
del a[:1L]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[-1L:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[:]
self.assertEqual(a, self.type2test([]))
def test_append(self):
a = self.type2test([])
a.append(0)
a.append(1)
a.append(2)
self.assertEqual(a, self.type2test([0, 1, 2]))
self.assertRaises(TypeError, a.append)
def test_extend(self):
a1 = self.type2test([0])
a2 = self.type2test((0, 1))
a = a1[:]
a.extend(a2)
self.assertEqual(a, a1 + a2)
a.extend(self.type2test([]))
self.assertEqual(a, a1 + a2)
a.extend(a)
self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
a = self.type2test("spam")
a.extend("eggs")
self.assertEqual(a, list("spameggs"))
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend)
def test_insert(self):
a = self.type2test([0, 1, 2])
a.insert(0, -2)
a.insert(1, -1)
a.insert(2, 0)
self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
self.assertRaises(TypeError, a.insert)
def test_pop(self):
a = self.type2test([-1, 0, 1])
a.pop()
self.assertEqual(a, [-1, 0])
a.pop(0)
self.assertEqual(a, [0])
self.assertRaises(IndexError, a.pop, 5)
a.pop(0)
self.assertEqual(a, [])
self.assertRaises(IndexError, a.pop)
self.assertRaises(TypeError, a.pop, 42, 42)
a = self.type2test([0, 10, 20, 30, 40])
def test_remove(self):
a = self.type2test([0, 0, 1])
a.remove(1)
self.assertEqual(a, [0, 0])
a.remove(0)
self.assertEqual(a, [0])
a.remove(0)
self.assertEqual(a, [])
self.assertRaises(ValueError, a.remove, 0)
self.assertRaises(TypeError, a.remove)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.remove, BadCmp())
class BadCmp2:
def __eq__(self, other):
raise BadExc()
d = self.type2test('abcdefghcij')
d.remove('c')
self.assertEqual(d, self.type2test('abdefghcij'))
d.remove('c')
self.assertEqual(d, self.type2test('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, self.type2test('abdefghij'))
# Handle comparison errors
d = self.type2test(['a', 'b', BadCmp2(), 'c'])
e = self.type2test(d)
self.assertRaises(BadExc, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assertIs(x, y)
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxint, 4*sys.maxint), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxint,-4*sys.maxint)
self.assertRaises(ValueError, a.index, 2, 0, -10)
a.remove(0)
self.assertRaises(ValueError, a.index, 2, 0, 4)
self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
# Test modifying the list during index's iteration
class EvilCmp:
def __init__(self, victim):
self.victim = victim
def __eq__(self, other):
del self.victim[:]
return False
a = self.type2test()
a[:] = [EvilCmp(a) for _ in xrange(100)]
# This used to seg fault before patch #1005778
self.assertRaises(ValueError, a.index, None)
def test_reverse(self):
u = self.type2test([-2, -1, 0, 1, 2])
u2 = u[:]
u.reverse()
self.assertEqual(u, [2, 1, 0, -1, -2])
u.reverse()
self.assertEqual(u, u2)
self.assertRaises(TypeError, u.reverse, 42)
def test_sort(self):
with test_support.check_py3k_warnings(
("the cmp argument is not supported", DeprecationWarning)):
self._test_sort()
def _test_sort(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
def revcmp(a, b):
return cmp(b, a)
u.sort(revcmp)
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
return cmp(x%3, y%7)
z = self.type2test(range(12))
z.sort(myComparison)
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
return cmp(x, y)
self.assertRaises(ValueError, z.sort, selfmodifyingComparison)
self.assertRaises(TypeError, z.sort, lambda x, y: 's')
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_slice(self):
u = self.type2test("spam")
u[:2] = "h"
self.assertEqual(u, list("ham"))
def test_iadd(self):
super(CommonTest, self).test_iadd()
u = self.type2test([0, 1])
u2 = u
u += [2, 3]
self.assertIs(u, u2)
u = self.type2test("spam")
u += "eggs"
self.assertEqual(u, self.type2test("spameggs"))
def f_iadd(u, x):
u += x
return u
self.assertRaises(TypeError, f_iadd, u, None)
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
s = self.type2test([])
oldid = id(s)
s *= 10
self.assertEqual(id(s), oldid)
def test_extendedslicing(self):
# subscript
a = self.type2test([0,1,2,3,4])
# deletion
del a[::2]
self.assertEqual(a, self.type2test([1,3]))
a = self.type2test(range(5))
del a[1::2]
self.assertEqual(a, self.type2test([0,2,4]))
a = self.type2test(range(5))
del a[1::-2]
self.assertEqual(a, self.type2test([0,2,3,4]))
a = self.type2test(range(10))
del a[::1000]
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
# assignment
a = self.type2test(range(10))
a[::2] = [-1]*5
self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
a = self.type2test(range(10))
a[::-4] = [10]*3
self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = self.type2test(range(4))
a[::-1] = a
self.assertEqual(a, self.type2test([3, 2, 1, 0]))
a = self.type2test(range(10))
b = a[:]
c = a[:]
a[2:3] = self.type2test(["two", "elements"])
b[slice(2,3)] = self.type2test(["two", "elements"])
c[2:3:] = self.type2test(["two", "elements"])
self.assertEqual(a, b)
self.assertEqual(a, c)
a = self.type2test(range(10))
a[::2] = tuple(range(5))
self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
# test issue7788
a = self.type2test(range(10))
del a[9::1<<333]
def test_constructor_exception_handling(self):
# Bug #1242657
class F(object):
def __iter__(self):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, list, F())
def test_exhausted_iterator(self):
a = self.type2test([1, 2, 3])
exhit = iter(a)
empit = iter(a)
for x in exhit: # exhaust the iterator
next(empit) # not exhausted
a.append(9)
self.assertEqual(list(exhit), [])
self.assertEqual(list(empit), [9])
self.assertEqual(a, self.type2test([1, 2, 3, 9]))
| 30.969369
| 81
| 0.521119
|
60868e8912f936783032e42516a8077d467ca132
| 1,848
|
py
|
Python
|
modelbased-rl/BMPO/env/walker2dNT.py
|
TJU-DRL-LAB/ai-optimizer
|
f558cc524c66460913989519779873b371bf78bc
|
[
"MIT"
] | 19
|
2020-09-02T05:58:09.000Z
|
2021-08-23T11:03:00.000Z
|
modelbased-rl/BMPO/env/walker2dNT.py
|
TJU-DRL-LAB/ai-optimizer
|
f558cc524c66460913989519779873b371bf78bc
|
[
"MIT"
] | 4
|
2020-07-19T13:57:56.000Z
|
2021-11-10T19:42:58.000Z
|
env/walker2dNT.py
|
apexrl/bmpo
|
d065cf195b942c3e534d4c789a0982e0ec09957c
|
[
"MIT"
] | 3
|
2020-09-28T11:21:24.000Z
|
2020-12-28T23:27:25.000Z
|
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class Walker2dNTEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "walker2d.xml", 4)
utils.EzPickle.__init__(self)
def step(self, a):
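        # The triple-quoted block below is the standard gym Walker2d step
        # (reward from forward progress, termination on height/angle bounds),
        # kept only for reference; this "NT" (no-termination) variant replaces
        # it with a shaped reward and never terminates (done is always False).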
'''
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = ((posafter - posbefore) / self.dt)
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
done = not (height > 0.8 and height < 2.0 and
ang > -1.0 and ang < 1.0)
ob = self._get_obs()
return ob, reward, done, {}
'''
vel = self.sim.data.qvel[0]
height = self.sim.data.qpos[1]
self.do_simulation(a, self.frame_skip)
alive_bonus = 1.0
target_height = 1.3
reward = vel
reward += alive_bonus
reward -= 0.1 * np.square(a).sum()
reward -= 3 * (height - target_height) ** 2
done = False
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.lookat[2] = 1.15
self.viewer.cam.elevation = -20
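
if __name__ == '__main__':
    # Hedged usage sketch, not part of the original file: it assumes a working
    # gym + MuJoCo installation and simply rolls the environment forward with
    # random actions to exercise the (obs, reward, done, info) interface above.
    env = Walker2dNTEnv()
    obs = env.reset()
    total = 0.0
    for _ in range(100):
        obs, reward, done, _info = env.step(env.action_space.sample())
        total += reward
    print('return over 100 random steps: %.3f' % total)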
| 33
| 94
| 0.579545
|
16f73a0ef01f2a3d56601915dbd064462d7e0bea
| 276
|
py
|
Python
|
hw1/choose.py
|
BobAnkh/MaC
|
f43c75576ea5af35e4c67f593627cbe1d479648e
|
[
"MIT"
] | 2
|
2021-02-17T08:18:33.000Z
|
2021-02-21T10:48:47.000Z
|
hw1/choose.py
|
BobAnkh/MaC
|
f43c75576ea5af35e4c67f593627cbe1d479648e
|
[
"MIT"
] | null | null | null |
hw1/choose.py
|
BobAnkh/MaC
|
f43c75576ea5af35e4c67f593627cbe1d479648e
|
[
"MIT"
] | null | null | null |
import numpy as np
# choose 1: B
a = np.arange(60.).reshape(3, 4, 5)
print(a)
b = np.sum(a, axis=0, keepdims=True)
print(b)
print(b.shape)
c = np.sum(a, axis=0)
print(c)
print(c.shape)
# choose 2: A
# choose 3: A
# choose 4: A
# choose 5: C
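# A small follow-up sketch (not part of the original answers) making the
# keepdims contrast behind choice 1 explicit: summing over axis 0 keeps a
# broadcastable length-1 leading axis with keepdims=True and drops it otherwise.
assert b.shape == (1, 4, 5)
assert c.shape == (4, 5)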
| 13.8
| 38
| 0.619565
|
871c3976d647277832e5d1365abfb8e0758edfa6
| 27,127
|
py
|
Python
|
retopoflow/rftool_polystrips/polystrips.py
|
senjacob/retopoflow
|
7817bb7d68f98e5ae2c7835f28eeafe76367789e
|
[
"OML"
] | null | null | null |
retopoflow/rftool_polystrips/polystrips.py
|
senjacob/retopoflow
|
7817bb7d68f98e5ae2c7835f28eeafe76367789e
|
[
"OML"
] | null | null | null |
retopoflow/rftool_polystrips/polystrips.py
|
senjacob/retopoflow
|
7817bb7d68f98e5ae2c7835f28eeafe76367789e
|
[
"OML"
] | null | null | null |
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson, and Patrick Moore
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import bgl
import bpy
import math
import time
import random
from mathutils import Matrix, Vector
from mathutils.geometry import intersect_point_tri_2d
from ..rftool import RFTool
from ...addon_common.common.decorators import timed_call
class RFTool_PolyStrips(RFTool):
name = 'PolyStrips'
description = 'Create and edit strips of quads'
icon = 'polystrips-icon.png'
help = 'polystrips.md'
shortcut = 'polystrips tool'
statusbar = '{{insert}} Insert strip of quads\t{{brush radius}} Brush size\t{{action}} Grab selection\t{{increase count}} Increase segments\t{{decrease count}} Decrease segments'
ui_config = 'polystrips_options.html'
################################################################################################
# following imports must happen *after* the above class, because each subclass depends on
# above class to be defined
from .polystrips_ops import PolyStrips_Ops
from .polystrips_props import PolyStrips_Props
from .polystrips_rfwidgets import PolyStrips_RFWidgets
from .polystrips_utils import (
RFTool_PolyStrips_Strip,
hash_face_pair,
crawl_strip,
is_boundaryvert, is_boundaryedge,
process_stroke_filter, process_stroke_source,
process_stroke_get_next, process_stroke_get_marks,
mark_info,
)
from ...addon_common.common.bezier import CubicBezierSpline, CubicBezier
from ...addon_common.common.blender import matrix_vector_mult
from ...addon_common.common.debug import dprint
from ...addon_common.common.drawing import Drawing, Cursors
from ...addon_common.common.maths import Vec2D, Point, rotate2D, Direction2D, Point2D, RelPoint2D
from ...addon_common.common.profiler import profiler
from ...addon_common.common.utils import iter_pairs
from ...config.options import options
class PolyStrips(RFTool_PolyStrips, PolyStrips_Props, PolyStrips_Ops, PolyStrips_RFWidgets):
@RFTool_PolyStrips.on_init
def init(self):
self.init_rfwidgets()
@RFTool_PolyStrips.on_reset
def reset(self):
self.strips = []
self.strip_pts = []
self.hovering_strips = set()
self.hovering_handles = []
self.hovering_sel_face = None
self.sel_cbpts = []
self.stroke_cbs = CubicBezierSpline()
self.clear_count_data()
@RFTool_PolyStrips.on_target_change
@profiler.function
def update_target(self, force=False):
if not force and self._fsm.state in {'move handle', 'rotate', 'scale'}: return
self.strips = []
self._var_cut_count.disabled = True
# get selected quads
bmquads = set(bmf for bmf in self.rfcontext.get_selected_faces() if len(bmf.verts) == 4)
if not bmquads: return
# find junctions at corners
junctions = set()
for bmf in bmquads:
# skip if in middle of a selection
if not any(is_boundaryvert(bmv, bmquads) for bmv in bmf.verts): continue
# skip if in middle of possible strip
edge0,edge1,edge2,edge3 = [is_boundaryedge(bme, bmquads) for bme in bmf.edges]
if (edge0 or edge2) and not (edge1 or edge3): continue
if (edge1 or edge3) and not (edge0 or edge2): continue
junctions.add(bmf)
# find junctions that might be in middle of strip but are ends to other strips
boundaries = set((bme,bmf) for bmf in bmquads for bme in bmf.edges if is_boundaryedge(bme, bmquads))
while boundaries:
bme,bmf = boundaries.pop()
for bme_ in bmf.neighbor_edges(bme):
strip = crawl_strip(bmf, bme_, bmquads, junctions)
if strip is None: continue
junctions.add(strip[-1])
# find strips between junctions
touched = set()
for bmf0 in junctions:
bme0,bme1,bme2,bme3 = bmf0.edges
edge0,edge1,edge2,edge3 = [is_boundaryedge(bme, bmquads) for bme in bmf0.edges]
def add_strip(bme):
strip = crawl_strip(bmf0, bme, bmquads, junctions)
if not strip:
return
bmf1 = strip[-1]
if len(strip) > 1 and hash_face_pair(bmf0, bmf1) not in touched:
touched.add(hash_face_pair(bmf0,bmf1))
touched.add(hash_face_pair(bmf1,bmf0))
self.strips.append(RFTool_PolyStrips_Strip(strip))
if not edge0: add_strip(bme0)
if not edge1: add_strip(bme1)
if not edge2: add_strip(bme2)
if not edge3: add_strip(bme3)
if options['polystrips max strips'] and len(self.strips) > options['polystrips max strips']:
self.strips = []
break
self.update_strip_viz()
if len(self.strips) == 1:
self._var_cut_count.set(len(self.strips[0]))
self._var_cut_count.disabled = False
if self.rfcontext.undo_last_action() != 'change segment count':
self.setup_change_count()
@profiler.function
def update_strip_viz(self):
self.strip_pts = [[strip.curve.eval(i/10) for i in range(10+1)] for strip in self.strips]
@RFTool_PolyStrips.on_target_change
@RFTool_PolyStrips.on_view_change
@RFTool_PolyStrips.FSM_OnlyInState('main')
def update_next_state(self):
self.vis_accel = self.rfcontext.get_vis_accel()
@RFTool_PolyStrips.FSM_State('main')
def main(self):
Point_to_Point2D = self.rfcontext.Point_to_Point2D
mouse = self.actions.mouse
if not self.actions.using('action', ignoredrag=True):
# only update while not pressing action, because action includes drag, and
# the artist might move mouse off selected edge before drag kicks in!
self.hovering_sel_face,_ = self.rfcontext.accel_nearest2D_face(max_dist=options['action dist'], selected_only=True)
self.hovering_handles.clear()
self.hovering_strips.clear()
for strip in self.strips:
for i,cbpt in enumerate(strip.curve):
v = Point_to_Point2D(cbpt)
if v is None: continue
if (mouse - v).length > self.drawing.scale(options['select dist']): continue
# do not filter out non-visible handles, because otherwise
# they might not be movable if they are inside the model
self.hovering_handles.append(cbpt)
self.hovering_strips.add(strip)
if self.actions.using_onlymods('insert'):
self.rfwidget = self.rfwidgets['brushstroke']
elif self.hovering_handles:
self.rfwidget = self.rfwidgets['move']
elif self.hovering_sel_face:
self.rfwidget = self.rfwidgets['move']
else:
self.rfwidget = self.rfwidgets['default']
for rfwidget in self.rfwidgets.values():
if self.rfwidget == rfwidget: continue
if rfwidget.inactive_passthrough():
self.rfwidget = rfwidget
return
# handle edits
if self.hovering_handles:
if self.actions.pressed('action'):
return 'move handle'
if self.actions.pressed('action alt0'):
return 'rotate'
if self.actions.pressed('action alt1'):
return 'scale'
if self.hovering_sel_face:
if self.actions.pressed('action', unpress=False):
return 'move all'
if self.actions.pressed('grab', unpress=False):
return 'move all'
if self.actions.pressed('increase count'):
self.change_count(delta=1)
return
if self.actions.pressed('decrease count'):
self.change_count(delta=-1)
return
if self.actions.pressed({'select path add'}):
return self.rfcontext.select_path(
{'face'},
kwargs_select={'supparts': False},
)
if self.actions.pressed({'select paint', 'select paint add'}, unpress=False):
sel_only = self.actions.pressed('select paint')
return self.rfcontext.setup_smart_selection_painting(
{'face'},
selecting=not sel_only,
deselect_all=sel_only,
# fn_filter_bmelem=self.filter_edge_selection,
kwargs_select={'supparts': False},
kwargs_deselect={'subparts': False},
)
if self.actions.pressed({'select single', 'select single add'}, unpress=False):
# TODO: DO NOT PAINT!
sel_only = self.actions.pressed('select single')
return self.rfcontext.setup_smart_selection_painting(
{'face'},
selecting=not sel_only,
deselect_all=sel_only,
# fn_filter_bmelem=self.filter_edge_selection,
kwargs_select={'supparts': False},
kwargs_deselect={'subparts': False},
)
@RFTool_PolyStrips.FSM_State('move handle', 'can enter')
def movehandle_canenter(self):
return len(self.hovering_handles) > 0
@RFTool_PolyStrips.FSM_State('move handle', 'enter')
def movehandle_enter(self):
self.sel_cbpts = []
self.mod_strips = set()
cbpts = list(self.hovering_handles)
self.mod_strips |= self.hovering_strips
for strip in self.strips:
p0,p1,p2,p3 = strip.curve.points()
if p0 in cbpts and p1 not in cbpts:
cbpts.append(p1)
self.mod_strips.add(strip)
if p3 in cbpts and p2 not in cbpts:
cbpts.append(p2)
self.mod_strips.add(strip)
for strip in self.mod_strips: strip.capture_edges()
inners = [ p for strip in self.strips for p in strip.curve.points()[1:3] ]
self.sel_cbpts = [(cbpt, cbpt in inners, Point(cbpt), self.rfcontext.Point_to_Point2D(cbpt)) for cbpt in cbpts]
self.mousedown = self.actions.mouse
self.rfwidget = self.rfwidgets['move']
self.move_done_pressed = 'confirm'
self.move_done_released = 'action'
self.move_cancelled = 'cancel'
self.rfcontext.undo_push('manipulate bezier')
self._timer = self.actions.start_timer(120.0)
self.rfcontext.set_accel_defer(True)
@RFTool_PolyStrips.FSM_State('move handle')
def movehandle(self):
if self.actions.pressed(self.move_done_pressed):
return 'main'
if self.actions.released(self.move_done_released):
return 'main'
if self.actions.pressed(self.move_cancelled):
self.rfcontext.undo_cancel()
return 'main'
if self.actions.mousemove or not self.actions.mousemove_prev: return
delta = Vec2D(self.actions.mouse - self.mousedown)
up,rt,fw = self.rfcontext.Vec_up(),self.rfcontext.Vec_right(),self.rfcontext.Vec_forward()
for cbpt,inner,oco,oco2D in self.sel_cbpts:
nco2D = oco2D + delta
if not inner:
xyz,_,_,_ = self.rfcontext.raycast_sources_Point2D(nco2D)
if xyz: cbpt.xyz = xyz
else:
ov = self.rfcontext.Point2D_to_Vec(oco2D)
nr = self.rfcontext.Point2D_to_Ray(nco2D)
od = self.rfcontext.Point_to_depth(oco)
cbpt.xyz = nr.eval(od / ov.dot(nr.d))
for strip in self.hovering_strips:
strip.update(self.rfcontext.nearest_sources_Point, self.rfcontext.raycast_sources_Point, self.rfcontext.update_face_normal)
self.update_strip_viz()
self.rfcontext.dirty()
@RFTool_PolyStrips.FSM_State('move handle', 'exit')
def movehandle_exit(self):
self._timer.done()
self.rfcontext.set_accel_defer(False)
self.update_target(force=True)
@RFTool_PolyStrips.FSM_State('rotate', 'can enter')
def rotate_canenter(self):
if not self.hovering_handles: return False
self.sel_cbpts = []
self.mod_strips = set()
Point_to_Point2D = self.rfcontext.Point_to_Point2D
# find hovered inner point, the corresponding outer point and its face
innerP,outerP,outerF = None,None,None
for strip in self.strips:
bmf0,bmf1 = strip.end_faces()
p0,p1,p2,p3 = strip.curve.points()
if p1 in self.hovering_handles: innerP,outerP,outerF = p1,p0,bmf0
if p2 in self.hovering_handles: innerP,outerP,outerF = p2,p3,bmf1
if not innerP or not outerP or not outerF: return False
# scan through all selected strips and collect all inner points next to outerP
for strip in self.strips:
bmf0,bmf3 = strip.end_faces()
if outerF != bmf0 and outerF != bmf3: continue
p0,p1,p2,p3 = strip.curve.points()
if outerF == bmf0: self.sel_cbpts.append( (p1, Point(p1), Point_to_Point2D(p1)) )
else: self.sel_cbpts.append( (p2, Point(p2), Point_to_Point2D(p2)) )
self.mod_strips.add(strip)
self.rotate_about = Point_to_Point2D(outerP)
if not self.rotate_about: return False
@RFTool_PolyStrips.FSM_State('rotate', 'enter')
def rotate_enter(self):
for strip in self.mod_strips: strip.capture_edges()
self.mousedown = self.actions.mouse
self.rfwidget = self.rfwidgets['move']
self.move_done_pressed = 'confirm'
self.move_done_released = 'action alt0'
self.move_cancelled = 'cancel'
self.rfcontext.undo_push('rotate')
self._timer = self.actions.start_timer(120.0)
self.rfcontext.set_accel_defer(True)
@RFTool_PolyStrips.FSM_State('rotate')
@profiler.function
def rotate(self):
if not self.rotate_about: return 'main'
if self.actions.pressed(self.move_done_pressed):
return 'main'
if self.actions.released(self.move_done_released):
return 'main'
if self.actions.pressed(self.move_cancelled):
self.rfcontext.undo_cancel()
return 'main'
if self.actions.mousemove or not self.actions.mousemove_prev: return
prev_diff = self.mousedown - self.rotate_about
prev_rot = math.atan2(prev_diff.x, prev_diff.y)
cur_diff = self.actions.mouse - self.rotate_about
cur_rot = math.atan2(cur_diff.x, cur_diff.y)
angle = prev_rot - cur_rot
for cbpt,oco,oco2D in self.sel_cbpts:
xy = rotate2D(oco2D, angle, origin=self.rotate_about)
xyz,_,_,_ = self.rfcontext.raycast_sources_Point2D(xy)
if xyz: cbpt.xyz = xyz
for strip in self.mod_strips:
strip.update(self.rfcontext.nearest_sources_Point, self.rfcontext.raycast_sources_Point, self.rfcontext.update_face_normal)
self.update_strip_viz()
self.rfcontext.dirty()
@RFTool_PolyStrips.FSM_State('rotate', 'exit')
def rotate_exit(self):
self._timer.done()
self.rfcontext.set_accel_defer(False)
self.update_target(force=True)
@RFTool_PolyStrips.FSM_State('scale', 'can enter')
@profiler.function
def scale_canenter(self):
if not self.hovering_handles: return False
self.mod_strips = set()
Point_to_Point2D = self.rfcontext.Point_to_Point2D
innerP,outerP,outerF = None,None,None
for strip in self.strips:
bmf0,bmf1 = strip.end_faces()
p0,p1,p2,p3 = strip.curve.points()
if p1 in self.hovering_handles: innerP,outerP,outerF = p1,p0,bmf0
if p2 in self.hovering_handles: innerP,outerP,outerF = p2,p3,bmf1
if not innerP or not outerP or not outerF: return False
self.scale_strips = []
for strip in self.strips:
bmf0,bmf1 = strip.end_faces()
if bmf0 == outerF:
self.scale_strips.append((strip, 1))
self.mod_strips.add(strip)
if bmf1 == outerF:
self.scale_strips.append((strip, 2))
self.mod_strips.add(strip)
for strip in self.mod_strips: strip.capture_edges()
if not self.scale_strips: return False
self.scale_from = Point_to_Point2D(outerP)
@RFTool_PolyStrips.FSM_State('scale', 'enter')
def scale_enter(self):
self.mousedown = self.actions.mouse
self.rfwidget = None #self.rfwidgets['default']
self.rfcontext.undo_push('scale')
self.move_done_pressed = None
self.move_done_released = 'action'
self.move_cancelled = 'cancel'
falloff = options['polystrips scale falloff']
self.scale_bmf = {}
self.scale_bmv = {}
for strip,iinner in self.scale_strips:
iend = 0 if iinner == 1 else 3
s0,s1 = (1,0) if iend == 0 else (0,1)
l = len(strip.bmf_strip)
for ibmf,bmf in enumerate(strip.bmf_strip):
if bmf in self.scale_bmf: continue
p = ibmf/(l-1)
s = (s0 + (s1-s0) * p) ** falloff
self.scale_bmf[bmf] = s
for bmf in self.scale_bmf.keys():
c = bmf.center()
s = self.scale_bmf[bmf]
for bmv in bmf.verts:
if bmv not in self.scale_bmv:
self.scale_bmv[bmv] = []
self.scale_bmv[bmv] += [(c, bmv.co-c, s)]
self._timer = self.actions.start_timer(120.0)
self.rfcontext.set_accel_defer(True)
@RFTool_PolyStrips.FSM_State('scale')
@profiler.function
def scale(self):
if self.actions.pressed(self.move_done_pressed):
return 'main'
if self.actions.released(self.move_done_released, ignoremods=True):
return 'main'
if self.actions.pressed(self.move_cancelled):
self.rfcontext.undo_cancel()
return 'main'
if self.actions.mousemove or not self.actions.mousemove_prev: return
vec0 = self.mousedown - self.scale_from
vec1 = self.actions.mouse - self.scale_from
scale = vec1.length / vec0.length
snap2D_vert = self.rfcontext.snap2D_vert
snap_vert = self.rfcontext.snap_vert
for bmv in self.scale_bmv.keys():
l = self.scale_bmv[bmv]
n = Vector()
for c,v,sc in l:
n += c + v * max(0, 1 + (scale-1) * sc)
bmv.co = n / len(l)
snap_vert(bmv)
self.rfcontext.dirty()
@RFTool_PolyStrips.FSM_State('scale', 'exit')
def scale_exit(self):
self._timer.done()
self.rfcontext.set_accel_defer(False)
self.update_target(force=True)
@RFTool_PolyStrips.FSM_State('move all', 'can enter')
@profiler.function
def moveall_canenter(self):
bmfaces = self.rfcontext.get_selected_faces()
if not bmfaces: return False
bmverts = set(bmv for bmf in bmfaces for bmv in bmf.verts)
self.bmverts = [(bmv, self.rfcontext.Point_to_Point2D(bmv.co)) for bmv in bmverts]
@RFTool_PolyStrips.FSM_State('move all', 'enter')
def moveall_enter(self):
lmb_drag = self.actions.using('action')
self.actions.unpress()
self.rfwidget = None # self.rfwidgets['default']
self.rfcontext.undo_push('move grabbed')
self.moveall_opts = {
'mousedown': self.actions.mouse,
'move_done_pressed': None if lmb_drag else 'confirm',
'move_done_released': 'action' if lmb_drag else None,
'move_cancelled': 'cancel',
'timer': self.actions.start_timer(120.0),
}
self.rfcontext.split_target_visualization_selected()
self.rfcontext.set_accel_defer(True)
@RFTool_PolyStrips.FSM_State('move all')
@profiler.function
def moveall(self):
opts = self.moveall_opts
if self.actions.pressed(opts['move_done_pressed']):
return 'main'
if self.actions.released(opts['move_done_released']):
return 'main'
if self.actions.pressed(opts['move_cancelled']):
self.rfcontext.undo_cancel()
return 'main'
if self.actions.mousemove or not self.actions.mousemove_prev: return
delta = Vec2D(self.actions.mouse - opts['mousedown'])
set2D_vert = self.rfcontext.set2D_vert
for bmv,xy in self.bmverts:
if not bmv.is_valid: continue
set2D_vert(bmv, xy + delta)
self.rfcontext.update_verts_faces(v for v,_ in self.bmverts)
self.rfcontext.dirty()
#self.update()
@RFTool_PolyStrips.FSM_State('move all', 'exit')
def moveall_exit(self):
self.moveall_opts['timer'].done()
self.rfcontext.set_accel_defer(False)
self.rfcontext.clear_split_target_visualization()
self.update_target(force=True)
@RFTool_PolyStrips.Draw('post3d')
def draw_post3d_spline(self):
if not self.strips: return
strips = self.strips
hov_strips = self.hovering_strips
Point_to_Point2D = self.rfcontext.Point_to_Point2D
def is_visible(v):
return True # self.rfcontext.is_visible(v, None)
def draw(alphamult, hov_alphamult, hover):
nonlocal strips
if not hover: hov_alphamult = alphamult
size_outer = options['polystrips handle outer size']
size_inner = options['polystrips handle inner size']
border_outer = options['polystrips handle border']
border_inner = options['polystrips handle border']
bgl.glEnable(bgl.GL_BLEND)
# draw outer-inner lines
pts = [Point_to_Point2D(p) for strip in strips for p in strip.curve.points()]
self.rfcontext.drawing.draw2D_lines(pts, (1,1,1,0.45), width=2)
# draw junction handles (outer control points of curve)
faces_drawn = set() # keep track of faces, so don't draw same handles 2+ times
pts_outer,pts_inner = [],[]
for strip in strips:
bmf0,bmf1 = strip.end_faces()
p0,p1,p2,p3 = strip.curve.points()
if bmf0 not in faces_drawn:
if is_visible(p0): pts_outer += [Point_to_Point2D(p0)]
faces_drawn.add(bmf0)
if bmf1 not in faces_drawn:
if is_visible(p3): pts_outer += [Point_to_Point2D(p3)]
faces_drawn.add(bmf1)
if is_visible(p1): pts_inner += [Point_to_Point2D(p1)]
if is_visible(p2): pts_inner += [Point_to_Point2D(p2)]
pts_outer = [p for p in pts_outer if p]
pts_inner = [p for p in pts_inner if p]
self.rfcontext.drawing.draw2D_points(pts_outer, (1.00,1.00,1.00,1.0), radius=size_outer, border=border_outer, borderColor=(0.00,0.00,0.00,0.5))
self.rfcontext.drawing.draw2D_points(pts_inner, (0.25,0.25,0.25,0.8), radius=size_inner, border=border_inner, borderColor=(0.75,0.75,0.75,0.4))
if True:
# always draw on top!
bgl.glEnable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glDepthMask(bgl.GL_FALSE)
draw(1.0, 1.0, False)
bgl.glEnable(bgl.GL_DEPTH_TEST)
bgl.glDepthMask(bgl.GL_TRUE)
else:
# allow handles to go under surface
bgl.glDepthRange(0, 0.9999) # squeeze depth just a bit
bgl.glEnable(bgl.GL_BLEND)
bgl.glDepthMask(bgl.GL_FALSE) # do not overwrite depth
bgl.glEnable(bgl.GL_DEPTH_TEST)
# draw behind geometry
bgl.glDepthFunc(bgl.GL_GREATER)
draw(
options['target hidden alpha'],
options['target hidden alpha'], # hover
False, #options['polystrips handle hover']
)
# draw in front of geometry
bgl.glDepthFunc(bgl.GL_LEQUAL)
draw(
options['target alpha'],
options['target alpha'], # hover
False, #options['polystrips handle hover']
)
bgl.glDepthFunc(bgl.GL_LEQUAL)
bgl.glDepthRange(0.0, 1.0)
bgl.glDepthMask(bgl.GL_TRUE)
if False:
# draw spline for each strip
for strip in self.strips:
pp = None
for (_,p,_) in strip.curve.tessellation:
p = self.rfcontext.Point_to_Point2D(p)
if p and pp:
self.rfcontext.drawing.draw2D_line(
pp, p,
(1,1,1,0.5),
width=2, stipple=[2,2],
)
pp = p
if False and hasattr(self, 'count_data'):
# draw strip segment count change data (splines for edges)
splines = self.count_data['splines']
for spline in splines:
for cb in spline.tessellation:
pp = None
for (_,p,_) in cb:
p = self.rfcontext.Point_to_Point2D(p)
if p and pp:
self.rfcontext.drawing.draw2D_line(
pp, p,
(1,1,0.2,0.5),
width=2, stipple=[2,2],
)
pp = p
# self.rfcontext.drawing.draw2D_points([self.rfcontext.Point_to_Point2D(p) for p in self.count_data['points']], (1, 0.5, 0.5, 1.0), radius=5)
@RFTool_PolyStrips.Draw('post2d')
def draw_post2d(self):
self.rfcontext.drawing.set_font_size(12)
Point_to_Point2D = self.rfcontext.Point_to_Point2D
text_draw2D = self.rfcontext.drawing.text_draw2D
for strip in self.strips:
strip = [f for f in strip if f.is_valid]
c = len(strip)
vs = [Point_to_Point2D(f.center()) for f in strip]
vs = [Vec2D(v) for v in vs if v]
if not vs: continue
ctr = sum(vs, Vec2D((0,0))) / len(vs)
text_draw2D('%d' % c, ctr+Vec2D((2,14)), color=(1,1,0,1), dropshadow=(0,0,0,0.5))
| 39.371553
| 184
| 0.607955
|
2b65d566b9e2a0182f334a7077590771b4cb818b
| 974
|
py
|
Python
|
job/models.py
|
AnsGoo/cronJob
|
0f9aedbe2ffe3c405376c13a7c2d24540360bd0e
|
[
"MIT"
] | 11
|
2021-06-27T05:00:09.000Z
|
2022-02-15T14:31:21.000Z
|
job/models.py
|
AnsGoo/cornJob
|
0f9aedbe2ffe3c405376c13a7c2d24540360bd0e
|
[
"MIT"
] | 1
|
2021-12-01T12:20:54.000Z
|
2021-12-08T11:54:12.000Z
|
job/models.py
|
AnsGoo/cornJob
|
0f9aedbe2ffe3c405376c13a7c2d24540360bd0e
|
[
"MIT"
] | 2
|
2021-06-27T05:00:16.000Z
|
2021-08-09T06:36:09.000Z
|
import json
from typing import Dict
from sqlalchemy import Column, Integer, String, Text, Enum, DateTime
from app.database import Base
class JobRecord(Base):
id = Column(Integer, primary_key=True, autoincrement=True, index=True)
job_id = Column(String(191))
name = Column(String(255))
args = Column(Text, nullable=True)
kwargs = Column(Text, nullable=True)
trigger = Column(Enum('date', 'cron', 'interval'))
result = Column(Enum('SUCCESS', 'FAILED', 'MISSED'))
out = Column(Text, nullable=True, server_default=None)
runtime = Column(DateTime)
def to_json(self) -> Dict:
return {
'id': self.id,
'name': self.name,
'args': json.loads(self.args) if self.args else [],
'kwargs': json.loads(self.kwargs) if self.kwargs else dict(),
'trigger': self.trigger,
'result': self.result,
'out': self.out,
'runtime': self.runtime
}
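
if __name__ == '__main__':
    # Minimal sketch (assumes app.database.Base is importable): build an
    # unsaved record and round-trip its JSON-encoded fields. All field values
    # below are made up for illustration.
    record = JobRecord(
        job_id='demo-1', name='nightly-report',
        args=json.dumps(['2021-06-27']), kwargs=json.dumps({'retry': 1}),
        trigger='cron', result='SUCCESS', out='ok',
    )
    print(record.to_json())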
| 32.466667
| 74
| 0.61191
|
fd4f99e84c943319e6544bdfaf7596fc366fda4e
| 4,290
|
py
|
Python
|
ete2/treeview/_about.py
|
csc8630Spring2014/Clusterizer
|
f64b0cabfbf4fa117b73a2af9355f67cc9207fe5
|
[
"MIT"
] | 1
|
2017-05-10T02:51:51.000Z
|
2017-05-10T02:51:51.000Z
|
ete2/treeview/_about.py
|
csc8630Spring2014/Clusterizer
|
f64b0cabfbf4fa117b73a2af9355f67cc9207fe5
|
[
"MIT"
] | null | null | null |
ete2/treeview/_about.py
|
csc8630Spring2014/Clusterizer
|
f64b0cabfbf4fa117b73a2af9355f67cc9207fe5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
__VERSION__="ete2-2.2rev1056"
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created: Tue Jan 10 15:56:58 2012
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_About(object):
def setupUi(self, About):
About.setObjectName("About")
About.resize(462, 249)
self.verticalLayoutWidget = QtGui.QWidget(About)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 0, 441, 208))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.version = QtGui.QLabel(self.verticalLayoutWidget)
self.version.setObjectName("version")
self.verticalLayout.addWidget(self.version)
self.retranslateUi(About)
QtCore.QMetaObject.connectSlotsByName(About)
def retranslateUi(self, About):
About.setWindowTitle(QtGui.QApplication.translate("About", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("About", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'DejaVu Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><img src=\":/ete icons/ete_logo.png\" /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-weight:600;\"><span style=\" font-size:11pt;\">ETE: a python Environment for Tree Exploration</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt; font-weight:600;\"><a href=\"http://ete.cgenomics.org\"><span style=\" text-decoration: underline; color:#0057ae;\">http://ete.cgenomics.org</span></a></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.version.setText(QtGui.QApplication.translate("About", "VERSION", None, QtGui.QApplication.UnicodeUTF8))
import ete_resources_rc
| 51.071429
| 311
| 0.680886
|
082718e61a663b216fa681e53fe7397a0a761266
| 21,352
|
py
|
Python
|
venv/lib/python3.7/site-packages/datalad/support/archives.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/datalad/support/archives.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/datalad/support/archives.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Various handlers/functionality for different types of files (e.g. for archives)
"""
import hashlib
import patoolib
from .external_versions import external_versions
# There were issues, so let's stay consistently with recent version
assert(external_versions["patoolib"] >= "1.7")
import os
import tempfile
from .exceptions import MissingExternalDependency
from .path import (
basename,
join as opj,
exists, abspath, isabs, normpath, relpath, pardir, isdir,
realpath,
sep as opsep,
)
from six import next, PY2
from six.moves.urllib.parse import unquote as urlunquote
import string
import random
from .locking import lock_if_check_fails
from ..utils import (
any_re_search,
assure_bytes,
chpwd,
rmdir,
unlink,
)
import logging
lgr = logging.getLogger('datalad.files')
# Monkey-patch patoolib's logging, so it logs coherently with the rest of
# datalad
import patoolib.util
#
# Seems have managed with swallow_outputs
#
# def _patool_log(level, msg):
# lgr.log(level, "patool: %s" % msg)
#
# def _patool_log_info(msg, *args, **kwargs):
# _patool_log(logging.DEBUG, msg)
#
# def _patool_log_error(msg, *args, **kwargs):
# _patool_log(logging.ERROR, msg)
#
# patoolib.util.log_info = _patool_log_info
# patoolib.util.log_error = _patool_log_error
# patoolib.util.log_internal_error = _patool_log_error
# we need to decorate patool.util.run
# because otherwise it just lets processes to spit out everything to std and we
# do want to use it at "verbosity>=0" so we could get idea on what is going on.
# And I don't want to mock for every invocation
from ..support.exceptions import CommandError
from ..utils import swallow_outputs
from ..utils import rmtemp
from ..cmd import Runner
from ..consts import ARCHIVES_TEMP_DIR
from ..utils import rmtree
from ..utils import get_tempfile_kwargs
from ..utils import assure_unicode
from ..utils import on_windows
_runner = Runner()
def _patool_run(cmd, verbosity=0, **kwargs):
"""Decorated runner for patool so it doesn't spit out outputs to stdout"""
# use our runner
try:
# kwargs_ = kwargs[:]; kwargs_['shell'] = True
# Any debug/progress output could be spit out to stderr so let's
# "expect" it.
#
if isinstance(cmd, (list, tuple)) and kwargs.get('shell'):
# patool (as far as I see it) takes care about quoting args
cmd = ' '.join(cmd)
out, err = _runner.run(cmd,
#log_stdout='offline',
#log_stderr='offline',
#expect_stderr=True,
#stdin=open('/dev/null'),
**kwargs)
lgr.debug("Finished running for patool. stdout=%s, stderr=%s", out, err)
return 0
except CommandError as e:
return e.code
except Exception as e:
lgr.error("While invoking runner caught unexpected exception: %s" % e)
return 100 # unknown beast
patoolib.util.run = _patool_run
# yoh: only keys are used atm, logic in decompress_file is replaced to use
# patool
DECOMPRESSORS = {
r'\.(tar\.bz|tbz)$': 'tar -xjvf %(file)s -C %(dir)s',
r'\.(tar\.xz)$': 'tar -xJvf %(file)s -C %(dir)s',
r'\.(tar\.gz|tgz)$': 'tar -xzvf %(file)s -C %(dir)s',
r'\.(zip)$': 'unzip %(file)s -d %(dir)s',
}
def unixify_path(path):
r"""On windows convert paths from drive:\d\file to /drive/d/file
This overcomes problems with various cmdline tools we are to use,
such as tar etc
"""
if on_windows:
drive, path_ = os.path.splitdrive(path)
path_ = path_.split(os.sep)
path_ = '/'.join(path_)
if drive:
# last one must be :
assert(drive[-1] == ":")
return '/%s%s' % (drive[:-1], path_)
else:
return path_
else:
return path
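# Illustrative sketch (not in the original module) of the mapping above; on a
# Windows box one would expect, e.g.:
#   unixify_path(r'C:\data\file.tar')  ->  '/C/data/file.tar'
# while on POSIX systems the path is returned unchanged.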
def decompress_file(archive, dir_, leading_directories='strip'):
"""Decompress `archive` into a directory `dir_`
Parameters
----------
archive: str
dir_: str
leading_directories: {'strip', None}
If `strip`, and archive contains a single leading directory under which
all content is stored, all the content will be moved one directory up
and that leading directory will be removed.
"""
if not exists(dir_):
lgr.debug("Creating directory %s to extract archive into" % dir_)
os.makedirs(dir_)
with swallow_outputs() as cmo:
archive = assure_bytes(archive)
dir_ = assure_bytes(dir_)
patoolib.util.check_existing_filename(archive)
patoolib.util.check_existing_filename(dir_, onlyfiles=False)
# Call protected one to avoid the checks on existence on unixified path
outdir = unixify_path(dir_)
if not PY2:
# should be supplied in PY3 to avoid b''
outdir = assure_unicode(outdir)
archive = assure_unicode(archive)
format_compression = patoolib.get_archive_format(archive)
if format_compression == ('gzip', None):
# Yarik fell into the trap of being lazy and not providing proper
# support for .gz .xz etc "stream archivers" formats in handling
# of archives. ATM out support for .gz relies on behavior of 7z while
# extracting them and respecting possibly present .gz filename
# header field.
# See more https://github.com/datalad/datalad/pull/3176#issuecomment-466819861
# TODO: provide proper handling of all those archives without
# relying on any filename been stored in the header
program = patoolib.find_archive_program(
format_compression[0], 'extract')
if basename(program) != '7z':
raise MissingExternalDependency(
"cmd:7z",
msg="(Not) Funny enough but ATM we need p7zip installation "
"to handle .gz files extraction 'correctly'"
)
patoolib._extract_archive(unixify_path(archive),
outdir=outdir,
verbosity=100)
if cmo.out:
lgr.debug("patool gave stdout:\n%s" % cmo.out)
if cmo.err:
lgr.debug("patool gave stderr:\n%s" % cmo.err)
# Note: (ben) Experienced issue, where extracted tarball
# lacked execution bit of directories, leading to not being
# able to delete them while having write permission.
# Can't imagine a situation, where we would want to fail on
# that kind of mess. So, to be sure set it.
if not on_windows:
os.chmod(dir_,
os.stat(dir_).st_mode |
os.path.stat.S_IEXEC)
for root, dirs, files in os.walk(dir_, followlinks=False):
for d in dirs:
subdir = opj(root, d)
os.chmod(subdir,
os.stat(subdir).st_mode |
os.path.stat.S_IEXEC)
if leading_directories == 'strip':
_, dirs, files = next(os.walk(dir_))
if not len(files) and len(dirs) == 1:
# move all the content under dirs[0] up 1 level
widow_dir = opj(dir_, dirs[0])
lgr.debug("Moving content within %s upstairs" % widow_dir)
subdir, subdirs_, files_ = next(os.walk(opj(dir_, dirs[0])))
for f in subdirs_ + files_:
os.rename(opj(subdir, f), opj(dir_, f))
rmdir(widow_dir)
elif leading_directories is None:
pass # really do nothing
else:
raise NotImplementedError("Not supported %s" % leading_directories)
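# Hedged usage sketch (paths are made up, not part of the module): extracting
# an archive whose content sits under a single top-level directory and
# flattening that directory away:
#
#   decompress_file('/tmp/data.tar.gz', '/tmp/extracted',
#                   leading_directories='strip')
#
# With leading_directories=None the wrapping directory would be kept as-is.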
def compress_files(files, archive, path=None, overwrite=True):
"""Compress `files` into an `archive` file
Parameters
----------
files : list of str
archive : str
path : str
Alternative directory under which compressor will be invoked, to e.g.
take into account relative paths of files and/or archive
overwrite : bool
Whether to allow overwriting the target archive file if one already exists
"""
with swallow_outputs() as cmo:
with chpwd(path):
if not overwrite:
patoolib.util.check_new_filename(archive)
patoolib.util.check_archive_filelist(files)
# Call protected one to avoid the checks on existence on unixified path
patoolib._create_archive(unixify_path(archive),
[unixify_path(f) for f in files],
verbosity=100)
if cmo.out:
lgr.debug("patool gave stdout:\n%s" % cmo.out)
if cmo.err:
lgr.debug("patool gave stderr:\n%s" % cmo.err)
def _get_cached_filename(archive):
"""A helper to generate a filename which has original filename and additional suffix
which wouldn't collide across files with the same name from different locations
"""
#return "%s_%s" % (basename(archive), hashlib.md5(archive).hexdigest()[:5])
# per se there is no reason to maintain any long original name here.
archive_cached = hashlib.md5(assure_bytes(realpath(archive))).hexdigest()[:10]
lgr.debug("Cached directory for archive %s is %s", archive, archive_cached)
return archive_cached
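# Illustrative sketch (the hash below is a made-up value): the cache name is a
# stable function of the archive's resolved path, e.g.
#   _get_cached_filename('/data/archive.tar.gz')  ->  '6f1ed002ab'
# so the same archive always maps to the same cache directory.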
def _get_random_id(size=6, chars=string.ascii_uppercase + string.digits):
"""Return a random ID composed from digits and uppercase letters
upper-case so we are tolerant to unlikely collisions on dummy FSs
"""
return ''.join(random.choice(chars) for _ in range(size))
class ArchivesCache(object):
"""Cache to maintain extracted archives
Parameters
----------
toppath : str
Top directory under .git/ of which temp directory would be created.
If not provided -- random tempdir is used
persistent : bool, optional
Passed over into generated ExtractedArchives
"""
# TODO: make caching persistent across sessions/runs, with cleanup
# IDEA: extract under .git/annex/tmp so later on annex unused could clean it
# all up
def __init__(self, toppath=None, persistent=False):
self._toppath = toppath
if toppath:
path = opj(toppath, ARCHIVES_TEMP_DIR)
if not persistent:
tempsuffix = "-" + _get_random_id()
lgr.debug("For non-persistent archives using %s suffix for path %s",
tempsuffix, path)
path += tempsuffix
else:
if persistent:
raise ValueError("%s cannot be persistent since no toppath was provided" % self)
path = tempfile.mktemp(**get_tempfile_kwargs())
self._path = path
self.persistent = persistent
# TODO? assure that it is absent or we should allow for it to persist a bit?
#if exists(path):
# self._clean_cache()
self._archives = {}
# TODO: begging for a race condition
if not exists(path):
lgr.debug("Initiating clean cache for the archives under %s" % self.path)
try:
self._made_path = True
os.makedirs(path)
lgr.debug("Cache initialized")
except:
lgr.error("Failed to initialize cached under %s" % path)
raise
else:
lgr.debug("Not initiating existing cache for the archives under %s" % self.path)
self._made_path = False
@property
def path(self):
return self._path
def clean(self, force=False):
for aname, a in list(self._archives.items()):
a.clean(force=force)
del self._archives[aname]
# Probably we should not rely on _made_path and not bother if persistent removing it
# if ((not self.persistent) or force) and self._made_path:
# lgr.debug("Removing the entire archives cache under %s" % self.path)
# rmtemp(self.path)
if (not self.persistent) or force:
lgr.debug("Removing the entire archives cache under %s" % self.path)
rmtemp(self.path)
def _get_normalized_archive_path(self, archive):
"""Return full path to archive
So we have consistent operation from different subdirs,
while referencing archives from the topdir
TODO: why do we need it???
"""
if not isabs(archive) and self._toppath:
out = normpath(opj(self._toppath, archive))
if relpath(out, self._toppath).startswith(pardir):
raise RuntimeError("%s points outside of the topdir %s"
% (archive, self._toppath))
if isdir(out):
raise RuntimeError("got a directory here... bleh")
return out
return archive
def get_archive(self, archive):
archive = self._get_normalized_archive_path(archive)
if archive not in self._archives:
self._archives[archive] = \
ExtractedArchive(archive,
opj(self.path, _get_cached_filename(archive)),
persistent=self.persistent)
return self._archives[archive]
def __getitem__(self, archive):
return self.get_archive(archive)
def __delitem__(self, archive):
archive = self._get_normalized_archive_path(archive)
self._archives[archive].clean()
del self._archives[archive]
def __del__(self):
try:
# we can at least try
if not self.persistent:
self.clean()
except: # MIH: IOError?
pass
class ExtractedArchive(object):
"""Container for the extracted archive
"""
    # suffix to use for a stamp file so we could guarantee that the extracted
    # archive is complete and up to date
STAMP_SUFFIX = '.stamp'
def __init__(self, archive, path=None, persistent=False):
self._archive = archive
# TODO: bad location for extracted archive -- use tempfile
if not path:
path = tempfile.mktemp(**get_tempfile_kwargs(prefix=_get_cached_filename(archive)))
if exists(path) and not persistent:
raise RuntimeError("Directory %s already exists whenever it should not "
"persist" % path)
self._persistent = persistent
self._path = path
def __repr__(self):
return "%s(%r, path=%r)" % (self.__class__.__name__, self._archive, self.path)
def clean(self, force=False):
# would interfere with tests
# if os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
# lgr.info("As instructed, not cleaning up the cache under %s"
# % self._path)
# return
for path, name in [
(self._path, 'cache'),
(self.stamp_path, 'stamp file')
]:
if exists(path):
if (not self._persistent) or force:
lgr.debug("Cleaning up the %s for %s under %s", name, self._archive, path)
# TODO: we must be careful here -- to not modify permissions of files
# only of directories
(rmtree if isdir(path) else unlink)(path)
@property
def path(self):
"""Given an archive -- return full path to it within cache (extracted)
"""
return self._path
@property
def stamp_path(self):
return self._path + self.STAMP_SUFFIX
@property
def is_extracted(self):
return exists(self.path) and exists(self.stamp_path) \
and os.stat(self.stamp_path).st_mtime >= os.stat(self.path).st_mtime
def assure_extracted(self):
"""Return path to the extracted `archive`. Extract archive if necessary
"""
path = self.path
with lock_if_check_fails(
check=(lambda s: s.is_extracted, (self,)),
lock_path=path,
operation="extract"
) as (check, lock):
if lock:
assert not check
self._extract_archive(path)
return path
def _extract_archive(self, path):
# we need to extract the archive
# TODO: extract to _tmp and then move in a single command so we
# don't end up picking up broken pieces
lgr.debug(u"Extracting {self._archive} under {path}".format(**locals()))
if exists(path):
lgr.debug(
"Previous extracted (but probably not fully) cached archive "
"found. Removing %s",
path)
rmtree(path)
os.makedirs(path)
assert (exists(path))
# remove old stamp
if exists(self.stamp_path):
rmtree(self.stamp_path)
decompress_file(self._archive, path, leading_directories=None)
        # TODO: make this optional, since we might want to use this content,
        # move it into the tree, etc.
# lgr.debug("Adjusting permissions to R/O for the extracted content")
# rotree(path)
assert (exists(path))
# create a stamp
with open(self.stamp_path, 'wb') as f:
f.write(assure_bytes(self._archive))
# assert that stamp mtime is not older than archive's directory
assert (self.is_extracted)
# TODO: remove?
#def has_file_ready(self, afile):
# lgr.debug(u"Checking file {afile} from archive {archive}".format(**locals()))
# return exists(self.get_extracted_filename(afile))
def get_extracted_filename(self, afile):
"""Return full path to the `afile` within extracted `archive`
It does not actually extract any archive
"""
return opj(self.path, urlunquote(afile))
def get_extracted_files(self):
"""Generator to provide filenames which are available under extracted archive
"""
path = self.assure_extracted()
path_len = len(path) + (len(os.sep) if not path.endswith(os.sep) else 0)
for root, dirs, files in os.walk(path): # TEMP
for name in files:
yield assure_unicode(opj(root, name)[path_len:])
def get_leading_directory(self, depth=None, consider=None, exclude=None):
"""Return leading directory of the content within archive
Parameters
----------
depth: int or None, optional
Maximal depth of leading directories to consider. If None - no upper
limit
consider : list of str, optional
Regular expressions for file/directory names to be considered (before
exclude). Applied to the entire relative path to the file as in the archive
exclude: list of str, optional
Regular expressions for file/directory names to be excluded from consideration.
Applied to the entire relative path to the file as in the archive
Returns
-------
str or None:
If there is no single leading directory -- None returned
"""
leading = None
# returns only files, so no need to check if a dir or not
for fpath in self.get_extracted_files():
if consider and not any_re_search(consider, fpath):
continue
if exclude and any_re_search(exclude, fpath):
continue
lpath = fpath.split(opsep)
dpath = lpath[:-1] # directory path components
if leading is None:
leading = dpath if depth is None else dpath[:depth]
else:
if dpath[:len(leading)] != leading:
# find smallest common path
leading_ = []
# TODO: there might be more efficient pythonic way
for d1, d2 in zip(leading, dpath):
if d1 != d2:
break
leading_.append(d1)
leading = leading_
if not len(leading):
# no common leading - ready to exit
return None
return leading if leading is None else opj(*leading)
def get_extracted_file(self, afile):
lgr.debug(u"Requested file {afile} from archive {self._archive}".format(**locals()))
# TODO: That could be a good place to provide "compatibility" layer if
# filenames within archive are too obscure for local file system.
# We could somehow adjust them while extracting and here channel back
# "fixed" up names since they are only to point to the load
self.assure_extracted()
path = self.get_extracted_filename(afile)
# TODO: make robust
lgr.log(2, "Verifying that %s exists" % abspath(path))
assert exists(path), "%s must exist" % path
return path
def __del__(self):
try:
if self._persistent:
self.clean()
except: # MIH: IOError?
pass
| 37.263525
| 96
| 0.601255
|
4ab25938c8ced62720075fdd9777b402c305d44e
| 3,317
|
py
|
Python
|
lib/SetAPI/generic/WorkspaceListObjectsIterator.py
|
r2sunita/SetAPI
|
4ed769ed9678c057c7ded05fb93b9b7dc0874fc2
|
[
"MIT"
] | null | null | null |
lib/SetAPI/generic/WorkspaceListObjectsIterator.py
|
r2sunita/SetAPI
|
4ed769ed9678c057c7ded05fb93b9b7dc0874fc2
|
[
"MIT"
] | null | null | null |
lib/SetAPI/generic/WorkspaceListObjectsIterator.py
|
r2sunita/SetAPI
|
4ed769ed9678c057c7ded05fb93b9b7dc0874fc2
|
[
"MIT"
] | null | null | null |
import time
from collections import deque
class WorkspaceListObjectsIterator:
# ws_info - optional workspace info tuple (if is not defined then either ws_id
# or ws_name should be provided),
# ws_id/ws_name - optional workspace identification (if neither is defined
# then ws_info should be provided),
    # list_objects_params - optional structure with such Workspace.ListObjectsParams
# as 'type' or 'before', 'after', 'showHidden', 'includeMetadata' and so on,
# wherein there is no need to set 'ids' or 'workspaces' or 'min/maxObjectID'.
def __init__(self, ws_client, ws_info_list = None, ws_id = None, ws_name = None,
list_objects_params = {}, part_size = 10000, global_limit = 100000):
self.ws = ws_client
if not ws_info_list:
if (not ws_id) and (not ws_name):
raise ValueError("In case ws_info_list is not set either ws_id or " +
"ws_name should be set")
ws_info_list = [self.ws.get_workspace_info({"id": ws_id, "workspace": ws_name})]
# Let's split workspaces into blocks
blocks = [] # Each block is array of ws_info
sorted_ws_info_deque = deque(sorted(ws_info_list, key = lambda x: x[4]))
while sorted_ws_info_deque:
block_size = 0
block = []
while sorted_ws_info_deque:
item = sorted_ws_info_deque.popleft()
if len(block) == 0 or block_size + item[4] <= part_size:
block.append(item)
block_size += item[4]
else:
sorted_ws_info_deque.appendleft(item)
break
blocks.append(block)
self.block_iter = blocks.__iter__()
self.list_objects_params = list_objects_params
self.min_obj_id = -1
self.max_obj_count = -1
self.part_size = part_size
self.global_limit = global_limit
self.total_counter = 0
self.part_iter = self._load_next_part()
pass
# iterator implementation
def __iter__(self):
return self
def next(self):
while self.part_iter is not None:
try:
self.total_counter += 1
if self.total_counter > self.global_limit:
raise StopIteration
return self.part_iter.next()
except StopIteration:
self.part_iter = self._load_next_part()
raise StopIteration
def _load_next_part(self):
if self.min_obj_id < 0 or self.min_obj_id > self.max_obj_count:
try:
block = self.block_iter.next()
self.list_objects_params['ids'] = [ws_info[0] for ws_info in block]
except StopIteration:
return None
last_ws_info = block[len(block) - 1]
self.min_obj_id = 1
self.max_obj_count = last_ws_info[4]
max_obj_id = self.min_obj_id + self.part_size - 1
self.list_objects_params['minObjectID'] = self.min_obj_id
self.list_objects_params['maxObjectID'] = max_obj_id
self.min_obj_id += self.part_size # For next load cycle
ret = self.ws.list_objects(self.list_objects_params)
return ret.__iter__()
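
# Hedged usage sketch (ws_client below is a hypothetical, already-configured
# Workspace client, and the type string is illustrative; neither is part of
# this module):
#
#   it = WorkspaceListObjectsIterator(ws_client, ws_name='my_workspace',
#                                     list_objects_params={'type': 'KBaseSets.GenomeSet'},
#                                     part_size=1000)
#   for obj_info in it:
#       print(obj_info[1])  # object name is the second element of the info tuple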
| 43.077922
| 92
| 0.602352
|
2736a3cd1f2c78689bfad9057739dbce76e6f686
| 779
|
py
|
Python
|
shop/migrations/0011_genre.py
|
dada00321/clothing_website
|
8834aa5136d18435e095aa00a3e1a70a7dbbcb43
|
[
"MIT"
] | 1
|
2021-04-19T08:36:43.000Z
|
2021-04-19T08:36:43.000Z
|
shop/migrations/0011_genre.py
|
dada00321/clothing_website
|
8834aa5136d18435e095aa00a3e1a70a7dbbcb43
|
[
"MIT"
] | null | null | null |
shop/migrations/0011_genre.py
|
dada00321/clothing_website
|
8834aa5136d18435e095aa00a3e1a70a7dbbcb43
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.5 on 2021-02-20 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0010_auto_20210220_1946'),
]
operations = [
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, default='Women', max_length=200)),
('slug', models.SlugField(default='women', max_length=200, unique=True)),
('index', models.IntegerField(default=1, null=True)),
],
options={
'ordering': ('index',),
},
),
]
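
# For context, a rough sketch of the model this migration creates (inferred
# from the operations above; the repository's actual model in shop/models.py
# is not shown here and may differ):
#
# class Genre(models.Model):
#     name = models.CharField(max_length=200, db_index=True, default='Women')
#     slug = models.SlugField(max_length=200, unique=True, default='women')
#     index = models.IntegerField(default=1, null=True)
#
#     class Meta:
#         ordering = ('index',)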
| 29.961538
| 114
| 0.553273
|
f00826f68bcd67af2cb16741925440c648e23966
| 87,860
|
py
|
Python
|
setup.py
|
PonyPC/python-pcl
|
b2df1599d2794a176caee25ae536ba232ea90254
|
[
"BSD-3-Clause"
] | 1
|
2021-05-12T05:21:50.000Z
|
2021-05-12T05:21:50.000Z
|
setup.py
|
PonyPC/python-pcl
|
b2df1599d2794a176caee25ae536ba232ea90254
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
PonyPC/python-pcl
|
b2df1599d2794a176caee25ae536ba232ea90254
|
[
"BSD-3-Clause"
] | 1
|
2021-07-21T03:07:17.000Z
|
2021-07-21T03:07:17.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
from Cython.Distutils import build_ext
# from Cython.Build import cythonize # MacOS NG
# Use the setuptools setup/Extension; they supersede the distutils versions.
from setuptools import setup, find_packages, Extension
import subprocess
import numpy
import sys
import platform
import os
import time
import shutil
from ctypes.util import find_library
setup_requires = []
install_requires = [
'filelock',
'mock',
'nose',
# RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility
# https://github.com/scikit-image/scikit-image/issues/3655
# 'numpy>=1.15.1,!=1.50.0',
# numpy.ufunc size changed, may indicate binary incompatibility.
'numpy>=1.16.1,!=1.16.2',
'Cython>=0.26.0',
]
def pkgconfig(flag):
# Equivalent in Python 2.7 (but not 2.6):
# subprocess.check_output(['pkg-config', flag] + pcl_libs).split()
p = subprocess.Popen(['pkg-config', flag] +
pcl_libs, stdout=subprocess.PIPE)
stdout, _ = p.communicate()
# Assume no evil spaces in filenames; unsure how pkg-config would
# handle those, anyway.
    # decode() is required in Python 3. TODO: how do we know the encoding?
return stdout.decode().split()
def pkgconfig_win(flag, cut):
# Equivalent in Python 2.7 (but not 2.6):
# subprocess.check_output(['pkg-config', flag] + pcl_libs).split()
p = subprocess.Popen(['.\\pkg-config\\pkg-config.exe', flag] +
pcl_libs, stdout=subprocess.PIPE)
stdout, _ = p.communicate()
# Assume no evil spaces in filenames; unsure how pkg-config would
# handle those, anyway.
    # decode() is required in Python 3. TODO: how do we know the encoding?
# return stdout.decode().split()
# Windows
return stdout.decode().replace('\r\n', '').replace('\ ', ' ').replace('/', '\\').split(cut)
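
# For reference, a hedged sketch of typical return values (the paths below are
# illustrative, not captured from a real machine):
#   pkgconfig('--cflags-only-I')
#       -> ['-I/usr/include/pcl-1.8', '-I/usr/include/eigen3', ...]
#   pkgconfig_win('--libs-only-L', '-L')
#       -> ['', 'C:\\Program Files\\PCL 1.8.1\\lib', ...]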
if platform.system() == "Windows":
# Check whether the interpreter is 32-bit or 64-bit
is_64bits = sys.maxsize > 2**32
# if is_64bits == True
# Scan the environment for PCL_ROOT
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "PCL_ROOT":
pcl_root = v
# print(pcl_root)
# print("%s: find environment PCL_ROOT" % pcl_root)
break
else:
print("cannot find environment PCL_ROOT", file=sys.stderr)
sys.exit(1)
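# (Note on the for/else pattern used throughout this script: the else branch
# runs only when the loop finishes without hitting `break`, i.e. when the
# environment variable or package being searched for was not found.)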
# Set PKG_CONFIG_PATH if it is not already in the environment
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "PKG_CONFIG_PATH":
pkgconfigstr = v
break
else:
# print("cannot find environment PKG_CONFIG_PATH", file=sys.stderr)
print("cannot find environment PKG_CONFIG_PATH")
pkgconfigstr = pcl_root + '\\lib\\pkgconfig;' + pcl_root + \
'\\3rdParty\\FLANN\\lib\\pkgconfig;' + \
pcl_root + '\\3rdParty\\Eigen\\lib\\pkgconfig;'
os.environ["PKG_CONFIG_PATH"] = pcl_root + '\\lib\\pkgconfig;' + pcl_root + \
'\\3rdParty\\FLANN\\lib\\pkgconfig;' + \
pcl_root + '\\3rdParty\\Eigen\\lib\\pkgconfig;'
print("set environment PKG_CONFIG_PATH=%s" % pkgconfigstr)
# Locate the other common 3rd-party packages
# BOOST_ROOT
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "BOOST_ROOT":
boost_root = v
break
else:
boost_root = pcl_root + '\\3rdParty\\Boost'
# EIGEN_ROOT
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "EIGEN_ROOT":
eigen_root = v
break
else:
eigen_root = pcl_root + '\\3rdParty\\Eigen'
# FLANN_ROOT
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "FLANN_ROOT":
flann_root = v
break
else:
flann_root = pcl_root + '\\3rdParty\\FLANN'
# QHULL_ROOT
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "QHULL_ROOT":
qhull_root = v
break
else:
qhull_root = pcl_root + '\\3rdParty\\Qhull'
# VTK_DIR
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "VTK_DIR":
vtk_root = v
break
else:
vtk_root = pcl_root + '\\3rdParty\\VTK'
# custom(CUDA)
# custom(WinPcap)
# get pkg-config.exe filePath
pkgconfigPath = os.getcwd() + '\\pkg-config\\pkg-config.exe'
print(pkgconfigPath)
# AppVeyor Check
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "PCL_VERSION":
pcl_version = '-' + v
break
else:
# Try to find PCL. XXX we should only do this when trying to build or install.
# in order of preference
PCL_SUPPORTED = ["-1.9", "-1.8", "-1.7", "-1.6", ""]
for pcl_version in PCL_SUPPORTED:
if subprocess.call(['.\\pkg-config\\pkg-config.exe', 'pcl_common%s' % pcl_version]) == 0:
# if subprocess.call([pkgconfigPath, 'pcl_common%s' % pcl_version]) == 0:
break
else:
print("%s: error: cannot find PCL, tried" %
sys.argv[0], file=sys.stderr)
for version in PCL_SUPPORTED:
print(' pkg-config pcl_common%s' % version, file=sys.stderr)
sys.exit(1)
print(pcl_version)
# pcl_version = '-1.6'
# Python Version Check
info = sys.version_info
if pcl_version == '-1.6':
# PCL 1.6.0 python Version == 3.4(>= 3.4?, 2.7 -> NG)
# Visual Studio 2010
if info.major == 3 and info.minor == 4:
boost_version = '1_49'
vtk_version = '5.8'
pcl_libs = ["common", "features", "filters", "kdtree", "octree",
"registration", "sample_consensus", "search", "segmentation",
"surface", "tracking", "visualization"]
pass
else:
        print('unsupported Python version for this PCL version')
sys.exit(1)
elif pcl_version == '-1.7':
# PCL 1.7.2 python Version >= 3.5
# Visual Studio 2015
if info.major == 3 and info.minor >= 5:
boost_version = '1_57'
vtk_version = '6.2'
pass
# pcl-1.7?
pcl_libs = ["2d", "common", "features", "filters", "geometry",
"io", "kdtree", "keypoints", "ml", "octree", "outofcore", "people",
"recognition", "registration", "sample_consensus", "search",
"segmentation", "surface", "tracking", "visualization"]
else:
        print('unsupported Python version for this PCL version')
sys.exit(1)
elif pcl_version == '-1.8':
# PCL 1.8.0 python Version >= 3.5
# Visual Studio 2015/2017
if info.major == 3 and info.minor >= 5:
# PCL 1.8.1
boost_version = '1_64'
vtk_version = '6.3'
# pcl-1.8
# 1.8.1 use 2d required features
pcl_libs = ["2d", "common", "features", "filters", "geometry",
"io", "kdtree", "keypoints", "ml", "octree", "outofcore", "people",
"recognition", "registration", "sample_consensus", "search",
"segmentation", "stereo", "surface", "tracking", "visualization"]
pass
else:
# if info.major == 2 and info.minor == 7:
# import _msvccompiler
# import distutils.msvc9compiler
#
# def find_vcvarsall(version):
# # use vc2017 set vcvarsall.bat path
# # return "C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Auxiliary/Build/vcvarsall.bat"
# # return "C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvarsall.bat"
# vcvarsall, vcruntime = _msvccompiler._find_vcvarsall('x64')
# if vcvarsall is not None:
# print('set msvc2017/2015 compiler')
# print(vcvarsall)
# return vcvarsall
# else:
# print('no set msvc2017/2015 compiler')
# return None
#
# distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
#
# boost_version = '1_64'
# vtk_version = '8.0'
# # pcl-1.8
# # 1.8.1 use 2d required features
# pcl_libs = ["2d", "common", "features", "filters", "geometry",
# "io", "kdtree", "keypoints", "ml", "octree", "outofcore", "people",
# "recognition", "registration", "sample_consensus", "search",
# "segmentation", "stereo", "surface", "tracking", "visualization"]
# else:
# print('no building Python Version')
# sys.exit(1)
        print('unsupported Python version for this PCL version')
sys.exit(1)
elif pcl_version == '-1.9':
# PCL 1.9.1 python Version >= 3.5
# Visual Studio 2015/2017
if info.major == 3 and info.minor >= 5:
# PCL 1.9.1
boost_version = '1_68'
vtk_version = '8.1'
# pcl-1.9
# 1.9.1 use 2d required features
pcl_libs = ["2d", "common", "features", "filters", "geometry",
"io", "kdtree", "keypoints", "ml", "octree", "outofcore", "people",
"recognition", "registration", "sample_consensus", "search",
"segmentation", "stereo", "surface", "tracking", "visualization"]
pass
else:
# if info.major == 2 and info.minor == 7:
        print('unsupported Python version for this PCL version')
sys.exit(1)
else:
print('pcl_version Unknown')
sys.exit(1)
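# Summary of the version pairings selected above (Windows builds):
#   PCL 1.6 -> Boost 1_49, VTK 5.8 (Python 3.4 only)
#   PCL 1.7 -> Boost 1_57, VTK 6.2 (Python >= 3.5)
#   PCL 1.8 -> Boost 1_64, VTK 6.3 (Python >= 3.5)
#   PCL 1.9 -> Boost 1_68, VTK 8.1 (Python >= 3.5)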
# Find build/link options for PCL using pkg-config.
pcl_libs = ["pcl_%s%s" % (lib, pcl_version) for lib in pcl_libs]
# pcl_libs += ['Eigen3']
# print(pcl_libs)
ext_args = defaultdict(list)
# set include path
ext_args['include_dirs'].append(numpy.get_include())
# pkg-config is not used here; the paths are assembled manually
if pcl_version == '-1.6':
# 1.6.0
# boost 1.5.5
# vtk 5.8
# + add VTK
inc_dirs = [pcl_root + '\\include\\pcl' + pcl_version,
pcl_root + '\\3rdParty\\Eigen\\include',
pcl_root + '\\3rdParty\\Boost\\include',
flann_root + '\\include',
qhull_root + '\\include',
vtk_root + '\\include\\vtk-' + vtk_version]
elif pcl_version == '-1.7':
# 1.7.2
# boost 1.5.7
# vtk 6.2
inc_dirs = [pcl_root + '\\include\\pcl' + pcl_version,
eigen_root + '\\eigen3',
boost_root + '\\include\\boost-' + boost_version,
flann_root + '\\include',
qhull_root + '\\include',
vtk_root + '\\include\\vtk-' + vtk_version]
elif pcl_version == '-1.8':
# 1.8.0
# boost 1.6.1
# vtk 7.0
# 1.8.1/vtk 8.0
inc_dirs = [pcl_root + '\\include\\pcl' + pcl_version,
eigen_root + '\\eigen3',
boost_root + '\\include\\boost-' + boost_version,
flann_root + '\\include',
qhull_root + '\\include',
vtk_root + '\\include\\vtk-' + vtk_version]
elif pcl_version == '-1.9':
# 1.9.1
# boost 1.6.8
# vtk 8.1?
# not path set libqhull/libqhull_r(conflict io.h)
inc_dirs = [pcl_root + '\\include\\pcl' + pcl_version,
eigen_root + '\\eigen3',
boost_root + '\\include\\boost-' + boost_version,
flann_root + '\\include',
qhull_root + '\\include',
vtk_root + '\\include\\vtk-' + vtk_version]
else:
inc_dirs = []
for inc_dir in inc_dirs:
ext_args['include_dirs'].append(inc_dir)
# for flag in pkgconfig_win('--libs-only-L', '-L'):
# print(flag.lstrip().rstrip())
# ext_args['library_dirs'].append(flag[2:])
# for flag in pkgconfig_win('--libs-only-other', '-l'):
# print(flag.lstrip().rstrip())
# ext_args['extra_link_args'].append(flag)
# end
# set library path
if pcl_version == '-1.6':
# 3rdParty(+Boost, +VTK)
lib_dirs = [pcl_root + '\\lib',
boost_root + '\\lib',
flann_root + '\\lib',
qhull_root + '\\lib',
vtk_root + '\\lib']
elif pcl_version == '-1.7':
# 1.7.2
# 3rdParty(+Boost, +VTK)
lib_dirs = [pcl_root + '\\lib',
boost_root + '\\lib',
flann_root + '\\lib',
qhull_root + '\\lib',
vtk_root + '\\lib']
elif pcl_version == '-1.8':
# 1.8.0
# 3rdParty(+Boost, +VTK)
lib_dirs = [pcl_root + '\\lib',
boost_root + '\\lib',
flann_root + '\\lib',
qhull_root + '\\lib',
vtk_root + '\\lib']
elif pcl_version == '-1.9':
# 1.9.1
# 3rdParty(+Boost, +VTK)
lib_dirs = [pcl_root + '\\lib',
boost_root + '\\lib',
flann_root + '\\lib',
qhull_root + '\\lib',
vtk_root + '\\lib']
else:
    lib_dirs = []
for lib_dir in lib_dirs:
ext_args['library_dirs'].append(lib_dir)
# OpenNI2?
# %OPENNI2_REDIST64% %OPENNI2_REDIST%
if pcl_version == '-1.6':
# release
# libreleases = ['pcl_apps_release', 'pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_octree_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_segmentation_release', 'pcl_search_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s', 'qhull', 'qhull_p', 'qhull_r', 'qhullcpp']
# release + vtk5.3?
libreleases = ['pcl_apps_release', 'pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_octree_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_segmentation_release', 'pcl_search_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s']
elif pcl_version == '-1.7':
# release
# libreleases = ['pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_octree_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_segmentation_release', 'pcl_search_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s', 'qhull', 'qhull_p', 'qhull_r', 'qhullcpp']
# release + vtk6.2?/6.3?
libreleases = ['pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_octree_release', 'pcl_outofcore_release', 'pcl_people_release', 'pcl_recognition_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_search_release', 'pcl_segmentation_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s', 'qhull', 'qhull_p', 'qhull_r', 'qhullcpp']
elif pcl_version == '-1.8':
# release
# libreleases = ['pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_octree_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_segmentation_release', 'pcl_search_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s', 'qhull', 'qhull_p', 'qhull_r', 'qhullcpp']
# release + vtk7.0
libreleases = ['pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_ml_release', 'pcl_octree_release', 'pcl_outofcore_release', 'pcl_people_release', 'pcl_recognition_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_search_release', 'pcl_segmentation_release', 'pcl_stereo_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s', 'qhull', 'qhull_p', 'qhull_r', 'qhullcpp']
elif pcl_version == '-1.9':
# release
# libreleases = ['pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_octree_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_segmentation_release', 'pcl_search_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s', 'qhull', 'qhull_p', 'qhull_r', 'qhullcpp']
# release + vtk8.1?
libreleases = ['pcl_common_release', 'pcl_features_release', 'pcl_filters_release', 'pcl_io_release', 'pcl_io_ply_release', 'pcl_kdtree_release', 'pcl_keypoints_release', 'pcl_ml_release', 'pcl_octree_release', 'pcl_outofcore_release', 'pcl_people_release', 'pcl_recognition_release', 'pcl_registration_release', 'pcl_sample_consensus_release', 'pcl_search_release', 'pcl_segmentation_release', 'pcl_stereo_release', 'pcl_surface_release', 'pcl_tracking_release', 'pcl_visualization_release', 'flann', 'flann_s', 'qhull', 'qhull_p', 'qhull_r', 'qhullcpp']
else:
libreleases = []
for librelease in libreleases:
ext_args['libraries'].append(librelease)
# vtk 5.8
# vtk 6.2/6.3
# vtk 7.0/8.0
# vtk 8.1
if vtk_version == '5.8':
# pcl1.6 3rdParty
# vtklibreleases = ['vtkInfovis', 'MapReduceMPI', 'vtkNetCDF', 'QVTK', 'vtkNetCDF_cxx', 'vtkRendering', 'vtkViews', 'vtkVolumeRendering', 'vtkWidgets', 'mpistubs', 'vtkalglib', 'vtkCharts', 'vtkexoIIc', 'vtkexpat', 'vtkCommon', 'vtkfreetype', 'vtkDICOMParser', 'vtkftgl', 'vtkFiltering', 'vtkhdf5', 'vtkjpeg', 'vtkGenericFiltering', 'vtklibxml2', 'vtkGeovis', 'vtkmetaio', 'vtkpng', 'vtkGraphics', 'vtkproj4', 'vtkHybrid', 'vtksqlite', 'vtksys', 'vtkIO', 'vtktiff', 'vtkImaging', 'vtkverdict', 'vtkzlib']
vtklibreleases = []
elif vtk_version == '6.3':
# pcl1.7.2 3rdParty
# vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLIC-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksys-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version]
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkexpat-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkfreetype-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkhdf5-' + vtk_version, 'vtkhdf5_hl-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkjpeg-' + vtk_version, 'vtkjsoncpp-' + vtk_version, 'vtklibxml2-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkNetCDF-' + vtk_version, 'vtkNetCDF_cxx-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkpng-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLIC-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtktiff-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 
'vtkViewsInfovis-' + vtk_version, 'vtkzlib-' + vtk_version]
elif vtk_version == '7.0':
# pcl_version 1.8.0
# pcl1.6 3rdParty
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkexpat-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkfreetype-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkhdf5-' + vtk_version, 'vtkhdf5_hl-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkjpeg-' + vtk_version, 'vtkjsoncpp-' + vtk_version, 'vtklibxml2-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkNetCDF-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkpng-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLIC-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtktiff-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkzlib-' + 
vtk_version]
elif vtk_version == '8.0':
# pcl_version 1.8.1
# vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkfreetype-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkhdf5-' + vtk_version, 'vtkhdf5_hl-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkjpeg-' + vtk_version, 'vtkjsoncpp-' + vtk_version, 'vtklibxml2-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkNetCDF-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkpng-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLIC-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtktiff-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkzlib-' + vtk_version]
# vtk8.0
# all-in-one-package(OpenGL)
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkhdf5-' + vtk_version, 'vtkhdf5_hl-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkjsoncpp-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkNetCDF-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtktiff-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkzlib-' + vtk_version]
# conda?(OpenGL2)
# vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkDomainsChemistryOpenGL2-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersPoints-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersPython-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersTopology-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkglew-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOExportOpenGL2-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOTecplotTable-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtklibharu-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkPythonInterpreter-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL2-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PSOpenGL2-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingMatplotlib-' + vtk_version, 'vtkRenderingOpenGL2-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL2-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' 
+ vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkWrappingTools-' + vtk_version, 'vtkCommonCorePython35D-8.0', 'vtkWrappingPython35Core-8.0']
elif vtk_version == '8.1':
# pcl_version 1.9.0/1.9.1
# all-in-one-package(OpenGL)
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersPoints-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersTopology-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOTecplotTable-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtklibharu-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtknetcdfcpp-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkzlib-' + vtk_version]
# conda?(OpenGL2)
# vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkDomainsChemistryOpenGL2-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersPoints-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersPython-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersTopology-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkglew-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOExportOpenGL2-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOTecplotTable-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtklibharu-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtknetcdfcpp-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkPythonInterpreter-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL2-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PSOpenGL2-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingMatplotlib-' + vtk_version, 'vtkRenderingOpenGL2-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL2-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' 
+ vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkWrappingTools-' + vtk_version]
else:
vtklibreleases = []
for librelease in vtklibreleases:
ext_args['libraries'].append(librelease)
# Note: whichever VTK version was selected above, linking against VTK on
# Windows also needs the base Windows system libraries listed below.
# http://public.kitware.com/pipermail/vtkusers/2008-July/047291.html
win_libreleases = ['kernel32', 'user32', 'gdi32', 'winspool', 'comdlg32',
'advapi32', 'shell32', 'ole32', 'oleaut32', 'uuid', 'odbc32', 'odbccp32']
for win_librelease in win_libreleases:
ext_args['libraries'].append(win_librelease)
# http://www.pcl-users.org/error-in-use-PCLVisualizer-td3719235.html
# Download MSSDKs
# http://msdn.microsoft.com/en-us/windows/bb980924.aspx
#
# http://stackoverflow.com/questions/1236670/how-to-make-opengl-apps-in-64-bits-windows
# C:\Program Files (x86)\Microsoft SDKs\Windows\7.0\Lib\x64\OpenGL32.lib
# C:\Program Files (x86)\Microsoft SDKs\Windows\v7.0A\Lib\x64\OpenGL32.lib
# Add OpenGL32 .h/.lib
win_kit_incs = []
win_kit_libdirs = []
# using _open, _close, _chsize functions (pcl/io/low_level_io.h)
# win_kit_libreleases = ['ucrt', 'libucrt']
# for win_kit_librelease in win_kit_libreleases:
# ext_args['libraries'].append(win_kit_librelease)
if pcl_version == '-1.6':
    if is_64bits:
# win_opengl_libdirs = ['C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\Lib\\x64']
# AppVeyor
win_kit_libdirs = [
'C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\Lib\\x64']
else:
# win_opengl_libdirs = ['C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\Lib\\win32']
# AppVeyor
win_kit_libdirs = [
'C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\Lib\\win32']
elif pcl_version == '-1.7':
    if is_64bits:
win_kit_libdirs = [
'C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v8.0A\\Lib\\x64']
else:
win_kit_libdirs = [
'C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v8.0A\\Lib\\win32']
elif pcl_version == '-1.8':
    if is_64bits:
# already set path
# win_kit_libdirs = ['C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v8.1A\\Lib\\x64']
# Windows OS 7?
# win_kit_incs = ['C:\\Program Files (x86)\\Windows Kits\\8.1\\Include\\shared', 'C:\\Program Files (x86)\\Windows Kits\\8.1\\Include\\um']
# win_kit_libdirs = ['C:\\Program Files (x86)\\Windows Kits\\8.1\\Lib\\winv6.3\\um\\x64']
# win_kit_libdirs = ['C:\\Program Files (x86)\\Windows Kits\\10\\Lib\\10.0.10240.0\\ucrt\\x64']
# Windows OS 8/8.1/10?
# win_kit_10_version = '10.0.10240.0'
# win_kit_incs = ['C:\\Program Files (x86)\\Windows Kits\\10\\Include\\10.0.10240.0\\ucrt', 'C:\\Program Files (x86)\\Windows Kits\\10\\Include\\10.0.10240.0\\um']
# win_kit_libdirs = ['C:\\Program Files (x86)\\Windows Kits\\10\\Include\\10.0.10240.0\\ucrt', 'C:\\Program Files (x86)\\Windows Kits\\10\\Include\\10.0.10240.0\\um']
pass
else:
# already set path
# Windows OS 7
# win_kit_libdirs = ['C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v8.1A\\Lib\\win32']
# win_kit_libdirs = ['C:\\Program Files (x86)\\Windows Kits\\8.1\\Lib\\winv6.3\\um\\x86']
# win_kit_incs = ['C:\\Program Files (x86)\\Windows Kits\\8.1\\Include\\shared', 'C:\\Program Files (x86)\\Windows Kits\\8.1\\Include\\um']
pass
elif pcl_version == '-1.9':
    if is_64bits:
# win_kit_10_version = '10.0.15063.0'
# win_kit_incs = ['C:\\Program Files (x86)\\Windows Kits\\10\\Include\\' + win_kit_10_version+ '\\ucrt', 'C:\\Program Files (x86)\\Windows Kits\\10\\Include\\' + win_kit_10_version + '\\um']
# win_kit_libdirs = ['C:\\Program Files (x86)\\Windows Kits\\10\\Include\\' + win_kit_10_version + '\\ucrt\\x64', 'C:\\Program Files (x86)\\Windows Kits\\10\\Include\\' + win_kit_10_version + '\\um\\x64']
pass
else:
pass
else:
pass
for inc_dir in win_kit_incs:
ext_args['include_dirs'].append(inc_dir)
for lib_dir in win_kit_libdirs:
ext_args['library_dirs'].append(lib_dir)
win_opengl_libreleases = ['OpenGL32']
for opengl_librelease in win_opengl_libreleases:
ext_args['libraries'].append(opengl_librelease)
# use OpenNI
# use OpenNI2
# add environment PATH : pcl/bin, OpenNI2/Tools
# use CUDA?
# CUDA_PATH
# CUDA_PATH_V7_5
# CUDA_PATH_V8_0
for k, v in os.environ.items():
# print("{key} : {value}".format(key=k, value=v))
if k == "CUDA_PATH":
cuda_root = v
break
else:
    print('CUDA_PATH not found; building without CUDA support.')
pass
# ext_args['define_macros'].append(('EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET', '1'))
# define_macros=[('BOOST_NO_EXCEPTIONS', 'None')],
# debugs = [('EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET', '1'), ('BOOST_NO_EXCEPTIONS', 'None')]
# _CRT_SECURE_NO_WARNINGS : suppress the Windows CRT security warnings
defines = [('EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET',
'1'), ('_CRT_SECURE_NO_WARNINGS', '1')]
for define in defines:
ext_args['define_macros'].append(define)
# ext_args['extra_compile_args'].append('/DWIN32')
# ext_args['extra_compile_args'].append('/D_WINDOWS')
# ext_args['extra_compile_args'].append('/W3')
# ext_args['extra_compile_args'].append('/GR')
ext_args['extra_compile_args'].append('/EHsc')
# FW: Link time errors in RangeImage (with /clr)
# http://www.pcl-users.org/FW-Link-time-errors-in-RangeImage-with-clr-td3581422.html
# ext_args['extra_compile_args'].append('/clr:nostdlib')
# OpenNI2?(+Python3)
# https://ci.appveyor.com/project/KazuakiM/vim-ms-translator/branch/master
# ext_args['extra_compile_args'].append('/DDYNAMIC_MSVCRT_DLL=\"msvcr100.dll\"')
# ext_args['extra_compile_args'].append('/DDYNAMIC_MSVCRT_DLL=\"msvcr100.dll\"')
# NG
# ext_args['extra_compile_args'].append('/NODEFAULTLIB:msvcrtd')
# https://blogs.msdn.microsoft.com/vcblog/2015/03/03/introducing-the-universal-crt/
# runtime library flag (the compiler default would be /MT?)
# the all-in-one package's VTK libraries appear to need the dynamic runtime (/MD)
ext_args['extra_compile_args'].append('/MD')
# ext_args['extra_compile_args'].append('/MDd')
# custom build module(static build)
# ext_args['extra_compile_args'].append('/MTd')
# ext_args['extra_compile_args'].append('/MT')
# use OpenMP
# https://stackoverflow.com/questions/7844830/cython-openmp-compiler-flag
# ext_args['extra_compile_args'].append('/openmp')
# ext_args['extra_link_args'].append('/openmp')
# Debug View
# print(ext_args)
if pcl_version == '-1.6':
module = [Extension("pcl._pcl", ["pcl/_pcl.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
# Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
# Extension("pcl.pcl_visualization", ["pcl/pcl_visualization_160.pyx"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
elif pcl_version == '-1.7':
module = [Extension("pcl._pcl", ["pcl/_pcl_172.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
elif pcl_version == '-1.8':
module = [Extension("pcl._pcl", ["pcl/_pcl_180.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
# conda
# Extension("pcl.pcl_visualization", [
# "pcl/pcl_visualization.pyx", "pcl/vtkInteracterWrapper.cpp"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
elif pcl_version == '-1.9':
module = [Extension("pcl._pcl", ["pcl/_pcl_190.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
# conda
# Extension("pcl.pcl_visualization", [
# "pcl/pcl_visualization.pyx", "pcl/vtkInteracterWrapper.cpp"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
else:
    print('PCL is not installed, or pkg-config could not locate it.')
sys.exit(1)
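# The Extension list assembled above is eventually handed to setuptools; a
# hedged sketch of that call (the real invocation appears further down in
# this file and may pass different arguments):
#
#   setup(name='python-pcl', ext_modules=module,
#         cmdclass={'build_ext': build_ext}, data_files=data_files)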
# Copy the PCL dlls to a local subfolder so that they can be added to the
# package through the data_files option
listDlls = []
if not os.path.isdir('./dlls'):
os.mkdir('./dlls')
for dll in libreleases:
pathDll = find_library(dll)
    if pathDll is not None:
shutil.copy2(pathDll, './dlls')
listDlls.append(os.path.join('.\\dlls', dll+'.dll'))
# the path is relative to the python root folder
data_files = [('Lib/site-packages/pcl', listDlls)]
else:
# Not 'Windows'
if sys.platform == 'darwin':
os.environ['ARCHFLAGS'] = ''
# Try to find PCL. XXX we should only do this when trying to build or install.
PCL_SUPPORTED = ["-1.9", "-1.8", "-1.7", "-1.6", ""] # in order of preference
for pcl_version in PCL_SUPPORTED:
if subprocess.call(['pkg-config', 'pcl_common%s' % pcl_version]) == 0:
break
else:
print("%s: error: cannot find PCL, tried" %
sys.argv[0], file=sys.stderr)
for version in PCL_SUPPORTED:
print(' pkg-config pcl_common%s' % version, file=sys.stderr)
sys.exit(1)
# Find build/link options for PCL using pkg-config.
# version 1.6
# pcl_libs = ["common", "features", "filters", "io", "kdtree", "octree",
# "registration", "sample_consensus", "search", "segmentation",
# "surface", "tracking", "visualization"]
# version 1.7
if pcl_version == '-1.7':
pcl_libs = ["common", "features", "filters", "geometry",
"io", "kdtree", "keypoints", "octree", "outofcore", "people",
"recognition", "registration", "sample_consensus", "search",
"segmentation", "surface", "tracking", "visualization"]
else:
# version 1.8
pcl_libs = ["2d", "common", "features", "filters", "geometry",
"io", "kdtree", "keypoints", "ml", "octree", "outofcore", "people",
"recognition", "registration", "sample_consensus", "search",
"segmentation", "stereo", "surface", "tracking", "visualization"]
pcl_libs = ["pcl_%s%s" % (lib, pcl_version) for lib in pcl_libs]
ext_args = defaultdict(list)
ext_args['include_dirs'].append(numpy.get_include())
for flag in pkgconfig('--cflags-only-I'):
ext_args['include_dirs'].append(flag[2:])
# OpenNI?
# "-I/usr/include/openni"
# "-I/usr/include/openni"
# /usr/include/ni
ext_args['include_dirs'].append('/usr/include/ni')
# ext_args['library_dirs'].append()
# ext_args['libraries'].append()
# OpenNI2
ext_args['include_dirs'].append('/usr/include/openni2')
# VTK use
if sys.platform == 'darwin':
# pcl 1.8.1(MacOSX)
# if pcl_version == '-1.8':
# vtk_version = '8.0'
# ext_args['include_dirs'].append('/usr/local/include/vtk-' + vtk_version)
# ext_args['library_dirs'].append('/usr/local/lib')
# ext_args['include_dirs'].append('/usr/local/Cellar/vtk/8.0.1/include')
# ext_args['library_dirs'].append('/usr/local/Cellar/vtk/8.0.1/lib')
if pcl_version == '-1.9':
# pcl 1.9.1
# build install?
# vtk_version = '8.1'
# vtk_include_dir = os.path.join('/usr/local' ,'include/vtk-8.1')
# vtk_library_dir = os.path.join('/usr/local', 'lib')
# homebrew(MacOSX homebrew)
# (pcl 1.9.1_3)
# vtk_version = '8.1.2_3'
# vtk_include_dir = os.path.join('/usr/local/Cellar/vtk', vtk_version ,'include/vtk-8.2')
# 2019/05/08 check(pcl 1.9.1_4)
vtk_version = '8.2.0'
vtk_include_dir = os.path.join('/usr/local/Cellar/vtk', vtk_version ,'include/vtk-8.2')
vtk_library_dir = os.path.join('/usr/local/Cellar/vtk', vtk_version, 'lib')
pass
else:
# pcl 1.7.0?(Ubuntu 14.04)
# vtk_version = '5.8'
# ext_args['include_dirs'].append('/usr/include/vtk-' + vtk_version)
# ext_args['library_dirs'].append('/usr/lib')
# pcl 1.7.2(Ubuntu 16.04)(xenial)
if pcl_version == '-1.7':
vtk_version = '6.2'
            vtk_include_dir = '/usr/include/vtk-' + vtk_version
            vtk_library_dir = '/usr/lib'
elif pcl_version == '-1.8':
# pcl 1.8.0/1?(Ubuntu 18.04)(melodic)
vtk_version = '6.3'
# pcl 1.8.1?
# vtk_version = '8.0'
            vtk_include_dir = '/usr/include/vtk-' + vtk_version
            vtk_library_dir = '/usr/lib'
elif pcl_version == '-1.9':
# pcl 1.9.1
# build install?
vtk_version = '8.1'
            vtk_include_dir = '/usr/include/vtk-' + vtk_version
            vtk_library_dir = '/usr/lib'
else:
pass
# other
# pcl 1.9.1(Conda)
# vtk_version = '8.1'
# vtk_include_dir = os.path.join(os.environ["PREFIX"] ,'include/vtk-8.1')
# vtk_library_dir = os.path.join(os.environ["PREFIX"], 'lib')
ext_args['include_dirs'].append(vtk_include_dir)
ext_args['library_dirs'].append(vtk_library_dir)
if vtk_version == '5.8':
vtklibreleases = ['vtkInfovis', 'MapReduceMPI', 'vtkNetCDF', 'QVTK', 'vtkNetCDF_cxx', 'vtkRendering', 'vtkViews', 'vtkVolumeRendering', 'vtkWidgets', 'mpistubs', 'vtkalglib', 'vtkCharts', 'vtkexoIIc', 'vtkexpat', 'vtkCommon', 'vtkfreetype', 'vtkDICOMParser', 'vtkftgl', 'vtkFiltering', 'vtkhdf5', 'vtkjpeg', 'vtkGenericFiltering', 'vtklibxml2', 'vtkGeovis', 'vtkmetaio', 'vtkpng', 'vtkGraphics', 'vtkproj4', 'vtkHybrid', 'vtksqlite', 'vtksys', 'vtkIO', 'vtktiff', 'vtkImaging', 'vtkverdict', 'vtkzlib']
elif vtk_version == '6.3':
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLIC-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksys-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version]
elif vtk_version == '7.0':
        # apt package? (is this VTK build using OpenGL?)
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkexpat-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkfreetype-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkhdf5-' + vtk_version, 'vtkhdf5_hl-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkjpeg-' + vtk_version, 'vtkjsoncpp-' + vtk_version, 'vtklibxml2-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkNetCDF-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkpng-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLIC-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtktiff-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkzlib-' + 
vtk_version]
elif vtk_version == '8.0':
# vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkhdf5-' + vtk_version, 'vtkhdf5_hl-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtkjsoncpp-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkNetCDF-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PSOpenGL2-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingOpenGL-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtktiff-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkzlib-' + vtk_version]
        # apt package? (is this VTK build using OpenGL?)
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkDomainsChemistryOpenGL2-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersPoints-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersPython-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersTopology-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkglew-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOExportOpenGL2-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOTecplotTable-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtklibharu-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkPythonInterpreter-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL2-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PS-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingMatplotlib-' + vtk_version, 'vtkRenderingOpenGL2-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL2-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + vtk_version, 'vtkViewsCore-' + 
vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkWrappingTools-' + vtk_version]
elif vtk_version == '8.1':
# pcl_version 1.9.1
# conda or build module, MacOS X
vtklibreleases = ['vtkalglib-' + vtk_version, 'vtkChartsCore-' + vtk_version, 'vtkCommonColor-' + vtk_version, 'vtkCommonComputationalGeometry-' + vtk_version, 'vtkCommonCore-' + vtk_version, 'vtkCommonDataModel-' + vtk_version, 'vtkCommonExecutionModel-' + vtk_version, 'vtkCommonMath-' + vtk_version, 'vtkCommonMisc-' + vtk_version, 'vtkCommonSystem-' + vtk_version, 'vtkCommonTransforms-' + vtk_version, 'vtkDICOMParser-' + vtk_version, 'vtkDomainsChemistry-' + vtk_version, 'vtkDomainsChemistryOpenGL2-' + vtk_version, 'vtkexoIIc-' + vtk_version, 'vtkFiltersAMR-' + vtk_version, 'vtkFiltersCore-' + vtk_version, 'vtkFiltersExtraction-' + vtk_version, 'vtkFiltersFlowPaths-' + vtk_version, 'vtkFiltersGeneral-' + vtk_version, 'vtkFiltersGeneric-' + vtk_version, 'vtkFiltersGeometry-' + vtk_version, 'vtkFiltersHybrid-' + vtk_version, 'vtkFiltersHyperTree-' + vtk_version, 'vtkFiltersImaging-' + vtk_version, 'vtkFiltersModeling-' + vtk_version, 'vtkFiltersParallel-' + vtk_version, 'vtkFiltersParallelImaging-' + vtk_version, 'vtkFiltersPoints-' + vtk_version, 'vtkFiltersProgrammable-' + vtk_version, 'vtkFiltersPython-' + vtk_version, 'vtkFiltersSelection-' + vtk_version, 'vtkFiltersSMP-' + vtk_version, 'vtkFiltersSources-' + vtk_version, 'vtkFiltersStatistics-' + vtk_version, 'vtkFiltersTexture-' + vtk_version, 'vtkFiltersTopology-' + vtk_version, 'vtkFiltersVerdict-' + vtk_version, 'vtkGeovisCore-' + vtk_version, 'vtkgl2ps-' + vtk_version, 'vtkglew-' + vtk_version, 'vtkImagingColor-' + vtk_version, 'vtkImagingCore-' + vtk_version, 'vtkImagingFourier-' + vtk_version, 'vtkImagingGeneral-' + vtk_version, 'vtkImagingHybrid-' + vtk_version, 'vtkImagingMath-' + vtk_version, 'vtkImagingMorphological-' + vtk_version, 'vtkImagingSources-' + vtk_version, 'vtkImagingStatistics-' + vtk_version, 'vtkImagingStencil-' + vtk_version, 'vtkInfovisCore-' + vtk_version, 'vtkInfovisLayout-' + vtk_version, 'vtkInteractionImage-' + vtk_version, 'vtkInteractionStyle-' + vtk_version, 'vtkInteractionWidgets-' + vtk_version, 'vtkIOAMR-' + vtk_version, 'vtkIOCore-' + vtk_version, 'vtkIOEnSight-' + vtk_version, 'vtkIOExodus-' + vtk_version, 'vtkIOExport-' + vtk_version, 'vtkIOExportOpenGL2-' + vtk_version, 'vtkIOGeometry-' + vtk_version, 'vtkIOImage-' + vtk_version, 'vtkIOImport-' + vtk_version, 'vtkIOInfovis-' + vtk_version, 'vtkIOLegacy-' + vtk_version, 'vtkIOLSDyna-' + vtk_version, 'vtkIOMINC-' + vtk_version, 'vtkIOMovie-' + vtk_version, 'vtkIONetCDF-' + vtk_version, 'vtkIOParallel-' + vtk_version, 'vtkIOParallelXML-' + vtk_version, 'vtkIOPLY-' + vtk_version, 'vtkIOSQL-' + vtk_version, 'vtkIOTecplotTable-' + vtk_version, 'vtkIOVideo-' + vtk_version, 'vtkIOXML-' + vtk_version, 'vtkIOXMLParser-' + vtk_version, 'vtklibharu-' + vtk_version, 'vtkmetaio-' + vtk_version, 'vtknetcdfcpp-' + vtk_version, 'vtkoggtheora-' + vtk_version, 'vtkParallelCore-' + vtk_version, 'vtkproj4-' + vtk_version, 'vtkPythonInterpreter-' + vtk_version, 'vtkRenderingAnnotation-' + vtk_version, 'vtkRenderingContext2D-' + vtk_version, 'vtkRenderingContextOpenGL2-' + vtk_version, 'vtkRenderingCore-' + vtk_version, 'vtkRenderingFreeType-' + vtk_version, 'vtkRenderingGL2PSOpenGL2-' + vtk_version, 'vtkRenderingImage-' + vtk_version, 'vtkRenderingLabel-' + vtk_version, 'vtkRenderingLOD-' + vtk_version, 'vtkRenderingMatplotlib-' + vtk_version, 'vtkRenderingOpenGL2-' + vtk_version, 'vtkRenderingVolume-' + vtk_version, 'vtkRenderingVolumeOpenGL2-' + vtk_version, 'vtksqlite-' + vtk_version, 'vtksys-' + vtk_version, 'vtkverdict-' + vtk_version, 'vtkViewsContext2D-' + 
vtk_version, 'vtkViewsCore-' + vtk_version, 'vtkViewsInfovis-' + vtk_version, 'vtkWrappingTools-' + vtk_version]
else:
vtklibreleases = []
for librelease in vtklibreleases:
ext_args['libraries'].append(librelease)
    for flag in pkgconfig('--cflags-only-other'):
        if flag.startswith('-D'):
            # pkg-config may emit '-DNAME=VALUE' or a bare '-DNAME';
            # partition handles both without raising on a missing '='
            macro, _, value = flag[2:].partition('=')
            ext_args['define_macros'].append((macro, value or None))
        else:
            ext_args['extra_compile_args'].append(flag)
# clang?
# https://github.com/strawlab/python-pcl/issues/129
# gcc base libc++, clang base libstdc++
# ext_args['extra_compile_args'].append("-stdlib=libstdc++")
# ext_args['extra_compile_args'].append("-stdlib=libc++")
if sys.platform == 'darwin':
# not use gcc?
# ext_args['extra_compile_args'].append("-stdlib=libstdc++")
# clang(min : 10.7?/10.9?)
# minimum deployment target of OS X 10.9
ext_args['extra_compile_args'].append("-stdlib=libc++")
ext_args['extra_compile_args'].append("-mmacosx-version-min=10.9")
ext_args['extra_link_args'].append("-stdlib=libc++")
ext_args['extra_link_args'].append("-mmacosx-version-min=10.9")
# vtk error : not set override function error.
ext_args['extra_compile_args'].append("-std=c++11")
# mac os using openmp
# https://iscinumpy.gitlab.io/post/omp-on-high-sierra/
# before setting.
# $ brew install libomp
# ext_args['extra_compile_args'].append('-fopenmp -Xpreprocessor')
# ext_args['extra_link_args'].append('-fopenmp -Xpreprocessor -lomp')
pass
else:
ext_args['extra_compile_args'].append("-std=c++11")
if platform.machine() == 'x86_64':
ext_args['library_dirs'].append("/usr/lib/x86_64-linux-gnu/")
elif platform.machine() == 'aarch64':
ext_args['library_dirs'].append("/usr/lib/aarch64-linux-gnu/")
# gcc? use standard library
# ext_args['extra_compile_args'].append("-stdlib=libstdc++")
# ext_args['extra_link_args'].append("-stdlib=libstdc++")
# clang use standard library
# ext_args['extra_compile_args'].append("-stdlib=libc++")
# ext_args['extra_link_args'].append("-stdlib=libc++")
# using openmp
# ext_args['extra_compile_args'].append('-fopenmp')
# ext_args['extra_link_args'].append('-fopenmp')
pass
for flag in pkgconfig('--libs-only-l'):
if flag == "-lflann_cpp-gd":
print(
"skipping -lflann_cpp-gd (see https://github.com/strawlab/python-pcl/issues/29")
continue
ext_args['libraries'].append(flag[2:])
for flag in pkgconfig('--libs-only-L'):
ext_args['library_dirs'].append(flag[2:])
for flag in pkgconfig('--libs-only-other'):
ext_args['extra_link_args'].append(flag)
# grabber?
# -lboost_system
# ext_args['extra_link_args'].append('-lboost_system')
# MacOSX?
# ext_args['extra_link_args'].append('-lboost_system_mt')
# ext_args['extra_link_args'].append('-lboost_bind')
# Fix compile error on Ubuntu 12.04 (e.g., Travis-CI).
ext_args['define_macros'].append(
("EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET", "1"))
if pcl_version == '-1.6':
module = [Extension("pcl._pcl", ["pcl/_pcl.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
# Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
Extension("pcl.pcl_visualization", ["pcl/pcl_visualization_160.pyx"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
elif pcl_version == '-1.7':
module = [Extension("pcl._pcl", ["pcl/_pcl_172.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
elif pcl_version == '-1.8':
module = [Extension("pcl._pcl", ["pcl/_pcl_180.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
elif pcl_version == '-1.9':
module = [Extension("pcl._pcl", ["pcl/_pcl_190.pyx", "pcl/minipcl.cpp", "pcl/ProjectInliers.cpp"], language="c++", **ext_args),
Extension("pcl.pcl_visualization", ["pcl/pcl_visualization.pyx"], language="c++", **ext_args),
# Extension("pcl.pcl_grabber", ["pcl/pcl_grabber.pyx", "pcl/grabber_callback.cpp"], language="c++", **ext_args),
# debug
# gdb_debug=True,
]
else:
    print('PCL is not installed or pkg-config could not find it.')
sys.exit(1)
listDlls = []
data_files = None
setup(name='python-pcl',
description='Python bindings for the Point Cloud Library (PCL). using Cython.',
url='http://github.com/strawlab/python-pcl',
version='0.3.0rc1',
author='John Stowers',
author_email='john.stowers@gmail.com',
maintainer='Tooru Oonuma',
maintainer_email='t753github@gmail.com',
license='BSD',
packages=[
"pcl",
# "pcl.pcl_visualization",
],
zip_safe=False,
setup_requires=setup_requires,
install_requires=install_requires,
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
tests_require=['mock', 'nose'],
ext_modules=module,
cmdclass={'build_ext': build_ext},
data_files=data_files
)
| 97.190265
| 3,707
| 0.669804
|
8e835131f0cd1fd040b14b3ee3cefdc0fa289b33
| 1,117
|
py
|
Python
|
setup.py
|
mwilliamson/python-cobble
|
2212d04bf51e8fb1b1c05998614c06023305cbd0
|
[
"BSD-2-Clause"
] | 6
|
2015-08-08T16:34:59.000Z
|
2020-01-07T22:10:04.000Z
|
setup.py
|
dougmassay/python-cobble
|
2212d04bf51e8fb1b1c05998614c06023305cbd0
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
dougmassay/python-cobble
|
2212d04bf51e8fb1b1c05998614c06023305cbd0
|
[
"BSD-2-Clause"
] | 2
|
2015-11-08T21:48:34.000Z
|
2017-07-20T20:37:10.000Z
|
#!/usr/bin/env python
import os
from setuptools import setup
import sys
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='cobble',
version='0.1.3',
description='Create data objects',
long_description=read("README.rst"),
author='Michael Williamson',
author_email='mike@zwobble.org',
url='http://github.com/mwilliamson/python-cobble',
keywords="data object case class",
packages=['cobble'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: OS Independent',
],
)
| 31.027778
| 70
| 0.612355
|
1c7e1888a0171272023e97a155919bcb2d2fd51d
| 154
|
py
|
Python
|
makewiki/forms.py
|
franklin-phan/makewikiv2
|
5bb2dce694e2a2fffcab5a30355be5fa57718c55
|
[
"MIT"
] | null | null | null |
makewiki/forms.py
|
franklin-phan/makewikiv2
|
5bb2dce694e2a2fffcab5a30355be5fa57718c55
|
[
"MIT"
] | 5
|
2020-06-06T01:32:33.000Z
|
2022-02-10T09:25:44.000Z
|
makewiki/forms.py
|
franklin-phan/makewikiv2
|
5bb2dce694e2a2fffcab5a30355be5fa57718c55
|
[
"MIT"
] | null | null | null |
from django import forms
class FriendlyForm(forms.Form):
first_name = forms.CharField(max_length=100)
last_name = forms.CharField(max_length=100)
| 30.8
| 48
| 0.779221
|
98410bf4e92206f97fb5860918b86d698d8e401e
| 1,241
|
py
|
Python
|
0401-0500/0407-Trapping Rain Water II/0407-Trapping Rain Water II.py
|
jiadaizhao/LeetCode
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
[
"MIT"
] | 49
|
2018-05-05T02:53:10.000Z
|
2022-03-30T12:08:09.000Z
|
0401-0500/0407-Trapping Rain Water II/0407-Trapping Rain Water II.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 11
|
2017-12-15T22:31:44.000Z
|
2020-10-02T12:42:49.000Z
|
0401-0500/0407-Trapping Rain Water II/0407-Trapping Rain Water II.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 28
|
2017-12-05T10:56:51.000Z
|
2022-01-26T18:18:27.000Z
|
import heapq
from typing import List
class Solution:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
m = len(heightMap)
if m == 0:
return 0
n = len(heightMap[0])
if n == 0:
return 0
visited = [[False]*n for _ in range(m)]
pq = []
for i in range(m):
heapq.heappush(pq, (heightMap[i][0], i, 0))
visited[i][0] = True
if n > 1:
heapq.heappush(pq, (heightMap[i][n - 1], i, n - 1))
visited[i][n - 1] = True
for j in range(1, n - 1):
heapq.heappush(pq, (heightMap[0][j], 0, j))
visited[0][j] = True
if m > 1:
heapq.heappush(pq, (heightMap[m - 1][j], m - 1, j))
visited[m - 1][j] = True
        vol = 0
        # Min-heap flood fill from the border inward: pop the lowest boundary
        # cell; any unvisited neighbour lower than it traps water up to that
        # cell's height, and it joins the boundary at max(h, its own height).
        while pq:
h, row, col = heapq.heappop(pq)
for nr, nc in (row-1, col), (row+1, col), (row, col-1), (row, col+1):
if 0 <= nr < m and 0 <= nc < n and (not visited[nr][nc]):
heapq.heappush(pq, (max(h, heightMap[nr][nc]), nr, nc))
visited[nr][nc] = True
vol += max(h - heightMap[nr][nc], 0)
return vol
| 36.5
| 81
| 0.424658
|
a23e9d1041fb419479de9006470ac81832acafc3
| 2,032
|
py
|
Python
|
FastAutoAugment/nas/evaluate.py
|
sytelus/fast-autoaugment
|
a53708699dce1233ce2a0bf0416ae2278007d506
|
[
"MIT"
] | null | null | null |
FastAutoAugment/nas/evaluate.py
|
sytelus/fast-autoaugment
|
a53708699dce1233ce2a0bf0416ae2278007d506
|
[
"MIT"
] | null | null | null |
FastAutoAugment/nas/evaluate.py
|
sytelus/fast-autoaugment
|
a53708699dce1233ce2a0bf0416ae2278007d506
|
[
"MIT"
] | null | null | null |
from typing import Optional
import torch
from ..common.trainer import Trainer
from ..common.config import Config
from ..common.common import get_logger
from ..common import data
from .model_desc import ModelDesc
from .micro_builder import MicroBuilder
from . import nas_utils
def eval_arch(conf_eval:Config, micro_builder:Optional[MicroBuilder]):
logger = get_logger()
# region conf vars
conf_loader = conf_eval['loader']
save_filename = conf_eval['save_filename']
conf_model_desc = conf_eval['model_desc']
conf_checkpoint = conf_eval['checkpoint']
resume = conf_eval['resume']
conf_train = conf_eval['trainer']
final_desc_filename = conf_eval['final_desc_filename']
full_desc_filename = conf_eval['full_desc_filename']
# endregion
# load model desc file to get template model
template_model_desc = ModelDesc.load(final_desc_filename)
device = torch.device(conf_eval['device'])
if micro_builder:
micro_builder.register_ops()
model, checkpoint = nas_utils.model_and_checkpoint(
conf_checkpoint, resume, full_desc_filename,
conf_model_desc, device,
aux_tower=True,
affine=True, droppath=True,
template_model_desc=template_model_desc)
# get data
train_dl, _, test_dl = data.get_data(conf_loader)
assert train_dl is not None and test_dl is not None
trainer = Trainer(conf_train, model, device, checkpoint, aux_tower=True)
trainer.fit(train_dl, test_dl)
# save metrics
train_metrics, test_metrics = trainer.get_metrics()
train_metrics.save('eval_train_metrics')
if test_metrics:
test_metrics.save('eval_test_metrics')
# save model
save_path = model.save(save_filename)
if save_path:
logger.info(f"Model saved in {save_path}")
else:
logger.info("Model is not saved because file path config not set")
| 29.882353
| 76
| 0.677165
|
a7c3e2c19f22aa65d1560768808ed28bbcc874cb
| 1,196
|
py
|
Python
|
katrain/gui/sound.py
|
NhanHo/katrain
|
070420941c117e2ca2fdff8e0c7521566149e760
|
[
"MIT"
] | 1
|
2022-01-16T04:22:59.000Z
|
2022-01-16T04:22:59.000Z
|
katrain/gui/sound.py
|
PeonyWhite/katrain
|
070420941c117e2ca2fdff8e0c7521566149e760
|
[
"MIT"
] | null | null | null |
katrain/gui/sound.py
|
PeonyWhite/katrain
|
070420941c117e2ca2fdff8e0c7521566149e760
|
[
"MIT"
] | 1
|
2022-03-08T12:23:04.000Z
|
2022-03-08T12:23:04.000Z
|
from kivy.clock import Clock
from kivymd.app import MDApp
from kivy.core.audio import SoundLoader
from kivy.utils import platform
cached_sounds = {}
# prefer ffpyplayer on linux and other platforms, avoid gst everywhere,
# and avoid ffpyplayer on windows/macos
ranking = [("ffpy", 98 if platform in ["win", "macosx"] else -2), ("sdl", -1), ("gst", 99), ("", 0)]
try:
SoundLoader._classes.sort(key=lambda cls: [v for k, v in ranking if k in cls.__name__.lower()][0])
except Exception as e:
print("Exception sorting sound loaders: ", e) # private vars, so could break with versions etc
def play_sound(file, volume=1, cache=True):
def _play(sound):
if sound:
sound.play()
sound.seek(0)
app = MDApp.get_running_app()
if app and app.gui and app.gui.config("timer/sound"):
sound = cached_sounds.get(file)
if sound is None:
sound = SoundLoader.load(file)
if cache:
cached_sounds[file] = sound
if sound is not None:
sound.volume = volume
Clock.schedule_once(lambda _dt: _play(sound), 0)
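# Example (illustrative; the file path is hypothetical):
#   play_sound("katrain/sounds/stone.wav", volume=0.5)
# The first call loads and caches the sound; later calls reuse the cached
# object and just reschedule playback on the Kivy clock.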
def stop_sound(file):
sound = cached_sounds.get(file)
if sound:
sound.stop()
| 31.473684
| 102
| 0.635452
|
9a9bba7149f3be6c0985c894daf8b09d35092064
| 6,448
|
py
|
Python
|
qa327/backend.py
|
EduardVar/BrainBench
|
abc26c5a6e7492e5c0ef03457c91175b0bb8ba41
|
[
"MIT"
] | null | null | null |
qa327/backend.py
|
EduardVar/BrainBench
|
abc26c5a6e7492e5c0ef03457c91175b0bb8ba41
|
[
"MIT"
] | null | null | null |
qa327/backend.py
|
EduardVar/BrainBench
|
abc26c5a6e7492e5c0ef03457c91175b0bb8ba41
|
[
"MIT"
] | 2
|
2021-01-04T04:44:28.000Z
|
2021-01-16T19:41:29.000Z
|
from qa327.models import db, User, Ticket, Order
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import date, datetime
from typing import List, Union, Optional
import re
"""
This file defines all backend logic that interacts with database and other services
"""
def validate_email(email : str):
# RFC 5322 specification: https://emailregex.com/
regex = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
return re.search(regex, email)
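# Examples (illustrative): validate_email("user.name+tag@example.co") returns
# a match object; validate_email("not-an-email") returns None.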
def validate_name(name):
    # note: the chained comparison `2 > len(name) > 20` is always False;
    # check both bounds explicitly instead
    if not (2 <= len(name) <= 20): return "Username must be between 2 and 20 characters."
if not name.isalnum(): return "Name must be alphanumeric only."
return None
def validate_password(password : str):
pkg = {'state': True, 'msg': ''}
regex = r"(^(?=.*[a-z])(?=.*[A-Z])(?=.*[@$!%*?&])[A-Za-z\d@$!%*?&]{6,}$)"
    if len(password) < 6:
        pkg['state'] = False
        pkg['msg'] = "Password length must be at least 6."
    elif re.search(regex, password) is None:
        pkg['state'] = False
        pkg['msg'] = "Your password must meet the required complexity: minimum length 6, at least one upper case, at least one lower case, and at least one special character (@$!%*?&)"
return pkg
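# Examples (illustrative):
#   validate_password("abc")       -> {'state': False, 'msg': 'Password length must be at least 6.'}
#   validate_password("Passw0rd!") -> {'state': True, 'msg': ''}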
def get_user(email : str) -> Optional[User]:
"""
Get a user by a given email
:param email: the email of the user
:return: a user that has the matched email address
"""
user = User.query.filter_by(email=email).first()
return user
def add_user_funds(email : str, amount) -> None:
user = get_user(email)
user.balance += amount
db.session.commit()
def login_user(email, password):
"""
Check user authentication by comparing the password
:param email: the email of the user
:param password: the password input
:return: the user if login succeeds
:return: message "email/password combination incorrect" if login fails
"""
    # look the user up by email and verify the supplied password against the stored hash
email = email.strip()
password = password.strip()
user = get_user(email)
if not user or not check_password_hash(user.password, password):
return None
return user
def register_user(email : str, name : str, password : str, password2 : str):
"""
Register the user to the database
:param email: the email of the user
:param name: the name of the user
:param password: the password of user
:param password2: another password input to make sure the input is correct
:return: an error message if there is any, or None if register succeeds
"""
email = email.strip()
name = name.strip().lower()
password = password.strip()
password2 = password2.strip()
user = User.query.filter_by(email=email).first()
if user:
return "This email is already in use."
    name_validation_error = validate_name(name)
    if name_validation_error is not None:
        return name_validation_error
password_validation_error = validate_password(password)
if not password_validation_error['state']:
return password_validation_error['msg']
if password != password2:
return "The passwords do not match."
if not validate_email(email):
return 'Invalid Email.'
hashed_pw = generate_password_hash(password, method='sha256')
# store the encrypted password rather than the plain password
new_user = User(email=email, name=name, password=hashed_pw, balance=5000)
db.session.add(new_user)
db.session.commit()
return None
def get_available_tickets(user : User = None) -> List[Ticket]:
    if user:
        # .all() materializes the query so the declared List return type holds
        return Ticket.query.filter_by(creator=user.id).all()
    return Ticket.query.all()
def check_if_expired(ticket : Ticket) -> bool:
return ticket.date <= date.today().strftime("%Y%m%d")
def validate_ticket_inputs(name, price, day : str, amount, user):
if not bool(re.search(r'^[A-Za-z0-9 ]*$', name)):
return "Name must be alphanumeric"
if not (len(name) in range(6, 61)):
return "Name length must be between 6 and 60 characters"
if datetime.strptime(day, '%Y%m%d').date() < date.today():
return "This ticket has expired"
if not (amount in range(1, 101)):
return "Please select 1 to 100 tickets"
return None
def buy_ticket(name : str, price : float, day : str, amount : int, user : User) -> Union[str, None]:
    errors = validate_ticket_inputs(name, price, day, amount, user)
    if errors is not None:
        return errors
    price *= amount
    # add the 35% service fee and the 5% tax on top of the base price
    price += (price * 0.35) + (price * 0.05)
    print(f'User Balance: {user.balance}')
    print(f'Ticket Price: {price}')
    if user.balance < price:
        return "You do not have enough funds to purchase this"
    ticket = Ticket.query.filter_by(name=name).filter_by(date=day).first()
    if ticket is None:
        return "The requested ticket was not found"
    if ticket.quantity < amount:
        return "There are not enough tickets available"
    ticket.quantity -= amount
    user.balance -= price
    order = Order(user_id=user.id, ticket_id=ticket.id, quantity=amount)
    db.session.add(order)
    db.session.commit()
def sell_ticket(name : str, price : float, day : str, amount : int, user : User) -> Union[str, None]:
    errors = validate_ticket_inputs(name, price, day, amount, user)
    if errors is not None:
        return errors
    # use a numeric comparison rather than `price in range(...)`, which is
    # always False for non-integer prices
    if not (10 <= price <= 100):
        return "Please enter an amount between 10 and 100"
    if Ticket.query.filter_by(name=name).filter_by(date=day).first() is not None:
        return "There is a ticket already specified"
    ticket = Ticket(name=name, price=price, date=day, creator=user.id, quantity=amount)
    db.session.add(ticket)
    db.session.commit()
def update_ticket(name : str, price : float, day : str, amount : int, user : User, ticket_id : int) -> Union[str, None]:
    errors = validate_ticket_inputs(name, price, day, amount, user)
    if errors is not None:
        return errors
    if not (10 <= price <= 100):
        return "Please enter an amount between 10 and 100"
    ticket = Ticket.query.filter_by(id=ticket_id).first()
    if ticket is None:
        return "The requested ticket was not found"
    ticket.name = name
    ticket.price = price
    ticket.quantity = amount
    ticket.date = day.replace("/", "")
    db.session.commit()
def clean_database():
db.session.query(Ticket).delete()
db.session.commit()
| 30.704762
| 183
| 0.66067
|
1d06240994456fc8dfb552b14ce6f1186bbf36e0
| 461
|
py
|
Python
|
cannabis_api/api/migrations/0002_auto_20190409_2107.py
|
abyrne55/cs411-a3
|
919f2d77ecd2e97c4b548d441eddda0248a640b1
|
[
"MIT"
] | null | null | null |
cannabis_api/api/migrations/0002_auto_20190409_2107.py
|
abyrne55/cs411-a3
|
919f2d77ecd2e97c4b548d441eddda0248a640b1
|
[
"MIT"
] | null | null | null |
cannabis_api/api/migrations/0002_auto_20190409_2107.py
|
abyrne55/cs411-a3
|
919f2d77ecd2e97c4b548d441eddda0248a640b1
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2019-04-09 21:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='strain',
name='strain_type',
field=models.CharField(blank=True, choices=[('Sativa', 'Sativa'), ('Hybrid', 'Hybrid'), ('Indica', 'Indica')], max_length=6),
),
]
| 24.263158
| 137
| 0.587852
|
560691c5ae6da26984d6a99927d2093b0e4227fe
| 15,861
|
py
|
Python
|
flow/utils/aimsun/run.py
|
syuntoku14/flow
|
3a1157cde31d0b7d6a3cc2f91eef0ec9ea53575e
|
[
"MIT"
] | null | null | null |
flow/utils/aimsun/run.py
|
syuntoku14/flow
|
3a1157cde31d0b7d6a3cc2f91eef0ec9ea53575e
|
[
"MIT"
] | null | null | null |
flow/utils/aimsun/run.py
|
syuntoku14/flow
|
3a1157cde31d0b7d6a3cc2f91eef0ec9ea53575e
|
[
"MIT"
] | null | null | null |
# flake8: noqa
import flow.config as config
import sys
import os
sys.path.append(os.path.join(config.AIMSUN_NEXT_PATH,
'programming/Aimsun Next API/AAPIPython/Micro'))
import flow.utils.aimsun.constants as ac
import AAPI as aimsun_api
from AAPI import *
from PyANGKernel import *
import socket
import struct
from thread import start_new_thread
import numpy as np
PORT = 9999
entered_vehicles = []
exited_vehicles = []
def send_message(conn, in_format, values):
"""Send a message to the client.
    If the message is a string, it is sent in segments of length 256 (if the
    string is longer than that) and concatenated on the client end.
Parameters
----------
conn : socket.socket
socket for server connection
in_format : str
format of the input structure
values : tuple of Any
commands to be encoded and issued to the client
"""
if in_format == 'str':
packer = struct.Struct(format='i')
values = values[0]
# when the message is too large, send value in segments and inform the
# client that additional information will be sent. The value will be
# concatenated on the other end
while len(values) > 256:
# send the next set of data
conn.send(values[:256])
values = values[256:]
# wait for a reply
data = None
while data is None:
data = conn.recv(2048)
# send a not-done signal
packed_data = packer.pack(*(1,))
conn.send(packed_data)
# send the remaining components of the message (which is of length less
# than or equal to 256)
conn.send(values)
# wait for a reply
data = None
while data is None:
data = conn.recv(2048)
# send a done signal
packed_data = packer.pack(*(0,))
conn.send(packed_data)
else:
packer = struct.Struct(format=in_format)
packed_data = packer.pack(*values)
conn.send(packed_data)
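def _recv_chunked_string(client_sock):
    """Illustrative client-side counterpart to send_message's 'str' branch.

    This sketch is not part of the original module; `client_sock` is a
    hypothetical connected socket on the Flow side. It collects 256-byte
    segments, acknowledging each one, until the server's done signal (0).
    """
    unpacker = struct.Struct(format='i')
    result = ''
    while True:
        result += client_sock.recv(256)
        client_sock.send('ack')  # any reply releases the next segment
        signal, = unpacker.unpack(client_sock.recv(unpacker.size))
        if signal == 0:  # 1 means more segments follow, 0 means done
            return result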
def retrieve_message(conn, out_format):
"""Retrieve a message from the client.
Parameters
----------
conn : socket.socket
socket for server connection
out_format : str or None
format of the output structure
Returns
-------
Any
received message
"""
unpacker = struct.Struct(format=out_format)
try:
data = conn.recv(unpacker.size)
unpacked_data = unpacker.unpack(data)
finally:
pass
return unpacked_data
def threaded_client(conn):
    """Service one simulation step's worth of commands from the Flow client."""
    # send feedback that the connection is active
    conn.send('Ready.')
done = False
while not done:
# receive the next message
data = conn.recv(2048)
if data is not None:
# if the message is empty, search for the next message
if data == '':
continue
# convert to integer
data = int(data)
# if the simulation step is over, terminate the loop and let
# the step be executed
if data == ac.SIMULATION_STEP:
send_message(conn, in_format='i', values=(0,))
done = True
# Note that alongside this, the process is closed in Flow,
# thereby terminating the socket connection as well.
elif data == ac.SIMULATION_TERMINATE:
send_message(conn, in_format='i', values=(0,))
done = True
elif data == ac.ADD_VEHICLE:
send_message(conn, in_format='i', values=(0,))
edge, lane, type_id, pos, speed, next_section = \
retrieve_message(conn, 'i i i f f i')
# 1 if tracked, 0 otherwise
tracking = 1
veh_id = aimsun_api.AKIPutVehTrafficFlow(
edge, lane+1, type_id, pos, speed, next_section,
tracking
)
send_message(conn, in_format='i', values=(veh_id,))
elif data == ac.REMOVE_VEHICLE:
send_message(conn, in_format='i', values=(0,))
veh_id, = retrieve_message(conn, 'i')
aimsun_api.AKIVehTrackedRemove(veh_id)
send_message(conn, in_format='i', values=(0,))
elif data == ac.VEH_SET_SPEED:
send_message(conn, in_format='i', values=(0,))
veh_id, speed = retrieve_message(conn, 'i f')
new_speed = speed * 3.6
# aimsun_api.AKIVehTrackedForceSpeed(veh_id, new_speed)
aimsun_api.AKIVehTrackedModifySpeed(veh_id, new_speed)
send_message(conn, in_format='i', values=(0,))
elif data == ac.VEH_SET_LANE:
conn.send('Set vehicle lane.')
veh_id, target_lane = retrieve_message(conn, 'i i')
aimsun_api.AKIVehTrackedModifyLane(veh_id, target_lane)
send_message(conn, in_format='i', values=(0,))
elif data == ac.VEH_SET_ROUTE:
send_message(conn, in_format='i', values=(0,))
# TODO
elif data == ac.VEH_SET_COLOR:
send_message(conn, in_format='i', values=(0,))
veh_id, r, g, b = retrieve_message(conn, 'i i i i')
# TODO
send_message(conn, in_format='i', values=(0,))
elif data == ac.VEH_GET_ENTERED_IDS:
send_message(conn, in_format='i', values=(0,))
data = None
while data is None:
data = conn.recv(256)
global entered_vehicles
if len(entered_vehicles) == 0:
output = '-1'
else:
output = ':'.join([str(e) for e in entered_vehicles])
send_message(conn, in_format='str', values=(output,))
entered_vehicles = []
elif data == ac.VEH_GET_EXITED_IDS:
send_message(conn, in_format='i', values=(0,))
data = None
while data is None:
data = conn.recv(256)
global exited_vehicles
if len(exited_vehicles) == 0:
output = '-1'
else:
output = ':'.join([str(e) for e in exited_vehicles])
send_message(conn, in_format='str', values=(output,))
exited_vehicles = []
elif data == ac.VEH_GET_TYPE_ID:
send_message(conn, in_format='i', values=(0,))
# get the type ID in flow
type_id = None
while type_id is None:
type_id = conn.recv(2048)
# convert the edge name to an edge name in Aimsun
model = GKSystem.getSystem().getActiveModel()
type_vehicle = model.getType("GKVehicle")
vehicle = model.getCatalog().findByName(
type_id, type_vehicle)
aimsun_type = vehicle.getId()
aimsun_type_pos = AKIVehGetVehTypeInternalPosition(aimsun_type)
send_message(conn, in_format='i', values=(aimsun_type_pos,))
elif data == ac.VEH_GET_STATIC:
send_message(conn, in_format='i', values=(0,))
veh_id, = retrieve_message(conn, 'i')
static_info = aimsun_api.AKIVehGetStaticInf(veh_id)
output = (static_info.report,
static_info.idVeh,
static_info.type,
static_info.length,
static_info.width,
static_info.maxDesiredSpeed,
static_info.maxAcceleration,
static_info.normalDeceleration,
static_info.maxDeceleration,
static_info.speedAcceptance,
static_info.minDistanceVeh,
static_info.giveWayTime,
static_info.guidanceAcceptance,
static_info.enrouted,
static_info.equipped,
static_info.tracked,
static_info.keepfastLane,
static_info.headwayMin,
static_info.sensitivityFactor,
static_info.reactionTime,
static_info.reactionTimeAtStop,
static_info.reactionTimeAtTrafficLight,
static_info.centroidOrigin,
static_info.centroidDest,
static_info.idsectionExit,
static_info.idLine)
send_message(conn,
in_format='i i i f f f f f f f f f f i i i ? '
'f f f f f i i i i',
values=output)
elif data == ac.VEH_GET_TRACKING:
send_message(conn, in_format='i', values=(0,))
veh_id, = retrieve_message(conn, 'i')
tracking_info = aimsun_api.AKIVehTrackedGetInf(veh_id)
output = (
# tracking_info.report,
# tracking_info.idVeh,
# tracking_info.type,
tracking_info.CurrentPos,
tracking_info.distance2End,
tracking_info.xCurrentPos,
tracking_info.yCurrentPos,
tracking_info.zCurrentPos,
tracking_info.xCurrentPosBack,
tracking_info.yCurrentPosBack,
tracking_info.zCurrentPosBack,
tracking_info.CurrentSpeed,
# tracking_info.PreviousSpeed,
tracking_info.TotalDistance,
# tracking_info.SystemGenerationT,
# tracking_info.SystemEntranceT,
tracking_info.SectionEntranceT,
tracking_info.CurrentStopTime,
tracking_info.stopped,
tracking_info.idSection,
tracking_info.segment,
tracking_info.numberLane,
tracking_info.idJunction,
tracking_info.idSectionFrom,
tracking_info.idLaneFrom,
tracking_info.idSectionTo,
tracking_info.idLaneTo)
send_message(conn,
in_format='f f f f f f f f f f f f f i i i i i i '
'i i',
values=output)
elif data == ac.VEH_GET_LEADER:
send_message(conn, in_format='i', values=(0,))
veh_id, = retrieve_message(conn, 'i')
leader = aimsun_api.AKIVehGetLeaderId(veh_id)
send_message(conn, in_format='i', values=(leader,))
elif data == ac.VEH_GET_FOLLOWER:
send_message(conn, in_format='i', values=(0,))
veh_id, = retrieve_message(conn, 'i')
follower = aimsun_api.AKIVehGetFollowerId(veh_id)
send_message(conn, in_format='i', values=(follower,))
elif data == ac.VEH_GET_NEXT_SECTION:
send_message(conn, in_format='i', values=(0,))
veh_id, section = retrieve_message(conn, 'i i')
next_section = AKIVehInfPathGetNextSection(veh_id, section)
send_message(conn, in_format='i', values=(next_section,))
elif data == ac.VEH_GET_ROUTE:
send_message(conn, in_format='i', values=(0,))
# veh_id, = retrieve_message(conn, 'i')
# TODO
elif data == ac.TL_GET_IDS:
send_message(conn, in_format='i', values=(0,))
data = None
while data is None:
data = conn.recv(256)
num_meters = aimsun_api.ECIGetNumberMeterings()
if num_meters == 0:
output = '-1'
else:
meter_ids = []
for i in range(1, num_meters + 1):
struct_metering = ECIGetMeteringProperties(i)
meter_id = struct_metering.Id
meter_ids.append(meter_id)
output = ':'.join([str(e) for e in meter_ids])
send_message(conn, in_format='str', values=(output,))
elif data == ac.TL_SET_STATE:
send_message(conn, in_format='i', values=(0,))
meter_aimsun_id, state = retrieve_message(conn, 'i i')
time = AKIGetCurrentSimulationTime() # simulation time
sim_step = AKIGetSimulationStepTime()
identity = 0
ECIChangeStateMeteringById(
meter_aimsun_id, state, time, sim_step, identity)
send_message(conn, in_format='i', values=(0,))
elif data == ac.TL_GET_STATE:
send_message(conn, in_format='i', values=(0,))
                meter_aimsun_id, = retrieve_message(conn, 'i')  # unpack the 1-tuple
lane_id = 1 # TODO double check
state = ECIGetCurrentStateofMeteringById(
meter_aimsun_id, lane_id)
send_message(conn, in_format='i', values=(state,))
elif data == ac.GET_EDGE_NAME:
send_message(conn, in_format='i', values=(0,))
# get the edge ID in flow
edge = None
while edge is None:
edge = conn.recv(2048)
model = GKSystem.getSystem().getActiveModel()
edge_aimsun = model.getCatalog().findByName(
edge, model.getType('GKSection'))
send_message(conn, in_format='i',
values=(edge_aimsun.getId(),))
# in case the message is unknown, return -1001
else:
send_message(conn, in_format='i', values=(-1001,))
# close the connection
conn.close()
def AAPILoad():
return 0
def AAPIInit():
# set the simulation time to be very large
AKISetEndSimTime(2e6)
return 0
def AAPIManage(time, timeSta, timeTrans, acycle):
# tcp/ip connection from the aimsun process
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('localhost', PORT))
# connect to the Flow instance
server_socket.listen(10)
c, address = server_socket.accept()
# start the threaded process
start_new_thread(threaded_client, (c,))
return 0
def AAPIPostManage(time, timeSta, timeTrans, acycle):
return 0
def AAPIFinish():
return 0
def AAPIUnLoad():
return 0
def AAPIPreRouteChoiceCalculation(time, timeSta):
return 0
def AAPIEnterVehicle(idveh, idsection):
global entered_vehicles
entered_vehicles.append(idveh)
return 0
def AAPIExitVehicle(idveh, idsection):
global exited_vehicles
exited_vehicles.append(idveh)
return 0
def AAPIEnterPedestrian(idPedestrian, originCentroid):
return 0
def AAPIExitPedestrian(idPedestrian, destinationCentroid):
return 0
def AAPIEnterVehicleSection(idveh, idsection, atime):
return 0
def AAPIExitVehicleSection(idveh, idsection, atime):
return 0
| 35.013245
| 79
| 0.531555
|
36d258fa632117a09c96705c2e7efcaf62773a84
| 4,131
|
py
|
Python
|
test/test_redisqlite.py
|
Tejusbharadwaj/nimbella-sdk-python
|
cdd05c656450ef5e92ababd88a6db5c2c51393f8
|
[
"Apache-2.0"
] | 1
|
2021-02-22T20:14:09.000Z
|
2021-02-22T20:14:09.000Z
|
test/test_redisqlite.py
|
Tejusbharadwaj/nimbella-sdk-python
|
cdd05c656450ef5e92ababd88a6db5c2c51393f8
|
[
"Apache-2.0"
] | 16
|
2021-01-27T17:34:40.000Z
|
2021-08-13T17:05:06.000Z
|
test/test_redisqlite.py
|
Tejusbharadwaj/nimbella-sdk-python
|
cdd05c656450ef5e92ababd88a6db5c2c51393f8
|
[
"Apache-2.0"
] | 4
|
2021-01-28T16:47:26.000Z
|
2021-07-20T08:32:10.000Z
|
"""
/**
* Copyright (c) 2020-present, Nimbella, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import unittest
import os
import nimbella
class TestRedisqlite(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['__NIM_REDIS_IP'] = '127.0.0.1'
os.environ['__NIM_REDIS_PASSWORD'] = 'password'
os.system("docker run -d --name redisqlite --rm -p 6379:6379 sciabarracom/redisqlite:v1.0.4 --requirepass password >/dev/null")
@classmethod
def tearDownClass(cls):
os.system("docker kill redisqlite >/dev/null")
def setUp(self):
self.sql = nimbella.esql()
        try:
            self.sql.exec("drop table t")
        except Exception:
            # the table may not exist yet on the first run
            pass
def test_basic(self):
res = self.sql.exec("create table t(i int)")
self.assertEqual(len(res),2)
ins = self.sql.exec("insert into t(i) values(1),(2),(3)")
self.assertEqual(ins, [3,3])
m = self.sql.map("select * from t")
self.assertEqual(m, [{"i":1},{"i":2},{"i":3}])
m1 = self.sql.map("select * from t", limit=1)
self.assertEqual(m1, [{"i":1}])
m2 = self.sql.map("select * from t", limit=2)
self.assertEqual(m2, [{"i":1},{"i":2}])
a = self.sql.arr("select * from t")
self.assertEqual(a, [[1],[2],[3]])
a1 = self.sql.arr("select * from t", limit=1)
self.assertEqual(a1,[[1]])
a2 = self.sql.arr("select * from t", limit=2)
self.assertEqual(a2, [[1],[2]])
def test_with_args(self):
sql = self.sql
assertEqual = self.assertEqual
res = sql.exec("create table t(i int)")
assertEqual(len(res), 2)
ins = sql.exec("insert into t(i) values(?),(?),(?)",1,2,3)
assertEqual(ins,[3,3])
m = sql.map("select * from t where i>?",1)
assertEqual(m,[{"i":2},{"i":3}])
m1 = sql.map("select * from t where i>?",1,limit=1)
assertEqual(m1,[{"i":2}])
a = sql.arr("select * from t where i<?",3)
assertEqual(a,[[1],[2]])
a1 = sql.arr("select * from t where i<?",3,limit=1)
assertEqual(a1,[[1]])
def test_prepared(self):
sql = self.sql
expect = self.assertEqual
sql.exec("create table t(i int, s varchar)")
sel = sql.prep("select s from t where i <?")
expect(type(sel), int)
ins = sql.prep("insert into t(i, s) values(?,?)")
expect(type(ins), int)
sql.exec(ins, 1, 'a')
sql.exec(ins, 2, 'b')
sql.exec(ins, 3, 'c')
m = sql.map(sel, 3)
expect(m, [ { 's': 'a' }, { 's': 'b' } ])
a = sql.arr(sel, 3, limit=1)
expect(a, [ [ 'a' ] ])
ok = sql.prep(sel)
expect(ok, b'OK')
sql.prep(ins)
with self.assertRaises(Exception) as ctx:
sql.prep(sel)
expect(str(ctx.exception), 'invalid prepared statement index')
def test_errors(self):
with self.assertRaises(Exception) as ctx:
self.sql.exec('xxx')
self.assertEqual(str(ctx.exception), 'near "xxx": syntax error')
with self.assertRaises(Exception) as ctx:
self.sql.prep('xxx')
self.assertEqual(str(ctx.exception), 'near "xxx": syntax error')
with self.assertRaises(Exception) as ctx:
self.sql.map('xxx')
self.assertEqual(str(ctx.exception), 'near "xxx": syntax error')
with self.assertRaises(Exception) as ctx:
self.sql.arr('xxx')
self.assertEqual(str(ctx.exception), 'near "xxx": syntax error')
| 31.776923
| 135
| 0.566691
|
ef6645e401a44fac8e9ff25eeb790e95e03ad70e
| 110
|
py
|
Python
|
tests/test_ec2/test_customer_gateways.py
|
andrewgross/moto
|
912c3ceb396e1e460c75d6a0a65f2431800a1583
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ec2/test_customer_gateways.py
|
andrewgross/moto
|
912c3ceb396e1e460c75d6a0a65f2431800a1583
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ec2/test_customer_gateways.py
|
andrewgross/moto
|
912c3ceb396e1e460c75d6a0a65f2431800a1583
|
[
"Apache-2.0"
] | null | null | null |
import boto
import sure # noqa
from moto import mock_ec2
@mock_ec2
def test_customer_gateways():
pass
| 11
| 29
| 0.754545
|
139f3a89d1f101ab7f012cc278ad2bf8186731bd
| 2,470
|
py
|
Python
|
apps/resource/migrations/0001_initial.py
|
Quanfita/QTechCode
|
78a3cac617a63bd46272461e6b1e89411e2fb130
|
[
"MIT"
] | null | null | null |
apps/resource/migrations/0001_initial.py
|
Quanfita/QTechCode
|
78a3cac617a63bd46272461e6b1e89411e2fb130
|
[
"MIT"
] | 9
|
2022-01-16T04:23:33.000Z
|
2022-03-31T20:39:58.000Z
|
apps/resource/migrations/0001_initial.py
|
Quanfita/QTechCode
|
78a3cac617a63bd46272461e6b1e89411e2fb130
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.20 on 2022-01-15 17:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0004_carousel_is_show'),
]
operations = [
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150, verbose_name='资源标题')),
('summary', models.TextField(default='资源摘要等同于网页description内容,请务必填写...', max_length=230, verbose_name='资源摘要')),
('body', models.TextField(verbose_name='资源内容')),
('resource_type', models.CharField(choices=[('1', 'PDF'), ('2', 'ZIP')], default='1', max_length=1, verbose_name='资源格式')),
('img_link', models.CharField(default='/static/resource/img/resource.png', max_length=255, verbose_name='图片地址')),
('create_date', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_date', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('views', models.PositiveIntegerField(default=0, verbose_name='浏览量')),
('download', models.PositiveIntegerField(default=0, verbose_name='下载量')),
('url', models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='资源链接')),
('baidu_url', models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='资源百度网盘链接')),
('baidu_code', models.CharField(blank=True, default='', max_length=10, null=True, verbose_name='资源百度网盘提取码')),
('slug', models.SlugField(unique=True)),
('is_top', models.BooleanField(default=False, verbose_name='置顶')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='提供者')),
('keywords', models.ManyToManyField(help_text='资源关键词,用来作为SEO中keywords,最好使用长尾词,3-4个足够', to='blog.Keyword', verbose_name='资源关键词')),
],
options={
'verbose_name': '资源',
'verbose_name_plural': '资源',
'ordering': ['-create_date'],
},
),
]
| 53.695652
| 145
| 0.617004
|
80481ca7ae5ba55e51ee30de841587028ca0d192
| 1,250
|
py
|
Python
|
tests/unitary/test_approve_all.py
|
curvefi/curve-cross-asset-swaps
|
741701ab0e93de8775079cdc69d638a618196da3
|
[
"MIT"
] | 23
|
2021-01-06T02:52:25.000Z
|
2022-01-04T05:53:42.000Z
|
tests/unitary/test_approve_all.py
|
curvefi/curve-cross-asset-swaps
|
741701ab0e93de8775079cdc69d638a618196da3
|
[
"MIT"
] | 1
|
2021-01-09T14:15:00.000Z
|
2021-01-09T14:15:00.000Z
|
tests/unitary/test_approve_all.py
|
curvefi/curve-cross-asset-swaps
|
741701ab0e93de8775079cdc69d638a618196da3
|
[
"MIT"
] | 5
|
2021-01-11T23:32:15.000Z
|
2022-02-07T00:07:42.000Z
|
def test_approve_all(swap, alice, bob):
assert swap.isApprovedForAll(alice, bob) is False
swap.setApprovalForAll(bob, True, {"from": alice})
assert swap.isApprovedForAll(alice, bob) is True
def test_approve_all_multiple(swap, alice, accounts):
operators = accounts[4:8]
for acct in operators:
assert swap.isApprovedForAll(alice, acct) is False
for acct in operators:
swap.setApprovalForAll(acct, True, {"from": alice})
for acct in operators:
assert swap.isApprovedForAll(alice, acct) is True
def test_revoke_operator(swap, alice, bob):
swap.setApprovalForAll(bob, True, {"from": alice})
assert swap.isApprovedForAll(alice, bob) is True
swap.setApprovalForAll(bob, False, {"from": alice})
assert swap.isApprovedForAll(alice, bob) is False
def test_approval_all_event_fire(swap, alice, bob):
tx = swap.setApprovalForAll(bob, True, {"from": alice})
assert len(tx.events) == 1
assert tx.events["ApprovalForAll"].values() == [alice, bob, True]
def test_operator_approval(swap, alice, bob, charlie, settler_sbtc):
token_id = int(settler_sbtc.address, 16)
swap.setApprovalForAll(bob, True, {"from": alice})
swap.approve(charlie, token_id, {"from": bob})
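# no assertion needed above: bob is an approved operator for alice, so the
# approve() call should succeed on her behalf; if operator approval did not
# carry this right, the call would revert and fail the test (an inference from
# the ERC721-style semantics exercised here, not from the contract source)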
| 33.783784
| 69
| 0.7064
|
1ba8db5f61a8b5a624f0d16ee72405470664ca97
| 120
|
py
|
Python
|
tests/context.py
|
thegreathippo/ezparse
|
1dc3184e74b8b1f85ff5c7248efa78613e65d837
|
[
"MIT"
] | null | null | null |
tests/context.py
|
thegreathippo/ezparse
|
1dc3184e74b8b1f85ff5c7248efa78613e65d837
|
[
"MIT"
] | null | null | null |
tests/context.py
|
thegreathippo/ezparse
|
1dc3184e74b8b1f85ff5c7248efa78613e65d837
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import ezparse
| 20
| 82
| 0.733333
|
98f45678c42eff043f2719f36bc636b0800ec976
| 901
|
bzl
|
Python
|
prometheus/defs.bzl
|
5h4d0w4rt/bazel_prometheus
|
69553b8837b7cdde79a5327d33fd757c0d1fbd9d
|
[
"MIT"
] | null | null | null |
prometheus/defs.bzl
|
5h4d0w4rt/bazel_prometheus
|
69553b8837b7cdde79a5327d33fd757c0d1fbd9d
|
[
"MIT"
] | null | null | null |
prometheus/defs.bzl
|
5h4d0w4rt/bazel_prometheus
|
69553b8837b7cdde79a5327d33fd757c0d1fbd9d
|
[
"MIT"
] | null | null | null |
load(
"@io_bazel_rules_prometheus//prometheus/internal:repositories.bzl",
_prometheus_repositories = "prometheus_repositories",
)
load(
"@io_bazel_rules_prometheus//prometheus/internal:promtool.bzl",
_promtool = "promtool",
_promtool_config_test = "promtool_config_test",
_promtool_rules_test = "promtool_rules_test",
_promtool_unit_test = "promtool_unit_test",
)
load(
"@io_bazel_rules_prometheus//prometheus/internal:prom.bzl",
_prometheus = "prometheus",
)
load(
"@io_bazel_rules_prometheus//prometheus/internal:toolchain.bzl",
_prometheus_toolchains = "prometheus_toolchains",
)
prometheus_toolchains = _prometheus_toolchains
prometheus_repositories = _prometheus_repositories
promtool_unit_test = _promtool_unit_test
promtool_config_test = _promtool_config_test
promtool = _promtool
promtool_rules_test = _promtool_rules_test
prometheus = _prometheus
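# A minimal usage sketch (assuming the rules are vendored under the
# @io_bazel_rules_prometheus repository name used by the load statements above):
#
# load("@io_bazel_rules_prometheus//prometheus:defs.bzl", "prometheus_repositories")
# prometheus_repositories()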
| 32.178571
| 71
| 0.802442
|
f2821b0131fd3f49165a12f049b365eafe7d5edb
| 604
|
py
|
Python
|
Geometry/tests/test_line.py
|
liuxiang0/Geometry
|
3500f815fa56c535b36d1b6fd0afe69ce5d055be
|
[
"MIT"
] | 23
|
2015-10-28T15:21:41.000Z
|
2022-03-29T13:52:41.000Z
|
Geometry/tests/test_line.py
|
liuxiang0/Geometry
|
3500f815fa56c535b36d1b6fd0afe69ce5d055be
|
[
"MIT"
] | 7
|
2021-01-26T11:57:25.000Z
|
2022-02-07T11:00:06.000Z
|
Geometry/tests/test_line.py
|
liuxiang0/Geometry
|
3500f815fa56c535b36d1b6fd0afe69ce5d055be
|
[
"MIT"
] | 16
|
2016-07-17T12:47:05.000Z
|
2021-06-21T21:02:48.000Z
|
import unittest
import math
import sys
from .. import Line, Segment, Ray, Point
from ..exceptions import *
class LineTestCase(unittest.TestCase):
def assertIsLine(self, l, msg=None):
self.assertIsInstance(l, Line)
def assertLinesEqual(self, p, q, msg=None):
pass
def testLineCreation(self):
i, j, k = Point.units()
o = Point.origin()
self.assertIsLine(Line())
self.assertIsLine(Line(None, None))
self.assertIsLine(Line(o, i))
class SegmentTestCase(unittest.TestCase):
pass
class RayTestCase(unittest.TestCase):
pass
| 18.30303
| 47
| 0.657285
|
6bcbbf8a131f8d71a3552725de20c3e78083297e
| 17,774
|
py
|
Python
|
contrib/IECoreAlembic/test/IECoreAlembic/AlembicInputTest.py
|
goddardl/cortex
|
323a160fd831569591cde1504f415a638f8b85bc
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/IECoreAlembic/test/IECoreAlembic/AlembicInputTest.py
|
goddardl/cortex
|
323a160fd831569591cde1504f415a638f8b85bc
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/IECoreAlembic/test/IECoreAlembic/AlembicInputTest.py
|
goddardl/cortex
|
323a160fd831569591cde1504f415a638f8b85bc
|
[
"BSD-3-Clause"
] | 1
|
2020-09-26T01:15:37.000Z
|
2020-09-26T01:15:37.000Z
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import math
import unittest
import IECore
import IECoreAlembic
class AlembicInputTest( unittest.TestCase ) :
def testConstructor( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/cube.abc" )
self.assertRaises( Exception, IECoreAlembic.AlembicInput, "iDontExist" )
def testHierarchy( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/cube.abc" )
self.assertEqual( a.numChildren(), 1 )
self.assertEqual( a.childNames(), IECore.StringVectorData( [ "group1" ] ) )
g1 = a.child( 0 )
g2 = a.child( "group1" )
self.assertEqual( g1.name(), g2.name() )
self.assertEqual( g1.name(), "group1" )
self.assertEqual( g1.fullName(), g2.fullName() )
self.assertEqual( g1.fullName(), "/group1" )
self.assertEqual( g1.numChildren(), 1 )
self.assertEqual( g1.childNames(), IECore.StringVectorData( [ "pCube1" ] ) )
c = g1.child( 0 )
self.assertEqual( c.name(), "pCube1" )
self.assertEqual( c.fullName(), "/group1/pCube1" )
self.assertEqual( c.numChildren(), 1 )
self.assertEqual( c.childNames(), IECore.StringVectorData( [ "pCubeShape1" ] ) )
cs = c.child( 0 )
self.assertEqual( cs.numChildren(), 0 )
self.assertEqual( cs.childNames(), IECore.StringVectorData() )
self.assertRaises( Exception, cs.child, 0 )
self.assertRaises( Exception, cs.child, "iDontExist" )
def testConvertMesh( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/cube.abc" )
c = a.child( "group1" ).child( "pCube1" )
self.assertEqual( c.objectAtSample( 0, IECore.MeshPrimitive.staticTypeId() ), None )
cs = c.child( "pCubeShape1" )
m = cs.objectAtSample( 0, IECore.MeshPrimitive.staticTypeId() )
self.failUnless( isinstance( m, IECore.MeshPrimitive ) )
def testConvertTransform( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/cube.abc" )
g = a.child( "group1" )
t = g.objectAtSample( 0, IECore.M44fData.staticTypeId() )
self.assertEqual( t, IECore.M44fData( IECore.M44f.createScaled( IECore.V3f( 2 ) ) * IECore.M44f.createTranslated( IECore.V3f( 2, 0, 0 ) ) ) )
c = a.child( "group1" ).child( "pCube1" )
t = c.objectAtSample( 0, IECore.M44fData.staticTypeId() )
self.assertEqual( t, IECore.M44fData( IECore.M44f.createTranslated( IECore.V3f( -1, 0, 0 ) ) ) )
cs = c.child( "pCubeShape1" )
t = cs.objectAtSample( 0, IECore.M44fData.staticTypeId() )
self.assertEqual( t, None )
def testMetaData( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/cube.abc" )
m = a.metaData()
self.failUnless( isinstance( m , IECore.CompoundData ) )
def testBound( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/cube.abc" )
self.assertEqual( a.boundAtSample(), IECore.Box3d( IECore.V3d( -2 ), IECore.V3d( 2 ) ) )
cs = a.child( "group1" ).child( "pCube1" ).child( "pCubeShape1" )
self.assertEqual( cs.boundAtSample(), IECore.Box3d( IECore.V3d( -1 ), IECore.V3d( 1 ) ) )
def testTransform( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/cube.abc" )
self.assertEqual( a.transformAtSample(), IECore.M44d() )
g = a.child( "group1" )
self.assertEqual( g.transformAtSample(), IECore.M44d.createScaled( IECore.V3d( 2 ) ) * IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) )
c = g.child( "pCube1" )
self.assertEqual( c.transformAtSample(), IECore.M44d.createTranslated( IECore.V3d( -1, 0, 0 ) ) )
cs = c.child( "pCubeShape1" )
self.assertEqual( cs.transformAtSample(), IECore.M44d() )
def testConvertSubD( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/subdPlane.abc" )
c = a.child( "pPlane1" )
self.assertEqual( c.objectAtSample( 0, IECore.MeshPrimitive.staticTypeId() ), None )
cs = c.child( "pPlaneShape1" )
m = cs.objectAtSample( 0, IECore.MeshPrimitive.staticTypeId() )
self.failUnless( isinstance( m, IECore.MeshPrimitive ) )
self.assertEqual( m.interpolation, "catmullClark" )
def testConvertArbGeomParams( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/coloredMesh.abc" )
m = a.child( "pPlane1" ).child( "pPlaneShape1" ).objectAtSample( 0, IECore.MeshPrimitive.staticTypeId() )
self.failUnless( m.arePrimitiveVariablesValid() )
self.failUnless( "colorSet1" in m )
self.assertEqual( m["colorSet1"].interpolation, IECore.PrimitiveVariable.Interpolation.FaceVarying )
self.failUnless( isinstance( m["colorSet1"].data, IECore.Color4fVectorData ) )
self.assertEqual( len( m["colorSet1"].data ), 4 )
self.assertEqual(
m["colorSet1"].data,
IECore.Color4fVectorData( [
IECore.Color4f( 0, 1, 0, 1 ),
IECore.Color4f( 0, 0, 1, 1 ),
IECore.Color4f( 0, 0, 0, 1 ),
IECore.Color4f( 1, 0, 0, 1 ),
] )
)
self.failUnless( "ABC_int" in m )
self.assertEqual( m["ABC_int"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( m["ABC_int"].data, IECore.IntVectorData( [ 10 ] ) )
def testConvertUVs( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/coloredMesh.abc" )
m = a.child( "pPlane1" ).child( "pPlaneShape1" ).objectAtSample( 0, IECore.MeshPrimitive.staticTypeId() )
self.failUnless( "s" in m )
self.failUnless( "t" in m )
self.assertEqual( m["s"].interpolation, IECore.PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( m["t"].interpolation, IECore.PrimitiveVariable.Interpolation.FaceVarying )
self.failUnless( isinstance( m["s"].data, IECore.FloatVectorData ) )
self.failUnless( isinstance( m["t"].data, IECore.FloatVectorData ) )
def testSamples( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
self.assertEqual( a.numSamples(), 10 )
for i in range( 0, a.numSamples() ) :
self.assertAlmostEqual( a.timeAtSample( i ), (i + 1) / 24.0 )
p = a.child( "persp" )
self.assertEqual( p.numSamples(), 1 )
self.assertEqual( p.timeAtSample( 0 ), 1 / 24.0 )
t = a.child( "pCube1" )
self.assertEqual( t.numSamples(), 10 )
for i in range( 0, t.numSamples() ) :
self.assertAlmostEqual( t.timeAtSample( i ), (i + 1) / 24.0 )
m = t.child( "pCubeShape1" )
self.assertEqual( m.numSamples(), 10 )
for i in range( 0, m.numSamples() ) :
self.assertAlmostEqual( m.timeAtSample( i ), (i + 1) / 24.0 )
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/noTopLevelStoredBounds.abc" )
self.assertEqual( a.numSamples(), 0 )
# no time samples at the top level, so this should throw an exception:
self.assertRaises( Exception, a.timeAtSample, 0 )
# should throw the RIGHT exceptions:
try:
a.timeAtSample(0)
except Exception as e:
self.assertEqual( str(e), "Invalid Argument : Sample index out of range" )
# should these throw exceptions?
a.boundAtSample(0)
a.objectAtSample(0)
a.transformAtSample(0)
def testOutOfRangeSamplesRaise( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
self.assertRaises( Exception, a.timeAtSample, 10 )
def testSampleInterval( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
# persp has only one sample, so should always be reading from that regardless of the time
p = a.child( "persp" )
t = -1000
while t < 1000 :
t += .01
self.assertEqual( p.sampleIntervalAtTime( t ), ( 0, 0, 0 ) )
# pCube1 has a sample per frame
t = a.child( "pCube1" )
for i in range( 0, t.numSamples() ) :
# reads on the frame should not need
# interpolation.
v = t.sampleIntervalAtTime( t.timeAtSample( i ) )
self.assertEqual( v[0], 0 )
self.assertEqual( v[1], i )
self.assertEqual( v[2], i )
# reads in between frames should need
# interpolation
if i < t.numSamples() -1 :
v = t.sampleIntervalAtTime( t.timeAtSample( i ) + 1 / 48.0 )
self.assertAlmostEqual( v[0], 0.5 )
self.assertEqual( v[1], i )
self.assertEqual( v[2], i + 1 )
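# i.e. sampleIntervalAtTime appears to return a tuple of
# (interpolation factor, lower sample index, upper sample index),
# which is what the assertions above exercise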
def testConverterAccess( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
m = a.child( "pCube1" ).child( "pCubeShape1" )
c = m.converter()
mesh = c.convert()
self.failUnless( isinstance( mesh, IECore.MeshPrimitive ) )
def testConvertAtIndices( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
m = a.child( "pCube1" ).child( "pCubeShape1" )
c = m.converter()
mesh = c.convert()
self.failUnless( isinstance( mesh, IECore.MeshPrimitive ) )
for i in range( 1, m.numSamples() ) :
c["sampleIndex"].setNumericValue( i )
mesh2 = c.convert()
self.failUnless( isinstance( mesh2, IECore.MeshPrimitive ) )
self.assertEqual( mesh.verticesPerFace, mesh2.verticesPerFace )
self.assertNotEqual( mesh["P"], mesh2["P"] )
def testTransformAtSample( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
t = a.child( "pCube1" )
matrix = t.transformAtSample()
self.assertEqual( matrix, IECore.M44d() )
self.assertEqual( matrix, t.transformAtSample( 0 ) )
for i in range( 1, t.numSamples() ) :
matrix2 = t.transformAtSample( i )
self.assertNotEqual( matrix, matrix2 )
expectedMatrix = IECore.M44d.createTranslated( IECore.V3d( i / 9.0, 0, 0 ) )
self.failUnless( matrix2.equalWithAbsError( expectedMatrix, 0.0000001 ) )
self.assertEqual( t.transformAtSample( t.numSamples() - 1 ), IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) )
def testConvertInterpolated( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
m = a.child( "pCube1" ).child( "pCubeShape1" )
mesh0 = m.objectAtSample( 0 )
mesh1 = m.objectAtSample( 1 )
mesh = m.objectAtTime( 1.5 / 24.0 )
self.failUnless( isinstance( mesh, IECore.MeshPrimitive ) )
self.assertEqual( mesh, IECore.linearObjectInterpolation( mesh0, mesh1, 0.5 ) )
def testRotatingTransformAtSample( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/rotatingCube.abc" )
t = a.child( "pCube1" )
for i in range( 0, 24 ) :
ti = t.transformAtSample( i )
mi = IECore.M44d.createRotated( IECore.V3d( IECore.degreesToRadians( 90 * i ), 0, 0 ) )
self.failUnless( ti.equalWithAbsError( mi, 0.0000000000001 ) )
def testInterpolatedTranslate( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
t = a.child( "pCube1" )
for i in range( 0, t.numSamples() * 2 - 1 ) :
frame = i / 2.0 + 1
time = frame / 24.0
matrix = t.transformAtTime( time )
expectedMatrix = IECore.M44d.createTranslated( IECore.V3d( i / 18.0, 0, 0 ) )
self.failUnless( matrix.equalWithAbsError( expectedMatrix, 0.0000001 ) )
def testInterpolatedRotate( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/rotatingCube.abc" )
t = a.child( "pCube1" )
for i in range( 0, t.numSamples() * 2 - 1 ) :
frame = i / 2.0 + 1
time = frame / 24.0
matrix = t.transformAtTime( time )
expectedMatrix = IECore.M44d.createRotated( IECore.V3d( IECore.degreesToRadians( 90 * i * 0.5 ), 0, 0 ) )
self.failUnless( matrix.equalWithAbsError( expectedMatrix, 0.0000001 ) )
def testHasStoredBound( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
self.assertEqual( a.hasStoredBound(), True )
self.assertEqual( a.child( "persp" ).hasStoredBound(), False )
self.assertEqual( a.child( "persp" ).child( "perspShape" ).hasStoredBound(), False )
self.assertEqual( a.child( "pCube1" ).hasStoredBound(), False )
self.assertEqual( a.child( "pCube1" ).child( "pCubeShape1" ).hasStoredBound(), True )
self.assertEqual( a.child( "front" ).hasStoredBound(), False )
self.assertEqual( a.child( "front" ).child( "frontShape" ).hasStoredBound(), False )
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/noTopLevelStoredBounds.abc" )
self.assertEqual( a.hasStoredBound(), False )
def testBoundAtSample( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
self.assertEqual( a.boundAtSample( 0 ), IECore.Box3d( IECore.V3d( -0.5 ), IECore.V3d( 0.5 ) ) )
self.assertEqual( a.boundAtSample( a.numSamples()-1 ), IECore.Box3d( IECore.V3d( 0.5, -0.5, -0.5 ), IECore.V3d( 1.5, 2, 0.5 ) ) )
t = a.child( "pCube1" )
self.assertRaises( Exception, t.boundAtSample, 0 )
self.assertRaises( Exception, t.boundAtSample, t.numSamples() - 1 )
m = t.child( "pCubeShape1" )
self.assertEqual( m.boundAtSample( 0 ), IECore.Box3d( IECore.V3d( -0.5 ), IECore.V3d( 0.5 ) ) )
self.assertEqual( m.boundAtSample( m.numSamples()-1 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 2, 0.5 ) ) )
def testBoundAtTime( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
t = a.child( "pCube1" )
m = t.child( "pCubeShape1" )
startTime = a.timeAtSample( 0 )
endTime = a.timeAtSample( a.numSamples() - 1 )
aStartBound = a.boundAtSample( 0 )
aEndBound = a.boundAtSample( a.numSamples() - 1 )
mStartBound = m.boundAtSample( 0 )
mEndBound = m.boundAtSample( m.numSamples() - 1 )
def lerp( a, b, x ) :
return a + ( b - a ) * x
def lerpBox( a, b, x ) :
r = a.__class__()
r.min = lerp( a.min, b.min, x )
r.max = lerp( a.max, b.max, x )
return r
numSteps = 100
for i in range( 0, numSteps ) :
lerpFactor = ( float( i ) / (numSteps-1) )
time = lerp( startTime, endTime, lerpFactor )
aBound = a.boundAtTime( time )
expectedABound = lerpBox( aStartBound, aEndBound, lerpFactor )
self.failUnless( aBound.min.equalWithAbsError( expectedABound.min, 0.000001 ) )
self.failUnless( aBound.max.equalWithAbsError( expectedABound.max, 0.000001 ) )
mBound = m.boundAtTime( time )
expectedMBound = lerpBox( mStartBound, mEndBound, lerpFactor )
self.failUnless( mBound.min.equalWithAbsError( expectedMBound.min, 0.000001 ) )
self.failUnless( mBound.max.equalWithAbsError( expectedMBound.max, 0.000001 ) )
tBound = t.boundAtTime( time )
self.failUnless( tBound.min.equalWithAbsError( expectedMBound.min, 0.000001 ) )
self.failUnless( tBound.max.equalWithAbsError( expectedMBound.max, 0.000001 ) )
def testConvertNormals( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
m = a.child( "pCube1" ).child( "pCubeShape1" )
mesh = m.objectAtSample( 0 )
self.failUnless( "N" in mesh )
self.failUnless( isinstance( mesh["N"].data, IECore.V3fVectorData ) )
self.assertEqual( mesh["N"].interpolation, IECore.PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( mesh["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
def testCamera( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/animatedCube.abc" )
c = a.child( "persp" ).child( "perspShape" ).objectAtSample( 0 )
self.failUnless( isinstance( c, IECore.Camera ) )
c = a.child( "persp" ).child( "perspShape" ).objectAtTime( 0 )
self.failUnless( isinstance( c, IECore.Camera ) )
def testHierarchyIgnoresShadingGroups( self ) :
a = IECoreAlembic.AlembicInput( os.path.dirname( __file__ ) + "/data/sphereWithShadingGroups.abc" )
self.assertEqual( a.numChildren(), 1 )
self.assertEqual( a.childNames(), IECore.StringVectorData( [ "pSphere1" ] ) )
g = a.child( "pSphere1" )
m = g.child( "pSphereShape1" )
self.assertEqual( m.numChildren(), 0 )
self.assertEqual( m.childNames(), IECore.StringVectorData() )
if __name__ == "__main__":
unittest.main()
| 38.059957
| 144
| 0.672499
|
dba4892df6b04036d2bb5c14229d3ad666dc9abb
| 2,810
|
py
|
Python
|
mvlearn/datasets/base.py
|
cameronfr/mvlearn
|
5e279d979446f213dd3ae420536138ce6c60cccf
|
[
"Apache-2.0"
] | null | null | null |
mvlearn/datasets/base.py
|
cameronfr/mvlearn
|
5e279d979446f213dd3ae420536138ce6c60cccf
|
[
"Apache-2.0"
] | null | null | null |
mvlearn/datasets/base.py
|
cameronfr/mvlearn
|
5e279d979446f213dd3ae420536138ce6c60cccf
|
[
"Apache-2.0"
] | null | null | null |
from os.path import dirname, join
import numpy as np
def load_UCImultifeature(select_labeled="all"):
"""
Load the UCI multiple features dataset, taken from
https://archive.ics.uci.edu/ml/datasets/Multiple+Features. This data set
consists of 6 views of handwritten digit images, with classes 0-9. The
6 views are the following:
1. 76 Fourier coefficients of the character shapes
2. 216 profile correlations
3. 64 Karhunen-Loève coefficients
4. 240 pixel averages of the images from 2x3 windows
5. 47 Zernike moments
6. 6 morphological features
Each class contains 200 labeled examples.
Parameters
----------
select_labeled : optional, array-like of labels in [0, 9], default 'all'
A list of the examples that the user wants by label. If not
specified, all examples in the dataset are returned. Repeated labels
are ignored.
Returns
-------
data : list of np.ndarray, each of shape (200 * n_selected_labels, n_features)
List of length 6 with each element being the data for one of the
views.
labels : np.ndarray
Array of labels for the digit
References
----------
.. [#1Data] M. van Breukelen, R.P.W. Duin, D.M.J. Tax, and J.E. den Hartog,
Handwritten digit recognition by combined classifiers, Kybernetika,
vol. 34, no. 4, 1998, 381-386
"""
if select_labeled == "all":
select_labeled = range(10)
select_labeled = list(set(select_labeled))
if len(select_labeled) < 1 or len(select_labeled) > 10:
raise ValueError("If selecting examples by label, must select "
"at least 1 and no more than 10.")
module_path = dirname(__file__)
folder = "UCImultifeature"
filenames = ["mfeat-fou.csv", "mfeat-fac.csv", "mfeat-kar.csv",
"mfeat-pix.csv", "mfeat-zer.csv", "mfeat-mor.csv"]
data = []
for filename in filenames:
csv_file = join(module_path, folder, filename)
datatemp = np.genfromtxt(csv_file, delimiter=',')
data.append(datatemp[1:, :-1])
labels = datatemp[1:, -1]
selected_data = []
for i in range(6):
datatemp = np.zeros((200*len(select_labeled), data[i].shape[1]))
if i == 0:
selected_labels = np.zeros(200*len(select_labeled),)
for j, label in enumerate(select_labeled):
# user specified a bad label
if label not in range(10):
raise ValueError("Bad label: labels must be in 0, 1, 2,.. 9")
indices = np.nonzero(labels == label)
datatemp[j * 200: (j+1) * 200, :] = data[i][indices, :]
selected_labels[j*200:(j+1)*200] = labels[indices]
selected_data.append(datatemp)
return selected_data, selected_labels
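# A minimal usage sketch (assumes the bundled UCImultifeature CSV files are
# present alongside this module, as np.genfromtxt above expects):
#   data, labels = load_UCImultifeature(select_labeled=[0, 1])
#   # len(data) == 6 views; each view holds 200 rows per selected digit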
| 35.125
| 79
| 0.625979
|
6561d33379623149d36537a5f95a5152b9b99399
| 14,908
|
py
|
Python
|
bin/update_isa.py
|
ucam-comparch-loki/lokisim
|
3641e01d6173d448a8bd9d62c3cc9664d05e442d
|
[
"MIT"
] | null | null | null |
bin/update_isa.py
|
ucam-comparch-loki/lokisim
|
3641e01d6173d448a8bd9d62c3cc9664d05e442d
|
[
"MIT"
] | 2
|
2020-04-06T14:22:31.000Z
|
2020-06-01T09:33:37.000Z
|
bin/update_isa.py
|
ucam-comparch-loki/lokisim
|
3641e01d6173d448a8bd9d62c3cc9664d05e442d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import datetime, os
class Operation:
# Split the string into its individual sections. These should be:
# operation[0] = mnemonic (e.g. NOR)
# operation[1] = opcode (e.g. 0000000)
# operation[2] = format (e.g. 3R(rd,rs,rt))
# operation[3] = function (e.g. 0000)
# operation[4] = channel ([o]ptional, [m]andatory, [-]unused)
# operation[5] = immediate ([u]nsigned, [s]igned, [l]ui, [-]none)
# operation[6] = ALU function (e.g. 00000 - usually the same as function bits)
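# For example, a (hypothetical) definition line such as
#   NOR  0000000  3R(rd,rs,rt)  0000  -  -  00000
# parses to mnemonic "NOR", opcode 0, format "3R", function 0,
# no channel, no immediate handling, and ALU function 0.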
def __init__(self, string):
parts = string.split()
self.mnemonic = parts[0]
self.opcode = int(parts[1], 2)
self.fullformat = parts[2]
self.format = parts[2].split('(')[0] # e.g. just "3R"
if parts[3] == '-':
self.function = 0
else:
self.function = int(parts[3], 2)
self.channel = parts[4]
self.immediate = parts[5]
if parts[6] == '-':
self.alu_function = -1
else:
self.alu_function = int(parts[6], 2)
def __str__(self):
return self.mnemonic.ljust(16) + str(self.opcode).ljust(4) +\
self.format.ljust(8) + str(self.function).ljust(4) + self.channel.ljust(4) +\
self.immediate.ljust(4) + str(self.alu_function)
def has_dest_reg(self):
return self.fullformat.find("rd") >= 0
def has_src_reg1(self):
return self.fullformat.find("rs") >= 0
def has_src_reg2(self):
return self.fullformat.find("rt") >= 0
def has_immediate(self):
# Immediates which don't pass through the ALU aren't recognised in the
# "immediate" field, so we have to do a more complicated check.
return (self.fullformat.find("immed") >= 0) or (self.fullformat.find("shamt") >= 0)
def has_channel(self):
return not self.channel == '-'
def sets_predicate(self):
return self.asm_name().find(".p") >= 0
def is_alu_operation(self):
# Function code 31 represents operations which have results, but not
# from the ALU.
return 0 <= self.alu_function < 31
def signed_immediate(self):
return self.immediate == 's'
# The name to go in the C++ enumeration of all operations.
# By convention, it is all capitals, and it cannot contain any punctuation.
def enum_name(self):
return "OP_" + self.mnemonic.replace(".", "_").upper()
# The name of the operation which gets printed. By convention, it is all
# lower case.
def asm_name(self):
return self.mnemonic.lower()
# The enumeration name of the ALU function this operation corresponds to.
# Only valid for ALU operations.
# TODO: use the actual name, rather than just casting the value.
def function_name(self):
return "(function_t)" + str(self.alu_function)
# An example use case of this instruction, e.g. nor rd, rs, rt (-> ch)
def usage(self):
result = self.asm_name() + " "
operands = self.fullformat.split('(')[1].split(')')[0] # Leave contents of brackets
result += operands.replace(",", ", ") # Add spacing
if(self.channel == "m"):
result += " -> ch"
elif(self.channel == "o"):
result += " (-> ch)"
return result.replace(", unused", "").replace(" unused", "") # Remove all instances of "unused"
################################################################################
# The file containing all of the information.
definition_file = "/usr/groups/comparch-loki/isa/opcodes.isa"
with open(definition_file) as openfile:
operations = openfile.readlines()
# Filter out comments and blank lines
operations = filter(lambda x: x[0] != '%' and not x[0].isspace(), operations)
# Generate an Operation type for each line of text remaining.
operations = [Operation(operation) for operation in operations]
# The files we are going to write into.
directory = os.path.dirname(os.path.realpath(__file__))
target_header = directory + "/../src/Utility/ISA.h"
target_source = directory + "/../src/Utility/ISA.cpp"
# Return a string representing a comma-separated list of booleans showing which
# operations satisfy the filter function.
def opcode_list(filter_function):
d = dict()
for i in range(128):
d[i] = "0"
for operation in filter(filter_function, operations):
d[operation.opcode] = "1"
return ", ".join(d.values())
# Return a string representing a comma-separated list of values.
# There may be gaps (e.g. an opcode which isn't used) which are filled with the
# supplied default value. The two functions given access the appropriate fields
# of an Operation: one gets the key, and the other gets the value.
# Also add a function which can filter out operations which shouldn't join the map.
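# For illustration (hypothetical values): with length=4, default '"?"', and two
# operations keyed 0 and 2, create_map would return the string
#   '"nor", "?", "and", "?"'
# -- one value per slot, with unused slots filled by the default.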
def create_map(length, key_function, value_function, default, include_function=lambda x:True):
d = dict()
for i in range(length):
d[i] = default
for operation in filter(include_function, operations):
d[key_function(operation)] = value_function(operation)
return ", ".join(d.values())
# Generate C++ code which initialises a map of the given types with all operations.
# The functions provided access the keys and values within each operation.
# Also add a function which can filter out operations which shouldn't join the map.
def create_c_map(name, key_type, value_type, key_function, value_function, include_function=lambda x:True):
code = """\
static std::map<"""+key_type+","+value_type+"> "+name+""";
static bool initialised = false;
if (!initialised) {
""";
room_on_line = True;
for operation in filter(include_function, operations):
single_op = " "+name+"["+str(key_function(operation))+"] = "+str(value_function(operation))+";"
code += single_op.ljust(50)
if not room_on_line:
code += "\n"
room_on_line = not room_on_line
return code + "\n initialised = true;\n }\n"
################################################################################
header_text = """\
/*
* ISA.h
*
* Listing of all instructions, their formats, and other associated information.
*
* Automatically generated by """ + os.path.basename(__file__) + " on " + str(datetime.date.today()) + """.
*
*/
#ifndef ISA_H_
#define ISA_H_
#include <string>
typedef std::string inst_name_t;
class ISA {
public:
enum Opcode {
\n"""
# Add a line for each operation to the enumeration.
for i in range(len(operations)):
operation = operations[i]
name = operation.enum_name() + " = " + str(operation.opcode)
if i < len(operations) - 1:
name += ","
comment = "// " + operation.usage()
# strings are immutable - don't want to keep adding to them
# Use a bytearray instead, or a list of all lines to write to the file
header_text += " " + name.ljust(20) + comment + "\n"
header_text += """
};
enum Function {"""
functions = []
# Go through all operations, checking for their ALU function codes. Assumes that
# the first operation found will have the most useful name, and that function
# codes appear in order.
for operation in operations:
if operation.alu_function not in functions:
header_text += "\n FN_" + operation.mnemonic.replace(".", "_").upper() + " = " + str(operation.alu_function) + ","
functions.append(operation.alu_function)
header_text += """
};
enum Format {
FMT_FF, // Fetch format (rs,immed)
FMT_PFF, // Predicated fetch format (immed:16s, immed:7s)
FMT_0R, // Zero registers (unused) (immed)
FMT_0Rnc, // Zero registers, no channel (immed)
FMT_1R, // One register (rd,immed) (rs,immed)
FMT_1Rnc, // One register, no channel (rd,immed) (rs,immed) (rd,unused)
FMT_2R, // Two registers (rd,rs,immed) (rs,rt,unused)
FMT_2Rnc, // Two registers, no channel (rs,rt,unused)
FMT_2Rs, // Two registers, shift amount (rd,rs,shamt)
FMT_3R // Three registers (rd,rs,rt)
};
typedef InstructionMap::Opcode opcode_t;
typedef InstructionMap::Function function_t;
typedef InstructionMap::Format format_t;
// Simple true/false questions to ask of each operation.
static bool storesResult(opcode_t opcode);
static bool hasDestReg(opcode_t opcode);
static bool hasSrcReg1(opcode_t opcode);
static bool hasSrcReg2(opcode_t opcode);
static bool hasImmediate(opcode_t opcode);
static bool hasRemoteChannel(opcode_t opcode);
static bool setsPredicate(opcode_t opcode);
static bool isALUOperation(opcode_t opcode);
static bool hasSignedImmediate(opcode_t opcode);
// The total number of instructions currently supported.
static int numInstructions();
// Convert back and forth between names and opcodes.
static opcode_t opcode(const inst_name_t& name);
static const inst_name_t& name(opcode_t opcode, function_t function = (function_t)0);
static function_t function(const inst_name_t& name);
static function_t function(opcode_t opcode);
static format_t format(opcode_t opcode);
};
typedef InstructionMap::Opcode opcode_t;
typedef InstructionMap::Function function_t;
typedef InstructionMap::Format format_t;
#endif /* ISA_H_ */\n"""
# Write file
with open(target_header, "w") as header_file:
header_file.write(header_text)
print "Wrote", os.path.abspath(target_header)
################################################################################
# A function which returns the C++ implementation of an opcode_t -> bool method.
# Parameters:
# name = name of function
# lambda_func = way of getting a boolean value from the Operation class
def func_implementation(name, lambda_func):
return "bool ISA::"+name+"(opcode_t opcode) {\n" +\
" static const bool _"+name+"[] = {"+opcode_list(lambda_func)+"};\n\n" +\
" return _"+name+"[opcode];\n" +\
"}\n\n"
source_text = """\
/*
* ISA.cpp
*
* Automatically generated by """ + os.path.basename(__file__) + " on " + str(datetime.date.today()) + """.
*
*/
#include "ISA.h"
#include <assert.h>
#include <iostream>
#include <map>
bool ISA::storesResult(opcode_t opcode) {return hasDestReg(opcode);} // remove?
""" + \
func_implementation("hasDestReg", lambda x: x.has_dest_reg() ) + \
func_implementation("hasSrcReg1", lambda x: x.has_src_reg1() ) + \
func_implementation("hasSrcReg2", lambda x: x.has_src_reg2() ) + \
func_implementation("hasImmediate", lambda x: x.has_immediate() ) + \
func_implementation("hasRemoteChannel", lambda x: x.has_channel() ) + \
func_implementation("setsPredicate", lambda x: x.sets_predicate() ) + \
func_implementation("isALUOperation", lambda x: x.is_alu_operation()) + \
func_implementation("hasSignedImmediate", lambda x: x.signed_immediate()) + \
"""\
int ISA::numInstructions() {return """ + str(len(operations)) + """;} // 128?
const inst_name_t& ISA::name(opcode_t opcode, function_t function) {
static const inst_name_t opcode_to_name[] = {"""
source_text += \
create_map(128, # 128 opcodes
lambda x: x.opcode, # mapping from opcodes...
lambda x: '"' + x.asm_name() + '"', # ... to names
"\"\"") # if opcode unused, give empty string
source_text += """};
static const inst_name_t function_to_name[] = {"""
source_text += \
create_map(16, # 16 function codes
lambda x: x.function, # mapping from functions...
lambda x: '"' + x.asm_name() + '"', # ... to names
"\"\"", # if unused, give empty string
lambda x: x.opcode == 0) # only instructions with opcode == 0 have function codes
source_text += """};
static const inst_name_t function_to_name_p[] = {"""
source_text += \
create_map(16, # 16 function codes
lambda x: x.function, # mapping from functions...
lambda x: '"' + x.asm_name() + '"', # ... to names
"\"\"", # if unused, give empty string
lambda x: x.opcode == 1) # only instructions with opcode == 1 have function codes
source_text += """};
if(opcode == 0) return function_to_name[function];
else if(opcode == 1) return function_to_name_p[function];
else return opcode_to_name[opcode];
}
format_t ISA::format(opcode_t opcode) {
static const format_t opcode_to_format[] = {"""+create_map(128, lambda x: x.opcode, lambda x: "FMT_" + x.format, "(format_t)0")+"""};
return opcode_to_format[opcode];
}
opcode_t ISA::opcode(const inst_name_t& name) {\n"""
source_text += \
create_c_map("name_to_opcode", # Name of map
"inst_name_t", # Key type
"opcode_t", # Value type
lambda x: '"' + x.asm_name() + '"', # Access key (and add quote marks)
lambda x: x.enum_name()) # Access value
source_text += """
if(name_to_opcode.find(name) == name_to_opcode.end()) {
std::cerr << "Error: unknown instruction: " << name << std::endl;
throw std::exception();
}
else return name_to_opcode[name];
}
function_t ISA::function(const inst_name_t& name) {\n"""
source_text += \
create_c_map("name_to_function", # Name of map
"inst_name_t", # Key type
"function_t", # Value type
lambda x: '"' + x.asm_name() + '"', # Access key (and add quote marks)
lambda x: x.function_name(), # Access value
lambda x: x.opcode < 2) # only certain instructions have function codes
source_text += """
assert(name_to_function.find(name) != name_to_function.end());
return name_to_function[name];
}
function_t ISA::function(opcode_t opcode) {
static const function_t opcode_to_function[] = {"""
source_text += \
create_map(128,
lambda x: x.opcode,
lambda x: "(function_t)" + str(x.alu_function),
"(function_t)0")
source_text += """};
return opcode_to_function[opcode];
}
"""
# Write file
with open(target_source, "w") as source_file:
source_file.write(source_text)
print "Wrote", os.path.abspath(target_source)
| 37.363409
| 135
| 0.601422
|
00ab72c7edc5cf468271829378e07e8f0c7064d2
| 36,865
|
py
|
Python
|
multimodal_transformers/model/tabular_transformers.py
|
sidharrth2002/Multimodal-Toolkit
|
3e5e9cf58e5310a10f62203113443631bc6ca1fe
|
[
"MIT"
] | null | null | null |
multimodal_transformers/model/tabular_transformers.py
|
sidharrth2002/Multimodal-Toolkit
|
3e5e9cf58e5310a10f62203113443631bc6ca1fe
|
[
"MIT"
] | null | null | null |
multimodal_transformers/model/tabular_transformers.py
|
sidharrth2002/Multimodal-Toolkit
|
3e5e9cf58e5310a10f62203113443631bc6ca1fe
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from transformers import (
BertForSequenceClassification,
RobertaForSequenceClassification,
DistilBertForSequenceClassification,
AlbertForSequenceClassification,
XLNetForSequenceClassification,
XLMForSequenceClassification,
LongformerForSequenceClassification,
)
from transformers.models.bert.modeling_bert import BERT_INPUTS_DOCSTRING
from transformers.models.roberta.modeling_roberta import ROBERTA_INPUTS_DOCSTRING
from transformers.models.distilbert.modeling_distilbert import DISTILBERT_INPUTS_DOCSTRING
from transformers.models.albert.modeling_albert import ALBERT_INPUTS_DOCSTRING
from transformers.models.xlnet.modeling_xlnet import XLNET_INPUTS_DOCSTRING
from transformers.models.xlm.modeling_xlm import XLM_INPUTS_DOCSTRING
from transformers.models.longformer.modeling_longformer import LONGFORMER_INPUTS_DOCSTRING
from transformers.models.xlm_roberta.modeling_xlm_roberta import XLMRobertaConfig
from transformers.file_utils import add_start_docstrings
from multimodal_transformers.model.layers import KeyAttention, LambdaLayer
from .tabular_combiner import TabularFeatCombiner
from .tabular_config import TabularConfig
from .layer_utils import MLP, calc_mlp_dims, hf_loss_func
class BertWithTabular(BertForSequenceClassification):
"""
Bert Model transformer with a sequence classification/regression head as well as
a TabularFeatCombiner module to combine categorical and numerical features
with the Bert pooled output
Parameters:
hf_model_config (:class:`~transformers.BertConfig`):
Model configuration class with all the parameters of the model.
This object must also have a tabular_config member variable that is a
:obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`
"""
def __init__(self, hf_model_config):
super().__init__(hf_model_config)
tabular_config = hf_model_config.tabular_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
tabular_config.text_feat_dim = hf_model_config.hidden_size
tabular_config.hidden_dropout_prob = hf_model_config.hidden_dropout_prob
self.tabular_combiner = TabularFeatCombiner(tabular_config)
self.num_labels = tabular_config.num_labels
combined_feat_dim = self.tabular_combiner.final_out_dim
if tabular_config.use_simple_classifier:
self.tabular_classifier = nn.Linear(combined_feat_dim,
tabular_config.num_labels)
else:
dims = calc_mlp_dims(combined_feat_dim,
division=tabular_config.mlp_division,
output_dim=tabular_config.num_labels)
self.tabular_classifier = MLP(combined_feat_dim,
tabular_config.num_labels,
num_hidden_lyr=len(dims),
dropout_prob=tabular_config.mlp_dropout,
hidden_channels=dims,
bn=True)
@add_start_docstrings(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
class_weights=None,
output_attentions=None,
output_hidden_states=None,
cat_feats=None,
numerical_feats=None
):
r"""
class_weights (:obj:`torch.FloatTensor` of shape :obj:`(tabular_config.num_labels,)`, `optional`, defaults to :obj:`None`):
Class weights to be used for cross entropy loss function for classification task
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`tabular_config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`tabular_config.num_labels > 1` a classification loss is computed (Cross-Entropy).
cat_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.cat_feat_dim)`, `optional`, defaults to :obj:`None`):
Categorical features to be passed in to the TabularFeatCombiner
numerical_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.numerical_feat_dim)`, `optional`, defaults to :obj:`None`):
Numerical features to be passed in to the TabularFeatCombiner
Returns:
:obj:`tuple` comprising various elements depending on configuration and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if tabular_config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.num_labels)`):
Classification (or regression if tabular_config.num_labels==1) scores (before SoftMax).
classifier_layer_outputs(:obj:`list` of :obj:`torch.FloatTensor`):
The outputs of each layer of the final classification layers. The 0th index of this list is the
combining module's output
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
combined_feats = self.tabular_combiner(pooled_output,
cat_feats,
numerical_feats)
loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,
self.tabular_classifier,
labels,
self.num_labels,
class_weights)
return loss, logits, classifier_layer_outputs
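# A minimal usage sketch (hypothetical values; BertConfig comes from
# transformers and is not imported in this module, and the TabularConfig
# parameter names are assumed from the docstrings above):
#   from transformers import BertConfig
#   hf_config = BertConfig.from_pretrained("bert-base-uncased")
#   hf_config.tabular_config = TabularConfig(num_labels=2, cat_feat_dim=10,
#                                            numerical_feat_dim=5)
#   model = BertWithTabular(hf_config)
#   loss, logits, layer_outs = model(input_ids, attention_mask=mask,
#                                    labels=labels, cat_feats=cats,
#                                    numerical_feats=nums)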
class RobertaWithTabular(RobertaForSequenceClassification):
"""
Roberta Model transformer with a sequence classification/regression head as well as
a TabularFeatCombiner module to combine categorical and numerical features
with the Roberta pooled output
Parameters:
hf_model_config (:class:`~transformers.RobertaConfig`):
Model configuration class with all the parameters of the model.
This object must also have a tabular_config member variable that is a
:obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`
"""
def __init__(self, hf_model_config):
super().__init__(hf_model_config)
tabular_config = hf_model_config.tabular_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
tabular_config.text_feat_dim = hf_model_config.hidden_size
tabular_config.hidden_dropout_prob = hf_model_config.hidden_dropout_prob
self.tabular_combiner = TabularFeatCombiner(tabular_config)
self.num_labels = tabular_config.num_labels
combined_feat_dim = self.tabular_combiner.final_out_dim
self.dropout = nn.Dropout(hf_model_config.hidden_dropout_prob)
if tabular_config.use_simple_classifier:
self.tabular_classifier = nn.Linear(combined_feat_dim,
tabular_config.num_labels)
else:
dims = calc_mlp_dims(combined_feat_dim,
division=tabular_config.mlp_division,
output_dim=tabular_config.num_labels)
self.tabular_classifier = MLP(combined_feat_dim,
tabular_config.num_labels,
num_hidden_lyr=len(dims),
dropout_prob=tabular_config.mlp_dropout,
hidden_channels=dims,
bn=True)
@add_start_docstrings(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
class_weights=None,
cat_feats=None,
numerical_feats=None
):
r"""
class_weights (:obj:`torch.FloatTensor` of shape :obj:`(tabular_config.num_labels,)`, `optional`, defaults to :obj:`None`):
Class weights to be used for cross entropy loss function for classification task
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`tabular_config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`tabular_config.num_labels > 1` a classification loss is computed (Cross-Entropy).
cat_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.cat_feat_dim)`, `optional`, defaults to :obj:`None`):
Categorical features to be passed in to the TabularFeatCombiner
numerical_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.numerical_feat_dim)`, `optional`, defaults to :obj:`None`):
Numerical features to be passed in to the TabularFeatCombiner
Returns:
:obj:`tuple` comprising various elements depending on configuration and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if tabular_config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.num_labels)`):
Classification (or regression if tabular_config.num_labels==1) scores (before SoftMax).
classifier_layer_outputs(:obj:`list` of :obj:`torch.FloatTensor`):
The outputs of each layer of the final classification layers. The 0th index of this list is the
combining module's output
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = outputs[0]
text_feats = sequence_output[:, 0, :]
text_feats = self.dropout(text_feats)
combined_feats = self.tabular_combiner(text_feats,
cat_feats,
numerical_feats)
loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,
self.tabular_classifier,
labels,
self.num_labels,
class_weights)
return loss, logits, classifier_layer_outputs
class XLMRobertaWithTabular(RobertaWithTabular):
"""
This class overrides :class:`~RobertaWithTabular`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
class DistilBertWithTabular(DistilBertForSequenceClassification):
"""
DistilBert Model transformer with a sequence classification/regression head as well as
a TabularFeatCombiner module to combine categorical and numerical features
with the DistilBert pooled output
Parameters:
hf_model_config (:class:`~transformers.DistilBertConfig`):
Model configuration class with all the parameters of the model.
This object must also have a tabular_config member variable that is a
:obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`
"""
def __init__(self, hf_model_config):
super().__init__(hf_model_config)
tabular_config = hf_model_config.tabular_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
tabular_config.text_feat_dim = hf_model_config.hidden_size
tabular_config.hidden_dropout_prob = hf_model_config.seq_classif_dropout
self.tabular_combiner = TabularFeatCombiner(tabular_config)
self.num_labels = tabular_config.num_labels
combined_feat_dim = self.tabular_combiner.final_out_dim
if tabular_config.use_simple_classifier:
self.tabular_classifier = nn.Linear(combined_feat_dim,
tabular_config.num_labels)
else:
dims = calc_mlp_dims(combined_feat_dim,
division=tabular_config.mlp_division,
output_dim=tabular_config.num_labels)
self.tabular_classifier = MLP(combined_feat_dim,
tabular_config.num_labels,
num_hidden_lyr=len(dims),
dropout_prob=tabular_config.mlp_dropout,
hidden_channels=dims,
bn=True)
@add_start_docstrings(DISTILBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
class_weights=None,
cat_feats=None,
numerical_feats=None
):
r"""
class_weights (:obj:`torch.FloatTensor` of shape :obj:`(tabular_config.num_labels,)`,`optional`, defaults to :obj:`None`):
Class weights to be used for cross entropy loss function for classification task
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`tabular_config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`tabular_config.num_labels > 1` a classification loss is computed (Cross-Entropy).
cat_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.cat_feat_dim)`,`optional`, defaults to :obj:`None`):
Categorical features to be passed in to the TabularFeatCombiner
numerical_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.numerical_feat_dim)`,`optional`, defaults to :obj:`None`):
Numerical features to be passed in to the TabularFeatCombiner
Returns:
:obj:`tuple` comprising various elements depending on configuration and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if tabular_config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, tabular_config.num_labels)`):
Classification (or regression if tabular_config.num_labels==1) scores (before SoftMax).
classifier_layer_outputs(:obj:`list` of :obj:`torch.FloatTensor`):
The outputs of each layer of the final classification layers. The 0th index of this list is the
combining module's output
"""
distilbert_output = self.distilbert(
input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
text_feats = self.dropout(pooled_output)
combined_feats = self.tabular_combiner(text_feats,
cat_feats,
numerical_feats)
loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,
self.tabular_classifier,
labels,
self.num_labels,
class_weights)
return loss, logits, classifier_layer_outputs
class AlbertWithTabular(AlbertForSequenceClassification):
"""
ALBERT Model transformer with a sequence classification/regression head as well as
a TabularFeatCombiner module to combine categorical and numerical features
with the ALBERT pooled output
Parameters:
hf_model_config (:class:`~transformers.AlbertConfig`):
Model configuration class with all the parameters of the model.
This object must also have a tabular_config member variable that is a
:obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`
"""
def __init__(self, hf_model_config):
super().__init__(hf_model_config)
tabular_config = hf_model_config.tabular_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
tabular_config.text_feat_dim = hf_model_config.hidden_size
tabular_config.hidden_dropout_prob = hf_model_config.hidden_dropout_prob
self.tabular_combiner = TabularFeatCombiner(tabular_config)
self.num_labels = tabular_config.num_labels
combined_feat_dim = self.tabular_combiner.final_out_dim
if tabular_config.use_simple_classifier:
self.tabular_classifier = nn.Linear(combined_feat_dim,
tabular_config.num_labels)
else:
dims = calc_mlp_dims(combined_feat_dim,
division=tabular_config.mlp_division,
output_dim=tabular_config.num_labels)
self.tabular_classifier = MLP(combined_feat_dim,
tabular_config.num_labels,
num_hidden_lyr=len(dims),
dropout_prob=tabular_config.mlp_dropout,
hidden_channels=dims,
bn=True)
@add_start_docstrings(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
class_weights=None,
cat_feats=None,
numerical_feats=None
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
combined_feats = self.tabular_combiner(pooled_output,
cat_feats,
numerical_feats)
loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,
self.tabular_classifier,
labels,
self.num_labels,
class_weights)
return loss, logits, classifier_layer_outputs
class XLNetWithTabular(XLNetForSequenceClassification):
"""
XLNet Model transformer with a sequence classification/regression head as well as
a TabularFeatCombiner module to combine categorical and numerical features
with the XLNet pooled output
Parameters:
hf_model_config (:class:`~transformers.XLNetConfig`):
Model configuration class with all the parameters of the model.
This object must also have a tabular_config member variable that is a
:obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`
"""
def __init__(self, hf_model_config):
super().__init__(hf_model_config)
tabular_config = hf_model_config.tabular_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
tabular_config.text_feat_dim = hf_model_config.hidden_size
self.tabular_combiner = TabularFeatCombiner(tabular_config)
self.num_labels = tabular_config.num_labels
combined_feat_dim = self.tabular_combiner.final_out_dim
if tabular_config.use_simple_classifier:
self.tabular_classifier = nn.Linear(combined_feat_dim,
tabular_config.num_labels)
else:
dims = calc_mlp_dims(combined_feat_dim,
division=tabular_config.mlp_division,
output_dim=tabular_config.num_labels)
self.tabular_classifier = MLP(combined_feat_dim,
tabular_config.num_labels,
num_hidden_lyr=len(dims),
dropout_prob=tabular_config.mlp_dropout,
hidden_channels=dims,
bn=True)
@add_start_docstrings(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
class_weights=None,
cat_feats=None,
numerical_feats=None
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
combined_feats = self.tabular_combiner(output,
cat_feats,
numerical_feats)
loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,
self.tabular_classifier,
labels,
self.num_labels,
class_weights)
return loss, logits, classifier_layer_outputs
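# Minimal usage sketch for the *WithTabular classes (illustrative; the
# TabularConfig field names for the categorical/numerical dimensions are
# assumptions -- only num_labels and the MLP options appear in this module):
#
#     from transformers import XLNetConfig
#
#     hf_config = XLNetConfig.from_pretrained('xlnet-base-cased')
#     hf_config.tabular_config = TabularConfig(num_labels=2,
#                                              cat_feat_dim=9,
#                                              numerical_feat_dim=4)
#     model = XLNetWithTabular(hf_config)
#     loss, logits, _ = model(input_ids=input_ids,
#                             attention_mask=attention_mask,
#                             labels=labels,
#                             cat_feats=cat_feats,
#                             numerical_feats=numerical_feats)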
class XLMWithTabular(XLMForSequenceClassification):
"""
XLM Model transformer with a sequence classification/regression head as well as
a TabularFeatCombiner module to combine categorical and numerical features
    with the XLM pooled output
Parameters:
hf_model_config (:class:`~transformers.XLMConfig`):
Model configuration class with all the parameters of the model.
This object must also have a tabular_config member variable that is a
:obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`
"""
def __init__(self, hf_model_config):
super().__init__(hf_model_config)
tabular_config = hf_model_config.tabular_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
tabular_config.text_feat_dim = hf_model_config.hidden_size
self.tabular_combiner = TabularFeatCombiner(tabular_config)
self.num_labels = tabular_config.num_labels
combined_feat_dim = self.tabular_combiner.final_out_dim
if tabular_config.use_simple_classifier:
self.tabular_classifier = nn.Linear(combined_feat_dim,
tabular_config.num_labels)
else:
dims = calc_mlp_dims(combined_feat_dim,
division=tabular_config.mlp_division,
output_dim=tabular_config.num_labels)
self.tabular_classifier = MLP(combined_feat_dim,
tabular_config.num_labels,
num_hidden_lyr=len(dims),
dropout_prob=tabular_config.mlp_dropout,
hidden_channels=dims,
bn=True)
    @add_start_docstrings(XLM_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
class_weights=None,
cat_feats=None,
numerical_feats=None
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
combined_feats = self.tabular_combiner(output,
cat_feats,
numerical_feats)
loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,
self.tabular_classifier,
labels,
self.num_labels,
class_weights)
return loss, logits, classifier_layer_outputs
class LongformerWithTabular(LongformerForSequenceClassification):
"""
    Longformer Model transformer with a sequence classification/regression head
    as well as a TabularFeatCombiner module to combine categorical and numerical
    features with the Longformer pooled output
"""
def __init__(self, hf_model_config, embedding_weights=None):
super().__init__(hf_model_config)
tabular_config = hf_model_config.tabular_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
tabular_config.text_feat_dim = hf_model_config.hidden_size
tabular_config.hidden_dropout_prob = hf_model_config.hidden_dropout_prob
self.tabular_combiner = TabularFeatCombiner(tabular_config)
self.num_labels = tabular_config.num_labels
combined_feat_dim = self.tabular_combiner.final_out_dim
self.dropout = nn.Dropout(hf_model_config.hidden_dropout_prob)
if tabular_config.use_simple_classifier:
self.tabular_classifier = nn.Linear(combined_feat_dim,
tabular_config.num_labels)
else:
dims = calc_mlp_dims(combined_feat_dim,
division=tabular_config.mlp_division,
output_dim=tabular_config.num_labels)
self.tabular_classifier = MLP(combined_feat_dim,
tabular_config.num_labels,
num_hidden_lyr=len(dims),
dropout_prob=tabular_config.mlp_dropout,
hidden_channels=dims,
bn=True)
        # load pretrained embeddings for the keyword-attention branch;
        # guard against the default of None before converting
        if embedding_weights is not None:
            self.embedding_layer = nn.Embedding.from_pretrained(
                torch.from_numpy(embedding_weights).float(), freeze=True)
        else:
            self.embedding_layer = None
@add_start_docstrings(LONGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
class_weights=None,
cat_feats=None,
numerical_feats=None,
answer_tokens=None,
key_tokens=None,
answer_mask=None,
key_mask=None
):
if global_attention_mask is None:
print("Initializing global attention on CLS token...")
global_attention_mask = torch.zeros_like(input_ids)
# global attention on cls token
global_attention_mask[:, 0] = 1
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
# head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
text_feats = sequence_output[:, 0, :]
text_feats = self.dropout(text_feats)
        combined_feats = self.tabular_combiner(text_feats,
                                               cat_feats,
                                               numerical_feats)
        # Experimental keyword-attention branch. KeyAttention and LambdaLayer
        # are assumed to be project-level helpers imported elsewhere in this
        # module; the mask handling, key_num derivation, and concatenation
        # axis below are best-guess repairs of the original draft, which
        # referenced several undefined names.
        ans_emb = self.embedding_layer(answer_tokens)
        keys_emb = self.embedding_layer(key_tokens)
        att_layer = KeyAttention(
            name='attention',
            op='dot',
            seed=0,
            emb_dim=300,
            word_att_pool='mean',
            merge_ans_key='concat',
            beta=False
        )
        key_num = key_tokens.shape[1]  # assumes keys shaped (batch, key_num, seq_len)
        fea_att_list = []
        attentions = {}
        for i in range(key_num):
            # bind i at definition time to avoid the late-binding closure bug
            t_k = LambdaLayer(lambda x, i=i: x[:, i], name='key_%d' % i)(keys_emb)
            t_k_m = LambdaLayer(lambda x, i=i: x[:, i], name='ans_%d' % i)(key_mask)
            f, *att_rtn = att_layer([ans_emb, answer_mask, t_k, t_k_m])
            fea_att_list.append(f)
            for i_a_r, a_r in enumerate(att_rtn):
                attentions.setdefault(i_a_r, []).append(a_r)
        # keyword-attention features; not yet fed into the classifier
        fea_rubric = torch.cat(fea_att_list, dim=-1)
loss, logits, classifier_layer_outputs = hf_loss_func(combined_feats,
self.tabular_classifier,
labels,
self.num_labels,
class_weights)
return loss, logits, classifier_layer_outputs
| 48.570487
| 152
| 0.606999
|
55eccd8de3593950e9094d97a36fd2d8aa7100e4
| 1,874
|
py
|
Python
|
iree/compiler/API/python/test/transforms/ireec/compile_sample_module.py
|
dcaballe/iree
|
e73f0505831b5c29ed5d97537472b6e2068eed7f
|
[
"Apache-2.0"
] | null | null | null |
iree/compiler/API/python/test/transforms/ireec/compile_sample_module.py
|
dcaballe/iree
|
e73f0505831b5c29ed5d97537472b6e2068eed7f
|
[
"Apache-2.0"
] | null | null | null |
iree/compiler/API/python/test/transforms/ireec/compile_sample_module.py
|
dcaballe/iree
|
e73f0505831b5c29ed5d97537472b6e2068eed7f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import io
import subprocess
from iree.compiler import ir
from iree.compiler import passmanager
from iree.compiler.transforms import ireec
# The compiler re-exports API access to a number of dialects. If one of these
# fails to import, it indicates a build issue.
from iree.compiler.dialects import arith
from iree.compiler.dialects import chlo
from iree.compiler.dialects import mhlo
from iree.compiler.dialects import iree_input
from iree.compiler.dialects import builtin
from iree.compiler.dialects import linalg
from iree.compiler.dialects import math
from iree.compiler.dialects import memref
from iree.compiler.dialects import shape
from iree.compiler.dialects import tensor
from iree.compiler.dialects import tosa
from iree.compiler.dialects import vector
# Test the compiler API.
with ir.Context() as ctx:
ireec.register_all_dialects(ctx)
input_module = ir.Module.parse(r"""
builtin.module {
func.func @fabs(%arg0: tensor<1x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4x4xf32> {
%0 = chlo.broadcast_add %arg0, %arg1 : (tensor<1x4xf32>, tensor<4x1xf32>) -> tensor<4x4xf32>
%1 = "mhlo.abs"(%0) : (tensor<4x4xf32>) -> tensor<4x4xf32>
return %1 : tensor<4x4xf32>
}
}
""")
options = ireec.CompilerOptions("--iree-hal-target-backends=cpu")
print(options)
pm = passmanager.PassManager()
ireec.build_mhlo_import_pass_pipeline(pm)
ireec.build_iree_vm_pass_pipeline(options, pm)
pm.run(input_module)
print(input_module)
bytecode_io = io.BytesIO()
ireec.translate_module_to_vm_bytecode(options, input_module, bytecode_io)
print(f"Bytecode module len = {len(bytecode_io.getbuffer())}")
| 34.703704
| 100
| 0.759872
|
b035632a13e8e2dbf500075afcc2782555a2d6e8
| 9,276
|
py
|
Python
|
electrum_ltc/gui/qt/paytoedit.py
|
jakesum/electrum-ltc
|
8bc205e086210e6ec40dbc0b8f46c9fc74c47121
|
[
"MIT"
] | null | null | null |
electrum_ltc/gui/qt/paytoedit.py
|
jakesum/electrum-ltc
|
8bc205e086210e6ec40dbc0b8f46c9fc74c47121
|
[
"MIT"
] | null | null | null |
electrum_ltc/gui/qt/paytoedit.py
|
jakesum/electrum-ltc
|
8bc205e086210e6ec40dbc0b8f46c9fc74c47121
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from decimal import Decimal
from typing import NamedTuple, Sequence, Optional, List
from PyQt5.QtGui import QFontMetrics
from electrum_ltc import bitcoin
from electrum_ltc.util import bfh
from electrum_ltc.transaction import push_script, PartialTxOutput
from electrum_ltc.bitcoin import opcodes
from electrum_ltc.logging import Logger
from electrum_ltc.lnaddr import LnDecodeException
from .qrtextedit import ScanQRTextEdit
from .completion_text_edit import CompletionTextEdit
from . import util
RE_ALIAS = r'(.*?)\s*\<([0-9A-Za-z]{1,})\>'
frozen_style = "QWidget {border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToLineError(NamedTuple):
idx: int # index of line
line_content: str
exc: Exception
class PayToEdit(CompletionTextEdit, ScanQRTextEdit, Logger):
def __init__(self, win):
CompletionTextEdit.__init__(self)
ScanQRTextEdit.__init__(self)
Logger.__init__(self)
self.win = win
self.amount_edit = win.amount_e
self.document().contentsChanged.connect(self.update_size)
self.heightMin = 0
self.heightMax = 150
self.c = None
self.textChanged.connect(self.check_text)
self.outputs = [] # type: List[PartialTxOutput]
self.errors = [] # type: Sequence[PayToLineError]
self.is_pr = False
self.is_alias = False
self.update_size()
self.payto_scriptpubkey = None # type: Optional[bytes]
self.lightning_invoice = None
self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
def setGreen(self):
self.setStyleSheet(util.ColorScheme.GREEN.as_stylesheet(True))
def setExpired(self):
self.setStyleSheet(util.ColorScheme.RED.as_stylesheet(True))
def parse_address_and_amount(self, line) -> PartialTxOutput:
x, y = line.split(',')
scriptpubkey = self.parse_output(x)
amount = self.parse_amount(y)
return PartialTxOutput(scriptpubkey=scriptpubkey, value=amount)
def parse_output(self, x) -> bytes:
try:
address = self.parse_address(x)
return bfh(bitcoin.address_to_script(address))
except:
script = self.parse_script(x)
return bfh(script)
def parse_script(self, x):
script = ''
for word in x.split():
if word[0:3] == 'OP_':
opcode_int = opcodes[word]
assert opcode_int < 256 # opcode is single-byte
script += bitcoin.int_to_hex(opcode_int)
else:
bfh(word) # to test it is hex data
script += push_script(word)
return script
def parse_amount(self, x):
if x.strip() == '!':
return '!'
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
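    # Worked example: with a display denomination of 8 decimal places,
    # parse_amount("1.5") returns int(10**8 * Decimal("1.5")) == 150000000,
    # while the literal "!" is passed through and means "send maximum".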
def parse_address(self, line):
r = line.strip()
m = re.match('^'+RE_ALIAS+'$', r)
address = str(m.group(2) if m else r)
assert bitcoin.is_address(address)
return address
def check_text(self):
self.errors = []
if self.is_pr:
return
# filter out empty lines
lines = [i for i in self.lines() if i]
outputs = [] # type: List[PartialTxOutput]
total = 0
self.payto_scriptpubkey = None
self.lightning_invoice = None
if len(lines) == 1:
data = lines[0]
if data.startswith("litecoin:"):
self.win.pay_to_URI(data)
return
lower = data.lower()
if lower.startswith("lightning:ln"):
lower = lower[10:]
if lower.startswith("ln"):
try:
self.win.parse_lightning_invoice(lower)
except LnDecodeException as e:
self.errors.append(PayToLineError(idx=0, line_content=data, exc=e))
else:
self.lightning_invoice = lower
return
try:
self.payto_scriptpubkey = self.parse_output(data)
except:
pass
if self.payto_scriptpubkey:
self.win.set_onchain(True)
self.win.lock_amount(False)
return
is_max = False
for i, line in enumerate(lines):
try:
output = self.parse_address_and_amount(line)
except Exception as e:
self.errors.append(PayToLineError(idx=i, line_content=line.strip(), exc=e))
continue
outputs.append(output)
if output.value == '!':
is_max = True
else:
total += output.value
if outputs:
self.win.set_onchain(True)
self.win.max_button.setChecked(is_max)
self.outputs = outputs
self.payto_scriptpubkey = None
if self.win.max_button.isChecked():
self.win.do_update_fee()
else:
self.amount_edit.setAmount(total if outputs else None)
self.win.lock_amount(total or len(lines)>1)
def get_errors(self) -> Sequence[PayToLineError]:
return self.errors
def get_destination_scriptpubkey(self) -> Optional[bytes]:
return self.payto_scriptpubkey
def get_outputs(self, is_max):
if self.payto_scriptpubkey:
if is_max:
amount = '!'
else:
amount = self.amount_edit.get_amount()
self.outputs = [PartialTxOutput(scriptpubkey=self.payto_scriptpubkey, value=amount)]
return self.outputs[:]
def lines(self):
return self.toPlainText().split('\n')
def is_multiline(self):
return len(self.lines()) > 1
def paytomany(self):
self.setText("\n\n\n")
self.update_size()
def update_size(self):
lineHeight = QFontMetrics(self.document().defaultFont()).height()
docHeight = self.document().size().height()
h = docHeight * lineHeight + 11
h = min(max(h, self.heightMin), self.heightMax)
self.setMinimumHeight(h)
self.setMaximumHeight(h)
self.verticalScrollBar().hide()
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data.startswith("litecoin:"):
self.win.pay_to_URI(data)
# TODO: update fee
def resolve(self):
self.is_alias = False
if self.hasFocus():
return
if self.is_multiline(): # only supports single line entries atm
return
if self.is_pr:
return
key = str(self.toPlainText())
key = key.strip() # strip whitespaces
if key == self.previous_payto:
return
self.previous_payto = key
if not (('.' in key) and (not '<' in key) and (not ' ' in key)):
return
parts = key.split(sep=',') # assuming single line
if parts and len(parts) > 0 and bitcoin.is_address(parts[0]):
return
try:
data = self.win.contacts.resolve(key)
except Exception as e:
self.logger.info(f'error resolving address/alias: {repr(e)}')
return
if not data:
return
self.is_alias = True
address = data.get('address')
name = data.get('name')
new_url = key + ' <' + address + '>'
self.setText(new_url)
self.previous_payto = new_url
#if self.win.config.get('openalias_autoadd') == 'checked':
self.win.contacts[key] = ('openalias', name)
self.win.contact_list.update()
self.setFrozen(True)
if data.get('type') == 'openalias':
self.validated = data.get('validated')
if self.validated:
self.setGreen()
else:
self.setExpired()
else:
self.validated = None
| 33.730909
| 96
| 0.604355
|
7c00f00f0af44752aa86f0af3bebb2e82d7b0717
| 4,181
|
py
|
Python
|
examples/Python/ReconstructionSystem/sensors/azure_kinect_recorder.py
|
yuki-inaho/Open3D
|
cbbee4e19a45551ada223f491e667f1868115ead
|
[
"MIT"
] | 113
|
2018-11-12T03:32:52.000Z
|
2022-03-29T13:58:54.000Z
|
examples/Python/ReconstructionSystem/sensors/azure_kinect_recorder.py
|
llp45135/Open3D
|
ff7003d542c4fcf88a2d9e7fe08508b3e52dc702
|
[
"MIT"
] | 3
|
2018-10-19T12:09:57.000Z
|
2020-04-22T11:55:54.000Z
|
examples/Python/ReconstructionSystem/sensors/azure_kinect_recorder.py
|
llp45135/Open3D
|
ff7003d542c4fcf88a2d9e7fe08508b3e52dc702
|
[
"MIT"
] | 27
|
2018-10-16T20:01:18.000Z
|
2021-07-26T08:02:20.000Z
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/ReconstructionSystem/sensors/azure_kinect_recorder.py
import argparse
import datetime
import open3d as o3d
class RecorderWithCallback:
def __init__(self, config, device, filename, align_depth_to_color):
# Global flags
self.flag_exit = False
self.flag_record = False
self.filename = filename
self.align_depth_to_color = align_depth_to_color
self.recorder = o3d.io.AzureKinectRecorder(config, device)
if not self.recorder.init_sensor():
raise RuntimeError('Failed to connect to sensor')
def escape_callback(self, vis):
self.flag_exit = True
if self.recorder.is_record_created():
print('Recording finished.')
else:
print('Nothing has been recorded.')
return False
def space_callback(self, vis):
if self.flag_record:
print('Recording paused. '
                  'Press [SPACE] to continue. '
'Press [ESC] to save and exit.')
self.flag_record = False
elif not self.recorder.is_record_created():
if self.recorder.open_record(self.filename):
print('Recording started. '
'Press [SPACE] to pause. '
'Press [ESC] to save and exit.')
self.flag_record = True
else:
print('Recording resumed, video may be discontinuous. '
'Press [SPACE] to pause. '
'Press [ESC] to save and exit.')
self.flag_record = True
return False
def run(self):
glfw_key_escape = 256
glfw_key_space = 32
vis = o3d.visualization.VisualizerWithKeyCallback()
vis.register_key_callback(glfw_key_escape, self.escape_callback)
vis.register_key_callback(glfw_key_space, self.space_callback)
vis.create_window('recorder', 1920, 540)
print("Recorder initialized. Press [SPACE] to start. "
"Press [ESC] to save and exit.")
vis_geometry_added = False
while not self.flag_exit:
rgbd = self.recorder.record_frame(self.flag_record,
self.align_depth_to_color)
if rgbd is None:
continue
if not vis_geometry_added:
vis.add_geometry(rgbd)
vis_geometry_added = True
vis.update_geometry(rgbd)
vis.poll_events()
vis.update_renderer()
self.recorder.close_record()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Azure Kinect MKV recorder.')
parser.add_argument('--config', type=str, help='input json kinect config')
parser.add_argument('--output', type=str, help='output mkv filename')
parser.add_argument('--list',
action='store_true',
help='list available azure kinect sensors')
parser.add_argument('--device',
type=int,
default=0,
help='input kinect device id')
parser.add_argument('-a',
'--align_depth_to_color',
action='store_true',
help='enable align depth image to color')
args = parser.parse_args()
if args.list:
o3d.io.AzureKinectSensor.list_devices()
exit()
if args.config is not None:
config = o3d.io.read_azure_kinect_sensor_config(args.config)
else:
config = o3d.io.AzureKinectSensorConfig()
if args.output is not None:
filename = args.output
else:
filename = '{date:%Y-%m-%d-%H-%M-%S}.mkv'.format(
date=datetime.datetime.now())
print('Prepare writing to {}'.format(filename))
device = args.device
if device < 0 or device > 255:
print('Unsupported device id, fall back to 0')
device = 0
r = RecorderWithCallback(config, device, filename,
args.align_depth_to_color)
r.run()
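# Example invocations (flags as defined by the argparse setup above):
#
#   python azure_kinect_recorder.py --list
#   python azure_kinect_recorder.py --device 0 -a --output capture.mkv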
| 33.448
| 78
| 0.586463
|
ea54fce9273d175db856d4ad57f983d247c41de8
| 4,135
|
py
|
Python
|
pde/pdes/wave.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | 163
|
2020-03-30T09:26:32.000Z
|
2022-03-31T12:22:18.000Z
|
pde/pdes/wave.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | 127
|
2020-03-31T15:33:15.000Z
|
2022-03-30T19:27:47.000Z
|
pde/pdes/wave.py
|
tefavidal/py-pde
|
427be3f2f4b096775f46111cd5a5d05af50e94bc
|
[
"MIT"
] | 37
|
2020-03-10T18:54:22.000Z
|
2022-03-29T14:45:40.000Z
|
"""
A simple wave equation
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from typing import Callable, Dict
import numpy as np
from ..fields import FieldCollection, ScalarField
from ..grids.boundaries.axes import BoundariesData
from ..tools.docstrings import fill_in_docstring
from ..tools.numba import jit, nb
from .base import PDEBase, expr_prod
class WavePDE(PDEBase):
r""" A simple wave equation
The mathematical definition,
.. math::
\partial_t^2 u = c^2 \nabla^2 u
is implemented as two first-order equations:
.. math::
\partial_t u &= v \\
\partial_t v &= c^2 \nabla^2 u
    where :math:`u` is the density field and :math:`c` sets the wave speed.
"""
explicit_time_dependence = False
@fill_in_docstring
def __init__(self, speed: float = 1, bc: BoundariesData = "natural"):
"""
Args:
speed (float):
The speed :math:`c` of the wave
bc:
The boundary conditions applied to the field.
{ARG_BOUNDARIES}
"""
super().__init__()
self.speed = speed
self.bc = bc
def get_initial_condition(self, u: ScalarField, v: ScalarField = None):
"""create a suitable initial condition
Args:
u (:class:`~pde.fields.ScalarField`):
The initial density on the grid
v (:class:`~pde.fields.ScalarField`, optional):
The initial rate of change. This is assumed to be zero if the
value is omitted.
Returns:
:class:`~pde.fields.FieldCollection`:
The combined fields u and v, suitable for the simulation
"""
if v is None:
v = ScalarField(u.grid)
return FieldCollection([u, v])
@property
def expressions(self) -> Dict[str, str]:
"""dict: the expressions of the right hand side of this PDE"""
return {"u": "v", "v": expr_prod(self.speed ** 2, "laplace(u)")}
def evolution_rate( # type: ignore
self,
state: FieldCollection,
t: float = 0,
) -> FieldCollection:
"""evaluate the right hand side of the PDE
Args:
state (:class:`~pde.fields.FieldCollection`):
The fields :math:`u` and :math:`v` distribution
t (float):
The current time point
Returns:
:class:`~pde.fields.FieldCollection`:
Scalar field describing the evolution rate of the PDE
"""
assert isinstance(state, FieldCollection), "`state` must be FieldCollection"
assert len(state) == 2, "`state` must contain two fields"
u, v = state
u_t = v.copy()
v_t = self.speed ** 2 * u.laplace(self.bc) # type: ignore
return FieldCollection([u_t, v_t])
def _make_pde_rhs_numba( # type: ignore
self, state: FieldCollection
) -> Callable[[np.ndarray, float], np.ndarray]:
"""create a compiled function evaluating the right hand side of the PDE
Args:
state (:class:`~pde.fields.FieldCollection`):
An example for the state defining the grid and data types
Returns:
A function with signature `(state_data, t)`, which can be called
with an instance of :class:`~numpy.ndarray` of the state data and
the time to obtained an instance of :class:`~numpy.ndarray` giving
the evolution rate.
"""
arr_type = nb.typeof(state.data)
signature = arr_type(arr_type, nb.double)
speed2 = self.speed ** 2
laplace = state.grid.make_operator("laplace", bc=self.bc)
@jit(signature)
def pde_rhs(state_data: np.ndarray, t: float):
"""compiled helper function evaluating right hand side"""
rate = np.empty_like(state_data)
rate[0] = state_data[1]
rate[1][:] = laplace(state_data[0])
rate[1] *= speed2
return rate
return pde_rhs # type: ignore
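# Minimal usage sketch (illustrative; assumes the py-pde package is
# importable as `pde` and that WavePDE is re-exported by `pde.pdes`):
#
#     from pde import UnitGrid, ScalarField
#     from pde.pdes import WavePDE
#
#     grid = UnitGrid([64, 64])
#     u = ScalarField.random_uniform(grid)
#     eq = WavePDE(speed=1.5)
#     state = eq.get_initial_condition(u)   # v defaults to zero
#     result = eq.solve(state, t_range=10)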
| 31.564885
| 84
| 0.579686
|
8ed664ce2ce7703fc7d24abafd23b4083023d095
| 3,734
|
py
|
Python
|
coverage-4.2-py2.7-linux-armv7l.egg/coverage/debug.py
|
NextGenTechBar/twandora
|
f626717a5580f82250bbe66d4ebc357e0882382c
|
[
"MIT"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
coverage-4.2-py2.7-linux-armv7l.egg/coverage/debug.py
|
NextGenTechBar/twandora
|
f626717a5580f82250bbe66d4ebc357e0882382c
|
[
"MIT"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
coverage-4.2-py2.7-linux-armv7l.egg/coverage/debug.py
|
NextGenTechBar/twandora
|
f626717a5580f82250bbe66d4ebc357e0882382c
|
[
"MIT"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Control of and utilities for debugging."""
import inspect
import os
import sys
from coverage.misc import isolate_module
os = isolate_module(os)
# When debugging, it can be helpful to force some options, especially when
# debugging the configuration mechanisms you usually use to control debugging!
# This is a list of forced debugging options.
FORCED_DEBUG = []
# A hack for debugging testing in sub-processes.
_TEST_NAME_FILE = "" # "/tmp/covtest.txt"
class DebugControl(object):
"""Control and output for debugging."""
def __init__(self, options, output):
"""Configure the options and output file for debugging."""
self.options = options
self.output = output
def __repr__(self):
return "<DebugControl options=%r output=%r>" % (self.options, self.output)
def should(self, option):
"""Decide whether to output debug information in category `option`."""
return (option in self.options or option in FORCED_DEBUG)
def write(self, msg):
"""Write a line of debug output."""
if self.should('pid'):
msg = "pid %5d: %s" % (os.getpid(), msg)
self.output.write(msg+"\n")
if self.should('callers'):
dump_stack_frames(out=self.output)
self.output.flush()
def write_formatted_info(self, header, info):
"""Write a sequence of (label,data) pairs nicely."""
self.write(info_header(header))
for line in info_formatter(info):
self.write(" %s" % line)
def info_header(label):
"""Make a nice header string."""
return "--{0:-<60s}".format(" "+label+" ")
def info_formatter(info):
"""Produce a sequence of formatted lines from info.
`info` is a sequence of pairs (label, data). The produced lines are
nicely formatted, ready to print.
"""
info = list(info)
if not info:
return
label_len = max(len(l) for l, _d in info)
for label, data in info:
if data == []:
data = "-none-"
if isinstance(data, (list, set, tuple)):
prefix = "%*s:" % (label_len, label)
for e in data:
yield "%*s %s" % (label_len+1, prefix, e)
prefix = ""
else:
yield "%*s: %s" % (label_len, label, data)
def short_stack(limit=None): # pragma: debugging
"""Return a string summarizing the call stack.
The string is multi-line, with one line per stack frame. Each line shows
the function name, the file name, and the line number:
...
start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
...
`limit` is the number of frames to include, defaulting to all of them.
"""
stack = inspect.stack()[limit:0:-1]
return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack)
def dump_stack_frames(limit=None, out=None): # pragma: debugging
"""Print a summary of the stack to stdout, or some place else."""
out = out or sys.stdout
out.write(short_stack(limit=limit))
out.write("\n")
def log(msg, stack=False): # pragma: debugging
"""Write a log message as forcefully as possible."""
with open("/tmp/covlog.txt", "a") as f:
f.write("{pid}: {msg}\n".format(pid=os.getpid(), msg=msg))
if stack:
dump_stack_frames(out=f)
| 32.469565
| 82
| 0.614355
|
cf3ef70d19c131dde23a104072bf8395cbead6a2
| 581
|
py
|
Python
|
nomadgram/images/migrations/0003_auto_20190312_0234.py
|
hyeseonii/nomadgram
|
5aa3d4bad038e59f95e618168b1ea01af99eff15
|
[
"MIT"
] | null | null | null |
nomadgram/images/migrations/0003_auto_20190312_0234.py
|
hyeseonii/nomadgram
|
5aa3d4bad038e59f95e618168b1ea01af99eff15
|
[
"MIT"
] | 5
|
2020-06-05T20:29:02.000Z
|
2021-09-08T00:58:35.000Z
|
nomadgram/images/migrations/0003_auto_20190312_0234.py
|
hyeseonii/nomadgram
|
5aa3d4bad038e59f95e618168b1ea01af99eff15
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.13 on 2019-03-11 17:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0002_auto_20190311_2224'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='Image',
),
migrations.AddField(
model_name='comment',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='images.Image'),
),
]
| 24.208333
| 111
| 0.605852
|
b421d66c50fe97398de43ba410e1e1b500ed5914
| 4,340
|
py
|
Python
|
DeepSpeech/examples/vad_transcriber/audioTranscript_cmd.py
|
wingerse/audio_adversarial_examples
|
f747efea967e64351e9d5dc1b36fa1ca8d52b066
|
[
"BSD-2-Clause"
] | null | null | null |
DeepSpeech/examples/vad_transcriber/audioTranscript_cmd.py
|
wingerse/audio_adversarial_examples
|
f747efea967e64351e9d5dc1b36fa1ca8d52b066
|
[
"BSD-2-Clause"
] | null | null | null |
DeepSpeech/examples/vad_transcriber/audioTranscript_cmd.py
|
wingerse/audio_adversarial_examples
|
f747efea967e64351e9d5dc1b36fa1ca8d52b066
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import os
import logging
import argparse
import subprocess
import shlex
import numpy as np
import wavTranscriber
# Debug helpers
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
def main(args):
parser = argparse.ArgumentParser(description='Transcribe long audio files using webRTC VAD or use the streaming interface')
parser.add_argument('--aggressive', type=int, choices=range(4), required=False,
                        help='Determines how aggressively non-speech is filtered out (integer from 0 to 3)')
parser.add_argument('--audio', required=False,
help='Path to the audio file to run (WAV format)')
parser.add_argument('--model', required=True,
help='Path to directory that contains all model files (output_graph, lm, trie and alphabet)')
parser.add_argument('--stream', required=False, action='store_true',
help='To use deepspeech streaming interface')
args = parser.parse_args()
if args.stream is True and len(sys.argv[1:]) == 3:
print("Opening mic for streaming")
elif args.audio is not None and len(sys.argv[1:]) == 6:
logging.debug("Transcribing audio file @ %s" % args.audio)
else:
parser.print_help()
parser.exit()
# Point to a path containing the pre-trained models & resolve ~ if used
dirName = os.path.expanduser(args.model)
# Resolve all the paths of model files
output_graph, alphabet, lm, trie = wavTranscriber.resolve_models(dirName)
    # Load output_graph, alphabet, lm and trie
model_retval = wavTranscriber.load_model(output_graph, alphabet, lm, trie)
if args.audio is not None:
title_names = ['Filename', 'Duration(s)', 'Inference Time(s)', 'Model Load Time(s)', 'LM Load Time(s)']
print("\n%-30s %-20s %-20s %-20s %s" % (title_names[0], title_names[1], title_names[2], title_names[3], title_names[4]))
inference_time = 0.0
# Run VAD on the input file
waveFile = args.audio
segments, sample_rate, audio_length = wavTranscriber.vad_segment_generator(waveFile, args.aggressive)
        # str.rstrip strips a character set rather than a suffix, so build
        # the transcript path with os.path.splitext instead
        transcript_path = os.path.splitext(waveFile)[0] + ".txt"
        f = open(transcript_path, 'w')
        logging.debug("Saving Transcript @: %s" % transcript_path)
for i, segment in enumerate(segments):
# Run deepspeech on the chunk that just completed VAD
logging.debug("Processing chunk %002d" % (i,))
audio = np.frombuffer(segment, dtype=np.int16)
output = wavTranscriber.stt(model_retval[0], audio, sample_rate)
inference_time += output[1]
logging.debug("Transcript: %s" % output[0])
f.write(output[0] + " ")
# Summary of the files processed
f.close()
# Extract filename from the full file path
        filename, ext = os.path.splitext(os.path.basename(waveFile))
logging.debug("************************************************************************************************************")
logging.debug("%-30s %-20s %-20s %-20s %s" % (title_names[0], title_names[1], title_names[2], title_names[3], title_names[4]))
logging.debug("%-30s %-20.3f %-20.3f %-20.3f %-0.3f" % (filename + ext, audio_length, inference_time, model_retval[1], model_retval[2]))
logging.debug("************************************************************************************************************")
print("%-30s %-20.3f %-20.3f %-20.3f %-0.3f" % (filename + ext, audio_length, inference_time, model_retval[1], model_retval[2]))
else:
sctx = model_retval[0].setupStream()
subproc = subprocess.Popen(shlex.split('rec -q -V0 -e signed -L -c 1 -b 16 -r 16k -t raw - gain -2'),
stdout=subprocess.PIPE,
bufsize=0)
print('You can start speaking now. Press Control-C to stop recording.')
try:
while True:
data = subproc.stdout.read(512)
model_retval[0].feedAudioContent(sctx, np.frombuffer(data, np.int16))
except KeyboardInterrupt:
print('Transcription: ', model_retval[0].finishStream(sctx))
subproc.terminate()
subproc.wait()
if __name__ == '__main__':
main(sys.argv[1:])
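# Example invocations (matching the argument-count checks in main()):
#
#   python audioTranscript_cmd.py --model models/ --stream
#   python audioTranscript_cmd.py --model models/ --audio speech.wav --aggressive 1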
| 46.666667
| 144
| 0.593779
|
79af22d62e19765d358a42bdbe956e8e584d7f61
| 2,370
|
py
|
Python
|
Bayesian/bayesian_linear_regression.py
|
perathambkk/ml-techniques
|
5d6fd122322342c0b47dc65d09c4425fd73f2ea9
|
[
"MIT"
] | null | null | null |
Bayesian/bayesian_linear_regression.py
|
perathambkk/ml-techniques
|
5d6fd122322342c0b47dc65d09c4425fd73f2ea9
|
[
"MIT"
] | null | null | null |
Bayesian/bayesian_linear_regression.py
|
perathambkk/ml-techniques
|
5d6fd122322342c0b47dc65d09c4425fd73f2ea9
|
[
"MIT"
] | null | null | null |
"""
Author: Peratham Wiriyathammabhum
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
eps = np.finfo(float).eps
class BayesLinReg(object):
"""
Bayesian linear regression.
See: Pattern Recognition and Machine Learning by Christopher Bishop ch.3.
https://www.microsoft.com/en-us/research/people/cmbishop/prml-book/
Blogs: https://maxhalford.github.io/blog/bayesian-linear-regression/
"""
def __init__(self, num_feas, alpha, beta):
self.num_feas = num_feas
self.alpha = alpha
self.beta = beta
self.mean = np.zeros((num_feas,1))
self.invcov_mat = np.identity(num_feas) / alpha
return
def update(self, x, y):
"""
eq 3.50-3.51 in Bishop
"""
invcov_mat_n = self.invcov_mat + self.beta * np.outer(x, x)
mean_n = np.matmul(np.linalg.inv(invcov_mat_n), (np.matmul(self.invcov_mat, self.mean) + self.beta* np.expand_dims(np.dot(y, x), axis=1)))
assert mean_n.shape == self.mean.shape
self.mean = mean_n
self.invcov_mat = invcov_mat_n
return self
def predict(self, x):
"""
eq 3.58-3.59 in Bishop
"""
pred_mean = np.dot(x, self.mean)
sigma_squared_x = 1./self.beta + np.dot(np.dot(x, np.linalg.inv(self.invcov_mat)), x.T)
return stats.norm(loc=pred_mean.T, scale=sigma_squared_x ** .5)
@property
def weights_dist(self):
        # scipy expects a 1-D mean vector; self.mean is stored as (num_feas, 1)
        return stats.multivariate_normal(
            mean=self.mean.ravel(), cov=np.linalg.inv(self.invcov_mat))
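# The class implements the Gaussian posterior recursion from Bishop (PRML):
#   S_n^{-1} = S_{n-1}^{-1} + beta * x x^T              (eq. 3.51)
#   m_n      = S_n (S_{n-1}^{-1} m_{n-1} + beta * y x)  (eq. 3.50)
# and the predictive density N(m_n^T x, 1/beta + x^T S_n x) (eqs. 3.58-3.59).
#
# Minimal usage sketch:
#   model = BayesLinReg(num_feas=2, alpha=1.0, beta=25.0)
#   model.update(np.array([1.0, 0.5]), 0.8)
#   dist = model.predict(np.array([1.0, 0.5]))
#   print(dist.mean(), dist.std())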
def main(opts):
from sklearn import metrics
alpha = opts['alpha']
beta = opts['beta']
from sklearn import datasets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
    # standardize features to zero mean and unit variance
X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
model = BayesLinReg(num_feas=X.shape[1], alpha=alpha, beta=beta)
y_pred = np.empty(len(y))
for i, (xi, yi) in enumerate(zip(X, y)): # one at a time
y_pred[i] = model.predict(xi).mean()
model.update(xi, yi)
print(metrics.mean_absolute_error(y, y_pred))
# plot
# input("Press Enter to continue...")
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='run bayesian linear regression.')
parser.add_argument('--alpha', dest='alpha',
help='alpha',
default=.3, type=float)
parser.add_argument('--beta', dest='beta',
help='beta',
default=1, type=float)
args = parser.parse_args()
opts = vars(args)
main(opts)
| 26.043956
| 140
| 0.694093
|
52f70fbe982b13a35ac2f1a8f13a5c4d790077b1
| 158
|
py
|
Python
|
k2mosaic/version.py
|
amcody/k2mosaic
|
79733ab86249825868d5d6f6362a98656b8482f0
|
[
"MIT"
] | 13
|
2015-12-17T02:47:11.000Z
|
2021-09-07T21:13:21.000Z
|
k2mosaic/version.py
|
amcody/k2mosaic
|
79733ab86249825868d5d6f6362a98656b8482f0
|
[
"MIT"
] | 12
|
2015-12-17T02:50:49.000Z
|
2019-01-24T19:31:15.000Z
|
k2mosaic/version.py
|
amcody/k2mosaic
|
79733ab86249825868d5d6f6362a98656b8482f0
|
[
"MIT"
] | 18
|
2016-05-20T06:41:06.000Z
|
2021-09-08T21:15:02.000Z
|
# It is important to store the version number in a separate file
# so that we can read it from setup.py without importing the package
__version__ = "2.1.dev"
| 39.5
| 68
| 0.765823
|
1f9968ab60d2e3497cf55536a9d02b6fb47eb340
| 1,472
|
py
|
Python
|
AddExcelData/AddExcelData.py
|
IndyMPO/IndyGeoprocessingTools
|
968f9befc37252e065e8d8085c0d10f17a871152
|
[
"Apache-2.0"
] | null | null | null |
AddExcelData/AddExcelData.py
|
IndyMPO/IndyGeoprocessingTools
|
968f9befc37252e065e8d8085c0d10f17a871152
|
[
"Apache-2.0"
] | 3
|
2016-08-30T16:10:20.000Z
|
2016-09-06T15:32:44.000Z
|
AddExcelData/AddExcelData.py
|
IndyMPO/IndyGeoprocessingTools
|
968f9befc37252e065e8d8085c0d10f17a871152
|
[
"Apache-2.0"
] | null | null | null |
#This script copyright 2017 Indianapolis Metropolitan Planning Organization
import arcpy
import pandas as pd
shapefile = arcpy.GetParameterAsText(0)
id_field = arcpy.GetParameterAsText(1)
excel_file = arcpy.GetParameterAsText(2)
sheet = arcpy.GetParameter(3)
#Map to determine each new field's data type based on the data frame's column's data type
dtype_map = {'int64': 'LONG',
'float32': 'FLOAT',
'float64': 'DOUBLE'}
#Read in data
data = pd.read_excel(excel_file, sheet, index_col = 0)
#Add fields based on columns of the table
new_fields = []
for col in data.columns:
#new_field = sheet + col
if len(col) > 10:
arcpy.AddMessage('WARNING: Truncating field {0} to {1}'.format(col, col[:10]))
new_field = col[:10]
else:
new_field = col
arcpy.AddMessage(new_field)
new_fields += [new_field]
if new_field not in [field.name for field in arcpy.ListFields(shapefile)]:
arcpy.AddField_management(shapefile, new_field, dtype_map[str(data[col].dtype)], field_is_nullable = True)
#Write values in data frame to attribute table
rows = arcpy.da.UpdateCursor(shapefile, field_names = [id_field] + new_fields)
for row in rows:
try:
for i in range(len(new_fields)):
row[i+1] = data[data.columns[i]][row[0]]
rows.updateRow(row)
except KeyError:
arcpy.AddMessage('WARNING: Data for "{}" is not present in the Excel file.'.format(row[0]))
del row
del rows
| 34.232558
| 114
| 0.688859
|
e33ddc20d66c5185a5fce266e23aaa2181291511
| 1,230
|
py
|
Python
|
jdcloud_sdk/services/vod/apis/DeleteSnapshotTemplateRequest.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
jdcloud_sdk/services/vod/apis/DeleteSnapshotTemplateRequest.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
jdcloud_sdk/services/vod/apis/DeleteSnapshotTemplateRequest.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DeleteSnapshotTemplateRequest(JDCloudRequest):
"""
    Delete a snapshot template
"""
def __init__(self, parameters, header=None, version="v1"):
super(DeleteSnapshotTemplateRequest, self).__init__(
'/snapshotTemplates/{templateId}', 'DELETE', header, version)
self.parameters = parameters
class DeleteSnapshotTemplateParameters(object):
def __init__(self, templateId, ):
"""
        :param templateId: template ID
"""
self.templateId = templateId
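# Minimal usage sketch (client construction and credentials omitted):
#
#     params = DeleteSnapshotTemplateParameters(templateId='12345')
#     request = DeleteSnapshotTemplateRequest(params)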
| 29.285714
| 75
| 0.721138
|
8e23ee323791607793f35ce51f325dac47f68e6d
| 10,260
|
py
|
Python
|
plaso/engine/artifact_filters.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | 1
|
2020-12-04T10:26:34.000Z
|
2020-12-04T10:26:34.000Z
|
plaso/engine/artifact_filters.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | null | null | null |
plaso/engine/artifact_filters.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Helper to create filters based on forensic artifact definitions."""
from __future__ import unicode_literals
from artifacts import definitions as artifact_types
from dfwinreg import registry_searcher
from dfvfs.helpers import file_system_searcher
from plaso.engine import filters_helper
from plaso.engine import logger
from plaso.engine import path_helper
class ArtifactDefinitionsFiltersHelper(filters_helper.CollectionFiltersHelper):
"""Helper to create collection filters based on artifact definitions.
Builds collection filters from forensic artifact definitions.
For more information about Forensic Artifacts see:
https://github.com/ForensicArtifacts/artifacts/blob/master/docs/Artifacts%20definition%20format%20and%20style%20guide.asciidoc
Attributes:
file_system_artifact_names (set[str]): names of artifacts definitions that
generated file system find specifications.
registry_artifact_names (set[str]): names of artifacts definitions that
generated Windows Registry find specifications.
"""
_COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES = frozenset([
'HKEY_CURRENT_USER',
'HKEY_LOCAL_MACHINE\\SYSTEM',
'HKEY_LOCAL_MACHINE\\SOFTWARE',
'HKEY_LOCAL_MACHINE\\SAM',
'HKEY_LOCAL_MACHINE\\SECURITY',
'HKEY_USERS'])
def __init__(self, artifacts_registry, knowledge_base):
"""Initializes an artifact definitions filters helper.
Args:
artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifact
definitions registry.
knowledge_base (KnowledgeBase): contains information from the source
data needed for filtering.
"""
super(ArtifactDefinitionsFiltersHelper, self).__init__()
self._artifacts_registry = artifacts_registry
self._knowledge_base = knowledge_base
self.file_system_artifact_names = set()
self.registry_artifact_names = set()
def _BuildFindSpecsFromArtifact(self, definition, environment_variables):
"""Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications.
"""
find_specs = []
for source in definition.sources:
if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE:
for path_entry in set(source.paths):
specifications = self._BuildFindSpecsFromFileSourcePath(
path_entry, source.separator, environment_variables,
self._knowledge_base.user_accounts)
find_specs.extend(specifications)
self.file_system_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
for key_path in set(source.keys):
if ArtifactDefinitionsFiltersHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
# TODO: Handle Registry Values Once Supported in dfwinreg.
# https://github.com/log2timeline/dfwinreg/issues/98
# Use set-comprehension to create a set of the source key paths.
key_paths = {key_value['key'] for key_value in source.key_value_pairs}
key_paths_string = ', '.join(key_paths)
logger.warning((
'Windows Registry values are not supported, extracting keys: '
'"{0!s}"').format(key_paths_string))
for key_path in key_paths:
if ArtifactDefinitionsFiltersHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):
for name in source.names:
specifications = self._BuildFindSpecsFromGroupName(
name, environment_variables)
find_specs.extend(specifications)
else:
logger.warning(
'Unsupported artifact definition source type: "{0:s}"'.format(
source.type_indicator))
return find_specs
def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):
"""Builds find specifications from a artifact group name.
Args:
group_name (str): artifact group name.
environment_variables (list[str]): environment variable attributes used to
dynamically populate environment variables in file and registry
artifacts.
Returns:
list[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no
artifact with the given name can be retrieved.
"""
definition = self._artifacts_registry.GetDefinitionByName(group_name)
if not definition:
return None
return self._BuildFindSpecsFromArtifact(definition, environment_variables)
def _BuildFindSpecsFromRegistrySourceKey(self, key_path):
"""Build find specifications from a Windows Registry source type.
Args:
key_path (str): Windows Registry key path defined by the source.
Returns:
list[dfwinreg.FindSpec]: find specifications for the Windows Registry
source type.
"""
find_specs = []
for key_path_glob in path_helper.PathHelper.ExpandGlobStars(key_path, '\\'):
logger.debug('building find spec from key path glob: {0:s}'.format(
key_path_glob))
key_path_glob_upper = key_path_glob.upper()
if key_path_glob_upper.startswith(
'HKEY_LOCAL_MACHINE\\SYSTEM\\CURRENTCONTROLSET'):
# Rewrite CurrentControlSet to ControlSet* for Windows NT.
key_path_glob = 'HKEY_LOCAL_MACHINE\\System\\ControlSet*{0:s}'.format(
key_path_glob[43:])
elif key_path_glob_upper.startswith('HKEY_USERS\\%%USERS.SID%%'):
key_path_glob = 'HKEY_CURRENT_USER{0:s}'.format(key_path_glob[26:])
find_spec = registry_searcher.FindSpec(key_path_glob=key_path_glob)
find_specs.append(find_spec)
return find_specs
def _BuildFindSpecsFromFileSourcePath(
self, source_path, path_separator, environment_variables, user_accounts):
"""Builds find specifications from a file source type.
Args:
source_path (str): file system path defined by the source.
path_separator (str): file system path segment separator.
environment_variables (list[str]): environment variable attributes used to
        dynamically populate environment variables in the file path.
user_accounts (list[str]): identified user accounts stored in the
knowledge base.
Returns:
list[dfvfs.FindSpec]: find specifications for the file source type.
"""
find_specs = []
for path_glob in path_helper.PathHelper.ExpandGlobStars(
source_path, path_separator):
logger.debug('building find spec from path glob: {0:s}'.format(
path_glob))
for path in path_helper.PathHelper.ExpandUsersVariablePath(
path_glob, path_separator, user_accounts):
logger.debug('building find spec from path: {0:s}'.format(path))
if '%' in path:
path = path_helper.PathHelper.ExpandWindowsPath(
path, environment_variables)
logger.debug('building find spec from expanded path: {0:s}'.format(
path))
if not path.startswith(path_separator):
logger.warning((
'The path filter must be defined as an absolute path: '
'"{0:s}"').format(path))
continue
try:
find_spec = file_system_searcher.FindSpec(
case_sensitive=False, location_glob=path,
location_separator=path_separator)
except ValueError as exception:
logger.error((
'Unable to build find specification for path: "{0:s}" with '
'error: {1!s}').format(path, exception))
continue
find_specs.append(find_spec)
return find_specs
def BuildFindSpecs(self, artifact_filter_names, environment_variables=None):
"""Builds find specifications from artifact definitions.
Args:
artifact_filter_names (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
environment_variables (Optional[list[EnvironmentVariableArtifact]]):
environment variables.
"""
find_specs = []
for name in artifact_filter_names:
definition = self._artifacts_registry.GetDefinitionByName(name)
if not definition:
logger.debug('undefined artifact definition: {0:s}'.format(name))
continue
logger.debug('building find spec from artifact definition: {0:s}'.format(
name))
artifact_find_specs = self._BuildFindSpecsFromArtifact(
definition, environment_variables)
find_specs.extend(artifact_find_specs)
for find_spec in find_specs:
if isinstance(find_spec, file_system_searcher.FindSpec):
self.included_file_system_find_specs.append(find_spec)
elif isinstance(find_spec, registry_searcher.FindSpec):
self.registry_find_specs.append(find_spec)
else:
logger.warning('Unsupported find specification type: {0!s}'.format(
type(find_spec)))
@classmethod
def CheckKeyCompatibility(cls, key_path):
"""Checks if a Windows Registry key path is supported by dfWinReg.
Args:
key_path (str): path of the Windows Registry key.
Returns:
bool: True if key is compatible or False if not.
"""
key_path_upper = key_path.upper()
for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES:
if key_path_upper.startswith(key_path_prefix):
return True
logger.warning('Key path: "{0:s}" is currently not supported'.format(
key_path))
return False
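# Example (illustrative):
#   ArtifactDefinitionsFiltersHelper.CheckKeyCompatibility(
#       'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows')  # -> True
#   ArtifactDefinitionsFiltersHelper.CheckKeyCompatibility(
#       'HKEY_CLASSES_ROOT\\Installer')  # -> False, with a warning logged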
| 38
| 128
| 0.705653
|
c29f37c95e9d53035a512fc38888bf216bf8a364
| 1,368
|
py
|
Python
|
tests/_test_utils.py
|
SimonBlanke/data-storage
|
65ff5b6115099d0dad86cf3472c4ebe66249eade
|
[
"MIT"
] | null | null | null |
tests/_test_utils.py
|
SimonBlanke/data-storage
|
65ff5b6115099d0dad86cf3472c4ebe66249eade
|
[
"MIT"
] | null | null | null |
tests/_test_utils.py
|
SimonBlanke/data-storage
|
65ff5b6115099d0dad86cf3472c4ebe66249eade
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import Counter
def search_data_equal(search_data1, search_data2, assert_order=True):
col1 = list(search_data1.columns)
col2 = list(search_data2.columns)
print("\n col1 \n", col1, "\n", type(col1))
print("\n col2 \n", col2, "\n", type(col2))
if set(col1) != set(col2):
return False
dtypes1 = list(search_data1.dtypes)
dtypes2 = list(search_data2.dtypes)
print("\n dtypes1 \n", dtypes1, "\n", type(dtypes1))
print("\n dtypes2 \n", dtypes2, "\n", type(dtypes2))
if set(dtypes1) != set(dtypes2):
return False
print("\n search_data1 \n", search_data1, "\n dtypes:\n", search_data1.dtypes)
print("\n search_data2 \n", search_data2, "\n dtypes:\n", search_data2.dtypes)
for col in col1:
values1 = search_data1[col].values
values2 = search_data2[col].values
if not assert_order:
occur_d1 = Counter(list(values1))
occur_d2 = Counter(list(values2))
if occur_d1 != occur_d2:
print("\n occur_d1 \n", occur_d1)
print("\n occur_d2 \n", occur_d2)
return False
if assert_order and not np.array_equal(values1, values2):
print("\n values1 \n", values1)
print("\n values2 \n", values2)
return False
return True
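# Minimal usage sketch:
#   import pandas as pd
#   df1 = pd.DataFrame({"a": [1, 2]})
#   df2 = pd.DataFrame({"a": [2, 1]})
#   search_data_equal(df1, df2, assert_order=False)  # -> True
#   search_data_equal(df1, df2, assert_order=True)   # -> False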
| 27.918367
| 82
| 0.60307
|
6ed77a59d96ded565b7124aa205d4c4b0dc94dcb
| 12,590
|
py
|
Python
|
cirq-google/cirq_google/serialization/op_deserializer.py
|
peterse/Cirq
|
31daa9410a0e1e1ac3da38109aa8ce3a15aed17b
|
[
"Apache-2.0"
] | 3,326
|
2018-07-18T23:17:21.000Z
|
2022-03-29T22:28:24.000Z
|
cirq-google/cirq_google/serialization/op_deserializer.py
|
peterse/Cirq
|
31daa9410a0e1e1ac3da38109aa8ce3a15aed17b
|
[
"Apache-2.0"
] | 3,443
|
2018-07-18T21:07:28.000Z
|
2022-03-31T20:23:21.000Z
|
cirq-google/cirq_google/serialization/op_deserializer.py
|
peterse/Cirq
|
31daa9410a0e1e1ac3da38109aa8ce3a15aed17b
|
[
"Apache-2.0"
] | 865
|
2018-07-18T23:30:24.000Z
|
2022-03-30T11:43:23.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
)
from dataclasses import dataclass
import abc
import sympy
import cirq
from cirq_google.api import v2
from cirq_google.ops.calibration_tag import CalibrationTag
from cirq_google.serialization import arg_func_langs
class OpDeserializer(abc.ABC):
"""Generic supertype for operation deserializers.
Each operation deserializer describes how to deserialize operation protos
with a particular `serialized_id` to a specific type of Cirq operation.
"""
@property
@abc.abstractmethod
def serialized_id(self) -> str:
"""Returns the string identifier for the accepted serialized objects.
This ID denotes the serialization format this deserializer consumes. For
example, one of the common deserializers converts objects with the id
'xy' into PhasedXPowGates.
"""
@abc.abstractmethod
def from_proto(
self,
proto,
*,
arg_function_language: str = '',
constants: List[v2.program_pb2.Constant] = None,
deserialized_constants: List[Any] = None,
) -> cirq.Operation:
"""Converts a proto-formatted operation into a Cirq operation.
Args:
proto: The proto object to be deserialized.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
constants: The list of Constant protos referenced by constant
table indices in `proto`.
deserialized_constants: The deserialized contents of `constants`.
Returns:
The deserialized operation represented by `proto`.
"""
@dataclass(frozen=True)
class DeserializingArg:
"""Specification of the arguments to deserialize an argument to a gate.
Args:
serialized_name: The serialized name of the gate that is being
deserialized.
constructor_arg_name: The name of the argument in the constructor of
the gate corresponding to this serialized argument.
value_func: Sometimes a value from the serialized proto needs to
converted to an appropriate type or form. This function takes the
serialized value and returns the appropriate type. Defaults to
None.
required: Whether a value must be specified when constructing the
deserialized gate. Defaults to True.
default: default value to set if the value is not present in the
arg. If set, required is ignored.
"""
serialized_name: str
constructor_arg_name: str
value_func: Optional[Callable[[arg_func_langs.ARG_LIKE], Any]] = None
required: bool = True
default: Any = None
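# --- Editor's sketch (not part of the original module): one way a
# DeserializingArg might map a serialized field onto a constructor argument.
# The field names 'half_turns' and 'exponent' are assumptions for
# illustration only.
_EXAMPLE_ARG = DeserializingArg(
    serialized_name='half_turns',
    constructor_arg_name='exponent',
    value_func=float,  # coerce the raw proto value before construction
)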
class GateOpDeserializer(OpDeserializer):
"""Describes how to deserialize a proto to a given Gate type.
Attributes:
serialized_gate_id: The id used when serializing the gate.
"""
def __init__(
self,
serialized_gate_id: str,
gate_constructor: Callable,
args: Sequence[DeserializingArg],
num_qubits_param: Optional[str] = None,
op_wrapper: Callable[
[cirq.Operation, v2.program_pb2.Operation], cirq.Operation
] = lambda x, y: x,
deserialize_tokens: Optional[bool] = True,
):
"""Constructs a deserializer.
Args:
serialized_gate_id: The serialized id of the gate that is being
deserialized.
gate_constructor: A function that produces the deserialized gate
given arguments from args.
args: A list of the arguments to be read from the serialized
gate and the information required to use this to construct
the gate using the gate_constructor above.
num_qubits_param: Some gate constructors require that the number
of qubits be passed to their constructor. This is the name
of the parameter in the constructor for this value. If None,
no number of qubits is passed to the constructor.
op_wrapper: An optional Callable to modify the resulting
GateOperation, for instance, to add tags
deserialize_tokens: Whether to convert tokens to
CalibrationTags. Defaults to True.
"""
self._serialized_gate_id = serialized_gate_id
self._gate_constructor = gate_constructor
self._args = args
self._num_qubits_param = num_qubits_param
self._op_wrapper = op_wrapper
self._deserialize_tokens = deserialize_tokens
@property
def serialized_id(self):
return self._serialized_gate_id
# TODO(#3388) Add documentation for Raises.
# pylint: disable=missing-raises-doc
def from_proto(
self,
proto: v2.program_pb2.Operation,
*,
arg_function_language: str = '',
constants: List[v2.program_pb2.Constant] = None,
deserialized_constants: List[Any] = None, # unused
) -> cirq.Operation:
"""Turns a cirq_google.api.v2.Operation proto into a GateOperation.
Args:
proto: The proto object to be deserialized.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
constants: The list of Constant protos referenced by constant
table indices in `proto`.
deserialized_constants: Unused in this method.
Returns:
The deserialized GateOperation represented by `proto`.
"""
qubits = [v2.qubit_from_proto_id(q.id) for q in proto.qubits]
args = self._args_from_proto(proto, arg_function_language=arg_function_language)
if self._num_qubits_param is not None:
args[self._num_qubits_param] = len(qubits)
gate = self._gate_constructor(**args)
op = self._op_wrapper(gate.on(*qubits), proto)
if self._deserialize_tokens:
which = proto.WhichOneof('token')
if which == 'token_constant_index':
if not constants:
raise ValueError(
'Proto has references to constants table '
                        'but none was passed in, value = '
f'{proto}'
)
op = op.with_tags(
CalibrationTag(constants[proto.token_constant_index].string_value)
)
elif which == 'token_value':
op = op.with_tags(CalibrationTag(proto.token_value))
return op
# pylint: enable=missing-raises-doc
def _args_from_proto(
self, proto: v2.program_pb2.Operation, *, arg_function_language: str
) -> Dict[str, arg_func_langs.ARG_LIKE]:
return_args = {}
for arg in self._args:
if arg.serialized_name not in proto.args:
if arg.default:
return_args[arg.constructor_arg_name] = arg.default
continue
elif arg.required:
raise ValueError(
f'Argument {arg.serialized_name} '
'not in deserializing args, but is required.'
)
value = arg_func_langs.arg_from_proto(
proto.args[arg.serialized_name],
arg_function_language=arg_function_language,
required_arg_name=None if not arg.required else arg.serialized_name,
)
if arg.value_func is not None:
value = arg.value_func(value)
if value is not None:
return_args[arg.constructor_arg_name] = value
return return_args
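# --- Editor's sketch: wiring GateOpDeserializer to a concrete gate. The 'xy'
# id matches the example in OpDeserializer's docstring; the serialized arg
# names here are assumptions for illustration.
def _example_xy_deserializer() -> GateOpDeserializer:
    return GateOpDeserializer(
        serialized_gate_id='xy',
        gate_constructor=cirq.PhasedXPowGate,
        args=[
            DeserializingArg('axis_half_turns', 'phase_exponent'),
            DeserializingArg('half_turns', 'exponent'),
        ],
    )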
class CircuitOpDeserializer(OpDeserializer):
"""Describes how to serialize CircuitOperations."""
@property
def serialized_id(self):
return 'circuit'
# TODO(#3388) Add documentation for Raises.
# pylint: disable=missing-raises-doc
def from_proto(
self,
proto: v2.program_pb2.CircuitOperation,
*,
arg_function_language: str = '',
constants: List[v2.program_pb2.Constant] = None,
deserialized_constants: List[Any] = None,
) -> cirq.CircuitOperation:
"""Turns a cirq.google.api.v2.CircuitOperation proto into a CircuitOperation.
Args:
proto: The proto object to be deserialized.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
constants: The list of Constant protos referenced by constant
table indices in `proto`. This list should already have been
parsed to produce 'deserialized_constants'.
deserialized_constants: The deserialized contents of `constants`.
Returns:
The deserialized CircuitOperation represented by `proto`.
"""
if constants is None or deserialized_constants is None:
raise ValueError(
'CircuitOp deserialization requires a constants list and a corresponding list of '
'post-deserialization values (deserialized_constants).'
)
if len(deserialized_constants) <= proto.circuit_constant_index:
raise ValueError(
f'Constant index {proto.circuit_constant_index} in CircuitOperation '
'does not appear in the deserialized_constants list '
f'(length {len(deserialized_constants)}).'
)
circuit = deserialized_constants[proto.circuit_constant_index]
if not isinstance(circuit, cirq.FrozenCircuit):
raise ValueError(
f'Constant at index {proto.circuit_constant_index} was expected to be a circuit, '
f'but it has type {type(circuit)} in the deserialized_constants list.'
)
which_rep_spec = proto.repetition_specification.WhichOneof('repetition_value')
if which_rep_spec == 'repetition_count':
rep_ids = None
repetitions = proto.repetition_specification.repetition_count
elif which_rep_spec == 'repetition_ids':
rep_ids = proto.repetition_specification.repetition_ids.ids
repetitions = len(rep_ids)
else:
rep_ids = None
repetitions = 1
qubit_map = {
v2.qubit_from_proto_id(entry.key.id): v2.qubit_from_proto_id(entry.value.id)
for entry in proto.qubit_map.entries
}
measurement_key_map = {
entry.key.string_key: entry.value.string_key
for entry in proto.measurement_key_map.entries
}
arg_map = {
arg_func_langs.arg_from_proto(
entry.key, arg_function_language=arg_function_language
): arg_func_langs.arg_from_proto(
entry.value, arg_function_language=arg_function_language
)
for entry in proto.arg_map.entries
}
for arg in arg_map.keys():
if not isinstance(arg, (str, sympy.Symbol)):
raise ValueError(
'Invalid key parameter type in deserialized CircuitOperation. '
f'Expected str or sympy.Symbol, found {type(arg)}.'
f'\nFull arg: {arg}'
)
for arg in arg_map.values():
if not isinstance(arg, (str, sympy.Symbol, float, int)):
raise ValueError(
'Invalid value parameter type in deserialized CircuitOperation. '
f'Expected str, sympy.Symbol, or number; found {type(arg)}.'
f'\nFull arg: {arg}'
)
return cirq.CircuitOperation(
circuit,
repetitions,
qubit_map,
measurement_key_map,
arg_map, # type: ignore
rep_ids,
)
# pylint: enable=missing-raises-doc
| 38.501529
| 98
| 0.625894
|
9ed9a6031a22000a8fd1579f3e84ad009fa37881
| 602
|
py
|
Python
|
oop/inheritance.py
|
mbreault/python
|
addab433b2f1e9b2fdd4851d9b9662804656ce5a
|
[
"MIT"
] | null | null | null |
oop/inheritance.py
|
mbreault/python
|
addab433b2f1e9b2fdd4851d9b9662804656ce5a
|
[
"MIT"
] | null | null | null |
oop/inheritance.py
|
mbreault/python
|
addab433b2f1e9b2fdd4851d9b9662804656ce5a
|
[
"MIT"
] | null | null | null |
## Simple Inheritance
class Person:
def __init__(self, first, last, age):
self.firstname = first
self.lastname = last
self.age = age
def __str__(self):
return self.firstname + " " + self.lastname + ", " + str(self.age)
class Employee(Person):
def __init__(self, first, last, age, staffnum):
super().__init__(first, last, age)
self.staffnumber = staffnum
def __str__(self):
return super().__str__() + ", " + self.staffnumber
x = Person("Marge", "Simpson", 36)
y = Employee("Homer", "Simpson", 28, "1007")
print(x)
print(y)
| 23.153846
| 74
| 0.60299
|
dd27ac2f09077aa7401408c152552f102c2853b6
| 898
|
py
|
Python
|
notebook_snapshot/snapshot.py
|
oscar6echo/notebook-snapshot
|
3f2474d27abfca6610a68c6651c2e327792f8a5a
|
[
"MIT"
] | 1
|
2019-08-15T22:24:43.000Z
|
2019-08-15T22:24:43.000Z
|
notebook_snapshot/snapshot.py
|
oscar6echo/notebook-snapshot
|
3f2474d27abfca6610a68c6651c2e327792f8a5a
|
[
"MIT"
] | null | null | null |
notebook_snapshot/snapshot.py
|
oscar6echo/notebook-snapshot
|
3f2474d27abfca6610a68c6651c2e327792f8a5a
|
[
"MIT"
] | 3
|
2018-12-11T10:10:21.000Z
|
2019-11-22T17:57:47.000Z
|
import sys
import subprocess as sp
def snapshot(path_nb,
path_template=None,
embed_img=True,
):
"""
"""
if embed_img:
exporter = 'notebook_snapshot.html_embed_img'
else:
exporter = 'html'
    if path_template is not None:
        cmd = 'jupyter nbconvert --to {} --template {} {}'.format(exporter, path_template, path_nb)
    else:
        # avoid passing '--template None' when no template is given
        cmd = 'jupyter nbconvert --to {} {}'.format(exporter, path_nb)
print(cmd)
args = cmd.split()
# os.system(cmd)
with sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE) as process:
# Read stdout character by character, as it includes real-time progress updates
for c in iter(lambda: process.stdout.read(1), b''):
sys.stdout.write(c.decode(sys.stdout.encoding))
# Read stderr line by line, because real-time does not matter
for line in iter(process.stderr.readline, b''):
sys.stderr.write(line.decode(sys.stderr.encoding))
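# --- Editor's sketch: a hypothetical invocation; both paths below are
# placeholders, not files shipped with this package.
if __name__ == '__main__':
    snapshot('analysis.ipynb', path_template='toc.tpl', embed_img=True)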
| 29.933333
| 87
| 0.615813
|
e9a326d8c4fd8f7e4073d33b5ed616a3c2533d13
| 6,119
|
py
|
Python
|
green/output.py
|
kris-winiark/green
|
7f1dc69a38ed6af4947988f29b5f3c8353449961
|
[
"MIT"
] | null | null | null |
green/output.py
|
kris-winiark/green
|
7f1dc69a38ed6af4947988f29b5f3c8353449961
|
[
"MIT"
] | null | null | null |
green/output.py
|
kris-winiark/green
|
7f1dc69a38ed6af4947988f29b5f3c8353449961
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from colorama.ansi import Cursor
from colorama.initialise import wrap_stream
import logging
import os
import platform
import sys
import termstyle
from unidecode import unidecode
global debug_level
debug_level = 0
if sys.version_info[0] == 3: # pragma: no cover
text_type = str
unicode = None # so pyflakes stops complaining
else: # pragma: no cover
text_type = unicode
def debug(message, level=1):
"""
So we can tune how much debug output we get when we turn it on.
"""
if level <= debug_level:
logging.debug(' ' * (level - 1) * 2 + str(message))
class Colors:
"""
A class to centralize wrapping strings in terminal colors.
"""
def __init__(self, termcolor=None):
"""
termcolor - If None, attempt to autodetect whether we are in a terminal
and turn on terminal colors if we think we are. If True, force
terminal colors on. If False, force terminal colors off.
"""
if termcolor is None:
termstyle.auto()
self.termcolor = bool(termstyle.bold(""))
else:
self.termcolor = termcolor
self._restoreColor()
def _restoreColor(self):
"""
Unfortunately other programs (that we test) can mess with termstyle's
global settings, so we need to reset termstyle to the correct mode
after each test (which I think is faster than just checking whether it
matches the current mode...)
"""
if self.termcolor:
termstyle.enable()
else:
termstyle.disable()
# Movement
def start_of_line(self):
return '\r'
def up(self, lines=1):
return Cursor.UP(lines)
# Real colors and styles
def bold(self, text):
self._restoreColor()
return termstyle.bold(text)
def blue(self, text):
self._restoreColor()
if platform.system() == 'Windows': # pragma: no cover
# Default blue in windows is unreadable (such awful defaults...)
return termstyle.cyan(text)
else:
return termstyle.blue(text)
def green(self, text):
self._restoreColor()
return termstyle.green(text)
def red(self, text):
self._restoreColor()
return termstyle.red(text)
def yellow(self, text):
self._restoreColor()
return termstyle.yellow(text)
# Abstracted colors and styles
def passing(self, text):
return self.green(text)
def failing(self, text):
return self.red(text)
def error(self, text):
return self.red(text)
def skipped(self, text):
return self.blue(text)
def unexpectedSuccess(self, text):
return self.yellow(text)
def expectedFailure(self, text):
return self.yellow(text)
def moduleName(self, text):
return self.bold(text)
def className(self, text):
return text
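# --- Editor's sketch (illustrative): every styling call re-asserts the mode
# chosen at construction, so output stays correct even if code under test
# flips termstyle's global state.
def _example_colors():
    colors = Colors(termcolor=True)    # force colors on
    return colors.passing('42 tests')  # wrapped in green ANSI codes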
class GreenStream(object):
"""
    Wraps a stream-like object with the following additional features:
1) A handy writeln() method (which calls write() under-the-hood)
2) Handy formatLine() and formatText() methods, which support indent
levels, and outcome codes.
3) Compatibility with real file objects (by implementing real file object
methods as we discover people need them). So far we have implemented the
following functions just for compatibility:
writelines(lines)
"""
indent_spaces = 2
_ascii_only_output = False # default to printing output in unicode
def __init__(self, stream, override_appveyor=False, disable_windows=False):
self.stream = stream
# Ironically, AppVeyor doesn't support windows win32 system calls for
# colors, but it WILL interpret posix ansi escape codes!
on_windows = platform.system() == 'Windows'
on_appveyor = os.environ.get('APPVEYOR', False)
if (override_appveyor
or ((on_windows and not on_appveyor)
and not disable_windows)): # pragma: no cover
self.stream = wrap_stream(self.stream, None, None, None, True)
# set output is ascii-only
self._ascii_only_output = True
self.closed = False
def flush(self):
self.stream.flush()
def writeln(self, text=''):
self.write(text + '\n')
def write(self, text):
        if isinstance(text, bytes):
text = text.decode('utf-8')
# Compensate for windows' anti-social unicode behavior
if self._ascii_only_output:
# Windows doesn't actually want unicode, so we get
# the closest ASCII equivalent
text = text_type(unidecode(text))
self.stream.write(text)
def writelines(self, lines):
"""
Just for better compatibility with real file objects
"""
for line in lines:
self.write(line)
def formatText(self, text, indent=0, outcome_char=''):
# We'll go through each line in the text, modify it, and store it in a
# new list
updated_lines = []
for line in text.split('\n'):
# We only need to format the line if there's something visible on
# it.
if line.strip(' '):
updated_lines.append(self.formatLine(line, indent, outcome_char))
else:
updated_lines.append('')
outcome_char = '' # only the first line gets an outcome character
# Join the list back together
output = '\n'.join(updated_lines)
return output
def formatLine(self, line, indent=0, outcome_char=''):
"""
Takes a single line, optionally adds an indent and/or outcome
character to the beginning of the line.
"""
actual_spaces = (indent * self.indent_spaces) - len(outcome_char)
return (outcome_char + ' ' * actual_spaces + line)
def isatty(self):
"""
Wrap internal self.stream.isatty.
"""
return self.stream.isatty()
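# --- Editor's sketch: formatLine() trades indent spaces for the outcome
# character so the text column stays aligned. The values below are
# illustrative.
def _example_format_line():
    import io
    stream = GreenStream(io.StringIO(), disable_windows=True)
    # indent=2 means 4 spaces; the '.' consumes one of them.
    assert stream.formatLine('test_ok', indent=2, outcome_char='.') == '.   test_ok'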
| 30.595
| 81
| 0.61546
|
312f2612c5879e3b0bf017b107cd20b912afb83b
| 1,455
|
bzl
|
Python
|
vendor/mlir/mlir.bzl
|
Antetokounpo/plaidml
|
06891b0a4a2691994580add063232f83294f7fec
|
[
"Apache-2.0"
] | null | null | null |
vendor/mlir/mlir.bzl
|
Antetokounpo/plaidml
|
06891b0a4a2691994580add063232f83294f7fec
|
[
"Apache-2.0"
] | null | null | null |
vendor/mlir/mlir.bzl
|
Antetokounpo/plaidml
|
06891b0a4a2691994580add063232f83294f7fec
|
[
"Apache-2.0"
] | null | null | null |
TBLGEN_ACTIONS = [
"-gen-enum-defs",
"-gen-enum-decls",
"-gen-llvmir-conversions",
"-gen-op-decls",
"-gen-op-defs",
"-gen-op-doc",
"-gen-reference-implementations",
"-gen-rewriters",
]
def _tblgen_impl(ctx):
args = ctx.actions.args()
args.add(ctx.attr.action)
args.add_all(ctx.attr.flags)
args.add("-I", ctx.label.workspace_root)
args.add_all(ctx.files.incs, before_each = "-I")
args.add("-o", ctx.outputs.out)
args.add(ctx.file.src)
ctx.actions.run(
inputs = [ctx.file.src],
outputs = [ctx.outputs.out],
arguments = [args],
executable = ctx.executable._tool,
mnemonic = "MLIRTableGen",
)
return [DefaultInfo(files = depset([ctx.outputs.out]))]
mlir_tblgen = rule(
attrs = {
"src": attr.label(
allow_single_file = [".td"],
mandatory = True,
),
"out": attr.output(
mandatory = True,
),
"incs": attr.label_list(
allow_files = True,
),
"action": attr.string(
mandatory = True,
values = TBLGEN_ACTIONS,
),
"flags": attr.string_list(),
"_tool": attr.label(
default = Label("@mlir//:mlir-tblgen"),
allow_single_file = True,
executable = True,
cfg = "host",
),
},
output_to_genfiles = True,
implementation = _tblgen_impl,
)
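# --- Editor's sketch: a hypothetical BUILD-file use of mlir_tblgen (shown
# commented out, since rules are invoked from BUILD files rather than at
# .bzl load time); all target and file names are placeholders.
# mlir_tblgen(
#     name = "gen_op_decls",
#     src = "ops.td",
#     out = "ops.h.inc",
#     incs = ["include"],
#     action = "-gen-op-decls",
# )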
| 25.982143
| 59
| 0.534708
|
d14e733bdec0dc0ea49894ccb571bfc216bf27d1
| 6,024
|
py
|
Python
|
src/mantarray_file_manager/__init__.py
|
CuriBio/mantarray-file-manager
|
fa71793ef9ac469f1868fa77c5d0929aad764900
|
[
"MIT"
] | null | null | null |
src/mantarray_file_manager/__init__.py
|
CuriBio/mantarray-file-manager
|
fa71793ef9ac469f1868fa77c5d0929aad764900
|
[
"MIT"
] | 50
|
2020-08-07T17:38:06.000Z
|
2022-02-21T12:01:57.000Z
|
src/mantarray_file_manager/__init__.py
|
CuriBio/mantarray-file-manager
|
fa71793ef9ac469f1868fa77c5d0929aad764900
|
[
"MIT"
] | 1
|
2021-07-01T16:48:07.000Z
|
2021-07-01T16:48:07.000Z
|
# -*- coding: utf-8 -*-
"""Curi Bio File Manager.
File Manager for utilizing Curi bio data files and online databases.
"""
from . import file_writer
from .constants import ADC_GAIN_SETTING_UUID
from .constants import ADC_REF_OFFSET_UUID
from .constants import ADC_TISSUE_OFFSET_UUID
from .constants import BACKEND_LOG_UUID
from .constants import BARCODE_IS_FROM_SCANNER_UUID
from .constants import BOOTUP_COUNTER_UUID
from .constants import CENTIMILLISECONDS_PER_SECOND
from .constants import COMPUTER_NAME_HASH_UUID
from .constants import CURI_BIO_ACCOUNT_UUID
from .constants import CURI_BIO_USER_ACCOUNT_ID
from .constants import CURRENT_BETA1_HDF5_FILE_FORMAT_VERSION
from .constants import CURRENT_BETA2_HDF5_FILE_FORMAT_VERSION
from .constants import CUSTOMER_ACCOUNT_ID_UUID
from .constants import DATETIME_STR_FORMAT
from .constants import FILE_FORMAT_VERSION_METADATA_KEY
from .constants import FILE_MIGRATION_PATHS
from .constants import FILE_VERSION_PRIOR_TO_MIGRATION_UUID
from .constants import HARDWARE_TEST_RECORDING_UUID
from .constants import IS_FILE_ORIGINAL_UNTRIMMED_UUID
from .constants import MAGNETOMETER_CONFIGURATION_UUID
from .constants import MAIN_FIRMWARE_VERSION_UUID
from .constants import MANTARRAY_NICKNAME_UUID
from .constants import MANTARRAY_SERIAL_NUMBER_UUID
from .constants import METADATA_UUID_DESCRIPTIONS
from .constants import MICROSECONDS_PER_CENTIMILLISECOND
from .constants import MIN_SUPPORTED_FILE_VERSION
from .constants import NOT_APPLICABLE_H5_METADATA
from .constants import ORIGINAL_FILE_VERSION_UUID
from .constants import PCB_SERIAL_NUMBER_UUID
from .constants import PLATE_BARCODE_UUID
from .constants import REF_SAMPLING_PERIOD_UUID
from .constants import REFERENCE_SENSOR_READINGS
from .constants import REFERENCE_VOLTAGE_UUID
from .constants import SLEEP_FIRMWARE_VERSION_UUID
from .constants import SOFTWARE_BUILD_NUMBER_UUID
from .constants import SOFTWARE_RELEASE_VERSION_UUID
from .constants import START_RECORDING_TIME_INDEX_UUID
from .constants import TAMPER_FLAG_UUID
from .constants import TIME_INDICES
from .constants import TIME_OFFSETS
from .constants import TISSUE_SAMPLING_PERIOD_UUID
from .constants import TISSUE_SENSOR_READINGS
from .constants import TOTAL_WELL_COUNT_UUID
from .constants import TOTAL_WORKING_HOURS_UUID
from .constants import TRIMMED_TIME_FROM_ORIGINAL_END_UUID
from .constants import TRIMMED_TIME_FROM_ORIGINAL_START_UUID
from .constants import USER_ACCOUNT_ID_UUID
from .constants import UTC_BEGINNING_DATA_ACQUISTION_UUID
from .constants import UTC_BEGINNING_RECORDING_UUID
from .constants import UTC_FIRST_REF_DATA_POINT_UUID
from .constants import UTC_FIRST_TISSUE_DATA_POINT_UUID
from .constants import UTC_TIMESTAMP_OF_FILE_VERSION_MIGRATION_UUID
from .constants import WELL_COLUMN_UUID
from .constants import WELL_INDEX_UUID
from .constants import WELL_NAME_UUID
from .constants import WELL_ROW_UUID
from .constants import XEM_SERIAL_NUMBER_UUID
from .exceptions import AxisDataForSensorNotInFileError
from .exceptions import FileAttributeNotFoundError
from .exceptions import MantarrayFileNotLatestVersionError
from .exceptions import SensorDataNotInFileError
from .exceptions import UnsupportedFileMigrationPath
from .exceptions import UnsupportedMantarrayFileVersionError
from .exceptions import WellRecordingsNotFromSameSessionError
from .file_writer import MantarrayH5FileCreator
from .file_writer import migrate_to_latest_version
from .file_writer import migrate_to_next_version
from .files import BaseWellFile
from .files import Beta1WellFile
from .files import H5Wrapper
from .files import PlateRecording
from .files import WellFile
__all__ = [
"WellFile",
"PlateRecording",
"UTC_BEGINNING_DATA_ACQUISTION_UUID",
"FILE_FORMAT_VERSION_METADATA_KEY",
"START_RECORDING_TIME_INDEX_UUID",
"CUSTOMER_ACCOUNT_ID_UUID",
"USER_ACCOUNT_ID_UUID",
"SOFTWARE_BUILD_NUMBER_UUID",
"SOFTWARE_RELEASE_VERSION_UUID",
"MAIN_FIRMWARE_VERSION_UUID",
"SLEEP_FIRMWARE_VERSION_UUID",
"XEM_SERIAL_NUMBER_UUID",
"MANTARRAY_NICKNAME_UUID",
"REFERENCE_VOLTAGE_UUID",
"WELL_NAME_UUID",
"WELL_ROW_UUID",
"WELL_COLUMN_UUID",
"WELL_INDEX_UUID",
"TOTAL_WELL_COUNT_UUID",
"REF_SAMPLING_PERIOD_UUID",
"TISSUE_SAMPLING_PERIOD_UUID",
"ADC_GAIN_SETTING_UUID",
"PLATE_BARCODE_UUID",
"ADC_TISSUE_OFFSET_UUID",
"ADC_REF_OFFSET_UUID",
"MANTARRAY_SERIAL_NUMBER_UUID",
"UTC_BEGINNING_RECORDING_UUID",
"UTC_FIRST_TISSUE_DATA_POINT_UUID",
"UTC_FIRST_REF_DATA_POINT_UUID",
"HARDWARE_TEST_RECORDING_UUID",
"CURI_BIO_ACCOUNT_UUID",
"CURI_BIO_USER_ACCOUNT_ID",
"METADATA_UUID_DESCRIPTIONS",
"DATETIME_STR_FORMAT",
"CENTIMILLISECONDS_PER_SECOND",
"MICROSECONDS_PER_CENTIMILLISECOND",
"WellRecordingsNotFromSameSessionError",
"MIN_SUPPORTED_FILE_VERSION",
"UnsupportedMantarrayFileVersionError",
"FileAttributeNotFoundError",
"BACKEND_LOG_UUID",
"COMPUTER_NAME_HASH_UUID",
"BARCODE_IS_FROM_SCANNER_UUID",
"IS_FILE_ORIGINAL_UNTRIMMED_UUID",
"TRIMMED_TIME_FROM_ORIGINAL_START_UUID",
"TRIMMED_TIME_FROM_ORIGINAL_END_UUID",
"ORIGINAL_FILE_VERSION_UUID",
"CURRENT_BETA1_HDF5_FILE_FORMAT_VERSION",
"MantarrayH5FileCreator",
"FILE_MIGRATION_PATHS",
"migrate_to_next_version",
"migrate_to_latest_version",
"UnsupportedFileMigrationPath",
"BaseWellFile",
"file_writer",
"UTC_TIMESTAMP_OF_FILE_VERSION_MIGRATION_UUID",
"FILE_VERSION_PRIOR_TO_MIGRATION_UUID",
"NOT_APPLICABLE_H5_METADATA",
"MantarrayFileNotLatestVersionError",
"TISSUE_SENSOR_READINGS",
"REFERENCE_SENSOR_READINGS",
"BOOTUP_COUNTER_UUID",
"TOTAL_WORKING_HOURS_UUID",
"TAMPER_FLAG_UUID",
"PCB_SERIAL_NUMBER_UUID",
"CURRENT_BETA2_HDF5_FILE_FORMAT_VERSION",
"MAGNETOMETER_CONFIGURATION_UUID",
"TIME_INDICES",
"TIME_OFFSETS",
"Beta1WellFile",
"H5Wrapper",
"SensorDataNotInFileError",
"AxisDataForSensorNotInFileError",
]
| 38.615385
| 68
| 0.834495
|
d0000e6e98510600d51c0be34c93515ab1685ecb
| 15,026
|
py
|
Python
|
demos/text_to_speech_demo/python/models/forward_tacotron_ie.py
|
kblaszczak-intel/open_model_zoo
|
e313674d35050d2a4721bbccd9bd4c404f1ba7f8
|
[
"Apache-2.0"
] | 2,201
|
2018-10-15T14:37:19.000Z
|
2020-07-16T02:05:51.000Z
|
demos/text_to_speech_demo/python/models/forward_tacotron_ie.py
|
kblaszczak-intel/open_model_zoo
|
e313674d35050d2a4721bbccd9bd4c404f1ba7f8
|
[
"Apache-2.0"
] | 759
|
2018-10-18T07:43:55.000Z
|
2020-07-16T01:23:12.000Z
|
demos/text_to_speech_demo/python/models/forward_tacotron_ie.py
|
kblaszczak-intel/open_model_zoo
|
e313674d35050d2a4721bbccd9bd4c404f1ba7f8
|
[
"Apache-2.0"
] | 808
|
2018-10-16T14:03:49.000Z
|
2020-07-15T11:41:45.000Z
|
"""
Copyright (c) 2020-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from utils.text_preprocessing import text_to_sequence, _symbol_to_id
from utils.embeddings_processing import PCA
def check_input_name(model, input_tensor_name):
try:
model.input(input_tensor_name)
return True
except RuntimeError:
return False
class ForwardTacotronIE:
def __init__(self, model_duration, model_forward, core, device='CPU', verbose=False):
self.verbose = verbose
self.device = device
self.core = core
self.duration_predictor_model = self.load_network(model_duration)
self.duration_predictor_request = self.create_infer_request(self.duration_predictor_model, model_duration)
self.forward_model = self.load_network(model_forward)
self.forward_request = self.create_infer_request(self.forward_model, model_forward)
# fixed length of the sequence of symbols
self.duration_len = self.duration_predictor_model.input('input_seq').shape[1]
# fixed length of the input embeddings for forward
self.forward_len = self.forward_model.input('data').shape[1]
if self.verbose:
log.debug('Forward limitations : {0} symbols and {1} embeddings'.format(self.duration_len, self.forward_len))
self.is_attention = check_input_name(self.forward_model, "pos_mask")
if self.is_attention:
self.init_pos_mask()
else:
self.pos_mask = None
self.is_multi_speaker = check_input_name(self.duration_predictor_model, "speaker_embedding")
if self.is_multi_speaker:
self.init_speaker_information()
else:
self.male_idx = None
self.female_idx = None
self.speaker_embeddings = None
self.female_embeddings = None
self.male_embeddings = None
def init_pos_mask(self, mask_sz=6000, window_size=4):
mask_arr = np.zeros((1, 1, mask_sz, mask_sz), dtype=np.float32)
width = 2 * window_size + 1
for i in range(mask_sz - width):
mask_arr[0][0][i][i:i + width] = 1.0
self.pos_mask = mask_arr
@staticmethod
def sequence_mask(length, max_length=None):
if max_length is None:
max_length = np.max(length)
x = np.arange(max_length, dtype=length.dtype)
        x = np.expand_dims(x, axis=0)
        length = np.expand_dims(length, axis=1)
return x < length
def seq_to_indexes(self, text):
res = text_to_sequence(text)
if self.verbose:
log.debug(res)
return res
@staticmethod
def build_index(duration, x):
duration[np.where(duration < 0)] = 0
tot_duration = np.cumsum(duration, 1)
max_duration = int(tot_duration.max().item())
        index = np.zeros([x.shape[0], max_duration, x.shape[2]], dtype=np.int64)
for i in range(tot_duration.shape[0]):
pos = 0
for j in range(tot_duration.shape[1]):
pos1 = tot_duration[i, j]
index[i, pos:pos1, :] = j
pos = pos1
index[i, pos:, :] = j
return index
@staticmethod
def gather(a, dim, index):
expanded_index = [index if dim == i else np.arange(a.shape[i]).reshape(
[-1 if i == j else 1 for j in range(a.ndim)]) for i in range(a.ndim)]
return a[tuple(expanded_index)]
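    # --- Editor's sketch (illustrative): build_index + gather repeat each
    # symbol embedding by its predicted duration, e.g. durations [2, 1]
    # expand two embeddings into a length-3 sequence.
    @staticmethod
    def _example_expand_by_duration():
        duration = np.array([[2, 1]])
        emb = np.arange(4, dtype=np.float32).reshape(1, 2, 2)  # two embeddings
        index = ForwardTacotronIE.build_index(duration, emb)
        expanded = ForwardTacotronIE.gather(emb, 1, index)
        assert expanded.shape == (1, 3, 2)  # first embedding twice, second once
        return expanded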
def load_network(self, model_path):
log.info('Reading ForwardTacotron model {}'.format(model_path))
return self.core.read_model(model_path)
def create_infer_request(self, model, path):
compiled_model = self.core.compile_model(model, device_name=self.device)
log.info('The ForwardTacotron model {} is loaded to {}'.format(path, self.device))
return compiled_model.create_infer_request()
def infer_duration(self, sequence, speaker_embedding=None, alpha=1.0, non_empty_symbols=None):
if self.is_attention:
input_mask = self.sequence_mask(np.array([[non_empty_symbols]]), sequence.shape[1])
pos_mask = self.pos_mask[:, :, :sequence.shape[1], :sequence.shape[1]]
inputs = {"input_seq": sequence,
"input_mask": input_mask,
"pos_mask": pos_mask}
if speaker_embedding is not None:
inputs["speaker_embedding"] = np.array(speaker_embedding)
self.duration_predictor_request.infer(inputs)
else:
self.duration_predictor_request.infer(inputs={"input_seq": sequence})
duration = self.duration_predictor_request.get_tensor("duration").data[:] * alpha
duration = (duration + 0.5).astype('int').flatten()
duration = np.expand_dims(duration, axis=0)
preprocessed_embeddings = self.duration_predictor_request.get_tensor("embeddings").data[:]
if non_empty_symbols is not None:
duration = duration[:, :non_empty_symbols]
preprocessed_embeddings = preprocessed_embeddings[:, :non_empty_symbols]
indexes = self.build_index(duration, preprocessed_embeddings)
if self.verbose:
log.debug("Index: {0}, duration: {1}, embeddings: {2}, non_empty_symbols: {3}"
.format(indexes.shape, duration.shape, preprocessed_embeddings.shape, non_empty_symbols))
return self.gather(preprocessed_embeddings, 1, indexes)
def infer_mel(self, aligned_emb, non_empty_symbols, speaker_embedding=None):
if self.is_attention:
data_mask = self.sequence_mask(np.array([[non_empty_symbols]]), aligned_emb.shape[1])
pos_mask = self.pos_mask[:, :, :aligned_emb.shape[1], :aligned_emb.shape[1]]
inputs = {"data": aligned_emb,
"data_mask": data_mask,
"pos_mask": pos_mask}
if speaker_embedding is not None:
inputs["speaker_embedding"] = np.array(speaker_embedding)
self.forward_request.infer(inputs)
else:
self.forward_request.infer(inputs={"data": aligned_emb})
return self.forward_request.get_tensor('mel').data[:, :non_empty_symbols]
def find_optimal_delimiters_position(self, sequence, delimiters, idx, window=20):
res = {d: -1 for d in delimiters}
for i in range(max(0, idx - window), idx):
if sequence[i] in delimiters:
res[sequence[i]] = i + 1
return res
def forward_duration_prediction_by_delimiters(self, text, speaker_embedding, alpha):
sequence = self.seq_to_indexes(text)
seq_len = len(sequence)
outputs = []
if seq_len <= self.duration_len:
non_empty_symbols = len(sequence) + min(1, self.duration_len - seq_len)
sequence = sequence + [_symbol_to_id[' ']] * (self.duration_len - seq_len)
sequence = np.array(sequence)
sequence = np.expand_dims(sequence, axis=0)
outputs.append(self.infer_duration(sequence, speaker_embedding, alpha, non_empty_symbols=non_empty_symbols))
else:
punctuation = '.!?,;: '
delimiters = [_symbol_to_id[p] for p in punctuation]
start_idx = 0
while start_idx < seq_len:
if start_idx + self.duration_len < seq_len:
positions = self.find_optimal_delimiters_position(sequence, delimiters,
start_idx + self.duration_len,
window=self.duration_len//10)
else:
positions = {delimiters[0]: seq_len}
edge = -1
for d in delimiters:
if positions[d] > 0:
edge = positions[d]
break
if edge < 0:
raise Exception("Bad delimiter position {0} for sequence with length {1}".format(edge, seq_len))
sub_sequence = sequence[start_idx:edge]
non_empty_symbols = len(sub_sequence) + min(1, self.duration_len - len(sub_sequence))
sub_sequence += [_symbol_to_id[' ']] * (self.duration_len - len(sub_sequence))
sub_sequence = np.array(sub_sequence)
sub_sequence = np.expand_dims(sub_sequence, axis=0)
outputs.append(self.infer_duration(sub_sequence, speaker_embedding, alpha, non_empty_symbols=non_empty_symbols))
start_idx = edge
aligned_emb = np.concatenate(outputs, axis=1)
return aligned_emb
def forward(self, text, alpha=1.0, speaker_id=19, speaker_emb=None):
speaker_embedding = None
if self.is_multi_speaker:
if speaker_emb is not None:
speaker_embedding = speaker_emb
else:
speaker_embedding = [self.speaker_embeddings[speaker_id, :]]
aligned_emb = self.forward_duration_prediction_by_delimiters(text, speaker_embedding, alpha)
mels = []
start_idx = 0
end_idx = 0
while start_idx < aligned_emb.shape[1] and end_idx < aligned_emb.shape[1]:
end_idx = min(start_idx + self.forward_len, aligned_emb.shape[1])
sub_aligned_emb = aligned_emb[:, start_idx:end_idx, :]
if sub_aligned_emb.shape[1] < self.forward_len:
sub_aligned_emb = np.pad(sub_aligned_emb,
((0, 0), (0, self.forward_len - sub_aligned_emb.shape[1]), (0, 0)),
'constant', constant_values=0)
if self.verbose:
log.debug("SAEmb shape: {0}".format(sub_aligned_emb.shape))
mel = self.infer_mel(sub_aligned_emb, end_idx - start_idx, speaker_embedding)
mels.append(np.copy(mel))
start_idx += self.forward_len
res = np.concatenate(mels, axis=1)
if self.verbose:
log.debug("MEL shape :{0}".format(res.shape))
return res
def get_speaker_embeddings(self):
if self.is_multi_speaker:
return self.speaker_embeddings
return None
def get_pca_speaker_embedding(self, gender, alpha):
if not self.is_multi_speaker:
return None
emb = self.male_embeddings if gender == "Male" else self.female_embeddings
pca = PCA()
projection = pca.build(emb)
x1 = min(projection)
x2 = max(projection)
pca_component = x1 + alpha * (x2 - x1)
emb = pca.iproject(np.array([pca_component]))
return emb
def init_speaker_information(self):
self.male_idx = [2, 3, 7, 11, 12, 15, 16, 19, 20, 21, 25, 26, 27, 29, 32, 33, 34, 35, 36, 38]
self.female_idx = [0, 1, 4, 5, 6, 8, 9, 10, 13, 14, 17, 18, 22, 23, 24, 28, 30, 31, 37, 39]
self.speaker_embeddings = np.array([[-0.4327550530433655, -0.5420686602592468],
[-0.5264465808868408, -0.6281864643096924],
[0.15513141453266144, 0.7856010794639587],
[0.3424123525619507, 0.8129010200500488],
[-0.6081429719924927, -0.6511518359184265],
[-0.49752333760261536, -0.8568740487098694],
[-0.005007751286029816, -1.3364707231521606],
[0.14275427162647247, 1.121581792831421],
[-0.45601722598075867, -0.9648892283439636],
[-0.26137179136276245, -1.1388417482376099],
[0.12628738582134247, -1.149622917175293],
[0.34105026721954346, 1.0184416770935059],
[0.3222722113132477, 1.070836067199707],
[-0.2694351375102997, -0.9980007410049438],
[-0.11780811846256256, -1.0476068258285522],
[0.2472933977842331, 1.1816325187683105],
[0.04263993725180626, 1.4357256889343262],
[0.05275965854525566, -1.0010212659835815],
[-0.17100927233695984, -1.1538763046264648],
[0.09288709610700607, 1.296027660369873],
[0.13041983544826508, 1.1497610807418823],
[0.11197542399168015, 1.0537633895874023],
[-0.13089995086193085, -1.2036861181259155],
[0.055261872708797455, -1.338423728942871],
[0.20335668325424194, -1.2085381746292114],
[-0.038247253745794296, 1.268439769744873],
[-0.11069679260253906, 1.050403356552124],
[-0.19113299250602722, 1.0872247219085693],
[0.17568981647491455, -1.247299075126648],
[-0.34791627526283264, 1.0054986476898193],
[0.2401651293039322, -1.1724580526351929],
[0.30263951420783997, -1.043319582939148],
[-0.3040805160999298, 1.1061657667160034],
[-0.27853792905807495, 1.145222544670105],
[-0.49230968952178955, 0.9106340408325195],
[-0.45115727186203003, 0.9025603532791138],
[-0.49153658747673035, 0.7804651260375977],
[0.253637433052063, -1.014277696609497],
[-0.48516881465911865, 0.6745203137397766],
[0.3036082983016968, -0.8406648635864258]])
        mask = np.array([i in self.male_idx for i in range(self.speaker_embeddings.shape[0])])
self.male_embeddings = self.speaker_embeddings[mask, :]
        mask = np.array([i in self.female_idx for i in range(self.speaker_embeddings.shape[0])])
self.female_embeddings = self.speaker_embeddings[mask, :]
| 48.627832
| 128
| 0.582989
|
64869eefd3d8c8e8b9294409dae8a4d102581dc7
| 3,781
|
py
|
Python
|
aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SubmitIProductionJobRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SubmitIProductionJobRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SubmitIProductionJobRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmts.endpoint import endpoint_data
class SubmitIProductionJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'SubmitIProductionJob','mts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_JobParams(self): # String
return self.get_query_params().get('JobParams')
def set_JobParams(self, JobParams): # String
self.add_query_param('JobParams', JobParams)
def get_Output(self): # String
return self.get_query_params().get('Output')
def set_Output(self, Output): # String
self.add_query_param('Output', Output)
def get_UserData(self): # String
return self.get_query_params().get('UserData')
def set_UserData(self, UserData): # String
self.add_query_param('UserData', UserData)
def get_FunctionName(self): # String
return self.get_query_params().get('FunctionName')
def set_FunctionName(self, FunctionName): # String
self.add_query_param('FunctionName', FunctionName)
def get_NotifyUrl(self): # String
return self.get_query_params().get('NotifyUrl')
def set_NotifyUrl(self, NotifyUrl): # String
self.add_query_param('NotifyUrl', NotifyUrl)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_ModelId(self): # String
return self.get_query_params().get('ModelId')
def set_ModelId(self, ModelId): # String
self.add_query_param('ModelId', ModelId)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_PipelineId(self): # String
return self.get_query_params().get('PipelineId')
def set_PipelineId(self, PipelineId): # String
self.add_query_param('PipelineId', PipelineId)
def get_Input(self): # String
return self.get_query_params().get('Input')
def set_Input(self, Input): # String
self.add_query_param('Input', Input)
def get_ScheduleParams(self): # String
return self.get_query_params().get('ScheduleParams')
def set_ScheduleParams(self, ScheduleParams): # String
self.add_query_param('ScheduleParams', ScheduleParams)
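# --- Editor's sketch: one hypothetical way to populate this request; the
# function name and JSON payloads below are placeholders, not real values.
def _example_build_request():
    request = SubmitIProductionJobRequest()
    request.set_FunctionName('ExampleFunction')
    request.set_Input('{"Bucket": "example-bucket", "Object": "in.mp4"}')
    request.set_Output('{"Bucket": "example-bucket", "Object": "out.mp4"}')
    return request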
| 38.191919
| 79
| 0.75324
|
ebb8059662b2b31f18fc1f1b5b2e731f200e6df2
| 6,659
|
py
|
Python
|
ocfweb/stats/printing.py
|
expedited/ocfweb
|
ed143b8f1c59e58157780007fe5fd104ee18d944
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
ocfweb/stats/printing.py
|
expedited/ocfweb
|
ed143b8f1c59e58157780007fe5fd104ee18d944
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
ocfweb/stats/printing.py
|
expedited/ocfweb
|
ed143b8f1c59e58157780007fe5fd104ee18d944
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
import time
from collections import defaultdict
from datetime import date
from datetime import timedelta
from functools import partial
from django.http import HttpResponse
from django.shortcuts import render
from matplotlib.figure import Figure
from ocflib.lab import stats
from ocflib.printing.printers import PRINTERS
from ocflib.printing.quota import get_connection
from ocflib.printing.quota import SEMESTERLY_QUOTA
from ocfweb.caching import periodic
from ocfweb.component.graph import plot_to_image_bytes
ALL_PRINTERS = ('papercut', 'pagefault', 'logjam', 'logjam-old', 'deforestation')
ACTIVE_PRINTERS = ('papercut', 'pagefault', 'logjam')
def stats_printing(request):
return render(
request,
'stats/printing.html',
{
'title': 'Printing Statistics',
'current_printers': PRINTERS,
'toner_changes': _toner_changes(),
'last_month': [
date.today() - timedelta(days=i)
for i in range(30)
],
'pages_per_day': _pages_per_day(),
},
)
def semester_histogram(request):
return HttpResponse(
plot_to_image_bytes(_semester_histogram(), format='svg'),
content_type='image/svg+xml',
)
@periodic(300)
def _semester_histogram():
with get_connection() as c:
c.execute(
'SELECT `user`, `semester` FROM `printed` WHERE `semester` > 0',
)
users = [SEMESTERLY_QUOTA - int(r['semester']) for r in c]
fig = Figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
ax.locator_params(nbins=20)
ax.hist(users, bins=list(range(0, 105, 5)))
ax.grid(True)
ax.set_xlim(SEMESTERLY_QUOTA, 0)
ax.set_ylabel('Number of users')
ax.set_xlabel('Remaining balance')
ax.set_title('Remaining balances this semester')
return fig
@periodic(3600)
def _toner_changes():
return [
(
printer,
_toner_used_by_printer(printer),
)
for printer in ACTIVE_PRINTERS
]
def _toner_used_by_printer(printer, cutoff=.05, since=None):
"""Returns toner used for a printer since a given date (by default it
returns toner used for this semester).
    Toner readings are noisy: large spurious diffs show up whenever a
    cartridge is taken out and put back in, e.g. to clear a jam, which makes
    it hard to tell a genuine replacement from a reseat of the same toner.
    To reduce this noise we only count diffs smaller than a cutoff, which
    empirically gives more accurate usage numbers.
"""
if not since:
since = stats.current_semester_start()
with stats.get_connection() as cursor:
cursor.execute(
'''
CREATE TEMPORARY TABLE ordered1
(PRIMARY KEY (position))
AS (
SELECT * FROM (
SELECT
T.*,
@rownum := @rownum + 1 AS position
FROM (
(
SELECT * FROM printer_toner_public
WHERE printer = %s AND
date > %s
ORDER BY date
) AS T,
(SELECT @rownum := 0) AS r
)
) AS x
)
''', (printer, since.strftime('%Y-%m-%d')),
)
cursor.execute('''
CREATE TEMPORARY TABLE ordered2
(PRIMARY KEY (position))
AS (SELECT * FROM ordered1)
''')
cursor.execute('''
CREATE TEMPORARY TABLE diffs
AS (SELECT
B.date AS date,
A.value/A.max - B.value/B.max as pct_diff
FROM
ordered1 as A,
ordered2 as B
WHERE
B.position = A.position + 1)
''')
cursor.execute(
'''
SELECT SUM(pct_diff) as toner_used
FROM
diffs
WHERE
ABS(pct_diff)<%s
''', (cutoff,),
)
result = cursor.fetchone()['toner_used']
return float(result or 0.0)
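def _example_toner_cutoff(cutoff=.05):
    # --- Editor's sketch: why only small diffs are summed. The jump from
    # 0.20 back to 0.95 below models a cartridge swap (or jam reseat), not
    # consumption; the readings are illustrative.
    levels = [1.00, 0.97, 0.95, 0.20, 0.95, 0.91]
    diffs = [a - b for a, b in zip(levels, levels[1:])]
    # Keeps 0.03 + 0.02 + 0.04 and drops the +/-0.75 swap artifacts.
    return sum(d for d in diffs if abs(d) < cutoff)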
@periodic(120)
def _pages_per_day():
with stats.get_connection() as cursor:
cursor.execute('''
SELECT max(value) as value, cast(date as date) as date, printer
FROM printer_pages_public
GROUP BY cast(date as date), printer
ORDER BY date ASC, printer ASC
''')
        # Resolves the issue of possible missing dates.
        # defaultdict(lambda: defaultdict(int)) doesn't work because local
        # objects like lambdas can't be pickled for caching;
        # defaultdict(partial(defaultdict, int)) gives the same behavior with
        # a picklable factory.
pages_printed = defaultdict(partial(defaultdict, int))
last_seen = {}
for row in cursor:
if row['printer'] in last_seen:
pages_printed.setdefault(row['date'], defaultdict(int))
pages_printed[row['date']][row['printer']] = (
row['value'] - last_seen[row['printer']]
)
last_seen[row['printer']] = row['value']
return pages_printed
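# --- Editor's sketch: why partial(defaultdict, int) instead of a lambda.
# The cached value must round-trip through pickle, and module-level
# callables pickle while lambdas do not.
def _example_picklable_default():
    import pickle
    pages = defaultdict(partial(defaultdict, int))
    pages['2024-01-01']['papercut'] += 1  # illustrative date/printer
    return pickle.loads(pickle.dumps(pages))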
def _pages_printed_for_printer(printer, resolution=100):
with stats.get_connection() as cursor:
cursor.execute(
'''
SELECT Z.date, Z.value FROM (
SELECT
T.*,
@rownum := @rownum + 1 AS position
FROM (
(
SELECT * FROM printer_pages_public
WHERE printer = %s
ORDER BY date
) AS T,
(SELECT @rownum := 0) AS r
)
) as Z
WHERE Z.position mod %s = 0
''', (printer, resolution),
)
return [
(time.mktime(row['date'].timetuple()) * 1000, row['value'])
for row in cursor
]
@periodic(3600)
def _pages_printed_data():
return [
{
'name': printer,
'animation': False,
'data': _pages_printed_for_printer(printer),
}
for printer in ALL_PRINTERS
]
def pages_printed(request):
return render(
request,
'stats/printing/pages-printed.html',
{
'title': 'Pages Printed',
'data': _pages_printed_data(),
},
)
| 30.406393
| 115
| 0.532212
|
083cc22c4f52c39407fd57167da5617c1d4e8944
| 18,820
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class route_selection_options(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/route-selection-options. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Parameters relating to options for route selection
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "route-selection-options"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"afi-safis",
"afi-safi",
"route-selection-options",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/config (container)
YANG Description: Configuration parameters relating to route selection
options
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to route selection
options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state (container)
YANG Description: State information for the route selection options
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information for the route selection options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
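# --- Editor's sketch (illustrative): pyangbind containers are used through
# the generated properties; 'config' and 'state' come from
# _pyangbind_elements above.
def _example_route_selection_options():
    opts = route_selection_options()
    return opts.config, opts.state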
from . import config
from . import state
class route_selection_options(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/route-selection-options. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Parameters relating to options for route selection
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "route-selection-options"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"afi-safis",
"afi-safi",
"route-selection-options",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/config (container)
YANG Description: Configuration parameters relating to route selection
options
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to route selection
options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state (container)
YANG Description: State information for the route selection options
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information for the route selection options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
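# Hedged usage sketch (assumes the pyangbind runtime is installed; only the
# generated class above is taken as given):
# rso = route_selection_options()
# import pyangbind.lib.pybindJSON as pybindJSON
# print(pybindJSON.dumps(rso))  # serialize the container to JSON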
| 39.208333
| 377
| 0.606164
|
877d35fb38ccda1f21d30e07a23fdf1a58b2ecfe
| 127
|
py
|
Python
|
lab_2/3.py
|
Mmalikov1337/python_labs
|
0d72604efc8a9a94c80de38525146ebbc3287fa3
|
[
"MIT"
] | null | null | null |
lab_2/3.py
|
Mmalikov1337/python_labs
|
0d72604efc8a9a94c80de38525146ebbc3287fa3
|
[
"MIT"
] | null | null | null |
lab_2/3.py
|
Mmalikov1337/python_labs
|
0d72604efc8a9a94c80de38525146ebbc3287fa3
|
[
"MIT"
] | null | null | null |
from random import randint
print("random three-digit number ending in zero")
num = str(randint(10, 99))
print(int(num + '0'))
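# Illustrative trace (added note): if randint(10, 99) returns 42, the script
# prints 420, so the output is always a three-digit multiple of ten.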
| 21.166667
| 49
| 0.716535
|
9001d4f08769bcdb4a48f64146b7b178ccd491c0
| 1,698
|
py
|
Python
|
setup.py
|
hugovk/ety-python
|
b59f424b05f3bfb7c1fa023348842e2404128f32
|
[
"MIT"
] | null | null | null |
setup.py
|
hugovk/ety-python
|
b59f424b05f3bfb7c1fa023348842e2404128f32
|
[
"MIT"
] | null | null | null |
setup.py
|
hugovk/ety-python
|
b59f424b05f3bfb7c1fa023348842e2404128f32
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import re
from codecs import open
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
with open(path.join(here, 'ety/__init__.py'), encoding='utf8') as f:
version = re.search(r'__version__ = "(.*?)"', f.read()).group(1)
setup(
name="ety",
version=version,
description="discover the etymology of words",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jmsv/ety-python",
author="James Vickery",
author_email="dev@jamesvickery.net",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4",
keywords="etymology origins english language words",
packages=["ety", "ety/data"],
install_requires=["treelib", "colorful", "six"],
extras_require={"dev": ["flake8"]},
package_data={"ety": ["data/etymologies.json", "data/iso-639-3.json"]},
entry_points={"console_scripts": ["ety=ety:cli"]},
project_urls={
"Source": "https://github.com/jmsv/ety-python",
"Bug Reports": "https://github.com/jmsv/ety-python/issues",
},
)
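# Hedged usage note (not part of the original file): with the metadata above,
# an editable local install that also pulls in the optional dev extras is
# typically `pip install -e .[dev]`, run from the directory containing this
# setup.py (assumes a standard pip/setuptools workflow).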
| 33.96
| 75
| 0.623675
|
b26d082ddcda888714fff0b3149cf3a2fc444229
| 846
|
py
|
Python
|
train_evaluator/TFRecord_Slim_Reader.py
|
cow8/PupilTracking
|
02977de83c9f1481b700a3bb0ac37f0f814b1faa
|
[
"Apache-2.0"
] | 8
|
2018-01-28T06:41:14.000Z
|
2020-07-09T05:01:06.000Z
|
train_evaluator/TFRecord_Slim_Reader.py
|
asanchez19/PupilTracking
|
02977de83c9f1481b700a3bb0ac37f0f814b1faa
|
[
"Apache-2.0"
] | 1
|
2019-12-23T08:47:57.000Z
|
2019-12-23T08:47:57.000Z
|
train_evaluator/TFRecord_Slim_Reader.py
|
asanchez19/PupilTracking
|
02977de83c9f1481b700a3bb0ac37f0f814b1faa
|
[
"Apache-2.0"
] | 4
|
2018-05-19T06:28:07.000Z
|
2020-07-09T05:13:22.000Z
|
import tensorflow as tf
def PupilDataset(tf_record_filename="C:\\LPW\\train.tfrecords.AlexJ"):
filename_queue = tf.train.string_input_producer([tf_record_filename])
reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  # returns the filename and the file contents
features = tf.parse_single_example(serialized_example,
features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string),
})
img = tf.decode_raw(features['img_raw'], tf.uint8)
img = tf.reshape(img, [224, 224, 3])
img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
lab = tf.cast(features['label'], tf.uint8)
    lab = tf.one_hot(lab, 2, 1, 0)
    return img, lab
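# Hedged usage sketch (TF1 queue-runner input pipeline; the session wiring
# below is an assumption, not part of this file):
# img, lab = PupilDataset()
# img_batch, lab_batch = tf.train.shuffle_batch([img, lab], batch_size=32,
#                                               capacity=2000, min_after_dequeue=1000)
# with tf.Session() as sess:
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#     images, labels = sess.run([img_batch, lab_batch])
#     coord.request_stop()
#     coord.join(threads)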
| 40.285714
| 88
| 0.556738
|
06781cc702222f07fcd4c0da5ff6b34e58db0e13
| 222
|
py
|
Python
|
8958.py
|
kwondohun0308/Beakjoon
|
65e8c1015bcb3bb757d8056525034ee333ecb681
|
[
"MIT"
] | null | null | null |
8958.py
|
kwondohun0308/Beakjoon
|
65e8c1015bcb3bb757d8056525034ee333ecb681
|
[
"MIT"
] | null | null | null |
8958.py
|
kwondohun0308/Beakjoon
|
65e8c1015bcb3bb757d8056525034ee333ecb681
|
[
"MIT"
] | null | null | null |
a = int(input())
for i in range(a):
arr_1 = list(input())
S = 0
start = 1
for j in arr_1:
if j == 'O':
S += start
start += 1
else:
start = 1
print(S)
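# Worked example (illustrative input, not from this file): for the line
# "OOXXOXXOOO" the consecutive-O scoring above yields
# 1+2+0+0+1+0+0+1+2+3 = 10.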
| 17.076923
| 25
| 0.387387
|
7aedc50ab9a71deb8d9da977f9a61d9c0d0bc5fa
| 3,053
|
py
|
Python
|
label_maker/images.py
|
cgoodier/label-maker
|
4e8774ff2790234352a78de82b92d762697a65d2
|
[
"MIT"
] | 1
|
2021-03-21T13:37:46.000Z
|
2021-03-21T13:37:46.000Z
|
label_maker/images.py
|
cgoodier/label-maker
|
4e8774ff2790234352a78de82b92d762697a65d2
|
[
"MIT"
] | null | null | null |
label_maker/images.py
|
cgoodier/label-maker
|
4e8774ff2790234352a78de82b92d762697a65d2
|
[
"MIT"
] | null | null | null |
# pylint: disable=unused-argument
"""Generate an .npz file containing arrays for training machine learning algorithms"""
from os import makedirs, path as op
from urllib.parse import urlparse
from random import shuffle
import numpy as np
import requests
from label_maker.utils import url
def download_images(dest_folder, classes, imagery, ml_type, background_ratio, **kwargs):
"""Download satellite images specified by a URL and a label.npz file
Parameters
------------
dest_folder: str
Folder to save labels, tiles, and final numpy arrays into
classes: list
A list of classes for machine learning training. Each class is defined as a dict
with two required properties:
- name: class name
- filter: A Mapbox GL Filter.
See the README for more details
imagery: str
Imagery template to download satellite images from.
Ex: http://a.tiles.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=ACCESS_TOKEN
ml_type: str
Defines the type of machine learning. One of "classification", "object-detection", or "segmentation"
background_ratio: float
Determines the number of background images to download in single class problems. Ex. A value
of 1 will download an equal number of background images to class images.
**kwargs: dict
Other properties from CLI config passed as keywords to other utility functions
"""
# open labels file
labels_file = op.join(dest_folder, 'labels.npz')
tiles = np.load(labels_file)
# create tiles directory
tiles_dir = op.join(dest_folder, 'tiles')
if not op.isdir(tiles_dir):
makedirs(tiles_dir)
# find tiles which have any matching class
def class_test(value):
"""Determine if a label matches a given class index"""
if ml_type == 'object-detection':
return len(value)
elif ml_type == 'segmentation':
return np.sum(value) > 0
elif ml_type == 'classification':
return value[0] == 0
return None
class_tiles = [tile for tile in tiles.files if class_test(tiles[tile])]
# for classification problems with a single class, we also get background
# tiles up to len(class_tiles) * config.get('background_ratio')
background_tiles = []
    limit = int(len(class_tiles) * background_ratio)  # cast to int: a float ratio would break the list slice below
if ml_type == 'classification' and len(classes) == 1:
background_tiles_full = [tile for tile in tiles.files if tile not in class_tiles]
shuffle(background_tiles_full)
background_tiles = background_tiles_full[:limit]
# download tiles
tiles = class_tiles + background_tiles
print('Downloading {} tiles to {}'.format(len(tiles), op.join(dest_folder, 'tiles')))
o = urlparse(imagery)
_, image_format = op.splitext(o.path)
for tile in tiles:
r = requests.get(url(tile.split('-'), imagery))
tile_img = op.join(dest_folder, 'tiles', '{}{}'.format(tile, image_format))
        with open(tile_img, 'wb') as tile_file:  # close the file handle deterministically
            tile_file.write(r.content)
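# Hedged usage sketch (the imagery URL and class filter are placeholders, not
# values from this project):
# download_images('data', [{'name': 'buildings', 'filter': ['has', 'building']}],
#                 'https://tiles.example.com/{z}/{x}/{y}.png',
#                 'classification', background_ratio=1.0)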
| 40.706667
| 108
| 0.679332
|
6b6e661db16eab9dd99920f02b65c97e1779f93d
| 1,756
|
py
|
Python
|
remove_chromosomes.py
|
djlduckett/Genome_Resources
|
7d0e17c349606464f11a20ce3f04acdc57202cd6
|
[
"MIT"
] | null | null | null |
remove_chromosomes.py
|
djlduckett/Genome_Resources
|
7d0e17c349606464f11a20ce3f04acdc57202cd6
|
[
"MIT"
] | null | null | null |
remove_chromosomes.py
|
djlduckett/Genome_Resources
|
7d0e17c349606464f11a20ce3f04acdc57202cd6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###Imports###
import numpy as np
import pandas as pd
import re
import sys
import collections
###Definitions###
vcf_file = sys.argv[1]
chr_file = sys.argv[2]
out_file = sys.argv[3]
###Functions###
def get_header_lines(vcf_file):
lines = []
comment = True
with open(vcf_file, 'r') as f:
        while comment:  # while line begins with '##'
            line = f.readline()
            comment = bool(re.search('##', line))
            if comment:
                lines.append(line)
    return lines
###Main###
header_lines = get_header_lines(vcf_file) # get vcf header lines with '##'
with open(chr_file, 'r') as f:  # read the chromosome list and close the handle
    chr_list = [x.strip('\n') for x in f.readlines()]
vcf_df = pd.read_table(vcf_file, delimiter = '\t', header = 1, skiprows = len(header_lines) - 1) # read in vcf
total_snps = len(vcf_df.index) # get starting number of snps
print("Total SNPs: %s" % total_snps)
chrom = vcf_df['#CHROM'] # get chromosomes
chrom = chrom.astype('str')
snp_dict = collections.defaultdict(list)
for key, value in zip(chrom.values, chrom.index): # create dictionary with locus ids as keys and indices as values
snp_dict[key].append(value)
for k in chr_list: # remove chromosomes from dictionary
snp_dict.pop(k, None)
keep_indices = [item for sublist in snp_dict.values() for item in sublist] # get snp indices to keep
snp_mod_df = vcf_df.iloc[keep_indices] # use single snp indices to subsample vcf dataframe
single_snps = len(snp_mod_df) # get number of subsampled snps
print("Subsampled SNPs: %s" % single_snps)
with open(out_file, 'a') as out:
out.write(''.join(header_lines)) # write vcf header lines
snp_mod_df.to_csv(out, sep = '\t', header = True, index = False)
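# Hedged usage sketch (derived from the sys.argv reads above):
# python remove_chromosomes.py input.vcf chromosomes_to_drop.txt output.vcf
# where chromosomes_to_drop.txt lists one chromosome name per line.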
| 30.275862
| 114
| 0.678815
|
ea20464908936d45f2d7dffe01ba623153057a1c
| 23,493
|
py
|
Python
|
Chempy/input/yields/West17/fortranfile/writer.py
|
jan-rybizki/Chempy
|
18260a0647b448a8a937384271c8a4de8702aefe
|
[
"MIT"
] | 25
|
2017-02-28T14:06:09.000Z
|
2022-03-21T16:00:40.000Z
|
Chempy/input/yields/West17/fortranfile/writer.py
|
oliverphilcox/ChempyMulti
|
1ab0d0c56a03c4f4b710ee8f0142bcccc7e84e22
|
[
"MIT"
] | 15
|
2017-02-03T14:45:26.000Z
|
2020-10-01T10:56:45.000Z
|
Chempy/input/yields/West17/fortranfile/writer.py
|
oliverphilcox/ChempyMulti
|
1ab0d0c56a03c4f4b710ee8f0142bcccc7e84e22
|
[
"MIT"
] | 14
|
2017-02-16T12:34:39.000Z
|
2021-07-12T02:51:41.000Z
|
"""
Classes for writing UNIX unformatted FORTRAN files.
"""
# TODO
# * for general reading, load needs to be able to specify record size?
# * fortranfile needs "backspace" and "truncate" functions.
import os
import sys
import gzip
import bz2
import lzma
import numpy as np
from .utils import prod
from .errors import RecordBeginningError, WriteError
from .common import _np_types, _set_method
from .defaults import FortranSpecs
#=======================================================================
# WRITER
#=======================================================================
class DataOutputBuffer(object):
"""
Interface for writing buffered output data
"""
default_byte_order = ">"
sys_is_le = sys.byteorder == "little"
native_byteorder = "<" if sys_is_le else ">"
initial_buf_size = 2**24
buf_grow_factor = 2
buf_grow_limit = 2**28
# as of 2011, even my best solid state drive will take 0.5 s to write that much
def __init__(self,
byteorder = default_byte_order,
**kwargs):
self._set_byteorder(byteorder = byteorder)
self._init()
def _init(self):
self.pos = 0
self.buffer = bytearray(self.initial_buf_size)
self.buf_size = self.initial_buf_size
def _set_byteorder(self,
byteorder = default_byte_order):
"""
        set up all data types for the desired byte order
"""
if byteorder == "=":
byteorder = self.native_byteorder
self.swapbyteorder = byteorder != self.native_byteorder
self.byteorder = byteorder
def bor(self):
"""Return whether position is beginning of record."""
return self.pos == 0
def assert_bor(self):
"""
        Throw exception if current position is not beginning of record.
        This can be used to determine whether all previous data has been written,
i.e., as a consistency check of previous writes.
"""
if not self.bor():
            raise RecordBeginningError(self.pos)
def _extend_buf(self):
"""
Grow write buffer as specified.
"""
self.buf_size += min(self.buf_size, self.buf_grow_limit)
new_buffer = bytearray(self.buf_size)
new_buffer[0:self.pos] = self.buffer[0:self.pos]
del self.buffer
self.buffer = new_buffer
def _check_buf_size(self, size, offset = None):
if offset is not None:
if offset <= 0:
p = size - offset
else:
p = self.pos + size + offset
else:
p = self.pos + size
while p > self.buf_size:
self._extend_buf()
def skip_bytes(self, nbytes, fill = None):
"""Skip a number of empty bytes, optionally initializing with `fill`."""
self._check_buf_size(nbytes)
if fill is not None:
if isinstance(fill, bytes):
self.buffer[self.pos:self.pos+nbytes] = (fill * (nbytes // len(fill) + 1))[0:nbytes]
else:
self.buffer[self.pos:self.pos+nbytes] = bytes(nbytes)
self.pos += nbytes
def put_data(self, data):
"""
Just put all the data into record.
"""
self.assert_bor()
size = len(data)
self._check_buf_size(size)
self.buffer[0:size] = data
self.pos = size
def put_n(self, data, dtype = None, order = 'F', offset = None):
"""
Write numpy object to buffer
KEYWORDS:
order - output order of array
`None` - use data default
default is `F`
dtype - output data type
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
if dtype is None:
dtype = data.dtype
else:
dtype = np.dtype(dtype)
if order is None:
if data.flags.fnc:
order = 'F'
else:
order = 'C'
assert order in ('F', 'C')
new_data = np.ndarray(data.shape,
dtype = dtype,
order = order)
new_data[()] = data[()]
data = new_data
if not data.flags.c_contiguous:
data = np.ndarray(data.shape,
dtype = data.dtype,
buffer = data.data,
order = 'C')
if self.swapbyteorder:
data.byteswap(True)
nbytes = data.nbytes
self._check_buf_size(nbytes, offset)
if offset is not None:
if offset > 0:
p = self.pos + offset
else:
p = -offset
else:
p = self.pos
self.buffer[p:p+nbytes] = data.data.tobytes()
if offset is None:
self.pos += nbytes
def put_1d(self, data, dtype=np.float64, lead=0, tail=0, offset = None):
"""Write a 1D np array padded with 0 on either side as specified. Do not write padding.
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
if dtype is None:
dtype = data.dtype
else:
dtype = np.dtype(dtype)
items = prod(data.shape) - lead - tail
new_data = np.ndarray(items, dtype = dtype)
new_data[0:items] = data.flat[lead:lead+items]
data = new_data
if self.swapbyteorder:
data.byteswap(True)
nbytes = data.nbytes
self._check_buf_size(nbytes, offset)
if offset is not None:
if offset > 0:
p = self.pos + offset
else:
p = -offset
else:
p = self.pos
self.buffer[p:p+nbytes] = data.data.tobytes()
if offset is None:
self.pos += nbytes
@staticmethod
def get_s_len(s, codec = 'cp437', strip = False):
"""
Return length of string after encoding.
If parameter is an array, return array of same shape.
If parameter is not an np.ndarray, return (nested) list.
PARAMETERS
codec - default: 'cp437' used to encode
"""
t = type(s)
if not isinstance(s, np.ndarray):
s = np.array(s, dtype = np.object)
l = np.ndarray(s.shape, dtype = np.int)
sflat = s.flat
lflat = l.flat
if strip:
for i in range(len(sflat)):
lflat[i] = len(sflat[i].strip().encode(codec))
else:
for i in range(len(sflat)):
lflat[i] = len(sflat[i].encode(codec))
if not issubclass(t, np.ndarray):
l = l.tolist()
else:
l = l[()]
return l
def put_s(self, s, length = None, fill = b'\x00', codec = 'cp437', order = 'F', strip = False, offset = None):
"""write string (array) to buffer
KWARGS
length - >0: length of string - fill/truncate
                 -1: find and use the max length
None: write actual length of each string
np.ndarray: length of strings if match shape
                 (TODO - extend in missing dimensions?)
fill - pattern (not encoded), memory data if None
codec - default 'cp437'
order - of data to write to buffer, default is 'F'
offset - relative to current location if positive
relative to beginning of buffer if negative (abs value)
                 `None` - no offset, advance buffer
"""
        if order is None:
            if isinstance(s, np.ndarray) and s.flags.fnc:
                order = 'F'
            else:
                order = 'C'
        assert order in ('F', 'C')
if not isinstance(s, np.ndarray):
s = np.array(s, dtype = np.object, order = order)
# create length array
try:
if length is None or length == -1:
l = self.get_s_len(s)
if length == -1:
l = np.max(l)
else:
l = length
except ValueError:
l = length
if not isinstance(l, np.ndarray):
l = np.array(l, dtype = np.int)
if prod(l.shape) == 1:
l = np.array(l.flat[0])
if l.shape == ():
l = np.tile(l, s.shape)
        if order == 'F' and not s.flags.f_contiguous:
            s = s.copy(order = 'F')
        if order == 'F' and not l.flags.f_contiguous:
            l = l.copy(order = 'F')
if not s.flags.c_contiguous:
s = np.ndarray(s.shape,
dtype = s.dtype,
buffer = s.data,
order = 'C')
if not l.flags.c_contiguous:
l = np.ndarray(l.shape,
dtype = l.dtype,
buffer = l.data,
order = 'C')
nbytes = np.sum(l)
self._check_buf_size(nbytes, offset)
if offset is not None:
if offset > 0:
p = self.pos + offset
else:
p = -offset
else:
p = self.pos
if prod(l.shape) > 0:
lmax = np.max(l)
else:
lmax = 0
f = (fill * (lmax // len(fill) + 1))[:lmax]
for si, li in zip(s.flat, l.flat):
d = si.encode(codec)
n = min(len(d), li)
self.buffer[p:p+n] = d[:n]
if fill is not None and n < li:
self.buffer[p+n:p+li] = f[:li-n]
p += li
if offset is None:
self.pos += nbytes
        assert p == self.pos, "inconsistency in written data"
# binary data
def put_buf(self, data, length = None, order = 'F', fill = b'\x00', offset = None):
"""Write array/list of raw data pieces of equal length to buffer.
ARGS:
data - array/scalar to be written
KEYWORDS:
length - of data pieces, truncate/fill, if `None` use max value
order - of junks in written array, default is 'F'
fill - default is \\x00
"""
if fill is not None:
assert isinstance(fill, bytes) , \
"Only bytes-type fill allowed."
if length is None:
dtype = np.dtype(np.bytes_)
else:
dtype = np.dtype((np.bytes_, length))
data = np.array(data,
dtype = dtype,
order = order)
        if fill not in (None, b'\x00'):
if length is None:
length = data.dtype.itemsize
f = (fill * (length // len(fill) + 1)) [:length]
# array operations for concatenation do not work in numpy 1.11
d = data.flat
for i in range(prod(d.shape)):
d[i] += f
if not data.flags.c_contiguous:
data = np.ndarray(data.shape,
dtype = data.dtype,
buffer = data,
order = 'C')
nbytes = data.nbytes
self._check_buf_size(nbytes, offset)
if offset is not None:
if offset > 0:
p = self.pos + offset
else:
p = -offset
else:
p = self.pos
self.buffer[p:p+nbytes] = data.tobytes()
if offset is None:
self.pos += nbytes
# ========================================
# application-specific routines
# ========================================
def put_kep_parm(self, data):
"""Write a kepler parameter binary list with 32 bit integers."""
count = len(data)
value = np.zeros(
count,
dtype=np.float64)
ivalue = np.ndarray(
count,
buffer=value.data.cast('b'),
offset=4,
dtype=np.int32,
strides=8)
for i,d in enumerate(data):
if d.dtype == np.int32:
ivalue[i] = d
else:
value[i] = d
if self.swapbyteorder:
value.byteswap(True)
p = self.pos
nbytes = value.nbytes
self._check_buf_size(nbytes)
self.buffer[p:p+nbytes] = value.data.tobytes()
self.pos += nbytes
def put_kep_parm64(self, data):
"""Write a kepler parameter binary list with 64 bit integers."""
count = len(data)
if count == 0:
return
value = np.zeros(
count,
dtype=np.float64)
ivalue = np.ndarray(
count,
buffer=value.data.cast('b'),
dtype=np.int64)
for i,d in enumerate(data):
if d.dtype == np.int64:
ivalue[i] = d
else:
value[i] = d
if self.swapbyteorder:
value.byteswap(True)
p = self.pos
nbytes = value.nbytes
self._check_buf_size(nbytes)
self.buffer[p:p+nbytes] = value.data.tobytes()
self.pos += nbytes
def put_f8_kep_i4(self, data):
"""Write i4 in f8 array for kepler.
Pass the f8 dimension.
Half the space seems wasted the way KEPLER treats this, the
entire second half of each array is empty.
Here we shall just fill up the 2nd part of the array and
write the passed dimension.
Byteswap is only needed on i4 level (see read routine).
"""
self.put_n(data)
self.skip_bytes(data.nbytes, fill=b'\x00')
# dummy routine to allow data IO code below
def write(self):
"""
Provide interface for writing data to whereever.
"""
raise NotImplementedError("Writing Data not implemented.")
# =======================================================================
# data IO
# =======================================================================
def write_bytes(self, *args, **kwargs):
"""Write numpy empty bytes to file"""
self.assert_bor()
self.skip_bytes(*args, **kwargs)
self.write()
def write_data(self, *args, **kwargs):
"""Write plain buffer to file"""
self.assert_bor()
self.put_data(*args, **kwargs)
self.write()
def write_n(self, *args, **kwargs):
"""Write numpy scalar/array to file"""
self.assert_bor()
kwargs['offset'] = None
self.put_n(*args, **kwargs)
self.write()
def write_1d(self, *args, **kwargs):
"""Write 1d padded numpy scalar/array to file"""
self.assert_bor()
kwargs['offset'] = None
self.put_1d(*args, **kwargs)
self.write()
def write_s(self, *args, **kwargs):
"""Write (numpy) string (array) to file"""
self.assert_bor()
kwargs['offset'] = None
self.put_s(*args, **kwargs)
self.write()
def write_buf(self, *args, **kwargs):
"""Write array/list of raw data pieces to file"""
self.assert_bor()
kwargs['offset'] = None
self.put_buf(*args, **kwargs)
self.write()
# application-specific routines
def write_kep_parm(self, data):
"""
write kepler parm array to file
"""
self.assert_bor()
self.put_kep_parm(data)
self.write()
def write_f8_kep_i4(self, data):
"""Write i4 in f8 array for kepler.
Pass the f8 dimension.
Half the space seems wasted the way KEPLER treats this, the
        entire second half of each array is empty.
Here we shall just fill up the 2nd part of the array and
write the passed dimension.
Byteswap is only needed on i4 level (see read routine).
"""
self.assert_bor()
self.put_f8_kep_i4(data)
self.write()
def _f_store_n(p = None, dt = None, **_kwargs):
def _f(self, *args, **kwargs):
kwargs['dtype'] = dt
kwargs.setdefault('order', 'F')
p(self, *args, **kwargs)
_f.dt = dt
_f.p = p
return _f
def _f_store_n1d(p = None, dt = None, **_kwargs):
def _f(self, data, *args, **kwargs):
kwargs['dtype'] = dt
p(self, data, *args, **kwargs)
_f.p = p
_f.dt = dt
return _f
def _f_store_n1d_(p = None, dt = None, lead = 0, tail = 0, **_kwargs):
def _f(self, data, *args, **kwargs):
kwargs['dtype'] = dt
kwargs['lead'] = lead
kwargs['tail'] = tail
p(self, data, *args, **kwargs)
_f.p = p
_f.dt = dt
_f.lead = lead
_f.tail = tail
return _f
for t in _np_types:
kw = dict(
cls = DataOutputBuffer,
t = t)
_set_method(
fn = _f_store_n,
parent = 'put_n',
name = 'put_{t}',
doc = """Write numpy {dn} to buffer at offset relative to
current position.\n
Does not advance buffer pointer.""",
**kw)
_set_method(
fn = _f_store_n,
parent = 'write_n',
name = 'write_{t}',
doc = "Write numpy {dn} array to file as record.",
**kw)
_set_method(
fn = _f_store_n1d,
parent = 'put_1d',
name = 'put_{t}_1d',
doc = """Write a 1D numpy {dn} array padded with 0 as specified to buffer. Padding is not written.""",
**kw)
_set_method(
fn = _f_store_n1d,
parent = 'write_1d',
name = 'write_{t}_1d',
doc = """Write a 1D numpy {dn} array padded with 0 as specified to file as record. Padding is not written.""",
**kw)
_set_method(
fn = _f_store_n1d_,
parent = 'put_1d',
name = 'put_{t}_1d_0',
doc = """Write a 1D numpy {dn} array padded with one element at beginning. Padding is not written.""",
extra_kw = dict(lead=1, tail=0),
**kw)
_set_method(
fn = _f_store_n1d_,
parent = 'write_1d',
name = 'write_{t}_1d_0',
doc = """Write a 1D numpy {dn} array padded with one element at beginning to file as record. Padding is not written.""",
extra_kw = dict(lead=1, tail=0),
**kw)
_set_method(
fn = _f_store_n1d_,
parent = 'put_1d',
name = 'put_{t}_1d_n',
doc = """Write a 1D numpy {dn} array padded with one element at end. Padding is not written.""",
extra_kw = dict(lead=0, tail=1),
**kw)
_set_method(
fn = _f_store_n1d_,
parent = 'write_1d',
name = 'write_{t}_1d_n',
doc = """Write a 1D numpy {dn} array padded with one element at end to file as record. Padding is not written.""",
extra_kw = dict(lead=0, tail=1),
**kw)
_set_method(
fn = _f_store_n1d_,
parent = 'put_1d',
name = 'put_{t}_1d_0n',
doc = """Write a 1D numpy {dn} array padded with one element at begiining and end each. Padding is not written.""",
extra_kw = dict(lead=1, tail=1),
**kw)
_set_method(
fn = _f_store_n1d_,
parent = 'write_1d',
name = 'write_{t}_1d_0n',
doc = """Write a 1D numpy {dn} array padded with one element at beginning and end each to file as record. Padding is not written.""",
extra_kw = dict(lead=1, tail=1),
**kw)
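# Hedged note (added commentary): the loop above stamps typed helpers onto
# DataOutputBuffer, e.g. put_f8 and write_f8, plus padded variants such as
# write_f8_1d_0n, one family per entry in the imported _np_types table
# (the exact names depend on that table, which is defined elsewhere in
# this package).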
#=======================================================================
class DataWriter(DataOutputBuffer):
"""
Class for writing 'unformatted' binary files.
File names ending with .gz, .xz, .bz2 will be automatically
compressed.
For .gz this may fail, however, if the file is bigger than 2GB or
4GB.
"""
    # TODO: add (file) truncate functionality
def __init__(self,
filename, *args, **kwargs):
"""
Initialize data fields and open file.
Optionally the byte order can be specified.
The default is big endian.
"""
# TODO: Add append mode.
# not sure how/whether this will work with compressed files
super().__init__(*args, **kwargs)
self.open(filename)
def open(self, filename):
"""
Open the file for writing.
"""
self.filename = os.path.expandvars(os.path.expanduser(filename))
if self.filename.endswith('.gz'):
self.compressed = True
self.compress_mode = 'gz'
self.file = gzip.open(filename,'wb')
elif self.filename.endswith('.bz2'):
self.compressed = True
self.compress_mode = 'bz2'
self.file = bz2.BZ2File(filename,'wb',2**16)
elif self.filename.endswith('.xz'):
self.compressed = True
self.compress_mode = 'xz'
            self.file = lzma.LZMAFile(self.filename, 'wb')
else:
self.file = open(filename,'wb',-1)
self.compressed = False
self.compress_mode = None
self._init()
def _init(self):
"""Initialize the file position and data to empty."""
super()._init()
self.fpos = 0
def close(self):
"""Close the file."""
if self.pos != 0:
self.write()
self.file.close()
def rewind(self):
"""Rewind the file."""
self.file.seek(0, os.SEEK_SET)
self._init()
def write(self):
"""
Write the data record to file.
"""
self._write()
def _write(self):
"""
Write a data record to file.
"""
self.file.write(self.buffer[0:self.pos])
self.fpos += self.pos
self.pos = 0
# context manager interface
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.close()
return False
#=======================================================================
class FortranWriter(DataWriter, FortranSpecs):
"""
Class for writing 'unformatted' Fortran binary files.
Based on DataWriter, automatic compression support.
"""
# I still need to find out how to efficiently work with a buffer
# for writing internally - how to extend it, etc. The plan is to
# always write an entire record at once. Currently it would
# appear the largest FORTRAN files I have come with records for
# ppnb in KEPLER, 5000 isotopes * 2000 zones * 8 bytes ... 80 MB
# if pushing it ... usually < 16 MB. So we could start with that,
# then extend if ever needed.
    # TODO - add functionality for record-based backward skipping
    # (potentially need to truncate file on close)
def __init__(self, *args, reclen = 4, **kwargs):
self._set_reclen(reclen)
super().__init__(*args, **kwargs)
def _init(self):
"""Initialize the file position and data to empty."""
super()._init()
self.rpos = 0
def _set_byteorder(self, *args, **kwargs):
super()._set_byteorder(*args, **kwargs)
self.reclen_dtype = self.reclen_dtype.newbyteorder(self.byteorder)
def _write(self):
"""
Write a data record to file.
"""
self._write_reclen()
self.file.write(self.buffer[0:self.pos])
self._write_reclen()
self.fpos += self.pos + 2 * self.fortran_reclen
self.rpos += 1
self.pos = 0
def _write_reclen(self):
"""Write the record length."""
self.file.write(np.array(
self.pos,
dtype = self.reclen_dtype).data.tobytes())
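# Hedged usage sketch (assumes _np_types includes 'f8', so write_f8 was
# generated above; the file name is illustrative):
# with FortranWriter('dump.bin', byteorder='>') as fw:
#     fw.write_f8(np.arange(10, dtype=np.float64))
# Each record is framed by 4-byte record lengths, matching Fortran
# 'unformatted' sequential I/O.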
| 31.324
| 142
| 0.519644
|
69221d87120b1fc09cb17bd910838805bd4a8321
| 5,521
|
py
|
Python
|
build/PureCloudPlatformClientV2/models/sms_config.py
|
cjohnson-ctl/platform-client-sdk-python
|
38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100
|
[
"MIT"
] | 10
|
2019-02-22T00:27:08.000Z
|
2021-09-12T23:23:44.000Z
|
libs/PureCloudPlatformClientV2/models/sms_config.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 5
|
2018-06-07T08:32:00.000Z
|
2021-07-28T17:37:26.000Z
|
libs/PureCloudPlatformClientV2/models/sms_config.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 6
|
2020-04-09T17:43:07.000Z
|
2022-02-17T08:48:05.000Z
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class SmsConfig(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SmsConfig - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'message_column': 'str',
'phone_column': 'str',
'sender_sms_phone_number': 'SmsPhoneNumberRef'
}
self.attribute_map = {
'message_column': 'messageColumn',
'phone_column': 'phoneColumn',
'sender_sms_phone_number': 'senderSmsPhoneNumber'
}
self._message_column = None
self._phone_column = None
self._sender_sms_phone_number = None
@property
def message_column(self):
"""
Gets the message_column of this SmsConfig.
The Contact List column specifying the message to send to the contact.
:return: The message_column of this SmsConfig.
:rtype: str
"""
return self._message_column
@message_column.setter
def message_column(self, message_column):
"""
Sets the message_column of this SmsConfig.
The Contact List column specifying the message to send to the contact.
:param message_column: The message_column of this SmsConfig.
:type: str
"""
self._message_column = message_column
@property
def phone_column(self):
"""
Gets the phone_column of this SmsConfig.
The Contact List column specifying the phone number to send a message to.
:return: The phone_column of this SmsConfig.
:rtype: str
"""
return self._phone_column
@phone_column.setter
def phone_column(self, phone_column):
"""
Sets the phone_column of this SmsConfig.
The Contact List column specifying the phone number to send a message to.
:param phone_column: The phone_column of this SmsConfig.
:type: str
"""
self._phone_column = phone_column
@property
def sender_sms_phone_number(self):
"""
Gets the sender_sms_phone_number of this SmsConfig.
A reference to the SMS Phone Number that will be used as the sender of a message.
:return: The sender_sms_phone_number of this SmsConfig.
:rtype: SmsPhoneNumberRef
"""
return self._sender_sms_phone_number
@sender_sms_phone_number.setter
def sender_sms_phone_number(self, sender_sms_phone_number):
"""
Sets the sender_sms_phone_number of this SmsConfig.
A reference to the SMS Phone Number that will be used as the sender of a message.
:param sender_sms_phone_number: The sender_sms_phone_number of this SmsConfig.
:type: SmsPhoneNumberRef
"""
self._sender_sms_phone_number = sender_sms_phone_number
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
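# Hedged usage sketch (attribute values are placeholders):
# cfg = SmsConfig()
# cfg.message_column = 'message'
# cfg.phone_column = 'phone'
# print(cfg.to_json())  # serializes via to_dict() and sanitize_for_serialization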
| 30.169399
| 89
| 0.604238
|
bce1489f133afee12e58137c1434055371d5219d
| 2,830
|
py
|
Python
|
data_output.py
|
woctezuma/playing-with-simpleitk
|
a9ff669afdacdced53372ab0f13a21d71f1cf13c
|
[
"MIT"
] | 6
|
2019-11-25T17:58:53.000Z
|
2022-02-17T19:32:30.000Z
|
data_output.py
|
woctezuma/playing-with-simpleitk
|
a9ff669afdacdced53372ab0f13a21d71f1cf13c
|
[
"MIT"
] | 1
|
2019-11-25T18:15:05.000Z
|
2019-11-25T18:35:51.000Z
|
data_output.py
|
woctezuma/playing-with-simpleitk
|
a9ff669afdacdced53372ab0f13a21d71f1cf13c
|
[
"MIT"
] | 5
|
2020-09-15T07:04:20.000Z
|
2022-01-23T23:42:50.000Z
|
from pathlib import Path
from utils import get_data_root, get_ground_truth_keyword
def get_image_modality_convention_dict():
# We index each modality used in our dataset.
modality_dict = dict()
# The first modality is indexed with 0, following convention in:
# https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunet/dataset_conversion
modality_dict['ct'] = 0
return modality_dict
def get_output_save_folder():
# This is the folder where the output will be saved.
output_save_folder = 'output/'
return output_save_folder
def get_output_folder_structure():
# Convention used in:
# https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunet/dataset_conversion
output_folder_structure = dict()
output_folder_structure['training'] = dict()
output_folder_structure['training']['images'] = 'imagesTr/'
output_folder_structure['training']['ground_truth'] = 'labelsTr/'
output_folder_structure['test'] = dict()
output_folder_structure['test']['images'] = 'imagesTs/'
return output_folder_structure
def get_image_modality_convention(modality):
# Convention used in:
# https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunet/dataset_conversion
modality_dict = get_image_modality_convention_dict()
try:
modality_no = modality_dict[modality]
image_modality_convention = '_{:04.0f}'.format(modality_no)
    except (KeyError, TypeError):
image_modality_convention = ''
return image_modality_convention
def get_data_file_prefix():
# Convention used in:
# https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunet/dataset_conversion
data_file_prefix = 'patientID'
return data_file_prefix
def get_output_folder(data_root=None,
modality=None):
if data_root is None:
data_root = get_data_root()
output_folder_structure = get_output_folder_structure()
if modality is None:
folder = ''
elif modality == get_ground_truth_keyword():
folder = output_folder_structure['training']['ground_truth']
else:
folder = output_folder_structure['training']['images']
output_folder = data_root + get_output_save_folder() + folder
Path(output_folder).mkdir(parents=True, exist_ok=True)
return output_folder
def get_output_image_name(patient_no,
output_file_format,
data_root=None,
modality=None):
output_folder = get_output_folder(data_root, modality=modality)
file_name = get_data_file_prefix() + str(patient_no) + get_image_modality_convention(modality) + output_file_format
output_image_name = output_folder + file_name
return output_image_name
def main():
return True
if __name__ == '__main__':
main()
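# Hedged usage sketch (data_root and the file format are assumptions):
# get_output_image_name(7, '.nii.gz', modality='ct')
# would resolve to '<data_root>output/imagesTr/patientID7_0000.nii.gz',
# following the nnU-Net naming convention referenced above.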
| 26.698113
| 119
| 0.704947
|
a60912286ff6ab0ad5b417fe4e5ba725ef8001c3
| 2,220
|
py
|
Python
|
mailchat/forms.py
|
MachineThing/Portfolio-SiteV2
|
5ffff652e0d19d49585cadb0df4521933df4d41c
|
[
"MIT"
] | 2
|
2021-05-29T04:52:50.000Z
|
2021-07-04T15:40:27.000Z
|
mailchat/forms.py
|
MachineThing/Portfolio-SiteV2
|
5ffff652e0d19d49585cadb0df4521933df4d41c
|
[
"MIT"
] | 9
|
2021-02-13T20:19:53.000Z
|
2021-07-04T15:43:57.000Z
|
mailchat/forms.py
|
MachineThing/Portfolio-SiteV2
|
5ffff652e0d19d49585cadb0df4521933df4d41c
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.forms import Form
from django.utils.crypto import get_random_string
from unidecode import unidecode
import country_list
from . import fields
from .models import Email
def return_countries():
countries = country_list.countries_for_language('EN')[:] # Copy the list to avoid modifying the master list
    def filter_list(my_list, target):
        # Find the index of the first country tuple containing the target code
        return my_list.index(list(filter(lambda iterable: target.upper() in iterable, my_list))[0])
# Remove "troll" countries
troll_countries = ['KP', 'AQ']
for count in troll_countries:
countries.pop(filter_list(countries, count))
# Reverse sort (while treating diacritics as normal alphabet or it will go wrong)
countries.sort(key=lambda x: unidecode(x[1]).upper(), reverse=True)
# Add some countries to the front
countries += [countries.pop(filter_list(countries, 'GB'))]
countries += [countries.pop(filter_list(countries, 'US'))]
countries.reverse()
return countries
class MailForm(Form):
name = fields.CharField(label='Name:')
company = fields.CharField(label='School/Company:')
country = fields.SelectField(label='Country:', choices=return_countries())
email = fields.EmailField(label='Email address:')
subject = fields.CharField(label='Subject:')
message = fields.CharField(big=True, label='Message:', attrs={'rows':3})
def save(self, captcha_score):
data = self.cleaned_data
model = Email()
model.verified = False
model.sendee = data['email']
model.sending_date = datetime.now()
if captcha_score > 1 or captcha_score < 0:
model.captcha_score = 0.0 # Sets the captcha_score to 0 if the score is fishy
else:
model.captcha_score = round(captcha_score, 2)
model.verify_url = get_random_string(15)
model.message = """---Header Start---
Sendee: {}
Sending Date: {}
Company: {}
Name: {}
Country: {}
Captcha Score: {}
--- Header End ---
Subject: {}
{}
""".format(data['email'], model.sending_date, data['company'], data['name'], data['country'], model.captcha_score, data['subject'], data['message'])
model.save()
return model
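# Hedged usage sketch (the Django view wiring below is an assumption, not part
# of this file):
# form = MailForm(request.POST)
# if form.is_valid():
#     email_obj = form.save(captcha_score=0.9)  # persists an Email row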
| 37
| 151
| 0.683784
|
2d90d358b50d2ef42bddafb660be490607ecd4b9
| 8,489
|
py
|
Python
|
test/functional/test_framework/key.py
|
kenfmcoin/kenfmcoin
|
1fa48487593233f2066757dc54f48b2349e2d9db
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/key.py
|
kenfmcoin/kenfmcoin
|
1fa48487593233f2066757dc54f48b2349e2d9db
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/key.py
|
kenfmcoin/kenfmcoin
|
1fa48487593233f2066757dc54f48b2349e2d9db
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011 Sam Rushing
"""ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-kenfmcoinlib.
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey(object):
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
        if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
            raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
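# Hedged usage sketch (the 32-byte secret is an arbitrary example value):
# key = CECKey()
# key.set_secretbytes(b'\x01' * 32)
# digest = hashlib.sha256(b'message').digest()
# sig = key.sign(digest)
# assert key.verify(digest, sig)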
| 36.433476
| 130
| 0.687831
|
bcf0169a7e492b74a2257edeb14c0d14dfd9d64e
| 3,902
|
py
|
Python
|
benchmark/startCirq1454.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq1454.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq1454.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=51
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[1])) # number=26
c.append(cirq.CZ.on(input_qubit[4],input_qubit[1])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=28
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
c.append(cirq.H.on(input_qubit[1])) # number=34
c.append(cirq.CZ.on(input_qubit[4],input_qubit[1])) # number=35
c.append(cirq.Z.on(input_qubit[4])) # number=46
c.append(cirq.rx(0.8011061266653969).on(input_qubit[2])) # number=37
c.append(cirq.H.on(input_qubit[1])) # number=36
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=48
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=49
c.append(cirq.H.on(input_qubit[0])) # number=50
c.append(cirq.X.on(input_qubit[0])) # number=39
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=40
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=42
c.append(cirq.X.on(input_qubit[1])) # number=43
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=44
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.Y.on(input_qubit[1])) # number=45
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=41
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=22
c.append(cirq.X.on(input_qubit[4])) # number=47
c.append(cirq.X.on(input_qubit[0])) # number=23
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=30
c.append(cirq.X.on(input_qubit[1])) # number=31
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=32
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.H.on(input_qubit[4])) # number=29
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1454.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
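# Hedged note (added commentary): `frequencies` maps 5-bit result strings
# (via bitstring) to counts over the 2000 repetitions; the CSV written above
# stores that histogram along with the optimized circuit and its length.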
| 38.254902
| 77
| 0.652486
|
6a3aa0f471befaf396ddcafeecdcee339c1d2cc1
| 17,766
|
py
|
Python
|
gameState.py
|
LimIvan336/chessengine-with-pygame
|
9ff10e90b101de1c2b4ab4f907e4de2138f824ce
|
[
"MIT"
] | 1
|
2021-06-05T08:12:55.000Z
|
2021-06-05T08:12:55.000Z
|
gameState.py
|
LimIvan336/chessengine-with-pygame
|
9ff10e90b101de1c2b4ab4f907e4de2138f824ce
|
[
"MIT"
] | null | null | null |
gameState.py
|
LimIvan336/chessengine-with-pygame
|
9ff10e90b101de1c2b4ab4f907e4de2138f824ce
|
[
"MIT"
] | null | null | null |
#Store the state, info of the game
from castleRights import CastleRights
from move import Move
class GameState():
def __init__(self):
#initialize 8x8 chess board
#2 characters, bK - black King
#"--" represents empty slot
self.board = [
["bR", "bN", "bB", "bQ", "bK", "bB", "bN", "bR"],
["bp", "bp", "bp", "bp", "bp", "bp", "bp", "bp"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["wp", "wp", "wp", "wp", "wp", "wp", "wp", "wp"],
["wR", "wN", "wB", "wQ", "wK", "wB", "wN", "wR"]
]
self.white_to_move = True #white starts to move
self.move_log = []
self.move_functions = {"p": self.get_pawn_moves, "R": self.get_rook_moves, "N": self.get_knight_moves,
"B": self.get_bishop_moves, "Q": self.get_queen_moves, "K": self.get_king_moves}
#for checking valid moves (check, checkmate) and castling moves
self.white_king_location = (7,4)
self.black_king_location = (0,4)
self.checkmate = False
self.stalemate = False
#en passant
self.enpassant_possible = () #coord where en passant is possible
#castling rights
self.current_castling_rights = CastleRights(True, True, True, True)
self.castling_rights_log = [CastleRights(self.current_castling_rights.wks, self.current_castling_rights.bks,
self.current_castling_rights.wqs, self.current_castling_rights.bqs)] #to undo
#executes a move, doesnt work on en passant, castling, pawn promotion
def make_move(self, move):
self.board[move.start_row][move.start_col] ="--"
self.board[move.end_row][move.end_col] = move.piece_moved
self.move_log.append(move) #to undo it later
#update king's location (for checking valid moves)
if move.piece_moved == "wK":
self.white_king_location = (move.end_row, move.end_col)
elif move.piece_moved == "bK":
self.black_king_location = (move.end_row, move.end_col)
#pawn promotion make to queen
if move.is_pawn_promotion:
self.board[move.end_row][move.end_col] = move.piece_moved[0] + "Q"
#en passant
if move.is_enpassant_move:
self.board[move.start_row][move.end_col] = "--" #capturing pawn
#update enpassant_possible var
if move.piece_moved[1] == "p" and abs(move.start_row - move.end_row) == 2: #only works on 2 square pawn adv
self.enpassant_possible = ((move.start_row + move.end_row)//2, move.start_col)
else:
self.enpassant_possible = () #reset back to "no" moves
#castling
if move.is_castle_move:
if move.end_col - move.start_col == 2: #this is a king side castle moves
self.board[move.end_row][move.end_col-1] = self.board[move.end_row][move.end_col + 1] #moves rook
self.board[move.end_row][move.end_col+1] = "--" #removes old rook
else: #this is a queen side castle move
self.board[move.end_row][move.end_col+1] = self.board[move.end_row][move.end_col - 2] #moves rook
self.board[move.end_row][move.end_col-2] = "--"
#update castling rights - when rook / king moves
self.update_castling_rights(move)
self.castling_rights_log.append(CastleRights(self.current_castling_rights.wks, self.current_castling_rights.bks,
self.current_castling_rights.wqs, self.current_castling_rights.bqs)) #to undo later
self.white_to_move = not self.white_to_move #swap players
def update_castling_rights(self, move):
#check white king
if move.piece_moved == "wK":
self.current_castling_rights.wks = False
self.current_castling_rights.wqs = False
#check black king
elif move.piece_moved == "bK":
self.current_castling_rights.bks = False
self.current_castling_rights.bqs = False
#check rooks
elif move.piece_moved == "wR":
if move.start_row == 7:
if move.start_col == 0: #left rook
self.current_castling_rights.wqs = False
elif move.start_col == 7: #right rook
self.current_castling_rights.wks = False
elif move.piece_moved == "bR":
if move.start_row == 0:
if move.start_col == 0: #left rook
self.current_castling_rights.bqs = False
elif move.start_col == 7: #right rook
self.current_castling_rights.bks = False
#check if rook is taken
if move.piece_captured == "wR":
if move.end_row == 7:
if move.end_col == 0: #left rook
self.current_castling_rights.wqs = False
elif move.end_col == 7: #right rook
self.current_castling_rights.wks = False
elif move.piece_captured == "bR":
            if move.end_row == 0:
if move.end_col == 0: #left rook
self.current_castling_rights.bqs = False
elif move.end_col == 7: #right rook
self.current_castling_rights.bks = False
#undo last move
def undo_move(self):
        if len(self.move_log) != 0: #make sure there's a move to undo
move = self.move_log.pop()
self.board[move.start_row][move.start_col] = move.piece_moved
self.board[move.end_row][move.end_col] = move.piece_captured
#undo king's location (for checking valid moves)
if move.piece_moved == "wK":
self.white_king_location = (move.start_row, move.start_col)
elif move.piece_moved == "bK":
self.black_king_location = (move.start_row, move.start_col)
#undo en passant move
if move.is_enpassant_move:
self.board[move.end_row][move.end_col] = "--"
self.board[move.start_row][move.end_col] = move.piece_captured
self.enpassant_possible = (move.end_row, move.end_col) #make sure enpassant_possible is set back
#undo 2 square pawn advance
if move.piece_moved[1] == "p" and abs(move.start_row - move.end_row) ==2:
self.enpassant_possible = ()
#undo castling rights
self.castling_rights_log.pop() #remove last castling rights
new_rights = self.castling_rights_log[-1]
self.current_castling_rights = CastleRights(new_rights.wks, new_rights.bks, new_rights.wqs, new_rights.bqs) #set current_castling_rights to last castling rights
#undo castle moves
if move.is_castle_move:
if move.end_col - move.start_col == 2: #kingside
self.board[move.end_row][move.end_col + 1] = self.board[move.end_row][move.end_col - 1] #moves rook back
self.board[move.end_row][move.end_col -1] = "--"
else: #queenside
self.board[move.end_row][move.end_col -2] = self.board[move.end_row][move.end_col + 1] #moves rook back
self.board[move.end_row][move.end_col +1] = "--"
self.white_to_move = not self.white_to_move
#All moves considering checks (valid moves)
def get_valid_moves(self):
#Debugging castling_rights_log
# for log in self.castling_rights_log:
# print(log.wks, log.wqs, log.bks, log.bqs, end=", ")
# print()
temp_enpassant_possible = self.enpassant_possible #save value when generating all possible moves
temp_castling_rights = CastleRights(self.current_castling_rights.wks, self.current_castling_rights.bks,
self.current_castling_rights.wqs, self.current_castling_rights.bqs)
#generate all possible moves
moves = self.get_all_possible_moves()
if self.white_to_move:
self.get_castling_moves(self.white_king_location[0], self.white_king_location[1], moves)
else:
self.get_castling_moves(self.black_king_location[0], self.black_king_location[1], moves)
#for each move, make that move
for idx in range(len(moves)-1,-1,-1): #backwards to reduce bugs
self.make_move(moves[idx])
            self.white_to_move = not self.white_to_move #swap turns back because self.make_move switches turns
            #generate opponent's moves
#for each opp moves, see if king is attacked
if self.in_check():
moves.remove(moves[idx])
self.white_to_move = not self.white_to_move
self.undo_move()
#if king attacked, not a valid move, remove from moves
#check if checkmate/stalemate
if len(moves) == 0:
if self.in_check():
self.checkmate = True
print("CHECKMATE")
else:
self.stalemate = True
print("STALEMATE")
        else: #no checkmate or stalemate; reset the flags
self.checkmate = False
self.stalemate = False
self.enpassant_possible = temp_enpassant_possible
self.current_castling_rights = temp_castling_rights
#Debugging
# for move in moves:
# print(move.piece_moved, move.piece_captured, move.move_id)
return moves
    #All moves that don't consider checks
def get_all_possible_moves(self):
possible_moves = []
for row in range(len(self.board)):
for col in range(len(self.board[row])):
turn = self.board[row][col][0] #checking whose turn it is
if (turn == "w" and self.white_to_move) or (turn == "b" and not self.white_to_move):
piece = self.board[row][col][1]
self.move_functions[piece](row, col, possible_moves) #call moves for all pieces
return possible_moves
#Pieces moves
def get_pawn_moves(self, row, col, moves):
if self.white_to_move: #white
            if self.board[row-1][col] == "--": #if square is empty
moves.append(Move((row,col) , (row-1,col) , self.board))
if row == 6 and self.board[row-2][col] == "--": #2 square pawn move
moves.append(Move((row,col) , (row-2,col) , self.board))
if col - 1 >= 0: #forward left
if self.board[row-1][col-1][0] == "b": #capture black
moves.append(Move((row,col) , (row-1,col-1) , self.board))
elif ((row-1, col-1) == self.enpassant_possible):
moves.append(Move((row,col) , (row-1,col-1) , self.board, is_enpassant_move=True))
if col + 1 < 8: #forward right
if self.board[row-1][col+1][0] == "b": #capture black
moves.append(Move((row,col) , (row-1,col+1) , self.board))
elif ((row-1, col+1) == self.enpassant_possible):
moves.append(Move((row,col) , (row-1,col+1) , self.board, is_enpassant_move=True))
else: #black
            if self.board[row+1][col] == "--": #if square is empty
moves.append(Move((row,col) , (row+1,col) , self.board))
if row == 1 and self.board[row+2][col] == "--": #2 square pawn move
moves.append(Move((row,col) , (row+2,col) , self.board))
if col - 1 >= 0: #downward left
if self.board[row+1][col-1][0] == "w": #capture white
moves.append(Move((row,col) , (row+1,col-1) , self.board))
elif ((row+1, col-1) == self.enpassant_possible):
moves.append(Move((row,col) , (row+1,col-1) , self.board, is_enpassant_move=True))
if col + 1 < 8: #downward right
if self.board[row+1][col+1][0] == "w": #capture white
moves.append(Move((row,col) , (row+1,col+1) , self.board))
elif ((row+1, col+1) == self.enpassant_possible):
moves.append(Move((row,col) , (row+1,col+1) , self.board, is_enpassant_move=True))
def get_rook_moves(self, row, col, moves):
possible_directions = ((-1,0), (0,-1), (1,0), (0,1)) #(change_row, change_col)
enemy_color = "b" if self.white_to_move else "w"
for direction in possible_directions:
for i in range(1,8):
end_row = row + direction[0] * i
end_col = col + direction[1] * i
if 0 <= end_row < 8 and 0 <= end_col < 8:
end_piece = self.board[end_row][end_col]
if end_piece == "--":
moves.append(Move((row,col) , (end_row , end_col) , self.board))
elif end_piece[0] == enemy_color:
moves.append(Move((row,col) , (end_row , end_col) , self.board))
break
else: #friendly fire
break
else: #off board
break
def get_knight_moves(self, row, col, moves):
l_moves = ((-2,-1), (-2,1), (2,-1), (2, 1), (-1,-2), (-1,2), (1,-2), (1,2))
ally_color = "w" if self.white_to_move else "b"
for l_move in l_moves:
end_row = row + l_move[0]
end_col = col + l_move[1]
if 0 <= end_row < 8 and 0 <= end_col <8: #stays on board
end_piece = self.board[end_row][end_col]
if end_piece[0] != ally_color: #empty/enemy
moves.append(Move((row,col) , (end_row , end_col) , self.board))
def get_bishop_moves(self, row, col, moves):
possible_directions = ((-1,-1), (-1,1), (1,-1), (1,1)) #(change_row, change_col)
enemy_color = "b" if self.white_to_move else "w"
for direction in possible_directions:
for i in range(1,8):
end_row = row + direction[0] * i
end_col = col + direction[1] * i
if 0 <= end_row < 8 and 0 <= end_col < 8:
end_piece = self.board[end_row][end_col]
if end_piece == "--":
moves.append(Move((row,col) , (end_row , end_col) , self.board))
elif end_piece[0] == enemy_color:
moves.append(Move((row,col) , (end_row , end_col) , self.board))
break
else: #friendly fire
break
else: #off board
break
def get_queen_moves(self, row, col, moves):
self.get_rook_moves(row, col, moves)
self.get_bishop_moves(row, col, moves)
def get_king_moves(self, row, col, moves):
king_moves = ((-1,0), (0,-1), (1,0), (0,1), (-1,-1), (-1,1), (1,-1), (1,1))
ally_color = "w" if self.white_to_move else "b"
for king_move in king_moves:
end_row = row + king_move[0]
end_col = col + king_move[1]
if 0 <= end_row < 8 and 0 <= end_col < 8:
end_piece = self.board[end_row][end_col]
if end_piece[0] != ally_color:
moves.append(Move((row,col) , (end_row , end_col) , self.board))
#get castling moves
def get_castling_moves(self, row, col, moves):
#if king in check
if self.square_under_attacked(row, col):
            return #can't castle while in check
#king side
if (self.white_to_move and self.current_castling_rights.wks) or (not self.white_to_move and self.current_castling_rights.bks):
self.get_kingside_castling_moves(row, col, moves)
#queen side
if (self.white_to_move and self.current_castling_rights.wqs) or (not self.white_to_move and self.current_castling_rights.bqs):
self.get_queenside_castling_moves(row, col, moves)
def get_kingside_castling_moves(self, row, col, moves):
#check 2 squares
if self.board[row][col+1] == "--" and self.board[row][col+2] == "--":
if not self.square_under_attacked(row, col+1) and not self.square_under_attacked(row, col+2):
moves.append(Move((row, col), (row, col+2), self.board, is_castle_move = True))
def get_queenside_castling_moves(self, row, col, moves):
        #check the 3 squares between king and rook
if self.board[row][col-1] == "--" and self.board[row][col-2] == "--" and self.board[row][col-3] == "--":
if not self.square_under_attacked(row, col-1) and not self.square_under_attacked(row, col-2):
moves.append(Move((row, col), (row, col-2), self.board, is_castle_move = True))
#determine if player is in check
def in_check(self):
if self.white_to_move:
return self.square_under_attacked(self.white_king_location[0], self.white_king_location[1])
else:
return self.square_under_attacked(self.black_king_location[0], self.black_king_location[1])
#determine if the enemy can attack the square (row, col)
def square_under_attacked(self, row, col):
self.white_to_move = not self.white_to_move #switch to opponent
opp_moves = self.get_all_possible_moves()
for move in opp_moves:
if move.end_row == row and move.end_col == col:
self.white_to_move = not self.white_to_move #switch back
return True #square is under attack
self.white_to_move = not self.white_to_move
return False
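# Minimal usage sketch, assuming move.py and castleRights.py from this
# project are importable as at the top of this file:
if __name__ == "__main__":
    gs = GameState()
    legal = gs.get_valid_moves()
    print("White has", len(legal), "legal opening moves") #expect 20
    gs.make_move(legal[0])
    gs.undo_move() #board, castling rights and en passant state are restored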
| 45.78866
| 172
| 0.565687
|
8d58c91dce978c78b0a4b3e99e226a010a9af887
| 17,885
|
py
|
Python
|
web-crawl/runtime/lib/python3.4/site-packages/w3lib/url.py
|
yangsongx/refrence
|
58376b97d0d1364b6a92dad89f5125a0d6e009e0
|
[
"Apache-2.0"
] | 1
|
2017-01-14T15:20:35.000Z
|
2017-01-14T15:20:35.000Z
|
web-crawl/runtime/lib/python3.4/site-packages/w3lib/url.py
|
yangsongx/refrence
|
58376b97d0d1364b6a92dad89f5125a0d6e009e0
|
[
"Apache-2.0"
] | 1
|
2017-01-07T17:41:51.000Z
|
2017-05-18T08:38:42.000Z
|
env/lib/python3.6/site-packages/w3lib/url.py
|
Tokyo-Buffalo/tokyosouth
|
0fcbff35b6bfc9944b5196a6b059e4549d989c06
|
[
"MIT"
] | 1
|
2019-01-29T06:37:19.000Z
|
2019-01-29T06:37:19.000Z
|
"""
This module contains general purpose URL functions not found in the standard
library.
"""
import codecs
import os
import re
import posixpath
import warnings
import six
from six.moves.urllib.parse import (urljoin, urlsplit, urlunsplit,
urldefrag, urlencode, urlparse,
quote, parse_qs, parse_qsl,
ParseResult, unquote, urlunparse)
from six.moves.urllib.request import pathname2url, url2pathname
from w3lib.util import to_bytes, to_native_str, to_unicode
# error handling function for bytes-to-Unicode decoding errors with URLs
def _quote_byte(error):
return (to_unicode(quote(error.object[error.start:error.end])), error.end)
codecs.register_error('percentencode', _quote_byte)
# Python 2.x urllib.always_safe become private in Python 3.x;
# its content is copied here
_ALWAYS_SAFE_BYTES = (b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'abcdefghijklmnopqrstuvwxyz'
b'0123456789' b'_.-')
def urljoin_rfc(base, ref, encoding='utf-8'):
r"""
.. warning::
This function is deprecated and will be removed in future.
It is not supported with Python 3.
Please use ``urlparse.urljoin`` instead.
Same as urlparse.urljoin but supports unicode values in base and ref
parameters (in which case they will be converted to str using the given
encoding).
Always returns a str.
>>> import w3lib.url
>>> w3lib.url.urljoin_rfc('http://www.example.com/path/index.html', u'/otherpath/index2.html')
'http://www.example.com/otherpath/index2.html'
>>>
>>> # Note: the following does not work in Python 3
>>> w3lib.url.urljoin_rfc(b'http://www.example.com/path/index.html', u'fran\u00e7ais/d\u00e9part.htm') # doctest: +SKIP
'http://www.example.com/path/fran\xc3\xa7ais/d\xc3\xa9part.htm'
>>>
"""
warnings.warn("w3lib.url.urljoin_rfc is deprecated, use urlparse.urljoin instead",
DeprecationWarning)
str_base = to_bytes(base, encoding)
str_ref = to_bytes(ref, encoding)
return urljoin(str_base, str_ref)
_reserved = b';/?:@&=+$|,#' # RFC 3986 (Generic Syntax)
_unreserved_marks = b"-_.!~*'()" # RFC 3986 sec 2.3
_safe_chars = _ALWAYS_SAFE_BYTES + b'%' + _reserved + _unreserved_marks
def safe_url_string(url, encoding='utf8', path_encoding='utf8'):
"""Convert the given URL into a legal URL by escaping unsafe characters
according to RFC-3986.
If a bytes URL is given, it is first converted to `str` using the given
encoding (which defaults to 'utf-8'). 'utf-8' encoding is used for
    the URL path component (unless overridden by path_encoding), and the given
    encoding is used for the query string or form data.
When passing an encoding, you should use the encoding of the
    original page (the page from which the URL was extracted).
Calling this function on an already "safe" URL will return the URL
unmodified.
Always returns a native `str` (bytes in Python2, unicode in Python3).
"""
# Python3's urlsplit() chokes on bytes input with non-ASCII chars,
# so let's decode (to Unicode) using page encoding:
# - it is assumed that a raw bytes input comes from a document
# encoded with the supplied encoding (or UTF8 by default)
# - if the supplied (or default) encoding chokes,
# percent-encode offending bytes
parts = urlsplit(to_unicode(url, encoding=encoding,
errors='percentencode'))
# IDNA encoding can fail for too long labels (>63 characters)
# or missing labels (e.g. http://.example.com)
try:
netloc = parts.netloc.encode('idna')
except UnicodeError:
netloc = parts.netloc
# quote() in Python2 return type follows input type;
# quote() in Python3 always returns Unicode (native str)
return urlunsplit((
to_native_str(parts.scheme),
to_native_str(netloc).rstrip(':'),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
# encoding of query and fragment follows page encoding
# or form-charset (if known and passed)
quote(to_bytes(parts.query, encoding), _safe_chars),
quote(to_bytes(parts.fragment, encoding), _safe_chars),
))
_parent_dirs = re.compile(r'/?(\.\./)+')
def safe_download_url(url):
""" Make a url for download. This will call safe_url_string
and then strip the fragment, if one exists. The path will
be normalised.
If the path is outside the document root, it will be changed
to be within the document root.
"""
safe_url = safe_url_string(url)
scheme, netloc, path, query, _ = urlsplit(safe_url)
if path:
path = _parent_dirs.sub('', posixpath.normpath(path))
if url.endswith('/') and not path.endswith('/'):
path += '/'
else:
path = '/'
return urlunsplit((scheme, netloc, path, query, ''))
def is_url(text):
return text.partition("://")[0] in ('file', 'http', 'https')
def url_query_parameter(url, parameter, default=None, keep_blank_values=0):
"""Return the value of a url parameter, given the url and parameter name
General case:
>>> import w3lib.url
>>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "id")
'200'
>>>
Return a default value if the parameter is not found:
>>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault")
'mydefault'
>>>
Returns None if `keep_blank_values` not set or 0 (default):
>>> w3lib.url.url_query_parameter("product.html?id=", "id")
>>>
Returns an empty string if `keep_blank_values` set to 1:
>>> w3lib.url.url_query_parameter("product.html?id=", "id", keep_blank_values=1)
''
>>>
"""
queryparams = parse_qs(
urlsplit(str(url))[3],
keep_blank_values=keep_blank_values
)
return queryparams.get(parameter, [default])[0]
def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):
"""Clean URL arguments leaving only those passed in the parameterlist keeping order
>>> import w3lib.url
>>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ('id',))
'product.html?id=200'
>>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id', 'name'])
'product.html?id=200&name=wired'
>>>
If `unique` is ``False``, do not remove duplicated keys
>>> w3lib.url.url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ['d'], unique=False)
'product.html?d=1&d=2&d=3'
>>>
If `remove` is ``True``, leave only those **not in parameterlist**.
>>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id'], remove=True)
'product.html?foo=bar&name=wired'
>>> w3lib.url.url_query_cleaner("product.html?id=2&foo=bar&name=wired", ['id', 'foo'], remove=True)
'product.html?name=wired'
>>>
By default, URL fragments are removed. If you need to preserve fragments,
pass the ``keep_fragments`` argument as ``True``.
>>> w3lib.url.url_query_cleaner('http://domain.tld/?bla=123#123123', ['bla'], remove=True, keep_fragments=True)
'http://domain.tld/#123123'
"""
if isinstance(parameterlist, (six.text_type, bytes)):
parameterlist = [parameterlist]
url, fragment = urldefrag(url)
base, _, query = url.partition('?')
seen = set()
querylist = []
for ksv in query.split(sep):
k, _, _ = ksv.partition(kvsep)
if unique and k in seen:
continue
elif remove and k in parameterlist:
continue
elif not remove and k not in parameterlist:
continue
else:
querylist.append(ksv)
seen.add(k)
url = '?'.join([base, sep.join(querylist)]) if querylist else base
if keep_fragments:
url += '#' + fragment
return url
def add_or_replace_parameter(url, name, new_value):
"""Add or remove a parameter to a given url
>>> import w3lib.url
>>> w3lib.url.add_or_replace_parameter('http://www.example.com/index.php', 'arg', 'v')
'http://www.example.com/index.php?arg=v'
>>> w3lib.url.add_or_replace_parameter('http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3', 'arg4', 'v4')
'http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3&arg4=v4'
>>> w3lib.url.add_or_replace_parameter('http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3', 'arg3', 'v3new')
'http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3new'
>>>
"""
parsed = urlsplit(url)
args = parse_qsl(parsed.query, keep_blank_values=True)
new_args = []
found = False
for name_, value_ in args:
if name_ == name:
new_args.append((name_, new_value))
found = True
else:
new_args.append((name_, value_))
if not found:
new_args.append((name, new_value))
query = urlencode(new_args)
return urlunsplit(parsed._replace(query=query))
def path_to_file_uri(path):
"""Convert local filesystem path to legal File URIs as described in:
http://en.wikipedia.org/wiki/File_URI_scheme
"""
x = pathname2url(os.path.abspath(path))
if os.name == 'nt':
x = x.replace('|', ':') # http://bugs.python.org/issue5861
return 'file:///%s' % x.lstrip('/')
def file_uri_to_path(uri):
"""Convert File URI to local filesystem path according to:
http://en.wikipedia.org/wiki/File_URI_scheme
"""
uri_path = urlparse(uri).path
return url2pathname(uri_path)
def any_to_uri(uri_or_path):
"""If given a path name, return its File URI, otherwise return it
unmodified
"""
if os.path.splitdrive(uri_or_path)[0]:
return path_to_file_uri(uri_or_path)
u = urlparse(uri_or_path)
return uri_or_path if u.scheme else path_to_file_uri(uri_or_path)
__all__ = ["add_or_replace_parameter",
"any_to_uri",
"canonicalize_url",
"file_uri_to_path",
"is_url",
"path_to_file_uri",
"safe_download_url",
"safe_url_string",
"url_query_cleaner",
"url_query_parameter",
# this last one is deprecated ; include it to be on the safe side
"urljoin_rfc"]
def _safe_ParseResult(parts, encoding='utf8', path_encoding='utf8'):
# IDNA encoding can fail for too long labels (>63 characters)
# or missing labels (e.g. http://.example.com)
try:
netloc = parts.netloc.encode('idna')
except UnicodeError:
netloc = parts.netloc
return (
to_native_str(parts.scheme),
to_native_str(netloc),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
quote(to_bytes(parts.params, path_encoding), _safe_chars),
# encoding of query and fragment follows page encoding
# or form-charset (if known and passed)
quote(to_bytes(parts.query, encoding), _safe_chars),
quote(to_bytes(parts.fragment, encoding), _safe_chars)
)
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
encoding=None):
r"""Canonicalize the given url by applying the following procedures:
- sort query arguments, first by key, then by value
- percent encode paths ; non-ASCII characters are percent-encoded
using UTF-8 (RFC-3986)
- percent encode query arguments ; non-ASCII characters are percent-encoded
using passed `encoding` (UTF-8 by default)
    - normalize all spaces (in query arguments) to '+' (plus symbol)
- normalize percent encodings case (%2f -> %2F)
- remove query arguments with blank values (unless `keep_blank_values` is True)
- remove fragments (unless `keep_fragments` is True)
The url passed can be bytes or unicode, while the url returned is
always a native str (bytes in Python 2, unicode in Python 3).
>>> import w3lib.url
>>>
>>> # sorting query arguments
>>> w3lib.url.canonicalize_url('http://www.example.com/do?c=3&b=5&b=2&a=50')
'http://www.example.com/do?a=50&b=2&b=5&c=3'
>>>
>>> # UTF-8 conversion + percent-encoding of non-ASCII characters
>>> w3lib.url.canonicalize_url(u'http://www.example.com/r\u00e9sum\u00e9')
'http://www.example.com/r%C3%A9sum%C3%A9'
>>>
For more examples, see the tests in `tests/test_url.py`.
"""
# If supplied `encoding` is not compatible with all characters in `url`,
    # fall back to UTF-8 as a safety net.
# UTF-8 can handle all Unicode characters,
# so we should be covered regarding URL normalization,
# if not for proper URL expected by remote website.
try:
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding=encoding)
    except UnicodeEncodeError:
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding='utf8')
# 1. decode query-string as UTF-8 (or keep raw bytes),
# sort values,
# and percent-encode them back
if six.PY2:
keyvals = parse_qsl(query, keep_blank_values)
else:
# Python3's urllib.parse.parse_qsl does not work as wanted
# for percent-encoded characters that do not match passed encoding,
# they get lost.
#
# e.g., 'q=b%a3' becomes [('q', 'b\ufffd')]
        # (i.e. with 'REPLACEMENT CHARACTER' (U+FFFD),
# instead of \xa3 that you get with Python2's parse_qsl)
#
        # what we want here is to keep the raw bytes, and percent-encode them
        # so as to preserve whatever encoding was originally used.
#
# See https://tools.ietf.org/html/rfc3987#section-6.4:
#
# For example, it is possible to have a URI reference of
# "http://www.example.org/r%E9sum%E9.xml#r%C3%A9sum%C3%A9", where the
# document name is encoded in iso-8859-1 based on server settings, but
# where the fragment identifier is encoded in UTF-8 according to
# [XPointer]. The IRI corresponding to the above URI would be (in XML
# notation)
# "http://www.example.org/r%E9sum%E9.xml#résumé".
# Similar considerations apply to query parts. The functionality of
# IRIs (namely, to be able to include non-ASCII characters) can only be
# used if the query part is encoded in UTF-8.
keyvals = parse_qsl_to_bytes(query, keep_blank_values)
keyvals.sort()
query = urlencode(keyvals)
# 2. decode percent-encoded sequences in path as UTF-8 (or keep raw bytes)
# and percent-encode path again (this normalizes to upper-case %XX)
uqp = _unquotepath(path)
path = quote(uqp, _safe_chars) or '/'
fragment = '' if not keep_fragments else fragment
# every part should be safe already
return urlunparse((scheme,
netloc.lower().rstrip(':'),
path,
params,
query,
fragment))
def _unquotepath(path):
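    # Percent-encoded '/' (%2f) and '?' (%3f) must survive unquoting or the
    # path structure would change, so the '%' is doubled (-> %25XX) and the
    # unquote below then yields the literal sequences %2F / %3F.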
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
if six.PY2:
# in Python 2, '%a3' becomes '\xa3', which is what we want
return unquote(path)
else:
# in Python 3,
# standard lib's unquote() does not work for non-UTF-8
# percent-escaped characters, they get lost.
# e.g., '%a3' becomes 'REPLACEMENT CHARACTER' (U+FFFD)
#
# unquote_to_bytes() returns raw bytes instead
return unquote_to_bytes(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
if isinstance(url, ParseResult):
return url
return urlparse(to_unicode(url, encoding))
if not six.PY2:
from urllib.parse import _coerce_args, unquote_to_bytes
def parse_qsl_to_bytes(qs, keep_blank_values=False):
"""Parse a query given as a string argument.
Data are returned as a list of name, value pairs as bytes.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
"""
# This code is the same as Python3's parse_qsl()
# (at https://hg.python.org/cpython/rev/c38ac7ab8d9a)
# except for the unquote(s, encoding, errors) calls replaced
# with unquote_to_bytes(s)
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote_to_bytes(name)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote_to_bytes(value)
value = _coerce_result(value)
r.append((name, value))
return r
| 36.64959
| 123
| 0.638021
|
351455c685e9df347bc008f90f3de6ffa135a7f2
| 879
|
py
|
Python
|
leetcode/Array/1295. Find Numbers with Even Number of Digits.py
|
yanshengjia/algorithm
|
0608d286be9c93d51768d47f21e569c6b0be9cda
|
[
"MIT"
] | 23
|
2019-08-02T12:02:47.000Z
|
2022-03-09T15:24:16.000Z
|
leetcode/Array/1295. Find Numbers with Even Number of Digits.py
|
yanshengjia/algorithm
|
0608d286be9c93d51768d47f21e569c6b0be9cda
|
[
"MIT"
] | null | null | null |
leetcode/Array/1295. Find Numbers with Even Number of Digits.py
|
yanshengjia/algorithm
|
0608d286be9c93d51768d47f21e569c6b0be9cda
|
[
"MIT"
] | 21
|
2019-12-22T04:47:32.000Z
|
2021-09-12T14:29:35.000Z
|
"""
Given an array nums of integers, return how many of them contain an even number of digits.
Example 1:
Input: nums = [12,345,2,6,7896]
Output: 2
Explanation:
12 contains 2 digits (even number of digits).
345 contains 3 digits (odd number of digits).
2 contains 1 digit (odd number of digits).
6 contains 1 digit (odd number of digits).
7896 contains 4 digits (even number of digits).
Therefore only 12 and 7896 contain an even number of digits.
Example 2:
Input: nums = [555,901,482,1771]
Output: 1
Explanation:
Only 1771 contains an even number of digits.
Solution:
Simulation
"""
# Simple Simulation
# Time: O(N), where N is the length of nums
# Space: O(1)
from typing import List  # needed for the List[int] type annotation
class Solution:
def findNumbers(self, nums: List[int]) -> int:
res = 0
for num in nums:
if len(str(num)) % 2 == 0:
res += 1
return res
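# Digit counting without string conversion (illustrative sketch; assumes each
# num is a positive integer, per the problem constraints):
def count_digits(num: int) -> int:
    digits = 0
    while num > 0:
        num //= 10
        digits += 1
    return digits
# A number has an even number of digits iff count_digits(num) % 2 == 0.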
| 22.538462
| 90
| 0.658703
|
0b780f9f152851feae0669ba881ed655a84613cb
| 4,321
|
py
|
Python
|
src/pymortests/lyapunov.py
|
TreeerT/pymor
|
e8b18d2d4c4b5998f0bd84f6728e365e0693b753
|
[
"Unlicense"
] | 1
|
2021-08-17T15:55:12.000Z
|
2021-08-17T15:55:12.000Z
|
src/pymortests/lyapunov.py
|
TreeerT/pymor
|
e8b18d2d4c4b5998f0bd84f6728e365e0693b753
|
[
"Unlicense"
] | null | null | null |
src/pymortests/lyapunov.py
|
TreeerT/pymor
|
e8b18d2d4c4b5998f0bd84f6728e365e0693b753
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import os
import sys
import numpy as np
import scipy.linalg as spla
import scipy.sparse as sps
from pymor.algorithms.lyapunov import solve_lyap_lrcf, solve_lyap_dense
from pymor.core.config import config
from pymor.operators.numpy import NumpyMatrixOperator
import pytest
n_list = [100, 200]
m_list = [1, 2]
lyap_lrcf_solver_list = [
'scipy',
'slycot_bartels-stewart',
'pymess_glyap',
'pymess_lradi',
'lradi',
]
lyap_dense_solver_list = [
'scipy',
'slycot_bartels-stewart',
'pymess_glyap',
]
def fro_norm(A):
if not sps.issparse(A):
return spla.norm(A)
else:
return sps.linalg.norm(A)
def conv_diff_1d_fd(n, a, b):
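    """Central finite-difference discretization of a*u'' - b*u' on (0, 1)
    with homogeneous Dirichlet boundary conditions, n interior points and
    mesh width h = 1/(n + 1).
    """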
diagonals = [-a * 2 * (n + 1) ** 2 * np.ones((n,)),
(a * (n + 1) ** 2 + b * (n + 1) / 2) * np.ones((n - 1,)),
(a * (n + 1) ** 2 - b * (n + 1) / 2) * np.ones((n - 1,))]
A = sps.diags(diagonals, [0, -1, 1], format='csc')
return A
def conv_diff_1d_fem(n, a, b):
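    """Linear finite-element discretization of the same 1D convection-diffusion
    operator; returns the system matrix A and the mass matrix E, both scaled
    by 1/h (h = 1/(n + 1)) so that A matches the scaling of conv_diff_1d_fd.
    """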
diagonals = [-a * 2 * (n + 1) ** 2 * np.ones((n,)),
(a * (n + 1) ** 2 + b * (n + 1) / 2) * np.ones((n - 1,)),
(a * (n + 1) ** 2 - b * (n + 1) / 2) * np.ones((n - 1,))]
A = sps.diags(diagonals, [0, -1, 1], format='csc')
diagonals = [2 / 3 * np.ones((n,)),
1 / 6 * np.ones((n - 1,)),
1 / 6 * np.ones((n - 1,))]
E = sps.diags(diagonals, [0, -1, 1], format='csc')
return A, E
def relative_residual(A, E, B, X, trans=False):
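    """Relative Frobenius-norm residual of a (generalized) Lyapunov equation.
    trans=False: ||A X E^T + E X A^T + B B^T||_F / ||B B^T||_F
    trans=True:  ||A^T X E + E^T X A + B^T B||_F / ||B^T B||_F
    E is treated as the identity when None; X is assumed symmetric.
    """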
if not trans:
if E is None:
AX = A @ X
BBT = B @ B.T
res = fro_norm(AX + AX.T + BBT)
rhs = fro_norm(BBT)
else:
AXET = A @ X @ E.T
BBT = B @ B.T
res = fro_norm(AXET + AXET.T + BBT)
rhs = fro_norm(BBT)
else:
if E is None:
ATX = A.T @ X
CTC = B.T @ B
res = fro_norm(ATX + ATX.T + CTC)
rhs = fro_norm(CTC)
else:
ATXE = A.T @ X @ E
CTC = B.T @ B
res = fro_norm(ATXE + ATXE.T + CTC)
rhs = fro_norm(CTC)
return res / rhs
def _check_availability(lyap_solver):
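    """Skip the current test when the solver backend required by lyap_solver
    (slycot or pymess) is not importable."""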
if (lyap_solver.startswith('slycot')
and not os.environ.get('DOCKER_PYMOR', False)
and not config.HAVE_SLYCOT):
pytest.skip('slycot not available')
if (lyap_solver.startswith('pymess')
and not os.environ.get('DOCKER_PYMOR', False)
and not config.HAVE_PYMESS):
pytest.skip('pymess not available')
@pytest.mark.parametrize('n', n_list)
@pytest.mark.parametrize('m', m_list)
@pytest.mark.parametrize('with_E', [False, True])
@pytest.mark.parametrize('trans', [False, True])
@pytest.mark.parametrize('lyap_solver', lyap_lrcf_solver_list)
def test_lrcf(n, m, with_E, trans, lyap_solver):
_check_availability(lyap_solver)
if not with_E:
A = conv_diff_1d_fd(n, 1, 1)
E = None
else:
A, E = conv_diff_1d_fem(n, 1, 1)
np.random.seed(0)
B = np.random.randn(n, m)
if trans:
B = B.T
Aop = NumpyMatrixOperator(A)
Eop = NumpyMatrixOperator(E) if with_E else None
Bva = Aop.source.from_numpy(B.T if not trans else B)
Zva = solve_lyap_lrcf(Aop, Eop, Bva, trans=trans, options=lyap_solver)
assert len(Zva) <= n
Z = Zva.to_numpy().T
assert relative_residual(A, E, B, Z @ Z.T, trans=trans) < 1e-10
@pytest.mark.parametrize('n', n_list)
@pytest.mark.parametrize('m', m_list)
@pytest.mark.parametrize('with_E', [False, True])
@pytest.mark.parametrize('trans', [False, True])
@pytest.mark.parametrize('lyap_solver', lyap_dense_solver_list)
def test_dense(n, m, with_E, trans, lyap_solver):
_check_availability(lyap_solver)
np.random.seed(0)
A = np.random.randn(n, n)
E = np.eye(n) + np.random.randn(n, n) / n if with_E else None
B = np.random.randn(n, m)
if trans:
B = B.T
X = solve_lyap_dense(A, E, B, trans=trans, options=lyap_solver)
assert type(X) is np.ndarray
assert relative_residual(A, E, B, X, trans=trans) < 1e-10
| 29.394558
| 77
| 0.574173
|
e4b143f42b18f29a351105aa8b03bb297981d263
| 1,713
|
py
|
Python
|
aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/CreateMediaComplexTaskRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/CreateMediaComplexTaskRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/CreateMediaComplexTaskRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateMediaComplexTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imm', '2017-09-06', 'CreateMediaComplexTask','imm')
def get_Project(self):
return self.get_query_params().get('Project')
def set_Project(self,Project):
self.add_query_param('Project',Project)
def get_NotifyEndpoint(self):
return self.get_query_params().get('NotifyEndpoint')
def set_NotifyEndpoint(self,NotifyEndpoint):
self.add_query_param('NotifyEndpoint',NotifyEndpoint)
def get_NotifyTopicName(self):
return self.get_query_params().get('NotifyTopicName')
def set_NotifyTopicName(self,NotifyTopicName):
self.add_query_param('NotifyTopicName',NotifyTopicName)
def get_Parameters(self):
return self.get_query_params().get('Parameters')
def set_Parameters(self,Parameters):
self.add_query_param('Parameters',Parameters)
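# Minimal usage sketch, assuming valid Alibaba Cloud credentials and region:
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#   request = CreateMediaComplexTaskRequest()
#   request.set_Project('my-project')
#   response = client.do_action_with_exception(request)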
| 34.959184
| 81
| 0.770578
|
60ef205f9c63ddc8f0d28f12598018a4ba314e06
| 149
|
py
|
Python
|
1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 8/E2.py
|
codeclubbentleigh/Python
|
94d6a937aa3520b201ee1641c2009bd90566d52a
|
[
"MIT"
] | 12
|
2018-11-14T03:55:58.000Z
|
2021-12-12T01:13:05.000Z
|
1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 8/E2.py
|
codeclubbentleigh/Python
|
94d6a937aa3520b201ee1641c2009bd90566d52a
|
[
"MIT"
] | null | null | null |
1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 8/E2.py
|
codeclubbentleigh/Python
|
94d6a937aa3520b201ee1641c2009bd90566d52a
|
[
"MIT"
] | 7
|
2019-10-10T06:28:58.000Z
|
2022-02-15T07:18:12.000Z
|
prefixes = "JKLMNOPQ"
suffix = "ack"
for letter in prefixes:
if letter == "O" or letter == "Q":
letter += "u"
print(letter + suffix)
| 21.285714
| 38
| 0.577181
|
c51d8f7220f815cc72f11a46c3434d010a43bda2
| 18,323
|
py
|
Python
|
corehq/messaging/scheduling/models/content.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/messaging/scheduling/models/content.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/messaging/scheduling/models/content.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import jsonfield as old_jsonfield
from contextlib import contextmanager
from copy import deepcopy
from corehq.apps.accounting.utils import domain_is_on_trial
from corehq.apps.app_manager.exceptions import XFormIdNotUnique
from corehq.apps.app_manager.models import Form
from corehq.apps.hqwebapp.tasks import send_mail_async
from corehq.apps.smsforms.app import start_session
from corehq.apps.smsforms.util import form_requires_input, critical_section_for_smsforms_sessions
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.scheduling.models.abstract import Content
from corehq.apps.reminders.models import EmailUsage
from corehq.apps.sms.api import (
MessageMetadata,
send_sms,
send_sms_to_verified_number,
)
from corehq.apps.sms.models import MessagingEvent, PhoneNumber, PhoneBlacklist
from corehq.apps.sms.util import format_message_list, touchforms_error_is_config_error, get_formplayer_exception
from corehq.apps.smsforms.models import SQLXFormsSession
from couchdbkit import ResourceNotFound
from memoized import memoized
from dimagi.utils.logging import notify_exception
from dimagi.utils.modules import to_function
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db import models
from corehq.apps.formplayer_api.smsforms.api import TouchformsError
@contextmanager
def no_op_context_manager():
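    """Stand-in for critical_section_for_smsforms_sessions when the caller
    already holds the critical section."""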
yield
class SMSContent(Content):
message = old_jsonfield.JSONField(default=dict)
def create_copy(self):
"""
See Content.create_copy() for docstring
"""
return SMSContent(
message=deepcopy(self.message),
)
def render_message(self, message, recipient, logged_subevent):
if not message:
logged_subevent.error(MessagingEvent.ERROR_NO_MESSAGE)
return None
renderer = self.get_template_renderer(recipient)
try:
return renderer.render(message)
except:
logged_subevent.error(MessagingEvent.ERROR_CANNOT_RENDER_MESSAGE)
return None
def send(self, recipient, logged_event, phone_entry=None):
logged_subevent = logged_event.create_subevent_from_contact_and_content(
recipient,
self,
case_id=self.case.case_id if self.case else None,
)
phone_entry_or_number = phone_entry or self.get_two_way_entry_or_phone_number(recipient)
if not phone_entry_or_number:
logged_subevent.error(MessagingEvent.ERROR_NO_PHONE_NUMBER)
return
message = self.get_translation_from_message_dict(
logged_event.domain,
self.message,
recipient.get_language_code()
)
message = self.render_message(message, recipient, logged_subevent)
self.send_sms_message(logged_event.domain, recipient, phone_entry_or_number, message, logged_subevent)
logged_subevent.completed()
class EmailContent(Content):
subject = old_jsonfield.JSONField(default=dict)
message = old_jsonfield.JSONField(default=dict)
TRIAL_MAX_EMAILS = 50
def create_copy(self):
"""
See Content.create_copy() for docstring
"""
return EmailContent(
subject=deepcopy(self.subject),
message=deepcopy(self.message),
)
def render_subject_and_message(self, subject, message, recipient):
renderer = self.get_template_renderer(recipient)
return renderer.render(subject), renderer.render(message)
def send(self, recipient, logged_event, phone_entry=None):
email_usage = EmailUsage.get_or_create_usage_record(logged_event.domain)
is_trial = domain_is_on_trial(logged_event.domain)
logged_subevent = logged_event.create_subevent_from_contact_and_content(
recipient,
self,
case_id=self.case.case_id if self.case else None,
)
subject = self.get_translation_from_message_dict(
logged_event.domain,
self.subject,
recipient.get_language_code()
)
message = self.get_translation_from_message_dict(
logged_event.domain,
self.message,
recipient.get_language_code()
)
try:
subject, message = self.render_subject_and_message(subject, message, recipient)
except:
logged_subevent.error(MessagingEvent.ERROR_CANNOT_RENDER_MESSAGE)
return
subject = subject or '(No Subject)'
if not message:
logged_subevent.error(MessagingEvent.ERROR_NO_MESSAGE)
return
email_address = recipient.get_email()
if not email_address:
logged_subevent.error(MessagingEvent.ERROR_NO_EMAIL_ADDRESS)
return
if is_trial and EmailUsage.get_total_count(logged_event.domain) >= self.TRIAL_MAX_EMAILS:
logged_subevent.error(MessagingEvent.ERROR_TRIAL_EMAIL_LIMIT_REACHED)
return
send_mail_async.delay(subject, message, settings.DEFAULT_FROM_EMAIL, [email_address])
email_usage.update_count()
logged_subevent.completed()
class SMSSurveyContent(Content):
form_unique_id = models.CharField(max_length=126)
# See corehq.apps.smsforms.models.SQLXFormsSession for an
# explanation of these properties
expire_after = models.IntegerField()
reminder_intervals = JSONField(default=list)
submit_partially_completed_forms = models.BooleanField(default=False)
include_case_updates_in_partial_submissions = models.BooleanField(default=False)
def create_copy(self):
"""
See Content.create_copy() for docstring
"""
return SMSSurveyContent(
form_unique_id=None,
expire_after=self.expire_after,
reminder_intervals=deepcopy(self.reminder_intervals),
submit_partially_completed_forms=self.submit_partially_completed_forms,
include_case_updates_in_partial_submissions=self.include_case_updates_in_partial_submissions,
)
@memoized
def get_memoized_app_module_form(self, domain):
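        """Resolve (app, module, form, form_requires_input(form)) for
        form_unique_id, returning (None, None, None, None) when the form
        cannot be found or belongs to a different domain."""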
try:
form = Form.get_form(self.form_unique_id)
app = form.get_app()
module = form.get_module()
except (ResourceNotFound, XFormIdNotUnique):
return None, None, None, None
if app.domain != domain:
return None, None, None, None
return app, module, form, form_requires_input(form)
def phone_has_opted_out(self, phone_entry_or_number):
if isinstance(phone_entry_or_number, PhoneNumber):
pb = PhoneBlacklist.get_by_phone_number_or_none(phone_entry_or_number.phone_number)
else:
pb = PhoneBlacklist.get_by_phone_number_or_none(phone_entry_or_number)
return pb is not None and not pb.send_sms
def get_critical_section(self, recipient):
if self.critical_section_already_acquired:
return no_op_context_manager()
return critical_section_for_smsforms_sessions(recipient.get_id)
def send(self, recipient, logged_event, phone_entry=None):
app, module, form, requires_input = self.get_memoized_app_module_form(logged_event.domain)
if any([o is None for o in (app, module, form)]):
logged_event.error(MessagingEvent.ERROR_CANNOT_FIND_FORM)
return
logged_subevent = logged_event.create_subevent_from_contact_and_content(
recipient,
self,
case_id=self.case.case_id if self.case else None,
)
# We don't try to look up the phone number from the user case in this scenario
# because this use case involves starting a survey session, which can be
# very different if the contact is a user or is a case. So here if recipient
# is a user we only allow them to fill out the survey as the user contact, and
# not the user case contact.
phone_entry_or_number = (
phone_entry or
self.get_two_way_entry_or_phone_number(recipient, try_user_case=False)
)
if phone_entry_or_number is None:
logged_subevent.error(MessagingEvent.ERROR_NO_PHONE_NUMBER)
return
if requires_input and not isinstance(phone_entry_or_number, PhoneNumber):
logged_subevent.error(MessagingEvent.ERROR_NO_TWO_WAY_PHONE_NUMBER)
return
# The SMS framework already checks if the number has opted out before sending to
# it. But for this use case we check for it here because we don't want to start
# the survey session if they've opted out.
if self.phone_has_opted_out(phone_entry_or_number):
logged_subevent.error(MessagingEvent.ERROR_PHONE_OPTED_OUT)
return
with self.get_critical_section(recipient):
# Get the case to submit the form against, if any
case_id = None
if is_commcarecase(recipient):
case_id = recipient.case_id
elif self.case:
case_id = self.case.case_id
if form.requires_case() and not case_id:
logged_subevent.error(MessagingEvent.ERROR_NO_CASE_GIVEN)
return
session, responses = self.start_smsforms_session(
logged_event.domain,
recipient,
case_id,
phone_entry_or_number,
logged_subevent,
self.get_workflow(logged_event),
app,
module,
form
)
if session:
logged_subevent.xforms_session = session
logged_subevent.save()
self.send_first_message(
logged_event.domain,
recipient,
phone_entry_or_number,
session,
responses,
logged_subevent,
self.get_workflow(logged_event)
)
logged_subevent.completed()
def start_smsforms_session(self, domain, recipient, case_id, phone_entry_or_number, logged_subevent, workflow,
app, module, form):
# Close all currently open sessions
SQLXFormsSession.close_all_open_sms_sessions(domain, recipient.get_id)
# Start the new session
try:
session, responses = start_session(
SQLXFormsSession.create_session_object(
domain,
recipient,
(phone_entry_or_number.phone_number
if isinstance(phone_entry_or_number, PhoneNumber)
else phone_entry_or_number),
app,
form,
expire_after=self.expire_after,
reminder_intervals=self.reminder_intervals,
submit_partially_completed_forms=self.submit_partially_completed_forms,
include_case_updates_in_partial_submissions=self.include_case_updates_in_partial_submissions
),
domain,
recipient,
app,
module,
form,
case_id,
)
except TouchformsError as e:
logged_subevent.error(
MessagingEvent.ERROR_TOUCHFORMS_ERROR,
additional_error_text=get_formplayer_exception(domain, e)
)
if touchforms_error_is_config_error(domain, e):
# Don't reraise the exception because this means there are configuration
# issues with the form that need to be fixed. The error is logged in the
# above lines.
return None, None
# Reraise the exception so that the framework retries it again later
raise
except:
logged_subevent.error(MessagingEvent.ERROR_TOUCHFORMS_ERROR)
# Reraise the exception so that the framework retries it again later
raise
session.workflow = workflow
session.save()
return session, responses
def send_first_message(self, domain, recipient, phone_entry_or_number, session, responses, logged_subevent,
workflow):
if len(responses) > 0:
message = format_message_list(responses)
metadata = MessageMetadata(
workflow=workflow,
xforms_session_couch_id=session.couch_id,
)
if isinstance(phone_entry_or_number, PhoneNumber):
send_sms_to_verified_number(
phone_entry_or_number,
message,
metadata,
logged_subevent=logged_subevent
)
else:
send_sms(
domain,
recipient,
phone_entry_or_number,
message,
metadata
)
class IVRSurveyContent(Content):
"""
IVR is no longer supported, but in order to display old configurations we
need to keep this model around.
"""
# The unique id of the form that will be used as the IVR Survey
form_unique_id = models.CharField(max_length=126)
# If empty list, this is ignored. Otherwise, this is a list of intervals representing
# minutes to wait.
# After waiting the amount of minutes specified by each interval, the framework will
# check if an outbound IVR call was answered for this event. If not, it will retry
# the outbound call again.
reminder_intervals = JSONField(default=list)
# At the end of the IVR call, if this is True, the form will be submitted in its current
# state regardless if it was completed or not.
submit_partially_completed_forms = models.BooleanField(default=False)
# Only matters when submit_partially_completed_forms is True.
# If True, then case updates will be included in partial form submissions, otherwise
# they will be excluded.
include_case_updates_in_partial_submissions = models.BooleanField(default=False)
# The maximum number of times to attempt asking a question on a phone call
# before giving up and hanging up. This is meant to prevent long running calls
# where the user is giving invalid answers or not answering at all.
max_question_attempts = models.IntegerField(default=5)
def send(self, recipient, logged_event, phone_entry=None):
pass
class SMSCallbackContent(Content):
"""
This use case is no longer supported, but in order to display old configurations we
need to keep this model around.
The way that this use case worked was as follows. When the event fires for the
first time, the SMS message is sent as it is for SMSContent. The recipient is then
expected to perform a "call back" or "flash back" to the system, where they call
a phone number, let it ring, and hang up. CommCareHQ records the inbound call when
this happens.
Then, for every interval specified by reminder_intervals, the system will wait
that number of minutes and then check for the expected inbound call from the
recipient. If the inbound call was received, then no further action is needed.
If not, the SMS message is sent again. On the last interval, the SMS is not
sent again and the expected callback event is just closed out.
The results of the expected call back are stored in an entry in
corehq.apps.sms.models.ExpectedCallback.
"""
message = JSONField(default=dict)
# This is a list of intervals representing minutes to wait. It should never be empty.
# See the explanation above to understand how this is used.
reminder_intervals = JSONField(default=list)
def send(self, recipient, logged_event, phone_entry=None):
pass
class CustomContent(Content):
# Should be a key in settings.AVAILABLE_CUSTOM_SCHEDULING_CONTENT
# which points to a function to call at runtime to get a list of
    # messages to send to the recipient.
custom_content_id = models.CharField(max_length=126)
def create_copy(self):
"""
See Content.create_copy() for docstring
"""
return CustomContent(
custom_content_id=self.custom_content_id,
)
def get_list_of_messages(self, recipient):
if not self.schedule_instance:
raise ValueError(
"Expected CustomContent to be invoked in the context of a "
"ScheduleInstance. Please pass ScheduleInstance to .set_context()"
)
if self.custom_content_id not in settings.AVAILABLE_CUSTOM_SCHEDULING_CONTENT:
raise ValueError("Encountered unexpected custom content id %s" % self.custom_content_id)
custom_function = to_function(
settings.AVAILABLE_CUSTOM_SCHEDULING_CONTENT[self.custom_content_id][0]
)
messages = custom_function(recipient, self.schedule_instance)
if not isinstance(messages, list):
raise TypeError("Expected content to be a list of messages")
return messages
def send(self, recipient, logged_event, phone_entry=None):
logged_subevent = logged_event.create_subevent_from_contact_and_content(
recipient,
self,
case_id=self.case.case_id if self.case else None,
)
phone_entry_or_number = self.get_two_way_entry_or_phone_number(recipient)
if not phone_entry_or_number:
logged_subevent.error(MessagingEvent.ERROR_NO_PHONE_NUMBER)
return
# An empty list of messages returned from a custom content handler means
# we shouldn't send anything, so we don't log an error for that.
for message in self.get_list_of_messages(recipient):
self.send_sms_message(logged_event.domain, recipient, phone_entry_or_number, message, logged_subevent)
logged_subevent.completed()
| 38.902335
| 114
| 0.670523
|
bc5468b8ba363da089641b887d9013deada34cf0
| 11,860
|
py
|
Python
|
dashboard/dashboard/pinpoint/models/tasks/run_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 1
|
2019-11-01T23:31:22.000Z
|
2019-11-01T23:31:22.000Z
|
dashboard/dashboard/pinpoint/models/tasks/run_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/pinpoint/models/tasks/run_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import itertools
import json
import logging
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models import task as task_module
from dashboard.pinpoint.models.quest import run_test as run_test_quest
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.services import swarming
class ScheduleTestAction(
collections.namedtuple('ScheduleTestAction',
('job', 'task', 'properties'))):
__slots__ = ()
@task_module.LogStateTransitionFailures
def __call__(self, _):
logging.debug('Scheduling a Swarming task to run a test.')
self.properties.update(run_test_quest.VPYTHON_PARAMS)
body = {
'name':
'Pinpoint job',
'user':
'Pinpoint',
# TODO(dberris): Make these constants configurable?
'priority':
'100',
'task_slices': [{
'properties': self.properties,
'expiration_secs': '86400', # 1 day.
}],
# Since we're always going to be using the PubSub handling, we add the
# tags unconditionally.
'tags': [
'%s:%s' % (k, v)
for k, v in run_test_quest.SwarmingTagsFromJob(self.job).items()
],
# TODO(dberris): Consolidate constants in environment vars?
'pubsub_topic':
'projects/chromeperf/topics/pinpoint-swarming-updates',
'pubsub_auth_token':
'UNUSED',
'pubsub_userdata':
json.dumps({
'job_id': self.job.job_id,
'task': {
'type': 'run_test',
'id': self.task.id,
},
}),
}
self.task.payload.update({
'swarming_request_body': body,
})
# At this point we know we were successful in transitioning to 'ongoing'.
# TODO(dberris): Figure out error-handling for Swarming request failures?
response = swarming.Swarming(
self.task.payload.get('swarming_server')).Tasks().New(body)
logging.debug('Swarming response: %s', response)
self.task.payload.update({
'swarming_task_id': response.get('task_id'),
'tries': self.task.payload.get('tries', 0) + 1
})
# Update the payload with the task id from the Swarming request.
task_module.UpdateTask(
self.job, self.task.id, new_state='ongoing', payload=self.task.payload)
class PollSwarmingTaskAction(
collections.namedtuple('PollSwarmingTaskAction', ('job', 'task'))):
__slots__ = ()
@task_module.LogStateTransitionFailures
def __call__(self, _):
logging.debug('Polling a swarming task; task = %s', self.task)
swarming_server = self.task.payload.get('swarming_server')
task_id = self.task.payload.get('swarming_task_id')
swarming_task = swarming.Swarming(swarming_server).Task(task_id)
result = swarming_task.Result()
self.task.payload.update({
'swarming_task_result': {
k: v
for k, v in result.items()
if k in {'bot_id', 'state', 'failure'}
}
})
task_state = result.get('state')
if task_state in {'PENDING', 'RUNNING'}:
return
if task_state == 'EXPIRED':
# TODO(dberris): Do a retry, reset the payload and run an "initiate"?
self.task.payload.update({
'errors': [{
'reason': 'SwarmingExpired',
'message': 'Request to the Swarming service expired.',
}]
})
task_module.UpdateTask(
self.job, self.task.id, new_state='failed', payload=self.task.payload)
return
if task_state != 'COMPLETED':
task_module.UpdateTask(
self.job, self.task.id, new_state='failed', payload=self.task.payload)
return
self.task.payload.update({
'isolate_server': result.get('outputs_ref', {}).get('isolatedserver'),
'isolate_hash': result.get('outputs_ref', {}).get('isolated'),
})
new_state = 'completed'
if result.get('failure', False):
new_state = 'failed'
exception_string = run_test_quest.ParseException(
swarming_task.Stdout()['output'])
if not exception_string:
exception_string = 'No exception found in Swarming task output.'
self.task.payload.update({
'errors': [{
'reason': 'RunTestFailed',
'message': 'Running the test failed: %s' % (exception_string,)
}]
})
task_module.UpdateTask(
self.job, self.task.id, new_state=new_state, payload=self.task.payload)
# Everything after this point aims to define an evaluator for the 'run_test'
# tasks.
class InitiateEvaluator(object):
def __init__(self, job):
self.job = job
def __call__(self, task, event, accumulator):
# Outline:
# - Check dependencies to see if they're 'completed', looking for:
# - Isolate server
# - Isolate hash
dep_map = {
dep: {
'isolate_server': accumulator.get(dep, {}).get('isolate_server'),
'isolate_hash': accumulator.get(dep, {}).get('isolate_hash'),
'status': accumulator.get(dep, {}).get('status'),
} for dep in task.dependencies
}
if not dep_map:
logging.error(
'No dependencies for "run_test" task, unlikely to proceed; task = %s',
task)
return None
dep_value = {}
if len(dep_map) > 1:
# TODO(dberris): Figure out whether it's a valid use-case to have multiple
# isolate inputs to Swarming.
      logging.error(('Found multiple dependencies for run_test; '
                     'picking an arbitrary input; task = %s'), task)
    # next(iter(...)) works on both Python 2 and Python 3 dict views.
    dep_value.update(next(iter(dep_map.values())))
if dep_value.get('status') == 'failed':
task.payload.update({
'errors': [{
'reason':
'BuildIsolateNotFound',
'message': ('The build task this depends on failed, '
'so we cannot proceed to running the tests.')
}]
})
return [
lambda _: task_module.UpdateTask(
self.job, task.id, new_state='failed', payload=task.payload)
]
if dep_value.get('status') == 'completed':
properties = {
'input_ref': {
'isolatedserver': dep_value.get('isolate_server'),
'isolated': dep_value.get('isolate_hash'),
},
'extra_args': task.payload.get('extra_args'),
'dimensions': task.payload.get('dimensions'),
          # TODO(dberris): Make these hard-coded values configurable?
'execution_timeout_secs': '21600', # 6 hours, for rendering.mobile.
'io_timeout_secs': '14400', # 4 hours, to match the perf bots.
}
return [
ScheduleTestAction(job=self.job, task=task, properties=properties)
]
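# Illustrative sketch (assumed shape, not from the original code): the
# accumulator entry a completed 'find_isolate' dependency contributes, which
# is what InitiateEvaluator's dep_map extracts.
#
#   accumulator = {
#       'find_isolate_f9f2b720': {
#           'status': 'completed',
#           'isolate_server': 'https://isolateserver.appspot.com',
#           'isolate_hash': 'e3b0c44298fc1c14',
#       },
#   }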
class UpdateEvaluator(object):
def __init__(self, job):
self.job = job
def __call__(self, task, event, accumulator):
# Check that the task has the required information to poll Swarming. In this
# handler we're going to look for the 'swarming_task_id' key in the payload.
# TODO(dberris): Move this out, when we incorporate validation properly.
required_payload_keys = {'swarming_task_id', 'swarming_server'}
missing_keys = required_payload_keys - set(task.payload)
    if missing_keys:
      logging.error('Failed to find required keys from payload: %s; task = %s',
                    missing_keys, task.payload)
      # Without a Swarming task id and server we cannot poll; bail out.
      return None
    return [PollSwarmingTaskAction(job=self.job, task=task)]
class Evaluator(evaluators.SequenceEvaluator):
def __init__(self, job):
super(Evaluator, self).__init__(
evaluators=(
evaluators.TaskPayloadLiftingEvaluator(),
evaluators.FilteringEvaluator(
predicate=evaluators.All(
evaluators.TaskTypeEq('run_test'),
evaluators.TaskIsEventTarget(),
),
delegate=evaluators.DispatchByEventTypeEvaluator({
'initiate':
evaluators.FilteringEvaluator(
predicate=evaluators.Not(
evaluators.TaskStatusIn(
{'ongoing', 'failed', 'completed'})),
delegate=InitiateEvaluator(job)),
'update':
evaluators.FilteringEvaluator(
predicate=evaluators.TaskStatusIn({'ongoing'}),
delegate=UpdateEvaluator(job)),
})),
))
def ReportError(task, _, accumulator):
# TODO(dberris): Factor this out into smaller pieces?
task_errors = []
logging.debug('Validating task: %s', task)
if len(task.dependencies) != 1:
task_errors.append({
'cause':
'DependencyError',
'message':
'Task must have exactly 1 dependency; has %s' %
(len(task.dependencies),)
})
if task.status == 'ongoing':
required_payload_keys = {'swarming_task_id', 'swarming_server'}
missing_keys = required_payload_keys - (
set(task.payload) & required_payload_keys)
if missing_keys:
task_errors.append({
'cause': 'MissingRequirements',
'message': 'Missing required keys %s in task payload.' % missing_keys
})
elif task.status == 'pending' and task.dependencies and all(
accumulator.get(dep, {}).get('status') == 'completed'
for dep in task.dependencies):
required_dependency_keys = {'isolate_server', 'isolate_hash'}
dependency_keys = set(
itertools.chain(
*[accumulator.get(dep, []) for dep in task.dependencies]))
missing_keys = required_dependency_keys - (
dependency_keys & required_dependency_keys)
if missing_keys:
task_errors.append({
'cause':
'MissingDependencyInputs',
'message':
'Missing keys from dependency payload: %s' % (missing_keys,)
})
if task_errors:
accumulator.update({task.id: {'errors': task_errors}})
class Validator(evaluators.FilteringEvaluator):
def __init__(self):
super(Validator, self).__init__(
predicate=evaluators.TaskTypeEq('run_test'), delegate=ReportError)
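# Illustrative sketch (assumed shape): a failure recorded by ReportError ends
# up in the accumulator keyed by the task id, e.g.
#
#   accumulator['run_test_f9f2b720_0'] == {
#       'errors': [{
#           'cause': 'MissingRequirements',
#           'message': "Missing required keys {'swarming_task_id'} ...",
#       }],
#   }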
def TaskId(change, attempt):
return 'run_test_%s_%s' % (change, attempt)
TaskOptions = collections.namedtuple('TaskOptions',
('build_options', 'swarming_server',
'dimensions', 'extra_args', 'attempts'))
def CreateGraph(options):
if not isinstance(options, TaskOptions):
raise ValueError('options is not an instance of run_test.TaskOptions')
subgraph = find_isolate.CreateGraph(options.build_options)
find_isolate_tasks = [
task for task in subgraph.vertices if task.vertex_type == 'find_isolate'
]
assert len(find_isolate_tasks) == 1
find_isolate_task = find_isolate_tasks[0]
subgraph.vertices.extend([
task_module.TaskVertex(
id=TaskId(
find_isolate.ChangeId(options.build_options.change), attempt),
vertex_type='run_test',
payload={
'swarming_server': options.swarming_server,
'dimensions': options.dimensions,
'extra_args': options.extra_args,
}) for attempt in range(options.attempts)
])
subgraph.edges.extend([
task_module.Dependency(from_=task.id, to=find_isolate_task.id)
for task in subgraph.vertices
if task.vertex_type == 'run_test'
])
return subgraph
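# Minimal usage sketch (illustrative only; the build-options value and the
# argument values below are assumptions, not taken from this module):
#
#   options = TaskOptions(
#       build_options=build_options,  # a find_isolate.TaskOptions instance
#       swarming_server='https://chromium-swarm.appspot.com',
#       dimensions=[{'key': 'pool', 'value': 'chrome.tests.pinpoint'}],
#       extra_args=['--benchmark', 'speedometer2'],
#       attempts=10,
#   )
#   graph = CreateGraph(options)  # a task graph with one run_test per attempt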
| 35.088757
| 80
| 0.613322
|
2ac0749ea25711444e959559fd4f00153e34a6dc
| 558
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwShmtranslationsCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwShmtranslationsCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwShmtranslationsCom.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractWwwShmtranslationsCom(item):
'''
Parser for 'www.shmtranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
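# Minimal usage sketch (hypothetical feed item, not from the source; the two
# helper functions used above appear to be injected by the hosting framework):
#
#   item = {
#       'title': 'Some Novel Vol. 2 Chapter 15',
#       'tags': ['PRC'],
#   }
#   extractWwwShmtranslationsCom(item)  # -> a translated-release message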
| 25.363636
| 104
| 0.637993
|
02292634c3a5ac03df7b60f354e83c283f10049b
| 4,782
|
py
|
Python
|
tests/chia_log/handlers/test_harvester_activity_handler.py
|
kilbot/chiadog
|
1a49192b8b2e9c50ebcca4d43d1a6427c3b00f62
|
[
"MIT"
] | 1
|
2022-03-10T08:25:29.000Z
|
2022-03-10T08:25:29.000Z
|
tests/chia_log/handlers/test_harvester_activity_handler.py
|
kilbot/chiadog
|
1a49192b8b2e9c50ebcca4d43d1a6427c3b00f62
|
[
"MIT"
] | null | null | null |
tests/chia_log/handlers/test_harvester_activity_handler.py
|
kilbot/chiadog
|
1a49192b8b2e9c50ebcca4d43d1a6427c3b00f62
|
[
"MIT"
] | null | null | null |
# std
import unittest
from pathlib import Path
# project
from src.chia_log.handlers import harvester_activity_handler
from src.notifier import EventType, EventService, EventPriority
class TestHarvesterActivityHandler(unittest.TestCase):
def setUp(self) -> None:
self.handler = harvester_activity_handler.HarvesterActivityHandler()
self.example_logs_path = Path(__file__).resolve().parents[1] / "logs/harvester_activity"
def testNominal(self):
with open(self.example_logs_path / "nominal.txt") as f:
logs = f.readlines()
# Third log should trigger an event for a found proof
expected_number_events = [1, 1, 2, 1, 1]
for log, number_events in zip(logs, expected_number_events):
events = self.handler.handle(log)
            self.assertEqual(len(events), number_events, "Unexpected number of events")
self.assertEqual(events[0].type, EventType.KEEPALIVE, "Unexpected event type")
self.assertEqual(events[0].priority, EventPriority.NORMAL, "Unexpected priority")
self.assertEqual(events[0].service, EventService.HARVESTER, "Unexpected service")
if number_events == 2:
self.assertEqual(events[1].type, EventType.USER, "Unexpected event type")
self.assertEqual(events[1].priority, EventPriority.LOW, "Unexpected priority")
self.assertEqual(events[1].service, EventService.HARVESTER, "Unexpected service")
self.assertEqual(events[1].message, "Found 1 proof(s)!")
def testDecreasedPlots(self):
with open(self.example_logs_path / "plots_decreased.txt") as f:
logs = f.readlines()
# Fourth log should trigger an event for a decreased plot count
expected_number_events = [1, 1, 1, 2, 1]
for log, number_events in zip(logs, expected_number_events):
events = self.handler.handle(log)
            self.assertEqual(len(events), number_events, "Unexpected number of events")
self.assertEqual(events[0].type, EventType.KEEPALIVE, "Unexpected event type")
self.assertEqual(events[0].priority, EventPriority.NORMAL, "Unexpected priority")
self.assertEqual(events[0].service, EventService.HARVESTER, "Unexpected service")
if number_events == 2:
self.assertEqual(events[1].type, EventType.USER, "Unexpected event type")
self.assertEqual(events[1].priority, EventPriority.HIGH, "Unexpected priority")
self.assertEqual(events[1].service, EventService.HARVESTER, "Unexpected service")
self.assertEqual(events[1].message, "Disconnected HDD? The total plot count decreased from 43 to 30.")
def testLostSyncTemporarily(self):
with open(self.example_logs_path / "lost_sync_temporary.txt") as f:
logs = f.readlines()
# Fourth log should trigger an event for harvester outage
expected_number_events = [1, 1, 1, 2, 1]
for log, number_events in zip(logs, expected_number_events):
events = self.handler.handle(log)
            self.assertEqual(len(events), number_events, "Unexpected number of events")
self.assertEqual(events[0].type, EventType.KEEPALIVE, "Unexpected event type")
self.assertEqual(events[0].priority, EventPriority.NORMAL, "Unexpected priority")
self.assertEqual(events[0].service, EventService.HARVESTER, "Unexpected service")
if number_events == 2:
self.assertEqual(events[1].type, EventType.USER, "Unexpected event type")
self.assertEqual(events[1].priority, EventPriority.NORMAL, "Unexpected priority")
self.assertEqual(events[1].service, EventService.HARVESTER, "Unexpected service")
self.assertEqual(
events[1].message,
"Experiencing networking issues? Harvester did not participate in any "
"challenge for 608 seconds. It's now working again.",
)
def testSlowSeekTime(self):
with open(self.example_logs_path / "slow_seek_time.txt") as f:
logs = f.readlines()
for log in logs:
events = self.handler.handle(log)
            self.assertEqual(len(events), 2, "Unexpected number of events")
self.assertEqual(events[1].type, EventType.USER, "Unexpected event type")
self.assertEqual(events[1].priority, EventPriority.NORMAL, "Unexpected priority")
self.assertEqual(events[1].service, EventService.HARVESTER, "Unexpected service")
self.assertEqual(events[1].message, "Seeking plots took too long: 28.12348 seconds!")
if __name__ == "__main__":
unittest.main()
| 52.549451
| 118
| 0.663112
|
80dd5d3e771c08a60c5a6b24893aee8f0f2451da
| 776
|
py
|
Python
|
sdk/confidentialledger/azure-confidentialledger/tests/test_confidential_ledger_client_certificate_async.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/confidentialledger/azure-confidentialledger/tests/test_confidential_ledger_client_certificate_async.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/confidentialledger/azure-confidentialledger/tests/test_confidential_ledger_client_certificate_async.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
from azure.confidentialledger import (
ConfidentialLedgerCertificateCredential,
)
from azure.confidentialledger.aio import ConfidentialLedgerClient
from _shared.client_test_common_async import AsyncConfidentialLedgerClientTestMixin
class AsyncCertificateCredentialClientTest(
AsyncConfidentialLedgerClientTestMixin.AsyncBaseTest
):
def setUp(self):
super(AsyncCertificateCredentialClientTest, self).setUp()
self.client = self.create_client_from_credential(
ConfidentialLedgerClient,
credential=ConfidentialLedgerCertificateCredential(
self.user_certificate_path
),
ledger_certificate_path=self.network_certificate_path,
endpoint=self.confidential_ledger_url,
)
| 35.272727
| 83
| 0.762887
|
80d04b9c406680653cb8dc2bf70658a08daecf89
| 750
|
py
|
Python
|
Engineering/Software Engineering/Code/Raspberry Pi/model-airport/test/python/ModelAirportDevicesTest.py
|
DarshanShet777/Model-Airport
|
873411d8c5017d996622f932c3a39c1b4d7ee340
|
[
"BSD-3-Clause"
] | 1
|
2019-04-25T21:24:39.000Z
|
2019-04-25T21:24:39.000Z
|
Engineering/Software Engineering/Code/Raspberry Pi/model-airport/test/python/ModelAirportDevicesTest.py
|
DarshanShet777/Model-Airport
|
873411d8c5017d996622f932c3a39c1b4d7ee340
|
[
"BSD-3-Clause"
] | 49
|
2019-04-25T21:03:59.000Z
|
2021-04-19T03:21:00.000Z
|
Engineering/Software Engineering/Code/Raspberry Pi/model-airport/test/python/ModelAirportDevicesTest.py
|
DarshanShet777/Model-Airport
|
873411d8c5017d996622f932c3a39c1b4d7ee340
|
[
"BSD-3-Clause"
] | null | null | null |
from time import sleep
from ModelAirportGPIO import ModelAirportGPIO
print("[ModelAirportDevicesTest] Setting up GPIO...")
gpio = ModelAirportGPIO()
print("[ModelAirportDevicesTest] Testing all devices...")
gpio.setDeviceState("Pavement-Lighting", True)
gpio.setDeviceState("Backstage-Entrance", True)
gpio.setDeviceState("Runway-Threshold", True)
print(gpio.getDeviceState("Backstage-Entrance"))
print(gpio.getDeviceState("Runway-Threshold"))
sleep(5)
print("[ModelAirportDevicesTest] Ending test...")
gpio.setDeviceState("Pavement-Lighting", False)
gpio.setDeviceState("Backstage-Entrance", False)
gpio.setDeviceState("Runway-Threshold", False)
print(gpio.getDeviceState("Backstage-Entrance"))
print(gpio.getDeviceState("Runway-Threshold"))
| 30
| 57
| 0.797333
|
6f31712a2faaabb4fc6e75b081a9e4d131ea61de
| 400
|
py
|
Python
|
flask_admin_markdown/ckeditor/fields.py
|
suckmybigdick/flask-admin-markdown
|
2711a95118efb5fcb4c82edda3565fb4b9561adb
|
[
"MIT"
] | null | null | null |
flask_admin_markdown/ckeditor/fields.py
|
suckmybigdick/flask-admin-markdown
|
2711a95118efb5fcb4c82edda3565fb4b9561adb
|
[
"MIT"
] | null | null | null |
flask_admin_markdown/ckeditor/fields.py
|
suckmybigdick/flask-admin-markdown
|
2711a95118efb5fcb4c82edda3565fb4b9561adb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from wtforms import TextAreaField
from wtforms.widgets import TextArea
class CKTextAreaWidget(TextArea):
def __call__(self, field, **kwargs):
if kwargs.get('class'):
kwargs['class'] += ' ckeditor'
else:
kwargs.setdefault('class', 'ckeditor')
return super(CKTextAreaWidget, self).__call__(field, **kwargs)
class CKTextAreaField(TextAreaField):
widget = CKTextAreaWidget()
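# Minimal usage sketch (assumed Flask-Admin integration, not part of this
# module): render a model's text column with CKEditor in the admin form.
#
#   from flask_admin.contrib.sqla import ModelView
#
#   class PostAdmin(ModelView):
#       form_overrides = {'text': CKTextAreaField}
#       # The create/edit templates must also load ckeditor.js.
#       create_template = 'ckeditor_edit.html'
#       edit_template = 'ckeditor_edit.html'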
| 26.666667
| 64
| 0.75
|