hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
919cc09b862c3d10e4bf58ead560f4ea51420c8b | 14,250 | py | Python | python/src/iceberg/expressions/base.py | felixYyu/iceberg | 120cbe9a5db4cedec76d2a7f097ec67de9c25c96 | [
"Apache-2.0"
] | 1 | 2021-11-18T02:27:29.000Z | 2021-11-18T02:27:29.000Z | python/src/iceberg/expressions/base.py | felixYyu/iceberg | 120cbe9a5db4cedec76d2a7f097ec67de9c25c96 | [
"Apache-2.0"
] | null | null | null | python/src/iceberg/expressions/base.py | felixYyu/iceberg | 120cbe9a5db4cedec76d2a7f097ec67de9c25c96 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABC, abstractmethod
from enum import Enum, auto
from functools import reduce, singledispatch
from typing import Any, Generic, TypeVar
from iceberg.files import StructProtocol
from iceberg.schema import Accessor, Schema
from iceberg.types import NestedField
from iceberg.utils.singleton import Singleton
T = TypeVar("T")
class Operation(Enum):
"""Operations to be used as components in expressions
Operations can be negated by calling the negate method.
>>> Operation.TRUE.negate()
<Operation.FALSE: 2>
>>> Operation.IS_NULL.negate()
<Operation.NOT_NULL: 4>
The above example uses the OPERATION_NEGATIONS map which maps each enum
to it's opposite enum.
Raises:
ValueError: This is raised when attempting to negate an operation
that cannot be negated.
"""
TRUE = auto()
FALSE = auto()
IS_NULL = auto()
NOT_NULL = auto()
IS_NAN = auto()
NOT_NAN = auto()
LT = auto()
LT_EQ = auto()
GT = auto()
GT_EQ = auto()
EQ = auto()
NOT_EQ = auto()
IN = auto()
NOT_IN = auto()
NOT = auto()
AND = auto()
OR = auto()
def negate(self) -> "Operation":
"""Returns the operation used when this is negated."""
try:
return OPERATION_NEGATIONS[self]
except KeyError as e:
raise ValueError(f"No negation defined for operation {self}") from e
OPERATION_NEGATIONS = {
Operation.TRUE: Operation.FALSE,
Operation.FALSE: Operation.TRUE,
Operation.IS_NULL: Operation.NOT_NULL,
Operation.NOT_NULL: Operation.IS_NULL,
Operation.IS_NAN: Operation.NOT_NAN,
Operation.NOT_NAN: Operation.IS_NAN,
Operation.LT: Operation.GT_EQ,
Operation.LT_EQ: Operation.GT,
Operation.GT: Operation.LT_EQ,
Operation.GT_EQ: Operation.LT,
Operation.EQ: Operation.NOT_EQ,
Operation.NOT_EQ: Operation.EQ,
Operation.IN: Operation.NOT_IN,
Operation.NOT_IN: Operation.IN,
}
class Literal(Generic[T], ABC):
"""Literal which has a value and can be converted between types"""
@property
@abstractmethod
class BooleanExpression(ABC):
"""base class for all boolean expressions"""
@abstractmethod
class And(BooleanExpression):
"""AND operation expression - logical conjunction"""
@property
@property
class Or(BooleanExpression):
"""OR operation expression - logical disjunction"""
@property
@property
class Not(BooleanExpression):
"""NOT operation expression - logical negation"""
class AlwaysTrue(BooleanExpression, ABC, Singleton):
"""TRUE expression"""
class AlwaysFalse(BooleanExpression, ABC, Singleton):
"""FALSE expression"""
class BoundReference:
"""A reference bound to a field in a schema
Args:
field (NestedField): A referenced field in an Iceberg schema
accessor (Accessor): An Accessor object to access the value at the field's position
"""
@property
def field(self) -> NestedField:
"""The referenced field"""
return self._field
def eval(self, struct: StructProtocol) -> Any:
"""Returns the value at the referenced field's position in an object that abides by the StructProtocol
Args:
struct (StructProtocol): A row object that abides by the StructProtocol and returns values given a position
Returns:
Any: The value at the referenced field's position in `struct`
"""
return self._accessor.get(struct)
class UnboundReference:
"""A reference not yet bound to a field in a schema
Args:
name (str): The name of the field
Note:
An unbound reference is sometimes referred to as a "named" reference
"""
@property
def bind(self, schema: Schema, case_sensitive: bool) -> BoundReference:
"""Bind the reference to an Iceberg schema
Args:
schema (Schema): An Iceberg schema
case_sensitive (bool): Whether to consider case when binding the reference to the field
Raises:
ValueError: If an empty name is provided
Returns:
BoundReference: A reference bound to the specific field in the Iceberg schema
"""
field = schema.find_field(name_or_id=self.name, case_sensitive=case_sensitive)
if not field:
raise ValueError(f"Cannot find field '{self.name}' in schema: {schema}")
return BoundReference(field=field, accessor=schema.accessor_for_field(field.field_id))
@singledispatch
def visit(obj, visitor: BooleanExpressionVisitor[T]) -> T:
"""A generic function for applying a boolean expression visitor to any point within an expression
The function traverses the expression in post-order fashion
Args:
obj(BooleanExpression): An instance of a BooleanExpression
visitor(BooleanExpressionVisitor[T]): An instance of an implementation of the generic BooleanExpressionVisitor base class
Raises:
NotImplementedError: If attempting to visit an unsupported expression
"""
raise NotImplementedError(f"Cannot visit unsupported expression: {obj}")
@visit.register(AlwaysTrue)
def _(obj: AlwaysTrue, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an AlwaysTrue boolean expression with a concrete BooleanExpressionVisitor"""
return visitor.visit_true()
@visit.register(AlwaysFalse)
def _(obj: AlwaysFalse, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an AlwaysFalse boolean expression with a concrete BooleanExpressionVisitor"""
return visitor.visit_false()
@visit.register(Not)
def _(obj: Not, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit a Not boolean expression with a concrete BooleanExpressionVisitor"""
child_result: T = visit(obj.child, visitor=visitor)
return visitor.visit_not(child_result=child_result)
@visit.register(And)
def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an And boolean expression with a concrete BooleanExpressionVisitor"""
left_result: T = visit(obj.left, visitor=visitor)
right_result: T = visit(obj.right, visitor=visitor)
return visitor.visit_and(left_result=left_result, right_result=right_result)
@visit.register(Or)
def _(obj: Or, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an Or boolean expression with a concrete BooleanExpressionVisitor"""
left_result: T = visit(obj.left, visitor=visitor)
right_result: T = visit(obj.right, visitor=visitor)
return visitor.visit_or(left_result=left_result, right_result=right_result)
| 31.045752 | 129 | 0.658737 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABC, abstractmethod
from enum import Enum, auto
from functools import reduce, singledispatch
from typing import Any, Generic, TypeVar
from iceberg.files import StructProtocol
from iceberg.schema import Accessor, Schema
from iceberg.types import NestedField
from iceberg.utils.singleton import Singleton
T = TypeVar("T")
class Operation(Enum):
"""Operations to be used as components in expressions
Operations can be negated by calling the negate method.
>>> Operation.TRUE.negate()
<Operation.FALSE: 2>
>>> Operation.IS_NULL.negate()
<Operation.NOT_NULL: 4>
The above example uses the OPERATION_NEGATIONS map which maps each enum
to it's opposite enum.
Raises:
ValueError: This is raised when attempting to negate an operation
that cannot be negated.
"""
TRUE = auto()
FALSE = auto()
IS_NULL = auto()
NOT_NULL = auto()
IS_NAN = auto()
NOT_NAN = auto()
LT = auto()
LT_EQ = auto()
GT = auto()
GT_EQ = auto()
EQ = auto()
NOT_EQ = auto()
IN = auto()
NOT_IN = auto()
NOT = auto()
AND = auto()
OR = auto()
def negate(self) -> "Operation":
"""Returns the operation used when this is negated."""
try:
return OPERATION_NEGATIONS[self]
except KeyError as e:
raise ValueError(f"No negation defined for operation {self}") from e
OPERATION_NEGATIONS = {
Operation.TRUE: Operation.FALSE,
Operation.FALSE: Operation.TRUE,
Operation.IS_NULL: Operation.NOT_NULL,
Operation.NOT_NULL: Operation.IS_NULL,
Operation.IS_NAN: Operation.NOT_NAN,
Operation.NOT_NAN: Operation.IS_NAN,
Operation.LT: Operation.GT_EQ,
Operation.LT_EQ: Operation.GT,
Operation.GT: Operation.LT_EQ,
Operation.GT_EQ: Operation.LT,
Operation.EQ: Operation.NOT_EQ,
Operation.NOT_EQ: Operation.EQ,
Operation.IN: Operation.NOT_IN,
Operation.NOT_IN: Operation.IN,
}
class Literal(Generic[T], ABC):
"""Literal which has a value and can be converted between types"""
def __init__(self, value: T, value_type: type):
if value is None or not isinstance(value, value_type):
raise TypeError(f"Invalid literal value: {value} (not a {value_type})")
self._value = value
@property
def value(self) -> T:
return self._value # type: ignore
@abstractmethod
def to(self, type_var):
... # pragma: no cover
def __repr__(self):
return f"{type(self).__name__}({self.value})"
def __str__(self):
return str(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.value < other.value
def __gt__(self, other):
return self.value > other.value
def __le__(self, other):
return self.value <= other.value
def __ge__(self, other):
return self.value >= other.value
class BooleanExpression(ABC):
"""base class for all boolean expressions"""
@abstractmethod
def __invert__(self) -> "BooleanExpression":
...
class And(BooleanExpression):
"""AND operation expression - logical conjunction"""
def __new__(cls, left: BooleanExpression, right: BooleanExpression, *rest: BooleanExpression):
if rest:
return reduce(And, (left, right, *rest))
if left is AlwaysFalse() or right is AlwaysFalse():
return AlwaysFalse()
elif left is AlwaysTrue():
return right
elif right is AlwaysTrue():
return left
self = super().__new__(cls)
self._left = left # type: ignore
self._right = right # type: ignore
return self
@property
def left(self) -> BooleanExpression:
return self._left # type: ignore
@property
def right(self) -> BooleanExpression:
return self._right # type: ignore
def __eq__(self, other) -> bool:
return id(self) == id(other) or (isinstance(other, And) and self.left == other.left and self.right == other.right)
def __invert__(self) -> "Or":
return Or(~self.left, ~self.right)
def __repr__(self) -> str:
return f"And({repr(self.left)}, {repr(self.right)})"
def __str__(self) -> str:
return f"({self.left} and {self.right})"
class Or(BooleanExpression):
"""OR operation expression - logical disjunction"""
def __new__(cls, left: BooleanExpression, right: BooleanExpression, *rest: BooleanExpression):
if rest:
return reduce(Or, (left, right, *rest))
if left is AlwaysTrue() or right is AlwaysTrue():
return AlwaysTrue()
elif left is AlwaysFalse():
return right
elif right is AlwaysFalse():
return left
self = super().__new__(cls)
self._left = left # type: ignore
self._right = right # type: ignore
return self
@property
def left(self) -> BooleanExpression:
return self._left # type: ignore
@property
def right(self) -> BooleanExpression:
return self._right # type: ignore
def __eq__(self, other) -> bool:
return id(self) == id(other) or (isinstance(other, Or) and self.left == other.left and self.right == other.right)
def __invert__(self) -> "And":
return And(~self.left, ~self.right)
def __repr__(self) -> str:
return f"Or({repr(self.left)}, {repr(self.right)})"
def __str__(self) -> str:
return f"({self.left} or {self.right})"
class Not(BooleanExpression):
"""NOT operation expression - logical negation"""
def __new__(cls, child: BooleanExpression):
if child is AlwaysTrue():
return AlwaysFalse()
elif child is AlwaysFalse():
return AlwaysTrue()
elif isinstance(child, Not):
return child.child
return super().__new__(cls)
def __init__(self, child):
self.child = child
def __eq__(self, other) -> bool:
return id(self) == id(other) or (isinstance(other, Not) and self.child == other.child)
def __invert__(self) -> BooleanExpression:
return self.child
def __repr__(self) -> str:
return f"Not({repr(self.child)})"
def __str__(self) -> str:
return f"(not {self.child})"
class AlwaysTrue(BooleanExpression, ABC, Singleton):
"""TRUE expression"""
def __invert__(self) -> "AlwaysFalse":
return AlwaysFalse()
def __repr__(self) -> str:
return "AlwaysTrue()"
def __str__(self) -> str:
return "true"
class AlwaysFalse(BooleanExpression, ABC, Singleton):
"""FALSE expression"""
def __invert__(self) -> "AlwaysTrue":
return AlwaysTrue()
def __repr__(self) -> str:
return "AlwaysFalse()"
def __str__(self) -> str:
return "false"
class BoundReference:
"""A reference bound to a field in a schema
Args:
field (NestedField): A referenced field in an Iceberg schema
accessor (Accessor): An Accessor object to access the value at the field's position
"""
def __init__(self, field: NestedField, accessor: Accessor):
self._field = field
self._accessor = accessor
def __str__(self):
return f"BoundReference(field={repr(self.field)}, accessor={repr(self._accessor)})"
def __repr__(self):
return f"BoundReference(field={repr(self.field)}, accessor={repr(self._accessor)})"
@property
def field(self) -> NestedField:
"""The referenced field"""
return self._field
def eval(self, struct: StructProtocol) -> Any:
"""Returns the value at the referenced field's position in an object that abides by the StructProtocol
Args:
struct (StructProtocol): A row object that abides by the StructProtocol and returns values given a position
Returns:
Any: The value at the referenced field's position in `struct`
"""
return self._accessor.get(struct)
class UnboundReference:
"""A reference not yet bound to a field in a schema
Args:
name (str): The name of the field
Note:
An unbound reference is sometimes referred to as a "named" reference
"""
def __init__(self, name: str):
if not name:
raise ValueError(f"Name cannot be null: {name}")
self._name = name
def __str__(self) -> str:
return f"UnboundReference(name={repr(self.name)})"
def __repr__(self) -> str:
return f"UnboundReference(name={repr(self.name)})"
@property
def name(self) -> str:
return self._name
def bind(self, schema: Schema, case_sensitive: bool) -> BoundReference:
"""Bind the reference to an Iceberg schema
Args:
schema (Schema): An Iceberg schema
case_sensitive (bool): Whether to consider case when binding the reference to the field
Raises:
ValueError: If an empty name is provided
Returns:
BoundReference: A reference bound to the specific field in the Iceberg schema
"""
field = schema.find_field(name_or_id=self.name, case_sensitive=case_sensitive)
if not field:
raise ValueError(f"Cannot find field '{self.name}' in schema: {schema}")
return BoundReference(field=field, accessor=schema.accessor_for_field(field.field_id))
class BooleanExpressionVisitor(Generic[T], ABC):
@abstractmethod
def visit_true(self) -> T:
"""Visit method for an AlwaysTrue boolean expression
Note: This visit method has no arguments since AlwaysTrue instances have no context.
"""
@abstractmethod
def visit_false(self) -> T:
"""Visit method for an AlwaysFalse boolean expression
Note: This visit method has no arguments since AlwaysFalse instances have no context.
"""
@abstractmethod
def visit_not(self, child_result: T) -> T:
"""Visit method for a Not boolean expression
Args:
result (T): The result of visiting the child of the Not boolean expression
"""
@abstractmethod
def visit_and(self, left_result: T, right_result: T) -> T:
"""Visit method for an And boolean expression
Args:
left_result (T): The result of visiting the left side of the expression
right_result (T): The result of visiting the right side of the expression
"""
@abstractmethod
def visit_or(self, left_result: T, right_result: T) -> T:
"""Visit method for an Or boolean expression
Args:
left_result (T): The result of visiting the left side of the expression
right_result (T): The result of visiting the right side of the expression
"""
@abstractmethod
def visit_unbound_predicate(self, predicate) -> T:
"""Visit method for an unbound predicate in an expression tree
Args:
predicate (UnboundPredicate): An instance of an UnboundPredicate
"""
@abstractmethod
def visit_bound_predicate(self, predicate) -> T:
"""Visit method for a bound predicate in an expression tree
Args:
predicate (BoundPredicate): An instance of a BoundPredicate
"""
@singledispatch
def visit(obj, visitor: BooleanExpressionVisitor[T]) -> T:
"""A generic function for applying a boolean expression visitor to any point within an expression
The function traverses the expression in post-order fashion
Args:
obj(BooleanExpression): An instance of a BooleanExpression
visitor(BooleanExpressionVisitor[T]): An instance of an implementation of the generic BooleanExpressionVisitor base class
Raises:
NotImplementedError: If attempting to visit an unsupported expression
"""
raise NotImplementedError(f"Cannot visit unsupported expression: {obj}")
@visit.register(AlwaysTrue)
def _(obj: AlwaysTrue, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an AlwaysTrue boolean expression with a concrete BooleanExpressionVisitor"""
return visitor.visit_true()
@visit.register(AlwaysFalse)
def _(obj: AlwaysFalse, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an AlwaysFalse boolean expression with a concrete BooleanExpressionVisitor"""
return visitor.visit_false()
@visit.register(Not)
def _(obj: Not, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit a Not boolean expression with a concrete BooleanExpressionVisitor"""
child_result: T = visit(obj.child, visitor=visitor)
return visitor.visit_not(child_result=child_result)
@visit.register(And)
def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an And boolean expression with a concrete BooleanExpressionVisitor"""
left_result: T = visit(obj.left, visitor=visitor)
right_result: T = visit(obj.right, visitor=visitor)
return visitor.visit_and(left_result=left_result, right_result=right_result)
@visit.register(Or)
def _(obj: Or, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an Or boolean expression with a concrete BooleanExpressionVisitor"""
left_result: T = visit(obj.left, visitor=visitor)
right_result: T = visit(obj.right, visitor=visitor)
return visitor.visit_or(left_result=left_result, right_result=right_result)
| 3,784 | 1,862 | 1,230 |
261611360ae3da7a443b41a52e0dd85e11baa6cf | 2,787 | py | Python | osrsmath/tests/general/test_skills.py | Palfore/OSRSmath | 373eb1e7f9702b98de318b3c708084353626a177 | [
"MIT"
] | 5 | 2020-06-30T06:51:25.000Z | 2021-11-16T01:04:48.000Z | osrsmath/tests/general/test_skills.py | Palfore/OSRS-Combat | 373eb1e7f9702b98de318b3c708084353626a177 | [
"MIT"
] | 15 | 2020-06-19T14:36:38.000Z | 2021-04-16T16:17:08.000Z | osrsmath/tests/general/test_skills.py | Palfore/OSRS-Combat | 373eb1e7f9702b98de318b3c708084353626a177 | [
"MIT"
] | null | null | null | from osrsmath.general.skills import *
import unittest
| 39.253521 | 83 | 0.776821 | from osrsmath.general.skills import *
import unittest
class TestExperience(unittest.TestCase):
def test_experience_for_levels_below_1_raises(self):
self.assertRaises(ValueError, lambda:experience(0))
self.assertRaises(ValueError, lambda:experience(-3))
def test_experience_for_levels_above_level_cap_with_no_flag_raises(self):
self.assertRaises(ValueError, lambda:experience(100, virtual_levels=False))
self.assertRaises(ValueError, lambda:experience(112, virtual_levels=False))
def test_experience_for_levels_above_virtual_cap_raises(self):
self.assertRaises(ValueError, lambda:experience(127))
self.assertRaises(ValueError, lambda:experience(140))
def test_experience_for_levels_below_level_cap(self):
self.assertEqual(experience(85), 3_258_594)
self.assertEqual(experience(34), 20_224)
def test_experience_for_levels_above_virtual_cap_with_flag(self):
self.assertEqual(experience(100, virtual_levels=True), 14_391_160)
self.assertEqual(experience(112, virtual_levels=True), 47_221_641)
class TestLevel(unittest.TestCase):
def test_experience_below_zero_raises(self):
self.assertRaises(ValueError, lambda:level(-1))
def test_experience_of_zero_is_lowest_level(self):
self.assertEqual(level(0), 1)
def test_experience_above_level_cap_returns_max_level_without_flag(self):
self.assertEqual(level(14_000_000, virtual_levels=False), 99)
self.assertEqual(level(200_000_000, virtual_levels=False), 99)
def test_experience_above_level_cap_with_flag(self):
self.assertEqual(level(14_000_000, virtual_levels=True), 99)
self.assertEqual(level(112_000_000, virtual_levels=True), 120)
self.assertEqual(level(200_000_000, virtual_levels=True), 126)
def test_experience_above_maximum_experience_raises(self):
self.assertRaises(ValueError, lambda:level(200_000_001))
self.assertRaises(ValueError, lambda:level(252_532_523))
def test_experience_within_bounds(self):
self.assertEqual(level(40_000), 40)
self.assertEqual(level(700_000), 69)
self.assertEqual(level(9_000_000), 95)
def test_invertability(self):
small_experience = 1
for l in range(1, 99+1):
with self.subTest(level=l):
self.assertEqual(level(experience(l)), l)
def test_experience_just_over_level_same_level(self):
small_experience = 1
for l in range(1, 99+1):
with self.subTest(level=l):
self.assertEqual(level(experience(l) + small_experience), l)
def test_experience_just_under_level_is_previous_level(self):
small_experience = 1
for l in range(2, 99+1):
with self.subTest(level=l):
if l == 1:
self.assertRaises(ValueError, lambda:level(experience(l) - small_experience))
else:
self.assertEqual(level(experience(l) - small_experience), l - 1)
| 2,286 | 33 | 410 |
745034e241e0b4198cfb1998e265ecd1a3fc6b88 | 3,019 | py | Python | groundStationSoftware/socket/pyServer.py | ajayyy/rocket-code-2020 | b4c0fa741d17785d4637c153814c59d4628ff20f | [
"MIT"
] | null | null | null | groundStationSoftware/socket/pyServer.py | ajayyy/rocket-code-2020 | b4c0fa741d17785d4637c153814c59d4628ff20f | [
"MIT"
] | null | null | null | groundStationSoftware/socket/pyServer.py | ajayyy/rocket-code-2020 | b4c0fa741d17785d4637c153814c59d4628ff20f | [
"MIT"
] | null | null | null | import socket
import serial
import time
import sys
import glob
import signal
from sys import exit
address = '127.0.0.1'
port = 8080
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
serialCom = serial.Serial(port)
serialCom.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
signal.signal(signal.SIGINT, handler) # ctlr + c
signal.signal(signal.SIGTSTP, handler) # ctlr + z
global server
# next create a socket object
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket successfully created.")
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((address, port))
print("Socket binded to %s." %(port))
# put the socket into listening mode
server.listen(5)
print("Socket is listening.")
openSerial()
while True:
# Establish connection with client.
try:
c, addr = server.accept()
except:
# server has been closed
break
with c:
print('Connected by', addr)
while True:
try:
x = ser.read(1) # read one byte
# print(type(x))
print(int.from_bytes(x, "big"))
except Exception as e:
print("Serial communication lost.")
print(e)
openSerial()
break
try:
c.send(x)
# pass
except:
break
#x = b'1'
# read serial
# if not data: break
#sleep(1)
print("Client disconnected.")
| 24.152 | 108 | 0.556807 | import socket
import serial
import time
import sys
import glob
import signal
from sys import exit
address = '127.0.0.1'
port = 8080
def closeConnection():
print("\nClosing connection.")
try:
server.shutdown(socket.SHUT_RDWR)
server.close()
except:
pass
try:
ser.close()
except:
pass
def handler(signal_received, frame):
closeConnection()
exit()
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
serialCom = serial.Serial(port)
serialCom.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def openSerial():
global ser
valid = False
while(not valid):
time.sleep(1)
try:
ser = serial.Serial([(port) for port in serial_ports() if 'USB' in port][0], 9600, timeout=None)
ser.reset_input_buffer()
valid = True
print("Serial communication established.")
except:
valid = False
signal.signal(signal.SIGINT, handler) # ctlr + c
signal.signal(signal.SIGTSTP, handler) # ctlr + z
global server
# next create a socket object
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket successfully created.")
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((address, port))
print("Socket binded to %s." %(port))
# put the socket into listening mode
server.listen(5)
print("Socket is listening.")
openSerial()
while True:
# Establish connection with client.
try:
c, addr = server.accept()
except:
# server has been closed
break
with c:
print('Connected by', addr)
while True:
try:
x = ser.read(1) # read one byte
# print(type(x))
print(int.from_bytes(x, "big"))
except Exception as e:
print("Serial communication lost.")
print(e)
openSerial()
break
try:
c.send(x)
# pass
except:
break
#x = b'1'
# read serial
# if not data: break
#sleep(1)
print("Client disconnected.")
| 592 | 0 | 69 |
9974dc908d88962393609c47d9df85b510796c7f | 5,101 | py | Python | pubsub.py | HotSushi/SimplePythonPubSub | 8e870b6d65e6877703bde043d4621120f7527e49 | [
"MIT"
] | null | null | null | pubsub.py | HotSushi/SimplePythonPubSub | 8e870b6d65e6877703bde043d4621120f7527e49 | [
"MIT"
] | 1 | 2016-05-19T06:39:15.000Z | 2016-05-19T06:39:35.000Z | pubsub.py | HotSushi/SimplePythonPubSub | 8e870b6d65e6877703bde043d4621120f7527e49 | [
"MIT"
] | null | null | null | import socket
import thread
import time
__author__ = "Sushant Raikar"
__email__ = "sushantraikar123@yahoo.com"
class SocketClient:
"""
=================
Pub Sub Generic Client
=================
Description: This is a generic client implementation. All interaction
with the broker is done through this class. It continuously listens
for published messages in a thread, provides api for publishing mess-
ages. A client can subscribe to more than one channels at a time.
API:
publish(channel_name, message)
uses broker's PUB API.
subscribe(channel_name)
uses broker's SUB API.
exiter()
uses broker's EXIT API.
set_callback(function)
function will be triggered with the message, ie. function(message)
,when a message is received from subscribed channel.
"""
def __init__(self, host, port):
"""
Initializes client with host and port. Starts a new thread for li-
stening to incoming messages.
"""
self.host = host
self.port = port
self.callback = None
self.sock = socket.socket()
self.sock.connect((host, port))
thread.start_new_thread(SocketClient.clientthread,(self.sock, self.__message_received_callback))
@staticmethod
def clientthread(conn, callback):
"""
Listens for incoming message.
Raises RuntimeError, if server connection breaks abruptly.
"""
while True:
try:
data = conn.recv(1024)
callback(data)
except:
raise RuntimeError("Server crashed")
conn.close()
def __message_received_callback(self, msg):
"""
Triggers callback function if its set.
"""
if self.callback:
self.callback(msg)
def __send(self, data):
"""
Send function, sleep after sending to avoid socket combining con-
secutive messages.
"""
self.sock.send(data)
time.sleep(0.01)
def set_callback(self, fn):
"""
Api for setting callback function.
"""
self.callback = fn
def publish(self, channel, msg):
"""
Api for publishing message.
"""
send_data = "PUB %s %s"%(channel, msg)
self.__send(send_data)
def subscribe(self, channel):
"""
Api for subscribing to a channel.
"""
send_data = "SUB %s"%(channel)
self.__send(send_data)
def exiter(self):
"""
Api for closing connection.
"""
send_data = "EXIT "
self.__send(send_data)
class Publisher:
"""
=================
Pub Sub Publisher
=================
Description: This is a wrapper over client implementation, for publisher
specific events. Publisher is initialized with a channel name. All mess-
ages are published only on this channel.
API:
send(message)
publishes message on the channel.
stop()
stop connection.
"""
class Subscriber:
    """
    =================
    Pub Sub Subscriber
    =================
    Description: wrapper over the generic client for subscriber-specific
    use. A Subscriber is bound to a channel at construction time, and all
    messages received come from that channel. A callback can be set; if no
    callback is set, received messages are stored in a queue and each call
    to recv() dequeues one message. It is recommended to use recv() and
    set_callback() exclusively of one another.
    API:
    recv()
        Return the oldest queued message, or None. If a callback is set
        this always returns None.
    set_callback(function)
        triggers `function(message)` for every incoming message.
    stop()
        disconnect and stop receiving messages.
    """
| 28.497207 | 104 | 0.59949 | import socket
import thread
import time
__author__ = "Sushant Raikar"
__email__ = "sushantraikar123@yahoo.com"
class SocketClient:
    """
    =================
    Pub Sub Generic Client
    =================
    Generic broker client: every interaction with the broker goes through
    this class. It listens continuously for published messages on a
    background thread and exposes the broker's wire commands. A client
    can subscribe to more than one channel at a time.
    API:
    publish(channel_name, message)
        uses broker's PUB API.
    subscribe(channel_name)
        uses broker's SUB API.
    exiter()
        uses broker's EXIT API.
    set_callback(function)
        function will be triggered with the message, ie. function(message),
        when a message is received from a subscribed channel.
    """
    def __init__(self, host, port):
        """
        Connect to the broker at host:port and start a background thread
        that listens for incoming messages.
        """
        self.host = host
        self.port = port
        self.callback = None
        self.sock = socket.socket()
        self.sock.connect((host, port))
        # Start the listener only after the socket is connected; it feeds
        # every received payload into __message_received_callback.
        thread.start_new_thread(SocketClient.clientthread,(self.sock, self.__message_received_callback))
    @staticmethod
    def clientthread(conn, callback):
        """
        Listen for incoming messages until the connection ends.

        Calls callback(data) for each payload received. Returns cleanly
        when the server closes the connection (recv yields an empty
        payload); raises RuntimeError if the connection breaks abruptly
        or the callback fails. The socket is always closed on exit.
        """
        try:
            while True:
                data = conn.recv(1024)
                if not data:
                    # Orderly close by the peer; the previous version
                    # looped forever here on empty payloads.
                    break
                callback(data)
        except Exception:
            # Narrower than the old bare `except:`; same error surfaced.
            raise RuntimeError("Server crashed")
        finally:
            # Previously placed after `while True` and unreachable.
            conn.close()
    def __message_received_callback(self, msg):
        """
        Trigger the user callback with *msg*, if one is set.
        """
        if self.callback:
            self.callback(msg)
    def __send(self, data):
        """
        Send *data* to the broker, then sleep briefly so the stream
        socket does not glue consecutive messages into one recv().
        """
        self.sock.send(data)
        time.sleep(0.01)
    def set_callback(self, fn):
        """
        Register fn(message) to be invoked for every incoming message.
        """
        self.callback = fn
    def publish(self, channel, msg):
        """
        Publish *msg* on *channel* (broker PUB command).
        """
        send_data = "PUB %s %s"%(channel, msg)
        self.__send(send_data)
    def subscribe(self, channel):
        """
        Subscribe this client to *channel* (broker SUB command).
        """
        send_data = "SUB %s"%(channel)
        self.__send(send_data)
    def exiter(self):
        """
        Ask the broker to close this client's connection (EXIT command).
        """
        send_data = "EXIT "
        self.__send(send_data)
class Publisher:
    """
    =================
    Pub Sub Publisher
    =================
    Thin wrapper around SocketClient for publish-only use. A Publisher is
    bound to a single channel at construction time; every message sent
    through it is published on that channel.
    API:
    send(message)
        publishes message on the channel.
    stop()
        stop connection.
    """
    def __init__(self, channel, host = "localhost", port = 52000):
        """Connect to the broker at host:port and bind to *channel*."""
        self.channel = channel
        self.socket_client = SocketClient(host, port)
    def send(self, message):
        """Publish *message* on the bound channel."""
        self.socket_client.publish(self.channel, message)
    def stop(self):
        """Close the connection to the broker."""
        self.socket_client.exiter()
class Subscriber:
    """
    =================
    Pub Sub Subscriber
    =================
    Wrapper over the generic client for subscriber-specific use. Bound to
    one channel at construction. If a callback is set, every incoming
    message triggers it; otherwise messages are queued and recv() pops
    them one at a time (oldest first). Use recv() and set_callback()
    exclusively of one another.
    API:
    recv()
        Return the oldest queued message, or None if the queue is empty
        (always None while a callback is set).
    set_callback(function)
        triggers `function(message)` for every incoming message.
    stop()
        disconnect and stop receiving messages.
    """
    def __init__(self, channel, host = "localhost", port = 52000):
        """Connect to the broker, register the receive hook, subscribe."""
        # Initialize local state *before* wiring up the network client:
        # the client's listener thread can deliver a message as soon as
        # we subscribe, and __on_recv needs these attributes to exist.
        # (The original set them last, racing with the listener.)
        self.callback = None
        self.channel = channel
        self.message_queue = []
        self.socket_client = SocketClient(host, port)
        self.socket_client.set_callback(self.__on_recv)
        self.socket_client.subscribe(channel)
    def __on_recv(self, message):
        """Dispatch to the user callback, or queue when none is set."""
        if self.callback:
            self.callback(message)
        else:
            self.message_queue.append(message)
    def set_callback(self, fn):
        """Register fn(message) to be invoked for each incoming message."""
        self.callback = fn
    def recv(self):
        """Pop and return the oldest queued message, or None if empty."""
        # pop message queue (O(1) amortized pop vs the old O(n) re-slice)
        if self.message_queue:
            return self.message_queue.pop(0)
        return None
    def stop(self):
        """Stop delivering messages and close the broker connection."""
        self.callback = None
        self.socket_client.exiter()
| 920 | 0 | 214 |
48b699e52c0a91716bd0a163b91e68d21ba15e33 | 1,057 | py | Python | mindspore/nn/acc/__init__.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | 1 | 2021-07-03T06:52:20.000Z | 2021-07-03T06:52:20.000Z | mindspore/nn/acc/__init__.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | mindspore/nn/acc/__init__.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Accelerating.
Provide auto accelerating for network, such as Less BN, Gradient Freeze.
"""
from .acc import *
from .base import *
from .less_batch_normalization import *
from .grad_freeze import *
__all__ = ['AutoAcc',
'OptimizerProcess', 'ParameterProcess',
'LessBN',
'GradientFreeze', 'FreezeOpt', 'freeze_cell',
'GradientAccumulation']
| 34.096774 | 78 | 0.672658 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Accelerating.
Provide auto accelerating for network, such as Less BN, Gradient Freeze.
"""
from .acc import *
from .base import *
from .less_batch_normalization import *
from .grad_freeze import *
__all__ = ['AutoAcc',
'OptimizerProcess', 'ParameterProcess',
'LessBN',
'GradientFreeze', 'FreezeOpt', 'freeze_cell',
'GradientAccumulation']
| 0 | 0 | 0 |
aaaa551b83491d58e0c7f64eb9b325fde232a0f9 | 1,305 | py | Python | frappe/website/doctype/blog_post/blog_post.py | cadencewatches/frappe | d9dcf132a10d68b2dcc80ef348e6d967f1e44084 | [
"MIT"
] | null | null | null | frappe/website/doctype/blog_post/blog_post.py | cadencewatches/frappe | d9dcf132a10d68b2dcc80ef348e6d967f1e44084 | [
"MIT"
] | null | null | null | frappe/website/doctype/blog_post/blog_post.py | cadencewatches/frappe | d9dcf132a10d68b2dcc80ef348e6d967f1e44084 | [
"MIT"
] | 1 | 2018-03-21T15:51:46.000Z | 2018-03-21T15:51:46.000Z | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re
from frappe.website.website_generator import WebsiteGenerator
from frappe.website.render import clear_cache
from frappe import _
from frappe.utils import today
| 26.632653 | 84 | 0.729502 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re
from frappe.website.website_generator import WebsiteGenerator
from frappe.website.render import clear_cache
from frappe import _
from frappe.utils import today
class BlogPost(WebsiteGenerator):
	"""Website generator for blog posts: derives the intro, publish date
	and website route, and keeps the owning blogger's post count fresh."""
	save_versions = True
	def get_page_title(self):
		"""Return the title used for the rendered page."""
		return self.title
	def validate(self):
		"""Derive intro / publish date / route and refresh post counts."""
		if not self.blog_intro:
			self.blog_intro = self.content[:140]
		# Strip HTML tags from the intro. The original discarded the
		# result of re.sub(), leaving the markup in blog_intro.
		self.blog_intro = re.sub(r"\<[^>]*\>", "", self.blog_intro)
		if self.blog_intro:
			self.blog_intro = self.blog_intro[:140]
		if self.published and not self.published_on:
			self.published_on = today()
		self.parent_website_route = frappe.db.get_value("Website Route",
			{"ref_doctype": "Blog Category", "docname": self.blog_category})
		# update posts
		frappe.db.sql("""update tabBlogger set posts=(select count(*) from `tabBlog Post`
			where ifnull(blogger,'')=tabBlogger.name)
			where name=%s""", (self.blogger,))
	def on_update(self):
		"""Run the standard update hooks and invalidate the writers page."""
		WebsiteGenerator.on_update(self)
		clear_cache("writers")
def clear_blog_cache():
	"""Invalidate cached pages for every published post and the writers page."""
	published_posts = frappe.db.sql_list("""select page_name from
		`tabBlog Post` where ifnull(published,0)=1""")
	for page_name in published_posts:
		clear_cache(page_name)
	clear_cache("writers")
| 819 | 116 | 46 |
e0f3ecead8a60aadcb3cae7198e8edd8d55eb835 | 5,324 | py | Python | mods/Siege.py | VinMannie/BombSquad-Community-Mod-Manager | d80d8bfe5c9bae422990df0df78e6098f379b27d | [
"Unlicense"
] | 3 | 2018-12-31T01:34:57.000Z | 2020-08-12T18:50:40.000Z | mods/Siege.py | EternalARK/BombSquad-Community-Mod-Manager | aa9318217a5bd86d2b897208536a8caf69fda939 | [
"Unlicense"
] | null | null | null | mods/Siege.py | EternalARK/BombSquad-Community-Mod-Manager | aa9318217a5bd86d2b897208536a8caf69fda939 | [
"Unlicense"
] | 2 | 2021-02-03T06:43:01.000Z | 2021-05-09T09:23:34.000Z | #Siege
import bs
import bsUtils
import random
| 43.284553 | 148 | 0.498873 | #Siege
import bs
import bsUtils
import random
def bsGetAPIVersion():
    """Return the BombSquad mod-API version this mod targets."""
    return 4
def bsGetGames():
    """Expose the game types provided by this mod."""
    return [Siege]
class SiegePowerupFactory(bs.PowerupFactory):
    """Powerup factory that filters out powerup types unsuited to Siege."""
    def getRandomPowerupType(self,forceType=None,excludeTypes=None):
        """
        Return a random powerup type not in excludeTypes.

        The original used a mutable list literal as the default for
        excludeTypes; the default is now built per call (same values,
        backward-compatible for callers passing their own list).
        forceType is accepted for base-class signature compatibility but,
        as in the original, is not honored here.
        """
        if excludeTypes is None:
            excludeTypes = ['tripleBombs','iceBombs','impactBombs','shield','health','curse','snoball','bunny']
        # Rejection-sample until we draw a permitted type. Note: loops
        # forever if every type in _powerupDist is excluded (unchanged).
        while True:
            t = self._powerupDist[random.randint(0,len(self._powerupDist)-1)]
            if t not in excludeTypes:
                break
        self._lastPowerupType = t
        return t
class Puck(bs.Actor): # Borrowed from the hockey game
    def __init__(self, position=(0,1,0)):
        """Spawn a physics puck one unit above *position*."""
        bs.Actor.__init__(self)
        # On-screen credit text pinned to the bottom of the HUD.
        self.info = bs.NodeActor(bs.newNode('text',
                                            attrs={'vAttach': 'bottom',
                                                   'hAlign': 'center',
                                                   'vrDepth': 0,
                                                   'color': (0,.2,0),
                                                   'shadow': 1.0,
                                                   'flatness': 1.0,
                                                   'position': (0,0),
                                                   'scale': 0.8,
                                                   'text': "Created by MattZ45986 on Github",
                                                   }))
        activity = self.getActivity()
        # Lift spawn point by 1 unit so the puck drops onto the map.
        self._spawnPos = (position[0], position[1]+1.0, position[2])
        self.lastPlayersToTouch = {}
        # The physical prop; uses the activity's puck material so the
        # collision behavior configured in Siege.__init__ applies.
        self.node = bs.newNode("prop",
                               attrs={'model': bs.getModel('puck'),
                                      'colorTexture': bs.getTexture('puckColor'),
                                      'body':'puck',
                                      'reflection':'soft',
                                      'reflectionScale':[0.2],
                                      'shadowSize': 1.0,
                                      'gravityScale':2.5,
                                      'isAreaOfInterest':True,
                                      'position':self._spawnPos,
                                      'materials': [bs.getSharedObject('objectMaterial'),activity._puckMaterial]
                                      },
                               delegate=self)
class Siege(bs.TeamGameActivity):
    """Capture-the-flag variant: grab the flag from a castle of pucks."""
    @classmethod
    def getName(cls):
        return "Siege"
    @classmethod
    def getDescription(cls, sessionType):
        return "Get the flag from the castle!"
    @classmethod
    def getScoreInfo(cls):
        return{'scoreType':'points'}
    @classmethod
    def getSupportedMaps(cls, sessionType):
        return ['Football Stadium']
    @classmethod
    def supportsSessionType(cls, sessionType):
        # Playable in both free-for-all and teams sessions.
        return True if issubclass(sessionType, bs.FreeForAllSession) or issubclass(sessionType, bs.TeamsSession) else False
    def __init__(self,settings):
        bs.TeamGameActivity.__init__(self,settings)
        # High-friction material for the castle pucks; ignores pickup
        # collisions and briefly ignores objects right after spawn.
        self._puckMaterial = bs.Material()
        self._puckMaterial.addActions(actions=( ("modifyPartCollision","friction",100000)))
        self._puckMaterial.addActions(conditions=("theyHaveMaterial",bs.getSharedObject('pickupMaterial')),
                                      actions=( ("modifyPartCollision","collide",False)))
        self._puckMaterial.addActions(conditions=( ("weAreYoungerThan",100),'and',
                                                   ("theyHaveMaterial",bs.getSharedObject('objectMaterial')) ),
                                      actions=( ("modifyNodeCollision","collide",False)))
        self.pucks = []
        # The objective flag, placed inside the puck castle.
        self.flag = bs.Flag(color=(1,1,1),
                            position=(0,1,-2),
                            touchable=True)
    def onTransitionIn(self):
        bs.TeamGameActivity.onTransitionIn(self,music='FlagCatcher')
    def _standardDropPowerup(self,index,expire=True):
        # Override the stock drop so Siege's filtered powerup set is used.
        import bsPowerup
        bsPowerup.Powerup(position=self.getMap().powerupSpawnPoints[index],
                          powerupType=SiegePowerupFactory().getRandomPowerupType(),expire=expire).autoRetain()
    def onBegin(self):
        bs.TeamGameActivity.onBegin(self)
        self.setupStandardPowerupDrops(True)
        # Build the castle: stacked rings of pucks around the flag.
        for j in range(0,12,3):
            for i in range(-6,4,3):
                self.pucks.append(Puck((3,j/4.0,i/2.0)))
                self.pucks.append(Puck((-3,j/4.0,i/2.0)))
            for i in range(-3,4,2):
                self.pucks.append(Puck((i/2.0,j/4.0,-3)))
                self.pucks.append(Puck((i/2.0,j/4.0,1.75)))
    def handleMessage(self,m):
        # Picking up the flag wins immediately; deaths just respawn.
        if isinstance(m,bs.FlagPickedUpMessage):
            winner = m.node.getDelegate()
            self.endGame(winner)
        elif isinstance(m,bs.PlayerSpazDeathMessage):
            self.respawnPlayer(m.spaz.getPlayer())
        else: bs.TeamGameActivity.handleMessage(self,m)
    def endGame(self, winner):
        results = bs.TeamGameResults()
        for team in self.teams:
            # Winner's team gets 50 points, every other team 0.
            if winner.getPlayer() in team.players: score = 50
            else: score = 0
            results.setTeamScore(team,score)
        self.end(results=results,announceDelay=10)
aef428720eadf953beb4860b66d1592c2ce744ca | 881 | py | Python | core_sparton/src/myserial.py | wher0001/sparton_AHRSM2 | 908d908440c95dd3bd1a5eb4b9ea383561e9227a | [
"MIT"
] | null | null | null | core_sparton/src/myserial.py | wher0001/sparton_AHRSM2 | 908d908440c95dd3bd1a5eb4b9ea383561e9227a | [
"MIT"
] | null | null | null | core_sparton/src/myserial.py | wher0001/sparton_AHRSM2 | 908d908440c95dd3bd1a5eb4b9ea383561e9227a | [
"MIT"
] | null | null | null | import serial | 24.472222 | 48 | 0.561862 | import serial
class SimpleSerialWrapper:
    """Minimal convenience facade over a pyserial ``serial.Serial`` port."""
    def __init__(self):
        """Create an unopened port; configure it with setParams() first."""
        self.ser = serial.Serial()
    def portOpen(self):
        """Open the configured serial device."""
        self.ser.open()
    def flushData(self):
        """Flush pending data on the port."""
        self.ser.flush()
    def sendData(self, data):
        """Encode *data* (str) and write the bytes to the port."""
        self.ser.write(data.encode())
    def getData(self, read_size):
        """Read *read_size* bytes, or one full line when read_size == 0."""
        if read_size == 0:
            return self.ser.readline()
        return self.ser.read(read_size)
    def setParams(self, device, baud, time_out):
        """Configure device path, baud rate and read timeout."""
        self.ser.port = device
        self.ser.baudrate = baud
        self.ser.timeout = time_out
    def getBaud(self):
        """Return the configured baud rate."""
        return self.ser.baudrate
    def getDevice(self):
        """Return the configured device path."""
        return self.ser.port
    def getTimeout(self):
        """Return the configured read timeout."""
        return self.ser.timeout
9fe281f9ecd6772cbc5eb0070fcf1e095c005f5f | 27,086 | py | Python | jaqs/research/signaldigger/plotting.py | WestXu/JAQS | 3c9389afab518f188b8628af72297d750c07dfb1 | [
"Apache-2.0"
] | 602 | 2017-11-21T00:39:40.000Z | 2022-03-16T06:13:08.000Z | jaqs/research/signaldigger/plotting.py | WestXu/JAQS | 3c9389afab518f188b8628af72297d750c07dfb1 | [
"Apache-2.0"
] | 63 | 2017-12-08T08:21:16.000Z | 2020-03-07T13:57:35.000Z | jaqs/research/signaldigger/plotting.py | WestXu/JAQS | 3c9389afab518f188b8628af72297d750c07dfb1 | [
"Apache-2.0"
] | 365 | 2017-11-21T01:38:36.000Z | 2022-03-30T15:55:30.000Z | # encoding: utf-8
from __future__ import print_function
from functools import wraps
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
import seaborn as sns
from . import performance as pfm
import jaqs.util as jutil
# Factors for converting decimal returns into display units.
DECIMAL_TO_BPS = 10000
DECIMAL_TO_PCT = 100
COLOR_MAP = cm.get_cmap('rainbow')  # cm.get_cmap('RdBu')
# Module-wide matplotlib style, applied globally at import time below.
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
                'axes.facecolor': '#F6F6F6',
                'axes.edgecolor': '#D3D3D3',
                'text.color': '#555555',
                'grid.color': '#B1B1B1',
                'grid.alpha': 0.3,
                # scale
                'axes.linewidth': 2.0,
                'axes.titlepad': 12,
                'grid.linewidth': 1.0,
                'grid.linestyle': '-',
                # font size
                'font.size': 13,
                'axes.titlesize': 18,
                'axes.labelsize': 14,
                'legend.fontsize': 'small',
                'lines.linewidth': 2.5,
                }
mpl.rcParams.update(MPL_RCPARAMS)
# -----------------------------------------------------------------------------------
# plotting settings
def customize(func):
    """
    Decorator to set plotting context and axes style during function call.

    The wrapped function gains a ``set_context`` keyword (default True);
    pass ``set_context=False`` to call it without touching global style.
    """
    # The inner wrapper had been stripped (leaving `@wraps(func)` dangling
    # over a bare return, a syntax error); restored here.
    @wraps(func)
    def call_w_context(*args, **kwargs):
        set_context = kwargs.pop('set_context', True)
        if set_context:
            with plotting_context(), axes_style():
                sns.despine(left=True)
                return func(*args, **kwargs)
        else:
            return func(*args, **kwargs)
    return call_w_context
def plotting_context(context='notebook', font_scale=1.5, rc=None):
    """
    Return the signaldigger default seaborn plotting context.

    Thin wrapper over ``seaborn.plotting_context`` that fills in this
    package's default rc flags; normally used as a with-context.

    Parameters
    ----------
    context : str, optional
        Name of seaborn context.
    font_scale : float, optional
        Scale factor applied to fonts.
    rc : dict, optional
        Config flags. ``{'lines.linewidth': 1.5}`` is added unless the
        caller explicitly overrides it (the caller's dict is updated in
        place, as before).

    Returns
    -------
    seaborn plotting context
    """
    if rc is None:
        rc = {}
    # Fill in the single package default without clobbering caller flags.
    rc.setdefault('lines.linewidth', 1.5)
    return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style='darkgrid', rc=None):
    """
    Return the signaldigger default seaborn axes style.

    Thin wrapper over ``seaborn.axes_style``; normally used as a
    with-context.

    Parameters
    ----------
    style : str, optional
        Name of seaborn style.
    rc : dict, optional
        Config flags, passed through unchanged.

    Returns
    -------
    seaborn plotting context
    """
    if rc is None:
        rc = {}
    # The original merged an empty default dict here; nothing to add.
    return sns.axes_style(style=style, rc=rc)
# -----------------------------------------------------------------------------------
# Functions to Plot Tables
def plot_table(table, name=None, fmt=None):
    """
    Pretty-print a pandas Series or DataFrame.

    Parameters
    ----------
    table : pd.Series or pd.DataFrame
        Table to print; a Series is promoted to a one-column DataFrame.
    name : str, optional
        Label shown in the upper-left corner (columns name).
    fmt : str, optional
        Float formatter, e.g. '{0:.2f}%'. The previous pandas
        'display.float_format' option is restored afterwards.
    """
    if isinstance(table, pd.Series):
        table = pd.DataFrame(table)
    if isinstance(table, pd.DataFrame):
        table.columns.name = name
    saved_fmt = pd.get_option('display.float_format')
    use_custom_fmt = fmt is not None
    if use_custom_fmt:
        pd.set_option('display.float_format', lambda x: fmt.format(x))
    print(table)
    if use_custom_fmt:
        pd.set_option('display.float_format', saved_fmt)
# -----------------------------------------------------------------------------------
# Functions to Plot Returns
'''
def plot_quantile_returns_bar(mean_ret_by_q,
# ylim_percentiles=None,
ax=None):
"""
Plots mean period wise returns for signal quantiles.
Parameters
----------
mean_ret_by_q : pd.DataFrame
DataFrame with quantile, (group) and mean period wise return values.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
mean_ret_by_q = mean_ret_by_q.copy().loc[:, ['mean']]
ymin = None
ymax = None
if ax is None:
f, ax = plt.subplots(1, 1, figsize=(18, 6))
mean_ret_by_q.multiply(DECIMAL_TO_BPS) \
.plot(kind='bar',
title="Mean Return (on symbol, time) By signal Quantile", ax=ax)
ax.set(xlabel='Quantile', ylabel='Mean Return (bps)',
ylim=(ymin, ymax))
return ax
'''
def plot_quantile_returns_ts(mean_ret_by_q, ax=None):
    """
    Plot the daily mean return of each signal quantile as a time series.

    Parameters
    ----------
    mean_ret_by_q : mapping of quantile -> DataFrame
        Each value must expose a 'mean' column of period-wise returns,
        indexed by integer dates (%Y%m%d); .items() is required.
    ax : matplotlib.Axes, optional
        Axes upon which to plot; a new figure is created when None.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    # One column per quantile, on a shared datetime index, in percent.
    ret_wide = pd.concat({k: v['mean'] for k, v in mean_ret_by_q.items()}, axis=1)
    ret_wide.index = pd.to_datetime(ret_wide.index, format="%Y%m%d")
    ret_wide = ret_wide.mul(DECIMAL_TO_PCT)
    # ret_wide = ret_wide.rolling(window=22).mean()
    ret_wide.plot(lw=1.2, ax=ax, cmap=COLOR_MAP)
    # (removed a dead `df = pd.DataFrame()` that was never used)
    ax.legend(loc='upper left')
    ymin, ymax = ret_wide.min().min(), ret_wide.max().max()
    ax.set(ylabel='Return (%)',
           title="Daily Quantile Return (equal weight within quantile)",
           xlabel='Date',
           # yscale='symlog',
           # yticks=np.linspace(ymin, ymax, 5),
           ylim=(ymin, ymax))
    ax.yaxis.set_major_formatter(ScalarFormatter())
    ax.axhline(1.0, linestyle='-', color='black', lw=1)
    return ax
def plot_mean_quantile_returns_spread_time_series(mean_returns_spread, period,
                                                  std_err=None,
                                                  bandwidth=1,
                                                  ax=None):
    """
    Plot the daily top-minus-bottom quantile return spread together with
    its one-month moving average.

    Parameters
    ----------
    mean_returns_spread : pd.DataFrame
        Must contain 'mean_diff' and 'std' columns, indexed by integer
        dates (%Y%m%d). Note: its index is converted in place.
    period : int
        Holding period; kept for interface compatibility (the title no
        longer embeds it, matching the original's no-op .format()).
    std_err : pd.Series, optional
        Unused; the 'std' column of mean_returns_spread is used instead.
    bandwidth : float
        Width of the (currently not drawn) error band in std deviations.
    ax : matplotlib.Axes, optional
        Axes upon which to plot; a new figure is created when None.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    # Removed a dead `if False:` branch (recursive DataFrame handling)
    # that could never execute, and the no-op .format() on the title.
    title = 'Top Minus Bottom Quantile Return'
    if ax is None:
        f, ax = plt.subplots(figsize=(18, 6))
    mean_returns_spread.index = pd.to_datetime(mean_returns_spread.index, format="%Y%m%d")
    mean_returns_spread_bps = mean_returns_spread['mean_diff'] * DECIMAL_TO_PCT
    std_err_bps = mean_returns_spread['std'] * DECIMAL_TO_PCT
    # upper/lower feed the (currently disabled) error band below.
    upper = mean_returns_spread_bps.values + (std_err_bps * bandwidth)
    lower = mean_returns_spread_bps.values - (std_err_bps * bandwidth)
    mean_returns_spread_bps.plot(alpha=0.4, ax=ax, lw=0.7, color='navy')
    mean_returns_spread_bps.rolling(22).mean().plot(color='green',
                                                    alpha=0.7,
                                                    ax=ax)
    # ax.fill_between(mean_returns_spread.index, lower, upper,
    #                 alpha=0.3, color='indianred')
    ax.axhline(0.0, linestyle='-', color='black', lw=1, alpha=0.8)
    ax.legend(['mean returns spread', '1 month moving avg'], loc='upper right')
    ylim = np.nanpercentile(abs(mean_returns_spread_bps.values), 95)
    ax.set(ylabel='Difference In Quantile Mean Return (%)',
           xlabel='',
           title=title,
           ylim=(-ylim, ylim))
    return ax
def plot_cumulative_return(ret, ax=None, title=None):
    """
    Plots the cumulative returns of the returns series passed in.

    Parameters
    ----------
    ret : pd.Series
        Cumulative (not daily) returns indexed by integer dates (%Y%m%d);
        see the pass-through assignment below.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    title : str, optional
        Plot title; defaults to "Cumulative Return".

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    ret = ret.copy()
    # Input is already cumulative; the daily->cum conversion is disabled.
    cum = ret  # pfm.daily_ret_to_cum(ret)
    cum.index = pd.to_datetime(cum.index, format="%Y%m%d")
    cum = cum.mul(DECIMAL_TO_PCT)
    cum.plot(ax=ax, lw=3, color='indianred', alpha=1.0)
    ax.axhline(0.0, linestyle='-', color='black', lw=1)
    # Annotate with annualized return / volatility / Sharpe.
    metrics = pfm.calc_performance_metrics(cum, cum_return=True, compound=False)
    ax.text(.85, .30,
            "Ann.Ret. = {:.1f}%\nAnn.Vol. = {:.1f}%\nSharpe = {:.2f}".format(metrics['ann_ret'],
                                                                             metrics['ann_vol'],
                                                                             metrics['sharpe']),
            fontsize=12,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top')
    if title is None:
        title = "Cumulative Return"
    ax.set(ylabel='Cumulative Return (%)',
           title=title,
           xlabel='Date')
    return ax
def plot_cumulative_returns_by_quantile(quantile_ret, ax=None):
    """
    Plots the cumulative returns of various signal quantiles.

    Parameters
    ----------
    quantile_ret : pd.DataFrame
        Cumulative returns, one column per signal quantile, indexed by
        integer dates (%Y%m%d).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    # NOTE(review): no .copy() here -- the caller's DataFrame index is
    # modified in place by the next statement; confirm this is intended.
    cum_ret = quantile_ret
    cum_ret.index = pd.to_datetime(cum_ret.index, format="%Y%m%d")
    cum_ret = cum_ret.mul(DECIMAL_TO_PCT)
    cum_ret.plot(lw=2, ax=ax, cmap=COLOR_MAP)
    ax.axhline(0.0, linestyle='-', color='black', lw=1)
    ax.legend(loc='upper left')
    ymin, ymax = cum_ret.min().min(), cum_ret.max().max()
    ax.set(ylabel='Cumulative Returns (%)',
           title='Cumulative Return of Each Quantile (equal weight within quantile)',
           xlabel='Date',
           # yscale='symlog',
           # yticks=np.linspace(ymin, ymax, 5),
           ylim=(ymin, ymax))
    # Per-quantile Sharpe ratios, rendered inside the axes.
    sharpes = ["sharpe_{:d} = {:.2f}".format(col, pfm.calc_performance_metrics(ser, cum_return=True,
                                                                               compound=False)['sharpe'])
               for col, ser in cum_ret.iteritems()]
    ax.text(.02, .30,
            '\n'.join(sharpes),
            fontsize=12,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top')
    ax.yaxis.set_major_formatter(ScalarFormatter())
    return ax
# -----------------------------------------------------------------------------------
# Functions to Plot IC
def plot_ic_ts(ic, period, ax=None):
    """
    Plots Spearman Rank Information Coefficient and IC moving
    average for a given signal.

    Parameters
    ----------
    ic : pd.Series or pd.DataFrame
        IC per date; for a DataFrame only the first column is used.
    period : int
        Holding period (the title's .format() on it is a no-op).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    ic = ic.copy()
    if isinstance(ic, pd.DataFrame):
        ic = ic.iloc[:, 0]
    mean, std = ic.mean(), ic.std()
    if ax is None:
        num_plots = 1
        f, ax = plt.subplots(num_plots, 1, figsize=(18, num_plots * 7))
        # NOTE(review): this turns `ax` into a length-1 ndarray, which is
        # then passed to Series.plot / axhline below -- looks like it
        # should be flattened *and* the single element taken; confirm.
        ax = np.asarray([ax]).flatten()
    ic.plot(ax=ax, lw=0.6, color='navy', label='daily IC', alpha=0.8)
    ic.rolling(22).mean().plot(ax=ax, color='royalblue', lw=2, alpha=0.6, label='1 month MA')
    ax.axhline(0.0, linestyle='-', color='black', linewidth=1, alpha=0.8)
    ax.text(.05, .95,
            "Mean {:.3f} \n Std. {:.3f}".format(mean, std),
            fontsize=16,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top',
            )
    # ymin/ymax start as None and take the current axis limits.
    ymin, ymax = (None, None)
    curr_ymin, curr_ymax = ax.get_ylim()
    ymin = curr_ymin if ymin is None else min(ymin, curr_ymin)
    ymax = curr_ymax if ymax is None else max(ymax, curr_ymax)
    ax.legend(loc='upper right')
    ax.set(ylabel='IC', xlabel="", ylim=[ymin, ymax],
           title="Daily IC and Moving Average".format(period))
    return ax
def plot_ic_hist(ic, period, ax=None):
    """
    Plots Spearman Rank Information Coefficient histogram for a given signal.

    Parameters
    ----------
    ic : pd.Series or pd.DataFrame
        IC per date; for a DataFrame only the first column is used.
    period : int
        Holding period (unused in the rendered labels).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    ic = ic.copy()
    if isinstance(ic, pd.DataFrame):
        ic = ic.iloc[:, 0]
    mean, std = ic.mean(), ic.std()
    if ax is None:
        v_spaces = 1
        f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6))
        # NOTE(review): `ax` is an ndarray of 3 axes here and is passed
        # whole to sns.distplot below -- presumably ax[0] was intended;
        # confirm against callers that always supply ax.
        ax = ax.flatten()
    # NaNs are treated as zero IC for the histogram.
    sns.distplot(ic.replace(np.nan, 0.), ax=ax,
                 hist_kws={'color': 'royalblue'},
                 kde_kws={'color': 'navy', 'alpha': 0.5},
                 # hist_kws={'weights':},
                 )
    ax.axvline(mean, color='indianred', linestyle='dashed', linewidth=1.0, label='Mean')
    ax.text(.05, .95,
            "Mean {:.3f} \n Std. {:.3f}".format(mean, std),
            fontsize=16,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top')
    ax.set(title="Distribution of Daily IC",
           xlabel='IC',
           xlim=[-1, 1])
    ax.legend(loc='upper right')
    return ax
def plot_monthly_ic_heatmap(mean_monthly_ic, period, ax=None):
    """
    Plots a heatmap of the information coefficient or returns by month.

    Parameters
    ----------
    mean_monthly_ic : pd.DataFrame
        Mean monthly IC with an 'ic' column, indexed by month-end dates.
    period : int
        Holding period (unused in the rendered title).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    MONTH_MAP = {1: 'Jan',
                 2: 'Feb',
                 3: 'Mar',
                 4: 'Apr',
                 5: 'May',
                 6: 'Jun',
                 7: 'Jul',
                 8: 'Aug',
                 9: 'Sep',
                 10: 'Oct',
                 11: 'Nov',
                 12: 'Dec'}
    mean_monthly_ic = mean_monthly_ic.copy()
    # NOTE(review): num_plots is a float, so v_spaces is 1.0 and is passed
    # as a row count to plt.subplots below -- modern matplotlib requires
    # an int there; confirm whether the ax=None path is ever exercised.
    num_plots = 1.0
    v_spaces = ((num_plots - 1) // 3) + 1
    if ax is None:
        f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6))
        ax = ax.flatten()
    # Re-index to (year, month-abbreviation) so unstack() yields the grid.
    new_index_year = []
    new_index_month = []
    for date in mean_monthly_ic.index:
        new_index_year.append(date.year)
        new_index_month.append(MONTH_MAP[date.month])
    mean_monthly_ic.index = pd.MultiIndex.from_arrays(
        [new_index_year, new_index_month],
        names=["year", "month"])
    ic_year_month = mean_monthly_ic['ic'].unstack()
    sns.heatmap(
        ic_year_month,
        annot=True,
        alpha=1.0,
        center=0.0,
        annot_kws={"size": 7},
        linewidths=0.01,
        linecolor='white',
        cmap=cm.get_cmap('RdBu'),
        cbar=False,
        ax=ax)
    ax.set(ylabel='', xlabel='')
    ax.set_title("IC Monthly Mean".format(period))
    return ax
# -----------------------------------------------------------------------------------
# Functions to Plot Others
'''
def plot_event_dist_NEW(df_events, axs, grouper=None):
i = 0
def _plot(ser):
ax = axs[i]
sns.distplot(ser, ax=ax)
ax.axvline(ser.mean(), lw=1, ls='--', label='Average', color='red')
ax.legend(loc='upper left')
ax.set(xlabel='Return (%)', ylabel='',
title="Distribution of return after {:d} trade dats".format(period))
if grouper is None:
for (date, period), row in df_events.iterrows():
ax = axs[i]
sns.distplot(ser, ax=ax)
ax.axvline(ser.mean(), lw=1, ls='--', label='Average', color='red')
ax.legend(loc='upper left')
ax.set(xlabel='Return (%)', ylabel='',
title="Distribution of return after {:d} trade dats".format(period))
# self.show_fig(fig, 'event_return_{:d}days.png'.format(my_period))
i += 1
# print(mean)
'''
def plot_batch_backtest(df, ax):
    """
    Plot cumulative-return curves for a batch of backtests, one column
    per buy condition.

    Parameters
    ----------
    df : pd.DataFrame
        Cumulative returns (decimal), one column per condition, indexed
        by integer dates convertible by jutil.convert_int_to_datetime.
    ax : matplotlib.Axes
        Axes upon which to plot (required; no default figure is created).
    """
    df = df.copy()
    df.index = jutil.convert_int_to_datetime(df.index)
    df.mul(DECIMAL_TO_PCT).plot(# marker='x',
                                lw=1.2, ax=ax, cmap=COLOR_MAP)
    ax.axhline(0.0, color='k', ls='--', lw=0.7, alpha=.5)
    ax.set(xlabel="Date", ylabel="Cumulative Return (%)",
           title="Cumulative Return for Different Buy Condition", )
| 31.568765 | 107 | 0.555342 | # encoding: utf-8
from __future__ import print_function
from functools import wraps
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
import seaborn as sns
from . import performance as pfm
import jaqs.util as jutil
# Factors for converting decimal returns into display units.
DECIMAL_TO_BPS = 10000
DECIMAL_TO_PCT = 100
COLOR_MAP = cm.get_cmap('rainbow')  # cm.get_cmap('RdBu')
# Module-wide matplotlib style, applied globally at import time below.
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
                'axes.facecolor': '#F6F6F6',
                'axes.edgecolor': '#D3D3D3',
                'text.color': '#555555',
                'grid.color': '#B1B1B1',
                'grid.alpha': 0.3,
                # scale
                'axes.linewidth': 2.0,
                'axes.titlepad': 12,
                'grid.linewidth': 1.0,
                'grid.linestyle': '-',
                # font size
                'font.size': 13,
                'axes.titlesize': 18,
                'axes.labelsize': 14,
                'legend.fontsize': 'small',
                'lines.linewidth': 2.5,
                }
mpl.rcParams.update(MPL_RCPARAMS)
# -----------------------------------------------------------------------------------
# plotting settings
def customize(func):
    """
    Wrap *func* so it runs inside the package's default plotting context
    and axes style.

    The wrapper honours a ``set_context`` keyword (default True); pass
    ``set_context=False`` to call *func* without touching global style
    state.
    """
    @wraps(func)
    def call_w_context(*args, **kwargs):
        use_styles = kwargs.pop('set_context', True)
        if not use_styles:
            return func(*args, **kwargs)
        with plotting_context(), axes_style():
            sns.despine(left=True)
            return func(*args, **kwargs)
    return call_w_context
def plotting_context(context='notebook', font_scale=1.5, rc=None):
    """Return the module's default seaborn plotting context.

    Thin wrapper around ``seaborn.plotting_context`` that injects a
    default of ``{'lines.linewidth': 1.5}`` without overriding any
    key the caller already supplied. Typically used as a with-context.

    Parameters
    ----------
    context : str, optional
        Name of seaborn context.
    font_scale : float, optional
        Scale factor applied to fonts.
    rc : dict, optional
        Config flags; defaults are merged in via ``setdefault`` (the
        caller's dict is updated in place, matching the original API).

    Returns
    -------
    seaborn plotting context

    See also
    --------
    seaborn.plotting_context
    """
    if rc is None:
        rc = {}
    for key, value in {'lines.linewidth': 1.5}.items():
        # setdefault keeps any explicit caller override.
        rc.setdefault(key, value)
    return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style='darkgrid', rc=None):
    """Return the module's default seaborn axes style.

    Thin wrapper around ``seaborn.axes_style``; currently adds no extra
    defaults (the default-merging hook is kept for symmetry with
    ``plotting_context``). Usually used as a with-context.

    Parameters
    ----------
    style : str, optional
        Name of seaborn style.
    rc : dict, optional
        Config flags.

    Returns
    -------
    seaborn plotting context

    See also
    --------
    seaborn.axes_style
    """
    rc = {} if rc is None else rc
    # No module defaults at present; caller-supplied rc passes through as-is.
    return sns.axes_style(style=style, rc=rc)
class GridFigure(object):
    """Helper that hands out subplot axes on a shared figure grid.

    The gridspec is allocated with twice as many rows as requested so
    that ``next_subrow`` can return half-height axes while ``next_row``
    and ``next_cell`` consume two grid rows (one logical row) at a time.
    """
    def __init__(self, rows, cols, height_ratio=1.0):
        # rows/cols are logical counts; each logical row spans two
        # gridspec rows (hence rows * 2).
        self.rows = rows * 2
        self.cols = cols
        self.fig = plt.figure(figsize=(14, rows * 7 * height_ratio))
        self.gs = gridspec.GridSpec(self.rows, self.cols, wspace=0.1, hspace=0.5)
        # Cursor into the grid: current gridspec row/column.
        self.curr_row = 0
        self.curr_col = 0
        # True while cells from next_cell() are partially filling a row.
        self._in_row = False
    def next_row(self):
        """Return a full-width, full-height axes on the next logical row."""
        if self._in_row:
            # Abandon a partially filled cell row and start fresh.
            self.curr_row += 2
            self.curr_col = 0
            self._in_row = False
        subplt = plt.subplot(self.gs[self.curr_row: self.curr_row + 2, :])
        self.curr_row += 2
        return subplt
    def next_subrow(self):
        """Return a full-width, half-height axes (one gridspec row)."""
        if self._in_row:
            # Abandon a partially filled cell row and start fresh.
            self.curr_row += 2
            self.curr_col = 0
            self._in_row = False
        subplt = plt.subplot(self.gs[self.curr_row, :])
        self.curr_row += 1
        return subplt
    def next_cell(self):
        """Return the next single-column, full-height cell, wrapping to a
        new logical row after ``cols`` cells."""
        subplt = plt.subplot(self.gs[self.curr_row: self.curr_row + 2, self.curr_col])
        self.curr_col += 1
        self._in_row = True
        if self.curr_col >= self.cols:
            self.curr_row += 2
            self.curr_col = 0
            self._in_row = False
        return subplt
# -----------------------------------------------------------------------------------
# Functions to Plot Tables
def plot_table(table, name=None, fmt=None):
    """
    Pretty print a pandas Series or DataFrame as formatted text.

    Parameters
    ----------
    table : pd.Series or pd.DataFrame
        Table to pretty-print. A Series is promoted to a one-column
        DataFrame first.
    name : str, optional
        Table name to display in upper left corner (set as the
        columns' name).
    fmt : str, optional
        Formatter to use for displaying table elements.
        E.g. '{0:.2f}%' for displaying 100 as '100.00%'.
        The caller's 'display.float_format' option is restored
        afterwards, even if printing raises.
    """
    if isinstance(table, pd.Series):
        table = pd.DataFrame(table)
    if isinstance(table, pd.DataFrame):
        table.columns.name = name
    prev_option = pd.get_option('display.float_format')
    if fmt is not None:
        pd.set_option('display.float_format', lambda x: fmt.format(x))
    try:
        print(table)
    finally:
        # Restore the global pandas option even on error so a failed
        # print cannot leave the process with a surprise float format.
        if fmt is not None:
            pd.set_option('display.float_format', prev_option)
def plot_information_table(ic_summary_table):
    """Print the IC summary statistics, rounded to 3 decimals and
    transposed so each statistic becomes a row."""
    print("Information Analysis")
    plot_table(ic_summary_table.apply(lambda x: x.round(3)).T)
def plot_quantile_statistics_table(tb):
    """Print summary statistics of signal values for each quantile."""
    print("\n\nValue of signals of Different Quantiles Statistics")
    plot_table(tb)
# -----------------------------------------------------------------------------------
# Functions to Plot Returns
'''
def plot_quantile_returns_bar(mean_ret_by_q,
# ylim_percentiles=None,
ax=None):
"""
Plots mean period wise returns for signal quantiles.
Parameters
----------
mean_ret_by_q : pd.DataFrame
DataFrame with quantile, (group) and mean period wise return values.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
mean_ret_by_q = mean_ret_by_q.copy().loc[:, ['mean']]
ymin = None
ymax = None
if ax is None:
f, ax = plt.subplots(1, 1, figsize=(18, 6))
mean_ret_by_q.multiply(DECIMAL_TO_BPS) \
.plot(kind='bar',
title="Mean Return (on symbol, time) By signal Quantile", ax=ax)
ax.set(xlabel='Quantile', ylabel='Mean Return (bps)',
ylim=(ymin, ymax))
return ax
'''
def plot_quantile_returns_ts(mean_ret_by_q, ax=None):
    """
    Plots mean period wise returns for signal quantiles.

    Parameters
    ----------
    mean_ret_by_q : dict of pd.DataFrame
        Mapping of quantile label -> DataFrame containing a 'mean'
        column of period-wise returns, indexed by integer date (YYYYMMDD).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    # Build a wide frame: one column per quantile holding its mean return.
    ret_wide = pd.concat({k: v['mean'] for k, v in mean_ret_by_q.items()}, axis=1)
    ret_wide.index = pd.to_datetime(ret_wide.index, format="%Y%m%d")
    ret_wide = ret_wide.mul(DECIMAL_TO_PCT)
    ret_wide.plot(lw=1.2, ax=ax, cmap=COLOR_MAP)
    ax.legend(loc='upper left')
    ymin, ymax = ret_wide.min().min(), ret_wide.max().max()
    ax.set(ylabel='Return (%)',
           title="Daily Quantile Return (equal weight within quantile)",
           xlabel='Date',
           ylim=(ymin, ymax))
    ax.yaxis.set_major_formatter(ScalarFormatter())
    # NOTE(review): reference line at 1.0 although the values plotted are
    # percent returns centred near 0 -- confirm whether 0.0 was intended.
    ax.axhline(1.0, linestyle='-', color='black', lw=1)
    return ax
def plot_mean_quantile_returns_spread_time_series(mean_returns_spread, period,
                                                  std_err=None,
                                                  bandwidth=1,
                                                  ax=None):
    """
    Plots the top-minus-bottom quantile return spread over time.

    Parameters
    ----------
    mean_returns_spread : pd.DataFrame
        Indexed by integer date (YYYYMMDD); must contain 'mean_diff'
        and 'std' columns. NOTE: the index is converted to datetimes
        in place.
    period : int
        Holding period length in trade days. Currently not rendered in
        the title; kept for interface compatibility.
    std_err : pd.Series, optional
        Unused; retained for backward compatibility with callers.
    bandwidth : float
        Width of error bands in standard deviations. Only used by the
        (currently disabled) fill_between shading.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    # A dead `if False:` recursion branch (with its own undefined-name
    # bugs) was removed here; it could never execute.
    title = ('Top Minus Bottom Quantile Return')
    if ax is None:
        f, ax = plt.subplots(figsize=(18, 6))
    mean_returns_spread.index = pd.to_datetime(mean_returns_spread.index, format="%Y%m%d")
    mean_returns_spread_bps = mean_returns_spread['mean_diff'] * DECIMAL_TO_PCT
    # Raw daily spread (faint) plus a one-month moving average.
    mean_returns_spread_bps.plot(alpha=0.4, ax=ax, lw=0.7, color='navy')
    mean_returns_spread_bps.rolling(22).mean().plot(color='green',
                                                    alpha=0.7,
                                                    ax=ax)
    ax.axhline(0.0, linestyle='-', color='black', lw=1, alpha=0.8)
    ax.legend(['mean returns spread', '1 month moving avg'], loc='upper right')
    # Symmetric y-limits at the 95th percentile of |spread| to tame outliers.
    ylim = np.nanpercentile(abs(mean_returns_spread_bps.values), 95)
    ax.set(ylabel='Difference In Quantile Mean Return (%)',
           xlabel='',
           title=title,
           ylim=(-ylim, ylim))
    return ax
def plot_cumulative_return(ret, ax=None, title=None):
    """
    Plots a cumulative return series with performance annotations.

    Parameters
    ----------
    ret : pd.Series
        Return series indexed by integer date (YYYYMMDD).
        NOTE(review): despite the original doc calling this "period wise
        returns", the series is used directly as cumulative return and the
        metrics are computed with cum_return=True -- confirm callers pass
        already-cumulated data.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    title : str, optional
        Plot title; defaults to "Cumulative Return".

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    ret = ret.copy()
    cum = ret # pfm.daily_ret_to_cum(ret)  (cumulation intentionally skipped)
    cum.index = pd.to_datetime(cum.index, format="%Y%m%d")
    cum = cum.mul(DECIMAL_TO_PCT)
    cum.plot(ax=ax, lw=3, color='indianred', alpha=1.0)
    ax.axhline(0.0, linestyle='-', color='black', lw=1)
    # Annotate annualized return / volatility / Sharpe inside the axes.
    metrics = pfm.calc_performance_metrics(cum, cum_return=True, compound=False)
    ax.text(.85, .30,
            "Ann.Ret. = {:.1f}%\nAnn.Vol. = {:.1f}%\nSharpe = {:.2f}".format(metrics['ann_ret'],
                                                                             metrics['ann_vol'],
                                                                             metrics['sharpe']),
            fontsize=12,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top')
    if title is None:
        title = "Cumulative Return"
    ax.set(ylabel='Cumulative Return (%)',
           title=title,
           xlabel='Date')
    return ax
def plot_cumulative_returns_by_quantile(quantile_ret, ax=None):
    """
    Plots the cumulative returns of various signal quantiles.

    Parameters
    ----------
    quantile_ret : pd.DataFrame
        Cumulative returns by signal quantile; one column per quantile,
        indexed by integer date (YYYYMMDD).
        NOTE(review): the caller's DataFrame index is converted to
        datetimes *in place* (cum_ret aliases quantile_ret) -- confirm
        callers do not rely on the integer index afterwards.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
    """
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=(18, 6))
    cum_ret = quantile_ret
    cum_ret.index = pd.to_datetime(cum_ret.index, format="%Y%m%d")
    cum_ret = cum_ret.mul(DECIMAL_TO_PCT)
    cum_ret.plot(lw=2, ax=ax, cmap=COLOR_MAP)
    ax.axhline(0.0, linestyle='-', color='black', lw=1)
    ax.legend(loc='upper left')
    ymin, ymax = cum_ret.min().min(), cum_ret.max().max()
    ax.set(ylabel='Cumulative Returns (%)',
           title='Cumulative Return of Each Quantile (equal weight within quantile)',
           xlabel='Date',
           # yscale='symlog',
           # yticks=np.linspace(ymin, ymax, 5),
           ylim=(ymin, ymax))
    # One Sharpe annotation per quantile column.
    # NOTE: DataFrame.iteritems() was removed in pandas 2.0; switch to
    # items() when upgrading pandas.
    sharpes = ["sharpe_{:d} = {:.2f}".format(col, pfm.calc_performance_metrics(ser, cum_return=True,
                                                                               compound=False)['sharpe'])
               for col, ser in cum_ret.iteritems()]
    ax.text(.02, .30,
            '\n'.join(sharpes),
            fontsize=12,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top')
    ax.yaxis.set_major_formatter(ScalarFormatter())
    return ax
# -----------------------------------------------------------------------------------
# Functions to Plot IC
def plot_ic_ts(ic, period, ax=None):
    """
    Plots daily Information Coefficient and its one-month moving average.

    Parameters
    ----------
    ic : pd.Series or pd.DataFrame
        IC values indexed by date; a DataFrame is reduced to its first
        column.
    period : int
        Holding period length. Currently unused: the title's .format()
        call has no placeholder.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    ic = ic.copy()
    if isinstance(ic, pd.DataFrame):
        ic = ic.iloc[:, 0]
    mean, std = ic.mean(), ic.std()
    if ax is None:
        num_plots = 1
        f, ax = plt.subplots(num_plots, 1, figsize=(18, num_plots * 7))
        # NOTE(review): this leaves `ax` as a length-1 ndarray, so the
        # plotting calls below receive an array instead of an Axes --
        # confirm this ax=None path actually works with the pinned
        # pandas/matplotlib versions.
        ax = np.asarray([ax]).flatten()
    ic.plot(ax=ax, lw=0.6, color='navy', label='daily IC', alpha=0.8)
    ic.rolling(22).mean().plot(ax=ax, color='royalblue', lw=2, alpha=0.6, label='1 month MA')
    ax.axhline(0.0, linestyle='-', color='black', linewidth=1, alpha=0.8)
    ax.text(.05, .95,
            "Mean {:.3f} \n Std. {:.3f}".format(mean, std),
            fontsize=16,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top',
            )
    # Vestigial min/max folding from a former multi-axes loop; with a
    # single axes this just reads back its current y-limits.
    ymin, ymax = (None, None)
    curr_ymin, curr_ymax = ax.get_ylim()
    ymin = curr_ymin if ymin is None else min(ymin, curr_ymin)
    ymax = curr_ymax if ymax is None else max(ymax, curr_ymax)
    ax.legend(loc='upper right')
    ax.set(ylabel='IC', xlabel="", ylim=[ymin, ymax],
           title="Daily IC and Moving Average".format(period))
    return ax
def plot_ic_hist(ic, period, ax=None):
    """
    Plots the histogram/KDE of daily Information Coefficient values.

    Parameters
    ----------
    ic : pd.Series or pd.DataFrame
        IC values indexed by date; a DataFrame is reduced to its first
        column. NaNs are replaced with 0 before plotting.
    period : int
        Holding period length. Currently unused by this function.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    ic = ic.copy()
    if isinstance(ic, pd.DataFrame):
        ic = ic.iloc[:, 0]
    mean, std = ic.mean(), ic.std()
    if ax is None:
        v_spaces = 1
        f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6))
        # NOTE(review): `ax` becomes an ndarray of 3 axes here, so the
        # sns.distplot/axvline calls below receive an array rather than
        # a single Axes -- confirm the ax=None path is ever exercised.
        ax = ax.flatten()
    sns.distplot(ic.replace(np.nan, 0.), ax=ax,
                 hist_kws={'color': 'royalblue'},
                 kde_kws={'color': 'navy', 'alpha': 0.5},
                 # hist_kws={'weights':},
                 )
    ax.axvline(mean, color='indianred', linestyle='dashed', linewidth=1.0, label='Mean')
    ax.text(.05, .95,
            "Mean {:.3f} \n Std. {:.3f}".format(mean, std),
            fontsize=16,
            bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
            transform=ax.transAxes,
            verticalalignment='top')
    ax.set(title="Distribution of Daily IC",
           xlabel='IC',
           xlim=[-1, 1])
    ax.legend(loc='upper right')
    return ax
def plot_monthly_ic_heatmap(mean_monthly_ic, period, ax=None):
    """
    Plots a year-by-month heatmap of mean monthly IC.

    Parameters
    ----------
    mean_monthly_ic : pd.DataFrame
        Mean monthly IC with an 'ic' column, indexed by date-like values
        exposing .year and .month.
    period : int
        Holding period length. Currently unused: the title's .format()
        call has no placeholder.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    MONTH_MAP = {1: 'Jan',
                 2: 'Feb',
                 3: 'Mar',
                 4: 'Apr',
                 5: 'May',
                 6: 'Jun',
                 7: 'Jul',
                 8: 'Aug',
                 9: 'Sep',
                 10: 'Oct',
                 11: 'Nov',
                 12: 'Dec'}
    mean_monthly_ic = mean_monthly_ic.copy()
    # NOTE(review): num_plots is a float, so v_spaces is 1.0; newer
    # matplotlib rejects non-integer subplot counts -- confirm on upgrade.
    num_plots = 1.0
    v_spaces = ((num_plots - 1) // 3) + 1
    if ax is None:
        f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6))
        # NOTE(review): like plot_ic_hist, this leaves `ax` as an ndarray
        # of axes while the calls below expect a single Axes.
        ax = ax.flatten()
    # Re-index to (year, month) so unstack() yields years x months.
    new_index_year = []
    new_index_month = []
    for date in mean_monthly_ic.index:
        new_index_year.append(date.year)
        new_index_month.append(MONTH_MAP[date.month])
    mean_monthly_ic.index = pd.MultiIndex.from_arrays(
        [new_index_year, new_index_month],
        names=["year", "month"])
    ic_year_month = mean_monthly_ic['ic'].unstack()
    sns.heatmap(
        ic_year_month,
        annot=True,
        alpha=1.0,
        center=0.0,
        annot_kws={"size": 7},
        linewidths=0.01,
        linecolor='white',
        cmap=cm.get_cmap('RdBu'),
        cbar=False,
        ax=ax)
    ax.set(ylabel='', xlabel='')
    ax.set_title("IC Monthly Mean".format(period))
    return ax
# -----------------------------------------------------------------------------------
# Functions to Plot Others
def plot_event_bar_OLD(mean, std, ax):
    """Deprecated: error-bar plot of mean return +/- std per period length.

    Parameters
    ----------
    mean, std : pd.Series
        Decimal returns indexed by period length (trade days).
    ax : matplotlib.Axes

    Returns
    -------
    ax : matplotlib.Axes
    """
    to_percent = 100.0
    periods = mean.index
    ax.errorbar(periods, mean * to_percent, yerr=std * to_percent,
                marker='o',
                ecolor='lightblue', elinewidth=5)
    ax.set(xlabel='Period Length (trade days)', ylabel='Return (%)',
           title="Annual Return Mean and StdDev")
    ax.set(xticks=periods)
    return ax
def plot_event_bar(df, x, y, hue, ax):
    """Plot one return-vs-period curve per `hue` group.

    Parameters
    ----------
    df : pd.DataFrame
        Long-format data; columns `x` (period length), `y` (decimal
        return) and `hue` (group label).
    x, y, hue : str
        Column names in `df`.
    ax : matplotlib.Axes

    Returns
    -------
    ax : matplotlib.Axes
    """
    DECIMAL_TO_PERCENT = 100.0
    # One shade of red per group, darkest first.
    n = len(np.unique(df[hue]))
    palette_gen = (c for c in sns.color_palette("Reds_r", n))
    gp = df.groupby(hue)
    for p, dfgp in gp:
        idx = dfgp[x]
        mean = dfgp[y]
        # std = dfgp['Annu. Vol.']
        c = next(palette_gen)
        ax.errorbar(idx, mean * DECIMAL_TO_PERCENT,
                    marker='o', color=c,
                    # yerr=std * DECIMAL_TO_PERCENT, ecolor='lightblue', elinewidth=5,
                    label="{}".format(p))
    ax.axhline(0.0, color='k', ls='--', lw=1, alpha=.5)
    ax.set(xlabel='Period Length (trade days)', ylabel='Return (%)',
           title="Average Annual Return")
    ax.legend(loc='upper right')
    # NOTE(review): `idx` leaks out of the loop, so the ticks come from the
    # *last* group (NameError if df is empty) -- confirm groups share x values.
    ax.set(xticks=idx)
    return ax
def plot_event_dist(df_events, date, axs):
    """Plot, one axes per column, the distribution of post-event returns.

    Parameters
    ----------
    df_events : pd.DataFrame
        One column per holding-period length; values are returns (%).
    date : int or str
        Event date, shown in each subplot title.
    axs : sequence of matplotlib.Axes
        Must provide at least one axes per column of `df_events`.
    """
    for i, (period, returns) in enumerate(df_events.iteritems()):
        ax = axs[i]
        sns.distplot(returns, ax=ax)
        # Vertical marker at the sample mean.
        ax.axvline(returns.mean(), lw=1, ls='--', label='Average', color='red')
        ax.legend(loc='upper left')
        ax.set(xlabel='Return (%)', ylabel='',
               title="{} Distribution of return after {:d} trade dats".format(date, period))
'''
def plot_event_dist_NEW(df_events, axs, grouper=None):
i = 0
def _plot(ser):
ax = axs[i]
sns.distplot(ser, ax=ax)
ax.axvline(ser.mean(), lw=1, ls='--', label='Average', color='red')
ax.legend(loc='upper left')
ax.set(xlabel='Return (%)', ylabel='',
title="Distribution of return after {:d} trade dats".format(period))
if grouper is None:
for (date, period), row in df_events.iterrows():
ax = axs[i]
sns.distplot(ser, ax=ax)
ax.axvline(ser.mean(), lw=1, ls='--', label='Average', color='red')
ax.legend(loc='upper left')
ax.set(xlabel='Return (%)', ylabel='',
title="Distribution of return after {:d} trade dats".format(period))
# self.show_fig(fig, 'event_return_{:d}days.png'.format(my_period))
i += 1
# print(mean)
'''
def plot_calendar_distribution(signal, monthly_signal, yearly_signal, ax1, ax2):
    """Bar-plot how signal occurrences distribute across months and years.

    Parameters
    ----------
    signal : pd.Series or pd.DataFrame
        Indexed by integer dates (YYYYMMDD); only the index endpoints are
        used for the header line.
    monthly_signal : pd.DataFrame
        Occurrence counts per month, with a 'Time' column.
    yearly_signal : pd.DataFrame
        Occurrence counts per year, with a 'Time' column.
    ax1, ax2 : matplotlib.Axes
        Axes for the monthly and yearly bar charts respectively.
    """
    idx = signal.index.values
    start = jutil.convert_int_to_datetime(idx[0]).date()
    end = jutil.convert_int_to_datetime(idx[-1]).date()
    count = np.sum(yearly_signal.values.flatten())
    # NOTE(review): "occurance" typo lives in a user-visible string; fixing
    # it changes output, so it is left as-is here.
    print("\n " + "Calendar Distribution ({} occurance from {} to {}):".format(count, start, end))
    # fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 12), dpi=72)
    # sns.barplot(data=monthly_signal.reset_index(), x='Month', y='Time', ax=ax1)
    # sns.barplot(x=monthly_signal.index.values, y=monthly_signal.values, ax=ax1)
    ax1.bar(monthly_signal.index, monthly_signal['Time'].values)
    ax1.axhline(monthly_signal.values.mean(), lw=1, ls='--', color='red', label='Average')
    ax1.legend(loc='upper right')
    months_str = ['Jan', 'Feb', 'March', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    ax1.set(xticks=range(len(months_str)), xticklabels=months_str,
            title="Monthly Distribution",
            xlabel='Month', ylabel='Time')
    # sns.barplot(data=yearly_signal.reset_index(), x='Year', y='Times', ax=ax2, color='forestgreen')
    ax2.bar(yearly_signal.index, yearly_signal['Time'].values)
    ax2.axhline(yearly_signal.values.mean(), lw=1, ls='--', color='red', label='Average')
    ax2.legend(loc='upper right')
    ax2.set(xticks=yearly_signal.index,
            title="Yearly Distribution",
            xlabel='Month', ylabel='Time')
def plot_event_pvalue(pv, ax):
    """Plot the zero-mean-test p-value against holding-period length.

    Parameters
    ----------
    pv : pd.Series
        p-values indexed by period length (trade days).
    ax : matplotlib.Axes

    Returns
    -------
    ax : matplotlib.Axes
    """
    periods = pv.index
    values = pv.values
    ax.plot(periods, values, marker='D')
    ax.set(xlabel='Period Length (trade days)', ylabel='p-value',
           title="P Value of Test: Mean(return) == 0")
    ax.set(xticks=periods)
    return ax
def plot_ic_decay(df_ic, ax):
    """Plot IC (in percent) as a function of holding-period length.

    Parameters
    ----------
    df_ic : pd.DataFrame
        IC values (decimal) indexed by period length in trade days.
    ax : matplotlib.Axes
    """
    as_percent = df_ic.mul(DECIMAL_TO_PCT)
    as_percent.plot(marker='x', lw=1.2, ax=ax, cmap=COLOR_MAP)
    # Zero reference line: below it the signal has lost predictive power.
    ax.axhline(0.0, color='k', ls='--', lw=0.7, alpha=.5)
    ax.set(xlabel="Period Length (trade days)", ylabel="IC (%)",
           title="IC Decay",
           xticks=df_ic.index,
           xlim=(0, df_ic.index[-1] + 1))
def plot_quantile_return_mean_std(dic, ax):
    """Error-bar plot of mean return (with std whiskers) per quantile,
    across holding-period lengths.

    Parameters
    ----------
    dic : dict
        Mapping of quantile label -> DataFrame with 'mean' and 'std' rows
        and one column per period length. All frames are assumed to share
        the same columns (the x axis comes from the first frame).
    ax : matplotlib.Axes
    """
    n_quantiles = len(dic)
    # One colormap sample per quantile.
    palette_gen = (COLOR_MAP(x) for x in np.linspace(0, 1, n_quantiles))
    #palette_gen_light = (COLOR_MAP(x) for x in np.linspace(0, 1, n_quantiles))
    # palette_gen = (c for c in sns.color_palette("RdBu", n_quantiles, desat=0.5))
    # palette_gen =\
    #     (c for c in sns.cubehelix_palette(n_quantiles,
    #                                       start=0, rot=0.5,
    #                                       dark=0.1, light=0.8, reverse=True,
    #                                       gamma=.9))
    # palette_gen_light = (c for c in sns.color_palette("RdBu", n_quantiles, desat=0.5))
    # palette_gen_light = (c for c in sns.cubehelix_palette(n_quantiles,
    #                                                       start=0, rot=0.5,
    #                                                       dark=0.1, light=0.8, reverse=True,
    #                                                       gamma=.3))
    df_tmp = list(dic.values())[0]
    idx = df_tmp.columns
    # Horizontal jitter so overlapping whiskers remain readable.
    offsets = np.linspace(-0.3, 0.3, n_quantiles)
    for i, (quantile, df) in enumerate(dic.items()):
        mean = df.loc['mean', :]
        std = df.loc['std', :]
        c = next(palette_gen)
        # Whiskers use a transparent variant of the line colour (alpha * 0.2).
        c_light = list(c)
        c_light[3] = c_light[3] * .2
        # c_light = next(palette_gen_light)
        ax.errorbar(idx + offsets[i], mean * DECIMAL_TO_PCT,
                    marker='x', color=c, lw=1.2,
                    yerr=std * DECIMAL_TO_PCT, ecolor=c_light, elinewidth=1,
                    label="Quantile {}".format(int(quantile)))
    ax.axhline(0.0, color='k', ls='--', lw=0.7, alpha=.5)
    ax.set(xlabel='Period Length (trade days)', ylabel='Return (%)',
           title="Mean & Std of Return",
           xticks=idx)
    ax.legend(loc='upper left')
    #ax.set(xticks=idx)
def plot_batch_backtest(df, ax):
    """Plot cumulative-return curves (in percent) for a batch of backtests.

    Parameters
    ----------
    df : pd.DataFrame
        Cumulative returns in decimal form; the index is converted to
        datetimes via ``jutil.convert_int_to_datetime``.
    ax : matplotlib.Axes
        Axes upon which to plot.
    """
    curves = df.copy()
    curves.index = jutil.convert_int_to_datetime(curves.index)
    curves.mul(DECIMAL_TO_PCT).plot(lw=1.2, ax=ax, cmap=COLOR_MAP)
    # Zero line as a visual reference for profit vs. loss.
    ax.axhline(0.0, color='k', ls='--', lw=0.7, alpha=.5)
    ax.set(xlabel="Date", ylabel="Cumulative Return (%)",
           title="Cumulative Return for Different Buy Condition", )
| 7,136 | 4 | 373 |
d21a19dcbf763a490eb7bab412158af5465c7820 | 6,846 | py | Python | forms.py | vangdfang/conspace-register | 3d57bb07c49d065afc22826317c2bbbdb53fa2b2 | [
"BSD-2-Clause"
] | 1 | 2021-04-29T16:37:28.000Z | 2021-04-29T16:37:28.000Z | forms.py | vangdfang/conspace-register | 3d57bb07c49d065afc22826317c2bbbdb53fa2b2 | [
"BSD-2-Clause"
] | null | null | null | forms.py | vangdfang/conspace-register | 3d57bb07c49d065afc22826317c2bbbdb53fa2b2 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2014-2015, Doug Kelly
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.extras.widgets import SelectDateWidget
from django.utils import timezone
from register.models import Convention, Registration, PaymentMethod, RegistrationLevel, DealerRegistrationLevel, ShirtSize, CouponCode, CouponUse
from datetime import date, datetime
import re
import os
import codecs
BIRTH_YEAR_CHOICES = list(range(date.today().year, 1900, -1))
| 42.521739 | 172 | 0.667835 | # Copyright (c) 2014-2015, Doug Kelly
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.extras.widgets import SelectDateWidget
from django.utils import timezone
from register.models import Convention, Registration, PaymentMethod, RegistrationLevel, DealerRegistrationLevel, ShirtSize, CouponCode, CouponUse
from datetime import date, datetime
import re
import os
import codecs
BIRTH_YEAR_CHOICES = list(range(date.today().year, 1900, -1))
def validate_birthday(value):
    """Django validator: reject registrants younger than 18.

    Parameters
    ----------
    value : datetime.date
        The registrant's date of birth.

    Raises
    ------
    ValidationError
        If the registrant is under 18 years old.
    """
    today = date.today()
    age = today.year - value.year
    try:
        anniversary = date(year=today.year, month=value.month, day=value.day)
    except ValueError:
        # A Feb 29 birthday falls back to Feb 28 in non-leap years.
        if value.month == 2 and value.day == 29:
            anniversary = date(year=today.year, month=2, day=28)
        else:
            raise
    # Haven't reached this year's birthday yet -> one year younger.
    if today < anniversary:
        age -= 1
    if age < 18:
        raise ValidationError("You must be 18 or older to register")
def build_countries():
    """Load the semicolon-separated country list shipped next to this module.

    Returns
    -------
    list of (str, str)
        (value, label) pairs for a Django Select widget; for countries
        both entries are the same name.
    """
    path = os.path.join(os.path.dirname(__file__), 'countries.dat')
    fp = codecs.open(path, mode='r', encoding='utf-8')
    names = fp.read().split(';')
    fp.close()
    # The Select widget expects (value, label) tuples; here they coincide.
    return [(name, name) for name in names]
class RegistrationForm(forms.ModelForm):
    """Attendee registration form with convention-specific validation.

    Validates age, badge-name character set, availability of the chosen
    registration/dealer levels and payment method, volunteer contact
    info, and coupon codes.
    """
    class Meta:
        model = Registration
        fields = [
            'first_name',
            'last_name',
            'badge_name',
            'email',
            'address',
            'city',
            'state',
            'postal_code',
            'country',
            'registration_level',
            'dealer_registration_level',
            'birthday',
            'shirt_size',
            'volunteer',
            'volunteer_phone',
        ]
        widgets = {
            'birthday': SelectDateWidget(years=BIRTH_YEAR_CHOICES),
            'country': forms.Select(choices=build_countries()),
            'registration_level': forms.RadioSelect(),
            'dealer_registration_level': forms.RadioSelect(),
            'shirt_size': forms.RadioSelect(),
        }
    # Extra (non-model) fields.
    payment_method = forms.ModelChoiceField(widget=forms.RadioSelect, empty_label=None, queryset=PaymentMethod.objects.filter(active=True).order_by('seq'))
    coupon_code = forms.CharField(required=False)

    def clean_birthday(self):
        """Reject registrants under 18 (delegates to validate_birthday)."""
        data = self.cleaned_data['birthday']
        validate_birthday(data)
        return data

    def clean_badge_name(self):
        """Restrict badge names to printable ASCII (\\x20 space .. \\x7e '~')."""
        data = self.cleaned_data['badge_name']
        # BUG FIX: re.match only inspects the start of the string, so a bad
        # character anywhere past position 0 slipped through; re.search
        # scans the entire badge name.
        if re.search('[^\x20-\x7e]', data):
            raise ValidationError("Badge name may only contain letters, numbers and punctuation.")
        return data

    def clean_registration_level(self):
        """Reject levels that are expired, deactivated, or sold out."""
        data = self.cleaned_data['registration_level']
        if (data.deadline <= timezone.now() or
                data.active == False or
                (data.limit and len(Registration.objects.filter(registration_level=data)) >= data.limit)):
            raise ValidationError("That registration level is no longer available.")
        return data

    def clean_dealer_registration_level(self):
        """Reject dealer levels whose table allocation is exhausted."""
        data = self.cleaned_data['dealer_registration_level']
        if data and len(Registration.objects.filter(dealer_registration_level=data)) + data.number_tables > data.convention.dealer_limit:
            raise ValidationError("That dealer registration level is no longer available.")
        # BUG FIX: Django clean_<field> methods must return the cleaned
        # value; the missing return silently discarded the selection.
        return data

    def clean_payment_method(self):
        """Reject payment methods that have been deactivated."""
        data = self.cleaned_data['payment_method']
        if data.active == False:
            raise ValidationError("That payment method is no longer available.")
        return data

    def clean_volunteer_phone(self):
        """Require a contact number when the volunteer box is checked."""
        data = self.cleaned_data['volunteer_phone']
        if not data and self.cleaned_data['volunteer']:
            raise ValidationError("A contact phone number is required for volunteering.")
        return data

    def clean_coupon_code(self):
        """Validate that the coupon exists and (if single-use) is unused."""
        data = self.cleaned_data['coupon_code']
        if data:
            try:
                code = CouponCode.objects.get(code=data)
            except ObjectDoesNotExist:
                code = None
            if not code:
                raise ValidationError("That coupon code is not valid.")
            if code.single_use and CouponUse.objects.filter(coupon=code):
                raise ValidationError("That coupon code has already been used.")
        return data

    def __init__(self, *args, **kwargs):
        super(RegistrationForm, self).__init__(*args, **kwargs)
        # Restrict choices to the most recent active convention.
        current_convention = Convention.objects.filter(active=True).order_by('-id')[0]
        self.fields['registration_level'].empty_label = None
        # NOTE(review): uses naive datetime.now() while clean_registration_level
        # uses timezone.now() -- confirm which is intended under USE_TZ.
        self.fields['registration_level'].queryset=RegistrationLevel.objects.filter(active=True, deadline__gt=datetime.now(), convention=current_convention).order_by('seq')
        self.fields['dealer_registration_level'].empty_label = 'None'
        self.fields['dealer_registration_level'].queryset=DealerRegistrationLevel.objects.filter(convention=current_convention).order_by('number_tables')
        self.fields['shirt_size'].empty_label = None
        self.fields['shirt_size'].queryset=ShirtSize.objects.order_by('seq')
| 3,592 | 1,376 | 69 |
2933f14523cbe60dfeb84d289480526a0d362ffa | 1,424 | py | Python | rcosautomation/discord/scripts/pairing.py | Apexal/rcos-automation | 58561639592261e1bc53ff8181a124e139887ac2 | [
"MIT"
] | 1 | 2020-09-01T20:14:00.000Z | 2020-09-01T20:14:00.000Z | rcosautomation/discord/scripts/pairing.py | Apexal/rcos-bot | 58561639592261e1bc53ff8181a124e139887ac2 | [
"MIT"
] | 8 | 2020-08-26T14:18:24.000Z | 2021-11-18T02:58:47.000Z | rcosautomation/discord/scripts/pairing.py | rcos/rcos-automation | 58561639592261e1bc53ff8181a124e139887ac2 | [
"MIT"
] | null | null | null | # from .constants import *
from rcosautomation.discord.constants import MATTERMOST_USERNAME, MATTERMOST_PASSWORD, VOICE_CHANNEL
from rcosautomation.discord.channels import add_channel_if_not_exists
import requests
from mattermostdriver import Driver
# mattermost = Driver({
# 'url': '54.197.25.170',
# 'login_id': MATTERMOST_USERNAME,
# 'password': MATTERMOST_PASSWORD
# })
# mattermost.login()
# The ID of the Project Pairing category
project_pairing_category_id = '748650123092820140'
# You can copy-paste project names here on each line and it will split and trim them
project_text = '''The Hotbox
Padlock News
Sage
Submitty
Insomnia Dialogue System
Exalendar
DormDesign
RPI Housing Finder
Spiral Football Stats
Lavender Programming Language
useCloudFS
Used Car Data Playground
OpenCircuits
TutorBase
Smartrider
ShuttleTracker
Poll Buddy
Telescope
AIPS
Pipeline
YACS
Venue
Taper'''
projects = list(map(str.strip, project_text.splitlines()))
| 23.344262 | 100 | 0.745084 | # from .constants import *
from rcosautomation.discord.constants import MATTERMOST_USERNAME, MATTERMOST_PASSWORD, VOICE_CHANNEL
from rcosautomation.discord.channels import add_channel_if_not_exists
import requests
from mattermostdriver import Driver
# mattermost = Driver({
# 'url': '54.197.25.170',
# 'login_id': MATTERMOST_USERNAME,
# 'password': MATTERMOST_PASSWORD
# })
# mattermost.login()
# The ID of the Project Pairing category
project_pairing_category_id = '748650123092820140'
# You can copy-paste project names here on each line and it will split and trim them
project_text = '''The Hotbox
Padlock News
Sage
Submitty
Insomnia Dialogue System
Exalendar
DormDesign
RPI Housing Finder
Spiral Football Stats
Lavender Programming Language
useCloudFS
Used Car Data Playground
OpenCircuits
TutorBase
Smartrider
ShuttleTracker
Poll Buddy
Telescope
AIPS
Pipeline
YACS
Venue
Taper'''
projects = list(map(str.strip, project_text.splitlines()))
def run():
    """Ensure every project has a pairing voice channel under the
    Project Pairing category."""
    print(
        f'Creating project pairing text-channels for {len(projects)} projects')
    # Mattermost channel creation was prototyped here but never enabled;
    # see the commented-out Driver setup at the top of this module.
    for project_name in projects:
        add_channel_if_not_exists(
            project_name,
            channel_type=VOICE_CHANNEL,
            parent_id=project_pairing_category_id,
        )
| 437 | 0 | 23 |
5086f0c1e829da39b863eed82b6450fb0fe824b3 | 899 | py | Python | modules/chess-diagrams/test_integration.py | embarced/micro-moves | 90e3dba1d09a50b0f7df3f742a58a6e558bf1500 | [
"Apache-2.0"
] | 9 | 2018-09-30T09:14:55.000Z | 2020-09-06T08:01:29.000Z | modules/chess-diagrams/test_integration.py | embarced/micro-moves | 90e3dba1d09a50b0f7df3f742a58a6e558bf1500 | [
"Apache-2.0"
] | 52 | 2019-06-15T17:50:12.000Z | 2021-08-01T04:16:01.000Z | modules/chess-diagrams/test_integration.py | embarced/micro-moves | 90e3dba1d09a50b0f7df3f742a58a6e558bf1500 | [
"Apache-2.0"
] | 5 | 2018-04-26T14:34:04.000Z | 2020-06-03T12:16:33.000Z | import chess_diagrams
# setup for all tests. See https://docs.pytest.org/en/2.7.3/xunit_setup.html
#
# Test for a single response. See http://flask.pocoo.org/docs/1.0/testing/
#
| 28.09375 | 76 | 0.704116 | import chess_diagrams
# setup for all tests. See https://docs.pytest.org/en/2.7.3/xunit_setup.html
#
def setup_method(self, method):
    # Enable Flask's testing mode on the app under test.
    # NOTE(review): this hook takes ``self``/``method`` yet sits at module
    # level; it looks like it was meant to be a test-class method -- confirm
    # that pytest actually invokes it here.
    chess_diagrams.app.testing = True
# Test for a single response. See http://flask.pocoo.org/docs/1.0/testing/
#
def test_index_page():
    """The start page responds 200 with an HTML body."""
    client = chess_diagrams.app.test_client()
    resp = client.get('/')
    assert resp.status_code == 200
    assert b'<html>' in resp.data
def test_board_image_no_param():
    """Without a FEN parameter a default board diagram is still rendered as PNG."""
    client = chess_diagrams.app.test_client()
    resp = client.get('/board.png')
    assert resp.status_code == 200
    assert resp.mimetype == 'image/png'
def test_board_image_with_param():
    """A diagram for an explicit FEN position is rendered as PNG."""
    client = chess_diagrams.app.test_client()
    start_position = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
    resp = client.get('/board.png?fen=' + start_position)
    assert resp.status_code == 200
    assert resp.mimetype == 'image/png'
| 625 | 0 | 90 |
856c4c22632a187d60fed926d89f84c701913ae7 | 2,108 | py | Python | des3CipherPycrypto.py | Erozbliz/Cryptography-Encryption-File | db7b0db405c4eb34dc62de8c2a828b9d74043c4d | [
"MIT"
] | null | null | null | des3CipherPycrypto.py | Erozbliz/Cryptography-Encryption-File | db7b0db405c4eb34dc62de8c2a828b9d74043c4d | [
"MIT"
] | null | null | null | des3CipherPycrypto.py | Erozbliz/Cryptography-Encryption-File | db7b0db405c4eb34dc62de8c2a828b9d74043c4d | [
"MIT"
] | null | null | null | import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import DES3
class TDESCipher(object):
"""
Triple DES (Data Encryption Standard)
Enchaine 3 applications successives de l'algorithme DES sur le meme bloc de donnees de 64 bits, avec 2 ou 3 clef DES differentes.
Le TDES est cryptographiquement securise, il n'est ni aussi sur ni aussi rapide que AES.
Taille(s) du bloc : 64 bits (8 octets)
Longueur(s) de la cle : 168(21)ou 112(14) bits
Nombre de tours 3x16 tours du DES
"""
@staticmethod
#padding permettant d'utiliser n'importe quelle taille de message
@staticmethod
| 34.557377 | 133 | 0.634725 | import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import DES3
class TDESCipher(object):
    """Triple DES (3DES) cipher in CBC mode with a random IV per message.

    Triple DES chains three successive DES applications over the same
    64-bit data block, using two or three distinct DES keys.  It is
    cryptographically stronger than single DES, but neither as secure nor
    as fast as AES.

    Block size:  64 bits (8 bytes)
    Key length:  168 (3-key) or 112 (2-key) bits
    Rounds:      3 x 16 DES rounds
    """

    def __init__(self, key):
        # block size in bytes (DES operates on 64-bit blocks)
        self.bs = 8
        # Triple DES key material
        self.key = key

    @staticmethod
    def str_to_bytes(data):
        """Encode text to UTF-8 bytes; bytes input passes through unchanged."""
        u_type = type(b''.decode('utf8'))
        if isinstance(data, u_type):
            return data.encode('utf8')
        return data

    def _pad(self, s):
        """PKCS#7-style padding so any message length fits whole blocks."""
        return s + (self.bs - len(s) % self.bs) * TDESCipher.str_to_bytes(chr(self.bs - len(s) % self.bs))

    @staticmethod
    def _unpad(s):
        """Strip the padding previously added by :meth:`_pad`."""
        return s[:-ord(s[len(s)-1:])]

    # encrypt/decrypt were byte-for-byte duplicates of encryptByte/decryptByte
    # (except for the final UTF-8 decode); they now delegate instead.
    def encrypt(self, raw):
        """Encrypt ``raw`` and return base64 text (random IV prepended)."""
        return self.encryptByte(raw)

    def decrypt(self, enc):
        """Decrypt base64 input and decode the plaintext as UTF-8 text."""
        return self.decryptByte(enc).decode('utf-8')

    def encryptByte(self, raw):
        """Encrypt ``raw`` (str or bytes); return base64 text, IV prepended."""
        raw = self._pad(TDESCipher.str_to_bytes(raw))
        iv = Random.new().read(DES3.block_size)
        cipher = DES3.new(self.key, DES3.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw)).decode('utf-8')

    def decryptByte(self, enc):
        """Decrypt base64 input and return the raw plaintext bytes."""
        enc = base64.b64decode(enc)
        iv = enc[:DES3.block_size]
        cipher = DES3.new(self.key, DES3.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(enc[DES3.block_size:]))
| 1,247 | 0 | 212 |
7672618ddc982f86fa8850a37db09d2977e7b70a | 4,495 | py | Python | backend/database.py | michellewei04/ImageProcessorS18 | f9c5a8b4ab64d0f14731926171e9285e3ad84410 | [
"MIT"
] | null | null | null | backend/database.py | michellewei04/ImageProcessorS18 | f9c5a8b4ab64d0f14731926171e9285e3ad84410 | [
"MIT"
] | null | null | null | backend/database.py | michellewei04/ImageProcessorS18 | f9c5a8b4ab64d0f14731926171e9285e3ad84410 | [
"MIT"
] | null | null | null | import os
from pymodm import fields, MongoModel, connect
from pymodm.errors import DoesNotExist
from passlib.hash import pbkdf2_sha256
connect("mongodb://localhost:27017/database")
def add_user(username, password):
"""Creates new user if user does not exist in the mongo database
:param username: user email as string type which serves as user id
:param password: user password as string type
:returns: updates user information in mongo database
"""
try:
user = User.objects.raw({'_id': username}).first()
except DoesNotExist:
user = User(username, password=pbkdf2_sha256.hash(password))
user.save()
def get_user(username):
"""Gets user by unique username
:param username: user email as string type which serves as user id
:returns: user information
"""
try:
user = User.objects.raw({'_id': username}).first()
return user
except DoesNotExist:
return None
def delete_user(username):
"""Deletes user from mongo database
:param username: user email as string type which serves as user id
"""
try:
user = User.objects.raw({'_id': username}).first()
user.delete()
except DoesNotExist:
pass
return False
def login_user(username, password):
"""Returns true if user exists and has the correct password
:param username: user email as string type which serves as user id
:param password: user password as string type
:returns: True if password is correct, False if incorrect
"""
try:
user = User.objects.raw({'_id': username}).first()
if user.password and pbkdf2_sha256.verify(password, user.password):
return True
except DoesNotExist:
pass
return False
def save_original_image_uuid(username, uuid):
"""Updates existing user by adding the uuid of a user-uploaded image
:param username: user email as string type which serves as user id
:param uuid: UUID4 of user-uploaded image
:returns: adds uuid of user-uploaded image to mongo database
"""
try:
user = User.objects.raw({'_id': username}).first()
user.original_image = uuid
user.save()
except DoesNotExist:
return None
def save_processed_image_uuid(username, uuid):
"""Updates existing user by adding the uuid of the processed image
:param username: user email as string type which serves as user id
:param uuid: UUID4 of processed image
:returns: adds uuid of processed image to mongo database
"""
try:
user = User.objects.raw({'_id': username}).first()
user.processed_image = uuid
user.save()
except DoesNotExist:
return None
def get_original_image(username):
"""Gets the original image uuid for a user
:param username: user email as string type which serves as user id
:returns: uuid of user's original image as a string
"""
try:
user = User.objects.raw({'_id': username}).first()
return user.original_image
except DoesNotExist:
return None
def get_processed_image(username):
"""Gets the processed image uuid for a user
:param username: user email as string type which serves as user id
:returns: uuid (UUID4) of user's processed image as a string
"""
try:
user = User.objects.raw({'_id': username}).first()
return user.processed_image
except DoesNotExist:
return None
def delete_image(name):
"""Deletes image stored in server
:param name: name (uuid) of an image stored in the VM server
"""
for f in os.listdir('images/'):
if f.startswith(name):
os.remove('images/' + f)
return
def remove_images(username):
"""Removes all images associated with a user
:param username: user email as string type which serves as user id
"""
try:
user = User.objects.raw({'_id': username}).first()
if user.original_image is not None:
delete_image(user.original_image)
if user.processed_image is not None:
delete_image(user.processed_image)
return True
except DoesNotExist:
return False
| 30.787671 | 75 | 0.65495 | import os
from pymodm import fields, MongoModel, connect
from pymodm.errors import DoesNotExist
from passlib.hash import pbkdf2_sha256
connect("mongodb://localhost:27017/database")
class User(MongoModel):
    # MongoDB-backed user record; the email address doubles as the primary key.
    username = fields.EmailField(primary_key=True)
    password = fields.CharField()  # pbkdf2-sha256 hash (see add_user), never plain text
    original_image = fields.CharField()  # UUID of the user-uploaded image
    processed_image = fields.CharField()  # UUID of the processed result image
def add_user(username, password):
    """Create a new user unless the username is already registered.

    :param username: user email (string) used as the primary key
    :param password: plain-text password; stored as a pbkdf2-sha256 hash
    """
    try:
        User.objects.raw({'_id': username}).first()
    except DoesNotExist:
        new_user = User(username, password=pbkdf2_sha256.hash(password))
        new_user.save()
def get_user(username):
    """Fetch a user record by its unique username.

    :param username: user email (string) used as the primary key
    :returns: the ``User`` document, or ``None`` if absent
    """
    try:
        return User.objects.raw({'_id': username}).first()
    except DoesNotExist:
        return None
def delete_user(username):
    """Delete a user from the mongo database.

    :param username: user email (string) used as the primary key
    :returns: True if the user existed and was deleted, False otherwise

    Bug fix: the previous version fell through to ``return False`` even
    after a successful delete, so callers could never detect success.
    """
    try:
        user = User.objects.raw({'_id': username}).first()
    except DoesNotExist:
        return False
    user.delete()
    return True
def login_user(username, password):
    """Check a login attempt against the stored password hash.

    :param username: user email (string) used as the primary key
    :param password: plain-text password to verify
    :returns: True iff the user exists and the password matches
    """
    try:
        user = User.objects.raw({'_id': username}).first()
    except DoesNotExist:
        return False
    return bool(user.password) and pbkdf2_sha256.verify(password, user.password)
def save_original_image_uuid(username, uuid):
    """Record the UUID of a user-uploaded image on the user document.

    :param username: user email (string) used as the primary key
    :param uuid: UUID4 of the user-uploaded image
    """
    try:
        user = User.objects.raw({'_id': username}).first()
    except DoesNotExist:
        return None
    user.original_image = uuid
    user.save()
def save_processed_image_uuid(username, uuid):
    """Record the UUID of the processed image on the user document.

    :param username: user email (string) used as the primary key
    :param uuid: UUID4 of the processed image
    """
    try:
        user = User.objects.raw({'_id': username}).first()
    except DoesNotExist:
        return None
    user.processed_image = uuid
    user.save()
def get_original_image(username):
    """Look up the stored original-image UUID for a user.

    :param username: user email (string) used as the primary key
    :returns: the UUID string, or ``None`` if the user does not exist
    """
    try:
        return User.objects.raw({'_id': username}).first().original_image
    except DoesNotExist:
        return None
def get_processed_image(username):
    """Look up the stored processed-image UUID for a user.

    :param username: user email (string) used as the primary key
    :returns: the UUID string, or ``None`` if the user does not exist
    """
    try:
        return User.objects.raw({'_id': username}).first().processed_image
    except DoesNotExist:
        return None
def delete_image(name):
    """Remove every file under ``images/`` whose name starts with ``name``.

    :param name: name prefix (UUID) of an image stored on the server
    """
    matches = [entry for entry in os.listdir('images/') if entry.startswith(name)]
    for entry in matches:
        os.remove('images/' + entry)
def remove_images(username):
    """Delete every stored image belonging to a user.

    :param username: user email (string) used as the primary key
    :returns: True if the user exists (images removed), False otherwise
    """
    try:
        user = User.objects.raw({'_id': username}).first()
    except DoesNotExist:
        return False
    for image in (user.original_image, user.processed_image):
        if image is not None:
            delete_image(image)
    return True
| 0 | 186 | 23 |
4a72ee74bce328f8139eab2be91b970998d48b05 | 639 | py | Python | server.py | KeeKelly/sentiment-analyzer | cf433b726d7ed95e5a3cb33a1c65d79007764dc6 | [
"MIT"
] | null | null | null | server.py | KeeKelly/sentiment-analyzer | cf433b726d7ed95e5a3cb33a1c65d79007764dc6 | [
"MIT"
] | null | null | null | server.py | KeeKelly/sentiment-analyzer | cf433b726d7ed95e5a3cb33a1c65d79007764dc6 | [
"MIT"
] | null | null | null | from flask import Flask, request, send_from_directory, jsonify
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
app = Flask(__name__, static_url_path='/static')
@app.route('/js/<path:path>')
@app.route("/")
@app.route("/get_sentiment", methods=['GET', 'POST'])
if __name__ == '__main__':
app.run()
| 22.821429 | 62 | 0.72457 | from flask import Flask, request, send_from_directory, jsonify
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
app = Flask(__name__, static_url_path='/static')
@app.route('/js/<path:path>')
def send_js(path):
    # Serve JavaScript assets from the local ``js`` directory.
    return send_from_directory('js', path)
@app.route("/")
def hello():
return app.send_static_file("index.html")
@app.route("/get_sentiment", methods=['GET', 'POST'])
def get_sentiment():
sid = SentimentIntensityAnalyzer()
sentence = request.get_json()['input']
return jsonify(sid.polarity_scores(sentence))
if __name__ == '__main__':
app.run()
| 208 | 0 | 66 |
2ce4f6b0e270478b146694c2913d91f095079ecd | 820 | py | Python | tests/conftest.py | c17r/advent-of-code-data | daac3e81c2d36667ee2bfc7a7473aace8674704f | [
"MIT"
] | null | null | null | tests/conftest.py | c17r/advent-of-code-data | daac3e81c2d36667ee2bfc7a7473aace8674704f | [
"MIT"
] | null | null | null | tests/conftest.py | c17r/advent-of-code-data | daac3e81c2d36667ee2bfc7a7473aace8674704f | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import pytest
@pytest.fixture(autouse=True)
@pytest.fixture
@pytest.fixture(autouse=True)
@pytest.fixture(autouse=True)
| 24.848485 | 62 | 0.747561 | from __future__ import unicode_literals
import pytest
@pytest.fixture(autouse=True)
def mocked_sleep(mocker):
    # Patch time.sleep globally so retry/back-off code runs instantly in tests.
    no_sleep_till_brooklyn = mocker.patch("time.sleep")
    return no_sleep_till_brooklyn
@pytest.fixture
def aocd_dir(tmp_path):
    """Provide a fresh fake ``.config/aocd`` directory under pytest's tmp_path."""
    config_dir = tmp_path / ".config" / "aocd"
    config_dir.mkdir(parents=True)
    return config_dir
@pytest.fixture(autouse=True)
def remove_user_env(aocd_dir, monkeypatch):
    """Point aocd at the temp config dir and scrub any real session token."""
    for target in ("aocd.runner.AOCD_DIR", "aocd.models.AOCD_DIR"):
        monkeypatch.setattr(target, str(aocd_dir))
    monkeypatch.delenv("AOC_SESSION", raising=False)
@pytest.fixture(autouse=True)
def test_token(aocd_dir):
    """Seed the fake config dir with a token file plus its matching token directory."""
    (aocd_dir / "thetesttoken").mkdir()
    token_file = aocd_dir / "token"
    token_file.write_text("thetesttoken")
    return token_file
| 563 | 0 | 88 |
40ec6a2dc285631e9edde9dd3833818343d6513e | 166 | py | Python | data/groups.py | malder5/PyTest | 1f649584223b05945d03d71468bf1589bf79119d | [
"Apache-2.0"
] | null | null | null | data/groups.py | malder5/PyTest | 1f649584223b05945d03d71468bf1589bf79119d | [
"Apache-2.0"
] | null | null | null | data/groups.py | malder5/PyTest | 1f649584223b05945d03d71468bf1589bf79119d | [
"Apache-2.0"
] | null | null | null | from model.group import Group
testdata = [
Group(name='Name1', header='header1', footer='footer1'),
Group(name='Name2', header='header2', footer='footer2')
] | 27.666667 | 60 | 0.680723 | from model.group import Group
# Two fully-populated groups used as parametrized test fixture data.
testdata = [
    Group(name='Name1', header='header1', footer='footer1'),
    Group(name='Name2', header='header2', footer='footer2'),
]
248bedb8375a48eeb2051b6d81af7cf740c8cd45 | 2,037 | py | Python | Scaffold_Splitter.py | avneeshbt/wrapThem | 0504a271c2d670e8bbd6bca98f7ce8b21d79c816 | [
"MIT"
] | null | null | null | Scaffold_Splitter.py | avneeshbt/wrapThem | 0504a271c2d670e8bbd6bca98f7ce8b21d79c816 | [
"MIT"
] | null | null | null | Scaffold_Splitter.py | avneeshbt/wrapThem | 0504a271c2d670e8bbd6bca98f7ce8b21d79c816 | [
"MIT"
] | null | null | null |
##### This script splits of the assembly in subcontigs wherever there is a "N" stretch longer than 30N
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import glob
Assemblies = glob.glob("/media/avneesh/AneeshHDDfat/AssembledScaffolds/*")
N_stretch_length = 100
for file in Assemblies:
NewFILEPath = str(file) + str("_splitted")
newAssembly = open(NewFILEPath, "a")
for seq in SeqIO.parse(file, "fasta"):
base = -1
seq_end = "no"
new_sub_number = 0
while base < len(seq.seq)-1:
base += 1
N_count = 0
if seq.seq[base] != "N":
N_count = 0
start = base
for a in range(start, len(seq.seq),1):
if seq.seq[a] != "N":
if a+1 == len(seq.seq):
seq_end = "yes"
else:
for b in range(a, len(seq.seq)+1,1):
if seq.seq[b] == "N":
N_count += 1
else:
base = b-1
break
if N_count > N_stretch_length:
new_sub_number += 1
stop = a
old_split_ID = seq.id.split("_cov_")
old_split_ID[1] = "%s%s%s" % (str(old_split_ID[1]), str("_"), str(new_sub_number))
new_sequence = SeqRecord(Seq(str(seq.seq[start:stop])), id = "_cov_".join(old_split_ID),description="") ### create new SeqRecord object
SeqIO.write(new_sequence, newAssembly, "fasta") ### and write it to the new file
break
elif seq_end == "yes":
new_sub_number += 1
stop = a + 1
base = len(seq.seq) ## stops while loop
old_split_ID = seq.id.split("_cov_")
old_split_ID[1] = "%s%s%s" % (str(old_split_ID[1]), str("_"), str(new_sub_number))
new_sequence = SeqRecord(Seq(str(seq.seq[start:stop])), id = "_cov_".join(old_split_ID),description="") ### create new SeqRecord object
SeqIO.write(new_sequence, newAssembly, "fasta") ### and write it to the new file
break
else:
pass
else:
pass
print "%s%s" % (str(file.split("/")[-1]), " - done!")
| 33.393443 | 144 | 0.594502 |
##### This script splits of the assembly in subcontigs wherever there is a "N" stretch longer than 30N
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import glob
# Split each assembly scaffold into subcontigs at every run of more than
# N_stretch_length consecutive 'N' bases; pieces are appended to
# "<file>_splitted" with a "_<part>" suffix on the coverage field of the ID.
Assemblies = glob.glob("/media/avneesh/AneeshHDDfat/AssembledScaffolds/*")
N_stretch_length = 100  # minimum N-run length that triggers a split
for file in Assemblies:
	NewFILEPath = str(file) + str("_splitted")
	newAssembly = open(NewFILEPath, "a")
	for seq in SeqIO.parse(file, "fasta"):
		base = -1  # index of the last consumed base; the while loop advances it
		seq_end = "no"  # flag: current subcontig reaches the end of the record
		new_sub_number = 0  # running numeric suffix for subcontig IDs
		while base < len(seq.seq)-1:
			base += 1
			N_count = 0
			if seq.seq[base] != "N":
				N_count = 0
				start = base  # first non-N base of the candidate subcontig
				for a in range(start, len(seq.seq),1):
					if seq.seq[a] != "N":
						if a+1 == len(seq.seq):
							seq_end = "yes"
						else:
							# Count the N-run following position a and rewind the
							# outer scan (``base``) to just before the first non-N.
							# NOTE(review): the range extends to len(seq.seq)+1, so a
							# record ending in an N-run can index one past the end
							# (IndexError) -- confirm against real input.
							for b in range(a, len(seq.seq)+1,1):
								if seq.seq[b] == "N":
									N_count += 1
								else:
									base = b-1
									break
						if N_count > N_stretch_length:
							# N-run long enough: emit the subcontig [start, a).
							new_sub_number += 1
							stop = a
							old_split_ID = seq.id.split("_cov_")
							old_split_ID[1] = "%s%s%s" % (str(old_split_ID[1]), str("_"), str(new_sub_number))
							new_sequence = SeqRecord(Seq(str(seq.seq[start:stop])), id = "_cov_".join(old_split_ID),description="") ### create new SeqRecord object
							SeqIO.write(new_sequence, newAssembly, "fasta") ### and write it to the new file
							break
						elif seq_end == "yes":
							# Record exhausted: emit the final subcontig [start, a].
							new_sub_number += 1
							stop = a + 1
							base = len(seq.seq) ## stops while loop
							old_split_ID = seq.id.split("_cov_")
							old_split_ID[1] = "%s%s%s" % (str(old_split_ID[1]), str("_"), str(new_sub_number))
							new_sequence = SeqRecord(Seq(str(seq.seq[start:stop])), id = "_cov_".join(old_split_ID),description="") ### create new SeqRecord object
							SeqIO.write(new_sequence, newAssembly, "fasta") ### and write it to the new file
							break
						else:
							pass
					else:
						pass
	print "%s%s" % (str(file.split("/")[-1]), " - done!")
| 0 | 0 | 0 |
66f0980fce0c41d4fcc20a241d3fe307f384d2e3 | 1,856 | py | Python | route/component.py | mrcwbr/App-Translation-Tool | ca6f9dab33d91c6228ee02cf0bae382c0a71b88f | [
"Apache-2.0"
] | 3 | 2019-05-22T17:40:37.000Z | 2019-10-21T06:43:25.000Z | route/component.py | mrcwbr/App-Translation-Tool | ca6f9dab33d91c6228ee02cf0bae382c0a71b88f | [
"Apache-2.0"
] | null | null | null | route/component.py | mrcwbr/App-Translation-Tool | ca6f9dab33d91c6228ee02cf0bae382c0a71b88f | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint, render_template, request, jsonify
from helpers.database import db
from model.models import Project, Component
comp = Blueprint('component', __name__)
@comp.route('/component', methods=['GET'])
@comp.route('/component', methods=['POST'])
@comp.route('/component', methods=['PUT'])
@comp.route('/component', methods=['DELETE'])
| 27.701493 | 101 | 0.66056 | from flask import Blueprint, render_template, request, jsonify
from helpers.database import db
from model.models import Project, Component
comp = Blueprint('component', __name__)
@comp.route('/component', methods=['GET'])
def component():
p = Project.query.first()
c = Component.query.filter_by(project_id=p.id).order_by(Component.id.desc()).all()
return render_template('components.html', project=p, components=c)
@comp.route('/component', methods=['POST'])
def add_component():
name = request.form.get('name')
if not name:
return jsonify({'success': False})
p = Project.query.first()
if len(Component.query.filter(Component.project_id == p.id, Component.name == name).all()) != 0 \
or len(name) < 3:
return jsonify({'success': False})
c = Component(name=name, project_id=p.id)
db.session.add_all([c])
db.session.commit()
return jsonify({'success': True, 'newComp': c.to_json_dict()})
@comp.route('/component', methods=['PUT'])
def update_component():
name = request.form.get('name')
comp_id = request.form.get('id')
if not name or len(name) < 3 or not comp_id:
return jsonify({'success': False})
c = Component.query.filter(Component.id == comp_id).first()
check_already_used = Component.query.filter(Component.name == name).all()
if not c or check_already_used:
return jsonify({'success': False})
c.name = name
db.session.commit()
return jsonify({'success': True, 'updateComp': c.to_json_dict()})
@comp.route('/component', methods=['DELETE'])
def delete_component():
comp_id = request.form.get('id')
if not comp_id:
return jsonify({'success': False})
c = Component.query.filter_by(id=comp_id).first()
db.session.delete(c)
db.session.commit()
return jsonify({'success': True})
| 1,403 | 0 | 88 |
089043c413c1f63b11744344848c93ab5efa6197 | 91 | py | Python | cloudferry/lib/base/action/transporter.py | SVilgelm/CloudFerry | 4459c0d21ba7ccffe51176932197b352e426ba63 | [
"Apache-2.0"
] | 6 | 2017-04-20T00:49:49.000Z | 2020-12-20T16:27:10.000Z | cloudferry/lib/base/action/transporter.py | SVilgelm/CloudFerry | 4459c0d21ba7ccffe51176932197b352e426ba63 | [
"Apache-2.0"
] | 3 | 2017-04-08T15:47:16.000Z | 2017-05-18T17:40:59.000Z | cloudferry/lib/base/action/transporter.py | SVilgelm/CloudFerry | 4459c0d21ba7ccffe51176932197b352e426ba63 | [
"Apache-2.0"
] | 8 | 2017-04-07T23:42:36.000Z | 2021-08-10T11:05:10.000Z | from cloudferry.lib.base.action import action
| 15.166667 | 45 | 0.78022 | from cloudferry.lib.base.action import action
class Transporter(action.Action):
pass
| 0 | 21 | 23 |
32218ffec8e6f2f2c4b84d0a2dc6274447e3ae90 | 4,572 | py | Python | tests/core/test_cifar10.py | tjyuyao/ice-learn | 99087181d2d15cb55a3c34004550179366ce601a | [
"MIT"
] | 1 | 2022-03-29T11:06:36.000Z | 2022-03-29T11:06:36.000Z | tests/core/test_cifar10.py | tjyuyao/ice-learn | 99087181d2d15cb55a3c34004550179366ce601a | [
"MIT"
] | null | null | null | tests/core/test_cifar10.py | tjyuyao/ice-learn | 99087181d2d15cb55a3c34004550179366ce601a | [
"MIT"
] | null | null | null | import ice
import torch
from ice.core.loss import LossNode
from ice.core.metric import MetricNode
from torch import autocast, nn
from torch.nn import functional as F
from torch.optim import Adam
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
# arguments
ice.args.setdefault("lr", 0.0001, float, hparam=True)
# initialization
ice.init_autocast()
ice.make_configurable(Adam)
ice.set_gradient_accumulate(2)
# node
@ice.configurable
# define VGG 16
# hypergraph
ice.add("cifar10", make_cifar10(train=True, batch_size=200), tags="train")
ice.add("cifar10", make_cifar10(train=False, batch_size=200), tags="val")
ice.add("net", ice.ModuleNode(
module=Net(),
forward=lambda n, x: n.module(x['cifar10'][0]),
optimizers=ice.Optimizer(Adam(lr=ice.args.lr))
))
ice.add("nll_loss", LossNode(forward=lambda n, x: F.nll_loss(x["net"], x["cifar10"][1])))
ice.add("avg_nll_loss",
ice.MetricNode(
ice.AverageMeter(),
forward=lambda n, x: (x['nll_loss'], x['cifar10'][1].size(0)),
epoch_end=report,
))
ice.print_forward_output("nll_loss", every=200)
# training shedule
ice.run(
[
ice.Repeat([
ice.Task(train=True, epochs=5, tags="train"),
ice.SaveCheckpointTask(),
ice.Task(train=False, epochs=5, tags="val"),
], times=5)
],
devices="cuda:1",
omp_num_threads=6,
monitor_interval=1,
tee="3"
) | 28.936709 | 108 | 0.58399 | import ice
import torch
from ice.core.loss import LossNode
from ice.core.metric import MetricNode
from torch import autocast, nn
from torch.nn import functional as F
from torch.optim import Adam
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
# arguments
ice.args.setdefault("lr", 0.0001, float, hparam=True)
# initialization
ice.init_autocast()
ice.make_configurable(Adam)
ice.set_gradient_accumulate(2)
# node
@ice.configurable
class Net(nn.Module):
    # define VGG 16
    """VGG-16-style CNN emitting log-softmax scores over 10 classes.

    Five convolutional stages, each followed by max-pooling, batch-norm and
    ReLU, then three fully connected layers with dropout.
    NOTE(review): ``fc14`` assumes the flattened feature map is 512*4*4,
    i.e. a fixed input resolution -- confirm against the CIFAR-10 pipeline.
    """
    def __init__(self):
        super(Net, self).__init__()
        # stage 1: 3 -> 64 channels
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU()
        # stage 2: 64 -> 128 channels
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.relu2 = nn.ReLU()
        # stage 3: three 128-channel convs (last is 1x1)
        self.conv5 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv6 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv7 = nn.Conv2d(128, 128, 1, padding=1)
        self.pool3 = nn.MaxPool2d(2, 2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()
        # stage 4: 128 -> 256 channels
        self.conv8 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv9 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv10 = nn.Conv2d(256, 256, 1, padding=1)
        self.pool4 = nn.MaxPool2d(2, 2, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.relu4 = nn.ReLU()
        # stage 5: 256 -> 512 channels
        self.conv11 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv12 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv13 = nn.Conv2d(512, 512, 1, padding=1)
        self.pool5 = nn.MaxPool2d(2, 2, padding=1)
        self.bn5 = nn.BatchNorm2d(512)
        self.relu5 = nn.ReLU()
        # classifier head
        self.fc14 = nn.Linear(512 * 4 * 4, 1024)
        # NOTE(review): Dropout2d applied to a flattened (N, features) tensor
        # below -- plain Dropout is the usual choice here; confirm intent.
        self.drop1 = nn.Dropout2d()
        self.fc15 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout2d()
        self.fc16 = nn.Linear(1024, 10)
    def forward(self, x):
        # stage 1
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        # stage 2
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.pool2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        # stage 3
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.pool3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        # stage 4
        x = self.conv8(x)
        x = self.conv9(x)
        x = self.conv10(x)
        x = self.pool4(x)
        x = self.bn4(x)
        x = self.relu4(x)
        # stage 5
        x = self.conv11(x)
        x = self.conv12(x)
        x = self.conv13(x)
        x = self.pool5(x)
        x = self.bn5(x)
        x = self.relu5(x)
        # flatten + classifier head
        x = x.view(-1, 512 * 4 * 4)
        x = F.relu(self.fc14(x))
        x = self.drop1(x)
        x = F.relu(self.fc15(x))
        x = self.drop2(x)
        x = self.fc16(x)
        return F.log_softmax(x, dim=-1)
def make_cifar10(train: bool, batch_size: int):
    """Build a DatasetNode over the CIFAR-10 train or test split (shuffled iff training)."""
    transform = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    dataset = CIFAR10(download=True, root="/home/wangling/TMP/cifar10",
                      transform=transform, train=train)
    return ice.DatasetNode(dataset=dataset, batch_size=batch_size, shuffle=train)
def report(n: MetricNode):
    """After a non-training epoch, print the averaged NLL loss on rank 0."""
    if n.training:
        return
    avg_nll_loss = n.metric.evaluate().item()
    if n.launcher.rank != 0:
        return
    print(f"steps={n.global_train_steps} avg_nll_loss={avg_nll_loss}")
# hypergraph
# Same dataset name under two tags: train split vs. validation split.
ice.add("cifar10", make_cifar10(train=True, batch_size=200), tags="train")
ice.add("cifar10", make_cifar10(train=False, batch_size=200), tags="val")
# The network consumes the image tensor (element 0 of the cifar10 batch).
ice.add("net", ice.ModuleNode(
    module=Net(),
    forward=lambda n, x: n.module(x['cifar10'][0]),
    optimizers=ice.Optimizer(Adam(lr=ice.args.lr))
))
# NLL loss against the label tensor (element 1 of the cifar10 batch).
ice.add("nll_loss", LossNode(forward=lambda n, x: F.nll_loss(x["net"], x["cifar10"][1])))
# Running average of the loss, weighted by batch size; printed at epoch end.
ice.add("avg_nll_loss",
        ice.MetricNode(
            ice.AverageMeter(),
            forward=lambda n, x: (x['nll_loss'], x['cifar10'][1].size(0)),
            epoch_end=report,
        ))
ice.print_forward_output("nll_loss", every=200)
# training schedule: 5x (5 train epochs -> checkpoint -> 5 val epochs)
ice.run(
    [
        ice.Repeat([
            ice.Task(train=True, epochs=5, tags="train"),
            ice.SaveCheckpointTask(),
            ice.Task(train=False, epochs=5, tags="val"),
        ], times=5)
    ],
    devices="cuda:1",
    omp_num_threads=6,
    monitor_interval=1,
    tee="3"
)
b34c3bb33973c7cc37692adccda2ca2ed9b9d87b | 354 | py | Python | datamgt/helpers/getProviderInfo.py | CareHomeHub/CareHomePlatform | d811084bb72810fc0c35c6ccab18745480aefb3d | [
"MIT"
] | 1 | 2021-02-16T00:41:40.000Z | 2021-02-16T00:41:40.000Z | datamgt/helpers/getProviderInfo.py | CareHomeHub/CareHomePlatform | d811084bb72810fc0c35c6ccab18745480aefb3d | [
"MIT"
] | 15 | 2021-02-16T00:34:01.000Z | 2021-04-07T23:33:21.000Z | datamgt/helpers/getProviderInfo.py | CareHomeHub/CareHomePlatform | d811084bb72810fc0c35c6ccab18745480aefb3d | [
"MIT"
] | null | null | null | import requests
| 25.285714 | 76 | 0.644068 | import requests
def find_prov_info(ref='X99XX'):
    """Look up a CQC provider by its reference code.

    :param ref: CQC provider reference; the default sentinel means "not given"
    :return: decoded JSON payload on success, else a dict with an ``Error`` key
    """
    if ref == 'X99XX':
        return {'Error': "No Provider reference supplied"}
    resp = requests.get(f'https://api.cqc.org.uk/public/v1/providers/{ref}')
    # Check the status before touching the body: the previous version ran a
    # debug ``print(resp.json())`` first, which could crash on non-JSON
    # error responses (and leaked output to stdout).
    if resp.status_code != 200:
        return {'Error': f"No Provider reference found for {ref}"}
    return resp.json()
| 314 | 0 | 23 |
7e9817160a111e028c8bd9291041fd155f57cb68 | 404 | py | Python | tests/contract/test_main.py | langrenn-sprint/sprint-webserver | 065a96d102a6658e5422ea6a0be5abde4b6558e1 | [
"Apache-2.0"
] | null | null | null | tests/contract/test_main.py | langrenn-sprint/sprint-webserver | 065a96d102a6658e5422ea6a0be5abde4b6558e1 | [
"Apache-2.0"
] | 15 | 2021-01-11T19:42:39.000Z | 2021-04-19T21:09:58.000Z | tests/contract/test_main.py | langrenn-sprint/sprint-webserver | 065a96d102a6658e5422ea6a0be5abde4b6558e1 | [
"Apache-2.0"
] | null | null | null | """Contract test cases for main."""
from typing import Any
import pytest
import requests
@pytest.mark.contract
def test_main(http_service: Any) -> None:
"""Should return 200 and html."""
url = f"{http_service}"
response = requests.get(url)
assert response.status_code == 200
assert response.headers["content-type"] == "text/html; charset=utf-8"
assert len(response.text) > 0
| 22.444444 | 73 | 0.690594 | """Contract test cases for main."""
from typing import Any
import pytest
import requests
@pytest.mark.contract
def test_main(http_service: Any) -> None:
    """Should return 200 and html."""
    response = requests.get(f"{http_service}")
    assert response.status_code == 200
    assert response.headers["content-type"] == "text/html; charset=utf-8"
    assert len(response.text) > 0
| 0 | 0 | 0 |
b149852f384caa06ff26389ed56ca4719a469892 | 1,423 | py | Python | esi_bot/bot.py | lukasni/esi-bot | ebb4d50c247fd8468bb80fa72f86f7a18ddd6575 | [
"MIT"
] | 4 | 2018-06-11T15:21:41.000Z | 2018-12-13T16:06:25.000Z | esi_bot/bot.py | lukasni/esi-bot | ebb4d50c247fd8468bb80fa72f86f7a18ddd6575 | [
"MIT"
] | 9 | 2018-06-08T16:28:40.000Z | 2018-10-04T09:32:45.000Z | esi_bot/bot.py | CarbonAlabel/esi-bot | ebb4d50c247fd8468bb80fa72f86f7a18ddd6575 | [
"MIT"
] | 10 | 2018-06-08T15:57:27.000Z | 2021-08-12T03:54:08.000Z | """ESI slack bot for tweetfleet."""
import os
import time
from slackclient import SlackClient
from esi_bot import ESI
from esi_bot import ESI_CHINA
from esi_bot import LOG
from esi_bot import request
from esi_bot.processor import Processor
from esi_bot.commands import ( # noqa: F401; # pylint: disable=unused-import
get_help, issue_details, issue_new, links, misc, status_esi, status_server, type_info)
def main():
"""Connect to the slack RTM API and pull messages forever."""
LOG.info("ESI bot launched")
request.do_refresh(ESI)
request.do_refresh(ESI_CHINA)
LOG.info("Loaded ESI specs")
slack = SlackClient(os.environ["SLACK_TOKEN"])
processor = Processor(slack)
while True:
if slack.rtm_connect(auto_reconnect=True):
if not processor.on_server_connect():
raise SystemExit("Could not join channels")
LOG.info("Connected to Slack")
cycle = 0
while slack.server.connected is True:
cycle += 1
for msg in slack.rtm_read():
processor.process_event(msg)
if cycle > 10:
processor.garbage_collect()
cycle = 0
time.sleep(1) # rtm_read should block, but it doesn't :/
else:
raise SystemExit("Connection to slack failed :(")
if __name__ == '__main__':
main()
| 27.901961 | 90 | 0.627547 | """ESI slack bot for tweetfleet."""
import os
import time
from slackclient import SlackClient
from esi_bot import ESI
from esi_bot import ESI_CHINA
from esi_bot import LOG
from esi_bot import request
from esi_bot.processor import Processor
from esi_bot.commands import ( # noqa: F401; # pylint: disable=unused-import
get_help, issue_details, issue_new, links, misc, status_esi, status_server, type_info)
def main():
    """Connect to the slack RTM API and pull messages forever.

    Refreshes the ESI (and Chinese ESI) spec caches once at startup, then
    keeps an RTM session open, dispatching every incoming event to the
    Processor. Exits via SystemExit when the bot cannot join its channels
    or the connection cannot be established.
    """
    LOG.info("ESI bot launched")
    # Prime the swagger spec caches before accepting any commands.
    request.do_refresh(ESI)
    request.do_refresh(ESI_CHINA)
    LOG.info("Loaded ESI specs")
    # SLACK_TOKEN must be set in the environment; a KeyError here is fatal by design.
    slack = SlackClient(os.environ["SLACK_TOKEN"])
    processor = Processor(slack)
    while True:
        if slack.rtm_connect(auto_reconnect=True):
            if not processor.on_server_connect():
                raise SystemExit("Could not join channels")
            LOG.info("Connected to Slack")
            cycle = 0
            while slack.server.connected is True:
                cycle += 1
                for msg in slack.rtm_read():
                    processor.process_event(msg)
                # Roughly every 10 seconds, expire stale processor state.
                if cycle > 10:
                    processor.garbage_collect()
                    cycle = 0
                time.sleep(1)  # rtm_read should block, but it doesn't :/
        else:
            raise SystemExit("Connection to slack failed :(")
if __name__ == '__main__':
    # Script entry point: run the bot until the process is killed.
    main()
| 0 | 0 | 0 |
3ad815533c2c139f0fbdf82d64ecd11ea3e220e1 | 4,401 | py | Python | fluid/bundler.py | PaulDodd/signac-flow-project-helpers | 208c7c8da52c4b0108c3989c77423cc5ff86ba59 | [
"MIT"
] | 1 | 2017-05-30T14:22:59.000Z | 2017-05-30T14:22:59.000Z | fluid/bundler.py | PaulDodd/signac-flow-project-helpers | 208c7c8da52c4b0108c3989c77423cc5ff86ba59 | [
"MIT"
] | null | null | null | fluid/bundler.py | PaulDodd/signac-flow-project-helpers | 208c7c8da52c4b0108c3989c77423cc5ff86ba59 | [
"MIT"
] | null | null | null | # Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import itertools
from . import scheduler
from signac.common.six import with_metaclass
import uuid
# def _fn_bundle(self, bundle_id):
# return os.path.join(self.root_directory(), '.bundles', bundle_id)
#
# def _store_bundled(self, operations):
# """Store all job session ids part of one bundle.
#
# The job session ids are stored in a text file in the project's
# root directory. This is necessary to be able to identify each
# job's individual status from the bundle id."""
# if len(operations) == 1:
# return operations[0].get_id()
# else:
# h = '.'.join(op.get_id() for op in operations)
# bid = '{}-bundle-{}'.format(self, sha1(h.encode('utf-8')).hexdigest())
# fn_bundle = self._fn_bundle(bid)
# _mkdir_p(os.path.dirname(fn_bundle))
# with open(fn_bundle, 'w') as file:
# for operation in operations:
# file.write(operation.get_id() + '\n')
# return bid
#
# def _expand_bundled_jobs(self, scheduler_jobs):
# "Expand jobs which were submitted as part of a bundle."
# for job in scheduler_jobs:
# if job.name().startswith('{}-bundle-'.format(self)):
# with open(self._fn_bundle(job.name())) as file:
# for line in file:
# yield manage.ClusterJob(line.strip(), job.status())
# else:
# yield job
| 36.983193 | 142 | 0.608498 | # Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import itertools
from . import scheduler
from signac.common.six import with_metaclass
import uuid
# def _fn_bundle(self, bundle_id):
# return os.path.join(self.root_directory(), '.bundles', bundle_id)
#
# def _store_bundled(self, operations):
# """Store all job session ids part of one bundle.
#
# The job session ids are stored in a text file in the project's
# root directory. This is necessary to be able to identify each
# job's individual status from the bundle id."""
# if len(operations) == 1:
# return operations[0].get_id()
# else:
# h = '.'.join(op.get_id() for op in operations)
# bid = '{}-bundle-{}'.format(self, sha1(h.encode('utf-8')).hexdigest())
# fn_bundle = self._fn_bundle(bid)
# _mkdir_p(os.path.dirname(fn_bundle))
# with open(fn_bundle, 'w') as file:
# for operation in operations:
# file.write(operation.get_id() + '\n')
# return bid
#
# def _expand_bundled_jobs(self, scheduler_jobs):
# "Expand jobs which were submitted as part of a bundle."
# for job in scheduler_jobs:
# if job.name().startswith('{}-bundle-'.format(self)):
# with open(self._fn_bundle(job.name())) as file:
# for line in file:
# yield manage.ClusterJob(line.strip(), job.status())
# else:
# yield job
class JobBundle(object):
    """A group of (job, operation) pairs submitted as one cluster job.

    A bundle of size one produces a plain per-operation submission script;
    a larger bundle writes one shared header and launches every operation
    in the background (``&``) with an equal share of the processors.
    """

    def __init__(self, jobops, procs_per_job=None):
        # jobops: iterable of (job, operation) pairs; materialized so it can
        # be iterated more than once.
        self._job_ops = list(jobops)
        self._name = None
        self._job_names = []
        self._ppj = procs_per_job

    def _submit_name(self, project):
        """Return the scheduler job name for this bundle.

        Single-operation bundles get a deterministic name; multi-operation
        bundles get a random UUID so concurrent bundles never collide.
        """
        if len(self._job_ops) == 1:
            job, op = self._job_ops[0]
            return "{}-{}-{}".format(job, op, project)
        return "{}-bundle-{}".format(uuid.uuid4(), project)

    def dump(self, stream, hostconf, submitconf, project, **kwargs):
        """Write the submission script for this bundle to *stream*.

        Side effects: sets ``self._name`` and, for multi-operation bundles,
        fills ``self._job_names`` with the per-operation submit names.
        """
        assert len(self._job_ops) > 0
        # Fixed: the original called a nonexistent self._make_submit_name()
        # and built a broken "{project}-bundle-{hex}".format(project, hex).
        self._name = self._submit_name(project)
        submitconf.write_preamble(stream, self._name)
        if len(self._job_ops) == 1:  # just one job so we just write the operation script.
            job, op = self._job_ops[0]
            stream.write(op.format_header(hostconf, submitconf, project, **kwargs))
            stream.write('\n')
            # Fixed: the original referenced undefined names `host`/`submission`.
            stream.write(op.format_script(hostconf, submitconf, project, job, **kwargs))
            stream.write('\n')
        else:
            _, op = self._job_ops[0]
            # One shared header for the whole bundle; each operation gets an
            # equal share of the requested processors.  #TODO: fix this.
            stream.write(op.format_header(hostconf, submitconf, project, nprocs=self._ppj, **kwargs))
            stream.write('\n')
            for job, operation in self._job_ops:
                # Fixed: the original *called* the list (self._job_names(...))
                # instead of appending to it, and used `self.procs_per_job`
                # which does not exist (the attribute is self._ppj).
                self._job_names.append(scheduler.make_submit_name(operation, job, project))
                stream.write(operation.format_script(hostconf, submitconf, project, job, nprocs=self._ppj, **kwargs).strip())
                stream.write(' &\n')

    def dumps(self, hostconf, submitconf, project, **kwargs):
        """Return the submission script for this bundle as a string."""
        import io  # local import: `io` is not imported at module level
        stream = io.StringIO()
        self.dump(stream, hostconf, submitconf, project, **kwargs)
        stream.seek(0)
        return stream.read()

    def jobops(self):
        """Return the list of (job, operation) pairs in this bundle."""
        return self._job_ops

    def job_names(self):
        """Return the per-operation submit names recorded by dump()."""
        return self._job_names

    def name(self):
        """Return the bundle's submit name (None until dump() is called)."""
        return self._name

    def save(self):
        pass  # TODO: persist bundle membership (see commented-out _store_bundled above)

    def load(self):
        pass  # TODO: restore bundle membership from disk
class BundlerType(type):
    """Metaclass that records every subclass in a shared ``registry`` dict.

    The first class created with this metaclass (the base) only creates the
    empty registry; each subsequent class is stored under its class name.
    """

    def __init__(cls, name, bases, dct):
        if hasattr(cls, 'registry'):
            cls.registry[name] = cls
        else:
            cls.registry = {}
        return super(BundlerType, cls).__init__(name, bases, dct)
class Bundler(with_metaclass(BundlerType)):
    """Split a stream of (job, operation) pairs into fixed-size JobBundles."""

    def __init__(self, size):
        # Number of job-operations per bundle.
        self._size = size

    def bundle(self, hostconf, submitconf, jobops, **kwargs):
        """Yield JobBundle objects of exactly ``self._size`` job-operations.

        Raises AssertionError when the number of job-operations is not an
        exact multiple of the bundle size.
        """
        jobops = list(jobops)
        total_size = len(jobops)
        assert total_size % self._size == 0
        # Fixed: `/` yields a float under Python 3, which breaks range();
        # use floor division (identical result on Python 2).
        procs = submitconf.nprocs // self._size
        for i in range(total_size // self._size):
            # Fixed: itertools.islice does not accept start=/stop= keyword
            # arguments; a plain slice is equivalent and clearer here.
            chunk = jobops[i * self._size:(i + 1) * self._size]
            yield JobBundle(chunk, procs_per_job=procs)
| 2,440 | 28 | 393 |
d3dde07dd81d890a1b7d59dc6dcf3160a339b820 | 162 | py | Python | OpenCV/Joelma/exemplo_cinza.py | matewszz/Python | 18b7fc96d3ed294d2002ed484941a0ee8cf18108 | [
"MIT"
] | null | null | null | OpenCV/Joelma/exemplo_cinza.py | matewszz/Python | 18b7fc96d3ed294d2002ed484941a0ee8cf18108 | [
"MIT"
] | null | null | null | OpenCV/Joelma/exemplo_cinza.py | matewszz/Python | 18b7fc96d3ed294d2002ed484941a0ee8cf18108 | [
"MIT"
] | null | null | null | import cv2 as cv
img = cv.imread("testeOpenCV.jpg")  # loads as BGR; None if the file is missing
# OpenCV uses BGR channel order by default; convert to single-channel grayscale.
cinza = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
print(cinza.shape)  # (height, width) -- no channel axis after grayscale conversion
cv.imshow("Joelma Cinza", cinza)
cv.waitKey(0)  # block until any key is pressed so the window stays open
| 23.142857 | 43 | 0.746914 | import cv2 as cv
img = cv.imread("testeOpenCV.jpg")  # loads as BGR; None if the file is missing
# OpenCV uses BGR channel order by default; convert to single-channel grayscale.
cinza = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
print(cinza.shape)  # (height, width) -- no channel axis after grayscale conversion
cv.imshow("Joelma Cinza", cinza)
cv.waitKey(0)  # block until any key is pressed so the window stays open
| 0 | 0 | 0 |
47f8474c0f61f19e42b7353392ae3fc8607bfe92 | 289 | py | Python | notebooks/mean-temperature.py | JeroenD-BE/PythonDataScienceWorkshops | 713e176ba0602c7b2986308804f77dde238d4384 | [
"MIT"
] | null | null | null | notebooks/mean-temperature.py | JeroenD-BE/PythonDataScienceWorkshops | 713e176ba0602c7b2986308804f77dde238d4384 | [
"MIT"
] | null | null | null | notebooks/mean-temperature.py | JeroenD-BE/PythonDataScienceWorkshops | 713e176ba0602c7b2986308804f77dde238d4384 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('Qt4Agg')  # force the interactive Qt backend before plotting
import os
# NOTE(review): machine-specific absolute path -- breaks on any other machine.
data_folder = "C:\\Users\\jeroe\\PycharmProjects\\PythonDataScienceWorkshops\\data"
os.chdir(data_folder)
# Tab-separated file with no header row; columns get integer labels.
temp = pd.read_csv("mean_temperature.csv", delimiter="\t", header=None)
print(temp.head()) | 28.9 | 83 | 0.778547 | import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('Qt4Agg')  # force the interactive Qt backend before plotting
import os
# NOTE(review): machine-specific absolute path -- breaks on any other machine.
data_folder = "C:\\Users\\jeroe\\PycharmProjects\\PythonDataScienceWorkshops\\data"
os.chdir(data_folder)
# Tab-separated file with no header row; columns get integer labels.
temp = pd.read_csv("mean_temperature.csv", delimiter="\t", header=None)
print(temp.head()) | 0 | 0 | 0 |
f8a842876fea53b66d89ce9cb06572b2f49462f6 | 2,292 | py | Python | ztom/reporter.py | ztomsy/ztom | 6cfb5e411c47678d3e6ab37aa98ff07803437854 | [
"MIT"
] | 33 | 2019-05-02T13:22:59.000Z | 2022-03-19T22:29:20.000Z | ztom/reporter.py | ztomsy/ztom | 6cfb5e411c47678d3e6ab37aa98ff07803437854 | [
"MIT"
] | 1 | 2021-02-11T06:17:05.000Z | 2021-02-11T06:17:05.000Z | ztom/reporter.py | ztomsy/ztom | 6cfb5e411c47678d3e6ab37aa98ff07803437854 | [
"MIT"
] | 11 | 2019-11-20T06:53:47.000Z | 2021-09-16T16:14:09.000Z | from .stats_influx import StatsInflux
from pymongo import MongoClient, database, collection
from urllib.parse import quote_plus
| 27.614458 | 109 | 0.670157 | from .stats_influx import StatsInflux
from pymongo import MongoClient, database, collection
from urllib.parse import quote_plus
class Reporter:
def __init__(self, server_id, exchange_id):
#self.session_uuid = session_uuid
self.server_id = server_id
self.exchange_id = exchange_id
self.def_indicators = dict() # definition indicators
self.indicators = dict()
self.def_indicators["server_id"] = self.server_id
self.def_indicators["exchange_id"] = self.exchange_id
# self.def_indicators["session_uuid"] = self.session_uuid
def set_indicator(self, key, value):
self.indicators[key] = value
def init_db(self, host, port, database, measurement, user="", password=""):
self.influx = StatsInflux(host, port, database, measurement)
self.influx.set_tags(self.def_indicators)
def push_to_influx(self):
return self.influx.push_fields(self.indicators)
class MongoReporter(Reporter):
def __init__(self, server_id: str, exchange_id: str):
super().__init__(server_id, exchange_id)
self.default_db = None # type: database.Database
self.default_collection = None # type:collection.Collection
self.mongo_client = None # type: MongoClient
def init_db(self, host: str = "localhost", port = None, default_data_base = "", default_collection ="" ):
uri = host
self.mongo_client = MongoClient(uri)
self.default_db = self.mongo_client[default_data_base]
self.default_collection = self.default_db[default_collection]
def push_report(self, report=None, collection: str = None, data_base: str = None):
_data_base = self.default_db if data_base is None else self.mongo_client[data_base]
_collection = self.default_collection if collection is None else _data_base[collection]
if report is not None:
if isinstance(report, list):
result = _collection.insert_many(report)
else:
result = _collection.insert_one(report)
else:
# for r in report:
# self.reporter.set_indicator(r, report[r])
result = self.default_collection.insert_one(self.indicators)
return result
| 1,908 | 3 | 235 |
a139c53c203b0c453f1b801a6bd197763fd63f91 | 210 | py | Python | src/custom/models/resnet50.py | diegoirigaray/CrAdv | 84247449d418cef046e3c045ee529e4b86529e2e | [
"MIT"
] | 5 | 2019-11-22T21:15:44.000Z | 2021-11-25T20:15:59.000Z | src/custom/models/resnet50.py | diegoirigaray/CrAdv | 84247449d418cef046e3c045ee529e4b86529e2e | [
"MIT"
] | 4 | 2021-03-19T04:49:44.000Z | 2022-01-13T01:46:47.000Z | src/custom/models/resnet50.py | diegoirigaray/CrAdv | 84247449d418cef046e3c045ee529e4b86529e2e | [
"MIT"
] | null | null | null | from torchvision.models.resnet import ResNet, Bottleneck, model_urls
| 23.333333 | 68 | 0.7 | from torchvision.models.resnet import ResNet, Bottleneck, model_urls
class ResNet50(ResNet):
    """torchvision ResNet with the standard ResNet-50 configuration."""
    # URL of the pretrained weights for this architecture (not downloaded here).
    model_url = model_urls['resnet50']
    def __init__(self):
        # Bottleneck blocks with [3, 4, 6, 3] layers per stage == ResNet-50.
        super().__init__(Bottleneck, [3, 4, 6, 3])
| 49 | 68 | 23 |
2bc014a872b542a9e988b152abf744b92a045cfe | 1,402 | py | Python | Codeforces/Div2C459.py | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | 2 | 2018-12-11T14:37:24.000Z | 2022-01-23T18:11:54.000Z | Codeforces/Div2C459.py | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | null | null | null | Codeforces/Div2C459.py | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | null | null | null |
# Codeforces bracket problem (Python 2: raw_input / print statement).
s = raw_input()
n = len(s)
# `global` at module level is a no-op; dp is the memo table shared with recur().
global dp
# dp[i][j]: flag for substring s[i..j]; recur() overwrites with 1 (valid) / -1 (invalid).
dp = [[False]*n for x in range(n)]
count = 0
# Seed every length-2 window that can form a balanced pair ('?' is a wildcard).
for i in range(n-1):
    if s[i:i+2] in ["()","??","(?","?)"]:
        # print "NEtered"
        dp[i][i+1] = True
#for i in range(n):
#    for j in range(n):
#        if dp[i][j]:count+=1;print i,j,s[i:j+1]
if n%2==0:
    # Even length: evaluate the whole string plus every even-length window.
    recur(s,n,0,n-1)
    for i in range(4,n+1,2):
        for j in range(n-i+1):
            recur(s[j:j+i],i,j,j+i-1)
else:
    # Odd length: drop either the first or the last character and rescan.
    recur(s[1:],n-1,1,n-1)
    recur(s[:n-1],n-1,0,n-2)
    k = s
    s = k[1:]
    n = len(s)
    for i in range(4,n+1,2):
        for j in range(n-i+1):
            recur(s[j:j+i],i,j+1,j+i)
    s = k[0:n-1]
    n = len(k)
    for i in range(4,n+1,2):
        for j in range(n-i+1):
            #print "recur",k[j:j+i]
            recur(s[j:j+i],i,j,j+i-1)
    s = k
# Count substrings the memo marked definitely valid (flag 1, not the seed True).
for i in range(n):
    for j in range(n):
        if dp[i][j]==1:count+=1#;print i,j,s[i:j+1]
print count
| 22.253968 | 74 | 0.46933 | def recur(s,n,i,j):
    # Memoized check: can s (= the substring spanning indices i..j) be made
    # a valid bracket sequence?  dp flags: 1 = valid, -1 = invalid,
    # True/False = seeded/undecided (set by the driver script below).
    global dp
    #print s,n,i,j
    if n<2 or n%2==1:dp[i][j] = -1;return False
    elif n==2 and s in ["()","??","(?","?)"]:
        return dp[i][j]
    elif n==2:dp[i][j] = -1;return False
    elif dp[i][j] == -1: return False
    elif dp[i][j]==1:return True
    # Try every even split point into two independently valid halves...
    for k in range(2,n,2):
        if recur(s[:k],k,i,i+k-1) and recur(s[k:],n-k,k+i,j):
            dp[i][j] = 1
            return dp[i][j]
    # ...or an enclosing matchable pair wrapped around a valid interior.
    if s[0]+s[n-1] in ["()","??","(?","?)"] and recur(s[1:n-1],n-2,i+1,j-1):
        dp[i][j] = 1
        return dp[i][j]
    dp[i][j] = -1
    return False
# Codeforces bracket problem (Python 2: raw_input / print statement).
s = raw_input()
n = len(s)
# `global` at module level is a no-op; dp is the memo table shared with recur().
global dp
# dp[i][j]: flag for substring s[i..j]; recur() overwrites with 1 (valid) / -1 (invalid).
dp = [[False]*n for x in range(n)]
count = 0
# Seed every length-2 window that can form a balanced pair ('?' is a wildcard).
for i in range(n-1):
    if s[i:i+2] in ["()","??","(?","?)"]:
        # print "NEtered"
        dp[i][i+1] = True
#for i in range(n):
#    for j in range(n):
#        if dp[i][j]:count+=1;print i,j,s[i:j+1]
if n%2==0:
    # Even length: evaluate the whole string plus every even-length window.
    recur(s,n,0,n-1)
    for i in range(4,n+1,2):
        for j in range(n-i+1):
            recur(s[j:j+i],i,j,j+i-1)
else:
    # Odd length: drop either the first or the last character and rescan.
    recur(s[1:],n-1,1,n-1)
    recur(s[:n-1],n-1,0,n-2)
    k = s
    s = k[1:]
    n = len(s)
    for i in range(4,n+1,2):
        for j in range(n-i+1):
            recur(s[j:j+i],i,j+1,j+i)
    s = k[0:n-1]
    n = len(k)
    for i in range(4,n+1,2):
        for j in range(n-i+1):
            #print "recur",k[j:j+i]
            recur(s[j:j+i],i,j,j+i-1)
    s = k
# Count substrings the memo marked definitely valid (flag 1, not the seed True).
for i in range(n):
    for j in range(n):
        if dp[i][j]==1:count+=1#;print i,j,s[i:j+1]
print count
| 511 | 0 | 22 |
4f12806828820ed4aba600f483d5fabf836687ab | 1,715 | py | Python | flask_uio/sidebar.py | mensopheak/flask_uio | 8fd0f0a5ac0f10186d6572fc39c2db712c070cfe | [
"MIT"
] | null | null | null | flask_uio/sidebar.py | mensopheak/flask_uio | 8fd0f0a5ac0f10186d6572fc39c2db712c070cfe | [
"MIT"
] | null | null | null | flask_uio/sidebar.py | mensopheak/flask_uio | 8fd0f0a5ac0f10186d6572fc39c2db712c070cfe | [
"MIT"
] | null | null | null | from .element import Element
from .mixin import ReqInjectScriptMixin
from .menu import Menu, MenuItem
from .icon import Icon
class SideBar(Element, ReqInjectScriptMixin):
"""Sidebar widget (sidebar_menu, nav_menu, content)
Example: append sidebar_menu::
sidebar = uio.SideBar()
sidebar.sidebar_menu.append(
uio.Image(url_for('static', filename='vlogo.png'), _class='ui small centered image'),
uio.MenuHeaderItem('Brand Name'),
uio.MenuItem('Admin', url='admin'),
uio.MenuItem('CRM', url='crm'),
uio.MenuItem('CUS', url='cus'),
)
Example: append nav_menu::
sidebar.nav_menu.append(
uio.MenuHeaderItem('Example'),
uio.MenuItem('System'),
uio.MenuItem('Resource'),
uio.RightMenu(
uio.MenuItem('User Name', 'account', uio.Icon('user icon')),
uio.MenuItem('Logout', 'logout', uio.Icon('sign out alternate icon'))
),
)
""" | 38.977273 | 125 | 0.594752 | from .element import Element
from .mixin import ReqInjectScriptMixin
from .menu import Menu, MenuItem
from .icon import Icon
class SideBar(Element, ReqInjectScriptMixin):
    """Sidebar widget (sidebar_menu, nav_menu, content)

    Example: append sidebar_menu::

        sidebar = uio.SideBar()
        sidebar.sidebar_menu.append(
            uio.Image(url_for('static', filename='vlogo.png'), _class='ui small centered image'),
            uio.MenuHeaderItem('Brand Name'),
            uio.MenuItem('Admin', url='admin'),
            uio.MenuItem('CRM', url='crm'),
            uio.MenuItem('CUS', url='cus'),
        )

    Example: append nav_menu::

        sidebar.nav_menu.append(
            uio.MenuHeaderItem('Example'),
            uio.MenuItem('System'),
            uio.MenuItem('Resource'),
            uio.RightMenu(
                uio.MenuItem('User Name', 'account', uio.Icon('user icon')),
                uio.MenuItem('Logout', 'logout', uio.Icon('sign out alternate icon'))
            ),
        )
    """
    def __init__(self):
        super().__init__('')
        # Off-canvas menu shown/hidden by the "bars" toggle below.
        self.sidebar_menu = Menu(_class='ui sidebar inverted vertical menu', hide_id=False)
        # Page body wrapper; Semantic UI shifts the "pusher" when the sidebar opens.
        self.content = Element('div', _class='pusher')
        self.nav_menu = Menu(_class='ui primary inverted large stackable menu custom')
        # hide_id=False: the toggle needs a stable DOM id for the jQuery hook below.
        self.toggle = MenuItem('', '', icon=Icon('bars icon'), hide_id=False)
        self.nav_menu.append(self.toggle)
        # combined
        self.content.append(self.nav_menu)
        self.append(self.sidebar_menu, self.content)
        # Clicking the toggle item toggles the sidebar's visibility.
        self.inject_script = f'$("#{self.toggle.id}").click(function () {{$("#{self.sidebar_menu.id}").sidebar("toggle");}})'
a136d87a93db244c3cc1b07263b095f420641f03 | 968 | py | Python | tests/test_find_functions.py | roniemartinez/DocCron | 58560b3a24e3e211e0f28e3da85ad8f30781170a | [
"MIT"
] | 3 | 2019-05-02T05:59:20.000Z | 2022-03-12T22:28:16.000Z | tests/test_find_functions.py | roniemartinez/DocCron | 58560b3a24e3e211e0f28e3da85ad8f30781170a | [
"MIT"
] | 35 | 2019-04-03T08:09:52.000Z | 2022-03-28T14:38:09.000Z | tests/test_find_functions.py | Code-ReaQtor/DocCron | c4c8217d039b06f88cf35dd07bdb6ed0cf2d9678 | [
"MIT"
] | null | null | null | from datetime import datetime
from freezegun import freeze_time
import doccron
def foo() -> None:
    """
    This function prints "foo"
    /etc/crontab::
        * * * * * 2021
        * * * * * 2020
    :returns: None
    """
    # NOTE: the crontab lines above are live fixture data -- doccron
    # discovers schedules by parsing this docstring; do not edit it.
    print("foo")
def bar() -> None:
    """
    /etc/crontab::
        * * * * * 2021
        * * * * * 2020
    This should not be added
    """
    # Fixture: only the entries under "/etc/crontab::" should be scheduled;
    # the docstring is parsed by doccron, so it must stay exactly as-is.
    print("bar")
def baz() -> None:
    """
    * * * * * 2021
    * * * * * 2020
    """
    # Fixture: no "/etc/crontab::" heading, so doccron must NOT schedule baz.
    print("baz")
@freeze_time("2020-01-01")
| 17.925926 | 74 | 0.545455 | from datetime import datetime
from freezegun import freeze_time
import doccron
def foo() -> None:
    """
    This function prints "foo"
    /etc/crontab::
        * * * * * 2021
        * * * * * 2020
    :returns: None
    """
    # NOTE: the crontab lines above are live fixture data -- doccron
    # discovers schedules by parsing this docstring; do not edit it.
    print("foo")
def bar() -> None:
    """
    /etc/crontab::
        * * * * * 2021
        * * * * * 2020
    This should not be added
    """
    # Fixture: only the entries under "/etc/crontab::" should be scheduled;
    # the docstring is parsed by doccron, so it must stay exactly as-is.
    print("bar")
def baz() -> None:
    """
    * * * * * 2021
    * * * * * 2020
    """
    # Fixture: no "/etc/crontab::" heading, so doccron must NOT schedule baz.
    print("baz")
@freeze_time("2020-01-01")
def test_find_functions_with_docstrings() -> None:
    # Frozen clock keeps the 2020/2021 year-constrained schedules resolvable.
    # (Comment, not a docstring, so doccron's docstring scan cannot pick it up.)
    run_count = 0
    jobs_found = False
    # simulate=True yields (datetime, function) pairs without executing them.
    for next_schedule, function_object in doccron.run_jobs(simulate=True):
        jobs_found = True
        assert isinstance(next_schedule, datetime)
        assert function_object.__name__ in ("foo", "bar")
        assert function_object.__name__ != "baz"
        run_count += 1
        # Five scheduled entries are enough to prove the selection is right.
        if run_count == 5:
            break
    assert jobs_found
| 419 | 0 | 22 |
a8d6cc191993ea5dac5611aeb728aaf8a45ad8d8 | 7,945 | py | Python | tracepy/ray.py | GNiendorf/raypy | 459fe9b8bf7ae46b789a4633738b3f9708ecb10e | [
"MIT"
] | 30 | 2019-08-03T00:24:23.000Z | 2022-03-02T16:01:37.000Z | tracepy/ray.py | GNiendorf/raypy | 459fe9b8bf7ae46b789a4633738b3f9708ecb10e | [
"MIT"
] | 9 | 2019-07-29T03:02:00.000Z | 2020-05-13T05:51:20.000Z | tracepy/ray.py | GNiendorf/raypy | 459fe9b8bf7ae46b789a4633738b3f9708ecb10e | [
"MIT"
] | 11 | 2019-07-29T05:17:09.000Z | 2021-09-15T03:43:27.000Z | # Authors: Gavin Niendorf <gavinniendorf@gmail.com>
#
# Classes and methods for defining rays and their propagation rules.
#
# License: MIT
import numpy as np
from .transforms import *
from .exceptions import NormalizationError, NotOnSurfaceError
class ray:
"""Class for rays and their propagation through surfaces.
Note
----
Also checks whether the direction cosines are normalized.
Attributes
----------
P : np.array of 3 floats/ints
Position of ray in the lab frame.
D : np.array of 3 floats/ints
Direction cosines for the ray in the lab frame.
P_hist : list of P np.arrays
Previous P np.arrays in a list.
D_hist : list of D np.arrays
Previous D np.arrays in a list.
N : float/int
Index of refraction of current material.
wvl: float/int
Wavelength of the ray in microns 550nm --> 0.55.
"""
def transform(self, surface):
""" Updates position and direction of a ray to obj coordinate system. """
self.P, self.D = transform(surface.R, surface, np.array([self.P]), np.array([self.D]))
def find_intersection(self, surface):
"""Finds the intersection point of a ray with a surface.
Note
----
Directly changes the self.P (position) attribute of the ray
that corresponds to the intersection point. Also be aware
that my error definition is different from Spencer's paper.
I found that the more direct error equation of abs(F) allows
me to tune my max error values to get better accuracy.
Parameters
----------
surface : geometry object
Surface to find intersection of ray with.
"""
#Initial guesses, see Spencer, Murty for explanation.
s_0 = -self.P[2]/self.D[2]
X_1 = self.P[0]+self.D[0]*s_0
Y_1 = self.P[1]+self.D[1]*s_0
s_j = [0., 0.]
#Initial error.
error = 1.
n_iter = 0
#Max iterations allowed.
n_max = 1e4
while error > 1e-6 and n_iter < n_max:
X, Y, Z = [X_1, Y_1, 0.]+np.dot(self.D, s_j[0])
try:
#'normal' is the surface direction numbers.
func, normal= surface.get_surface([X, Y, Z])
deriv = np.dot(normal, self.D)
#Newton-raphson method
s_j = s_j[1], s_j[1]-func/deriv
except NotOnSurfaceError:
self.P = None
return None
#Error is how far f(X, Y, Z) is from 0.
error = abs(func)
n_iter += 1
if n_iter == n_max or s_0+s_j[0] < 0 or np.dot(([X, Y, Z]-self.P), self.D) < 0.:
self.P = None
else:
self.normal = normal
self.P = np.array([X, Y, Z])
def interact(self, surface, typeof):
"""Updates new direction of a ray for a given interaction type.
Note
----
High level method that calls the appropriate method for a given
interaction.
Parameters
----------
surface : geometry object
Surface to find intersection of ray with.
typeof : str
Type of interaction
reflection -> Reflect the ray off the surface.
refraction -> Refract the ray into the surface.
stop -> Don't change ray direction.
"""
if hasattr(surface,'glass'):
mu = self.N / surface.glass(self.wvl)
else:
mu = self.N / surface.N
a = mu*np.dot(self.D, self.normal)/pow(np.linalg.norm(self.normal), 2)
b = (pow(mu,2)-1)/pow(np.linalg.norm(self.normal), 2)
if typeof == 'stop':
pass
#Needed for total internal reflection even if typeof is refraction.
elif b > pow(a, 2) or typeof == 'reflection':
self.reflection(surface, a/mu)
elif typeof == 'refraction':
self.refraction(surface, mu, a, b)
def reflection(self, surface, a):
"""Reflects the ray off a surface and updates the ray's direction.
Note
----
This method computes D exactly rather than numerically like in the
refraction method.
Parameters
----------
surface : geometry object
Surface to reflect from.
a : float/int
Constant defined in the interact method.
"""
k, l, m = self.D
K, L, M = self.normal
self.D = np.array([k-2.*a*K, l-2.*a*L, m-2.*a*M])
def refraction(self, surface, mu, a, b):
"""Simulates refraction of a ray into a surface and updates the ray's direction.
Note
----
My error definition is not in Spencer and Murty's paper but is inspired by my
unique intersection error definition. We are solving for roots of a quadratic and
I am defining my error by how far the quadtratic is from 0. See Spencer, Murty for
derivation of the quadratic.
Parameters
----------
surface : geometry object
Surface to refract into.
mu, a, b : float/int
Constants defined in the interact method.
Returns
-------
0
Returns 0 if the number of iterations exceeds the max allowed to converge.
"""
k, l, m = self.D
K, L, M = self.normal
G = [-b/(2*a), -b/(2*a)]
#Initial error.
error = 1.
niter = 0
#Max iterations allowed.
nmax = 1e5
while error > 1e-15 and niter < nmax:
#Newton-raphson method
G = G[1], (pow(G[1],2)-b)/(2*(G[1]+a))
#See Spencer, Murty for where this is inspired by.
error = abs(pow(G[1],2)+2*a*G[1]+b)
niter += 1
if niter==nmax:
self.P = None
return 0.
#Update direction and index of refraction of the current material.
self.D = np.array([mu*k+G[1]*K,mu*l+G[1]*L,mu*m+G[1]*M])
if hasattr(surface,'glass'):
self.N = surface.glass(self.wvl)
else:
self.N = surface.N
def ray_lab_frame(self, surface):
""" Updates position and direction of a ray in the lab frame. """
self.P, self.D = lab_frame(surface.R, surface, np.array([self.P]), np.array([self.D]))
def update(self):
""" Updates the P_hist and D_hist arrays from current P and D arrays. """
self.P_hist.append(self.P)
self.D_hist.append(self.D)
def propagate(self, surfaces):
"""Propagates a ray through a given surfaces list.
Note
----
If self.P is None then the ray failed to converge or
took too many iterations to meet the required accuracy.
Note that this is used (self.P is None) as a flag in
many other functions in TracePy.
Parameters
----------
surfaces : list of geometry objects
Surfaces to propagate through in order of propagation.
"""
for surface in surfaces:
self.transform(surface)
self.find_intersection(surface)
#Results from failure to converge.
if self.P is None:
break
self.interact(surface, surface.action)
#Results from too many iterations.
if self.P is None:
break
self.ray_lab_frame(surface)
#Update current to history arrays.
self.update()
| 33.242678 | 94 | 0.559723 | # Authors: Gavin Niendorf <gavinniendorf@gmail.com>
#
# Classes and methods for defining rays and their propagation rules.
#
# License: MIT
import numpy as np
from .transforms import *
from .exceptions import NormalizationError, NotOnSurfaceError
class ray:
    """Class for rays and their propagation through surfaces.

    Note
    ----
    Also checks whether the direction cosines are normalized.

    Attributes
    ----------
    P : np.array of 3 floats/ints
        Position of ray in the lab frame.
    D : np.array of 3 floats/ints
        Direction cosines for the ray in the lab frame.
    P_hist : list of P np.arrays
        Previous P np.arrays in a list.
    D_hist : list of D np.arrays
        Previous D np.arrays in a list.
    N : float/int
        Index of refraction of current material.
    wvl: float/int
        Wavelength of the ray in microns 550nm --> 0.55.
    """
    def __init__(self, params, N_0=1):
        self.P = np.array(params['P'])
        self.D = np.array(params['D'])
        self.P_hist = [self.P]
        self.D_hist = [self.D]
        self.N = N_0
        self.wvl = params.get('wvl',0.55) #Added default wavelength 550nm
        # |D| must be 1 to within 1%; anything else is a malformed ray.
        if abs(np.linalg.norm(self.D)-1.) > .01:
            #Ray direction cosines are not normalized.
            raise NormalizationError()
    def transform(self, surface):
        """ Updates position and direction of a ray to obj coordinate system. """
        self.P, self.D = transform(surface.R, surface, np.array([self.P]), np.array([self.D]))
    def find_intersection(self, surface):
        """Finds the intersection point of a ray with a surface.
        Note
        ----
        Directly changes the self.P (position) attribute of the ray
        that corresponds to the intersection point. Also be aware
        that my error definition is different from Spencer's paper.
        I found that the more direct error equation of abs(F) allows
        me to tune my max error values to get better accuracy.
        Parameters
        ----------
        surface : geometry object
            Surface to find intersection of ray with.
        """
        #Initial guesses, see Spencer, Murty for explanation.
        s_0 = -self.P[2]/self.D[2]
        X_1 = self.P[0]+self.D[0]*s_0
        Y_1 = self.P[1]+self.D[1]*s_0
        s_j = [0., 0.]
        #Initial error.
        error = 1.
        n_iter = 0
        #Max iterations allowed.
        n_max = 1e4
        while error > 1e-6 and n_iter < n_max:
            X, Y, Z = [X_1, Y_1, 0.]+np.dot(self.D, s_j[0])
            try:
                #'normal' is the surface direction numbers.
                func, normal= surface.get_surface([X, Y, Z])
                deriv = np.dot(normal, self.D)
                #Newton-raphson method
                s_j = s_j[1], s_j[1]-func/deriv
            except NotOnSurfaceError:
                self.P = None
                return None
            #Error is how far f(X, Y, Z) is from 0.
            error = abs(func)
            n_iter += 1
        # Failure modes: iteration cap hit, intersection behind the starting
        # plane, or the intersection lies backwards along the ray direction.
        if n_iter == n_max or s_0+s_j[0] < 0 or np.dot(([X, Y, Z]-self.P), self.D) < 0.:
            self.P = None
        else:
            self.normal = normal
            self.P = np.array([X, Y, Z])
    def interact(self, surface, typeof):
        """Updates new direction of a ray for a given interaction type.
        Note
        ----
        High level method that calls the appropriate method for a given
        interaction.
        Parameters
        ----------
        surface : geometry object
            Surface to find intersection of ray with.
        typeof : str
            Type of interaction
            reflection -> Reflect the ray off the surface.
            refraction -> Refract the ray into the surface.
            stop -> Don't change ray direction.
        """
        # mu is the ratio of indices of refraction across the interface;
        # dispersive surfaces expose a wavelength-dependent glass() callable.
        if hasattr(surface,'glass'):
            mu = self.N / surface.glass(self.wvl)
        else:
            mu = self.N / surface.N
        a = mu*np.dot(self.D, self.normal)/pow(np.linalg.norm(self.normal), 2)
        b = (pow(mu,2)-1)/pow(np.linalg.norm(self.normal), 2)
        if typeof == 'stop':
            pass
        #Needed for total internal reflection even if typeof is refraction.
        elif b > pow(a, 2) or typeof == 'reflection':
            self.reflection(surface, a/mu)
        elif typeof == 'refraction':
            self.refraction(surface, mu, a, b)
    def reflection(self, surface, a):
        """Reflects the ray off a surface and updates the ray's direction.
        Note
        ----
        This method computes D exactly rather than numerically like in the
        refraction method.
        Parameters
        ----------
        surface : geometry object
            Surface to reflect from.
        a : float/int
            Constant defined in the interact method.
        """
        k, l, m = self.D
        K, L, M = self.normal
        self.D = np.array([k-2.*a*K, l-2.*a*L, m-2.*a*M])
    def refraction(self, surface, mu, a, b):
        """Simulates refraction of a ray into a surface and updates the ray's direction.
        Note
        ----
        My error definition is not in Spencer and Murty's paper but is inspired by my
        unique intersection error definition. We are solving for roots of a quadratic and
        I am defining my error by how far the quadtratic is from 0. See Spencer, Murty for
        derivation of the quadratic.
        Parameters
        ----------
        surface : geometry object
            Surface to refract into.
        mu, a, b : float/int
            Constants defined in the interact method.
        Returns
        -------
        0
            Returns 0 if the number of iterations exceeds the max allowed to converge.
        """
        k, l, m = self.D
        K, L, M = self.normal
        G = [-b/(2*a), -b/(2*a)]
        #Initial error.
        error = 1.
        niter = 0
        #Max iterations allowed.
        nmax = 1e5
        while error > 1e-15 and niter < nmax:
            #Newton-raphson method
            G = G[1], (pow(G[1],2)-b)/(2*(G[1]+a))
            #See Spencer, Murty for where this is inspired by.
            error = abs(pow(G[1],2)+2*a*G[1]+b)
            niter += 1
        if niter==nmax:
            self.P = None
            return 0.
        #Update direction and index of refraction of the current material.
        self.D = np.array([mu*k+G[1]*K,mu*l+G[1]*L,mu*m+G[1]*M])
        if hasattr(surface,'glass'):
            self.N = surface.glass(self.wvl)
        else:
            self.N = surface.N
    def ray_lab_frame(self, surface):
        """ Updates position and direction of a ray in the lab frame. """
        self.P, self.D = lab_frame(surface.R, surface, np.array([self.P]), np.array([self.D]))
    def update(self):
        """ Updates the P_hist and D_hist arrays from current P and D arrays. """
        self.P_hist.append(self.P)
        self.D_hist.append(self.D)
    def propagate(self, surfaces):
        """Propagates a ray through a given surfaces list.
        Note
        ----
        If self.P is None then the ray failed to converge or
        took too many iterations to meet the required accuracy.
        Note that this is used (self.P is None) as a flag in
        many other functions in TracePy.
        Parameters
        ----------
        surfaces : list of geometry objects
            Surfaces to propagate through in order of propagation.
        """
        for surface in surfaces:
            self.transform(surface)
            self.find_intersection(surface)
            #Results from failure to converge.
            if self.P is None:
                break
            self.interact(surface, surface.action)
            #Results from too many iterations.
            if self.P is None:
                break
            self.ray_lab_frame(surface)
            #Update current to history arrays.
            self.update()
| 391 | 0 | 27 |
bf84b9e670055a5db480952197387dd0fcb3fb3d | 7,630 | py | Python | common/strutil.py | lewyuejian/Automation | 18122ce2c5debe485fab7dac5f8007f4b7b2d51f | [
"MIT"
] | 1 | 2021-12-07T08:38:54.000Z | 2021-12-07T08:38:54.000Z | common/strutil.py | lewyuejian/ApiAutomation | 18122ce2c5debe485fab7dac5f8007f4b7b2d51f | [
"MIT"
] | null | null | null | common/strutil.py | lewyuejian/ApiAutomation | 18122ce2c5debe485fab7dac5f8007f4b7b2d51f | [
"MIT"
] | 1 | 2021-08-15T07:12:52.000Z | 2021-08-15T07:12:52.000Z | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: lewyuejian@163.com
@file: strutil.py
@time: 2021/7/3 0003 22:19
@desc:
'''
import ujson
import re
import random
import string
import uuid | 31.270492 | 115 | 0.548493 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: lewyuejian@163.com
@file: strutil.py
@time: 2021/7/3 0003 22:19
@desc:
'''
import ujson
import re
import random
import string
import uuid
class StrUtil:
    """String utilities: boundary-based extraction/replacement, random text
    generation with configurable character-class weights, and simple
    ``key=value`` content parsing."""

    letters = list(string.ascii_letters)
    whitespace = list(string.whitespace)
    punctuation = list(string.punctuation)
    digits = list(string.digits)
    # Unicode code-point range covering the common CJK ideographs.
    ch_start = 0x4E00
    ch_end = 0x9FA5

    @classmethod
    def getStringWithLBRB(cls, sourceStr, lbStr, rbStr, offset=0):
        """Extract the content found between a left and a right boundary.

        :param sourceStr: string to search
        :param lbStr: left boundary (used as a regex fragment)
        :param rbStr: right boundary (used as a regex fragment)
        :param offset: index of the match to return; pass 'all' for all matches
        :return: the matched substring, a list of matches ('all'), or None
        """
        regex = '([\\s\\S]*?)'
        r = re.compile(lbStr + regex + rbStr)
        result = r.findall(sourceStr)
        if str(offset) == 'all':
            return result
        # Bug fix: the old bound check (len(result) >= offset) accepted
        # offset == len(result) and raised IndexError. Only return for a
        # valid index (negative offsets keep their old Python semantics).
        if result and -len(result) <= offset < len(result):
            return result[offset]
        return None

    @classmethod
    def addUUID(cls, source):
        """Append a random UUID4 to *source*, separated by an underscore."""
        return source + '_' + str(uuid.uuid4())

    @classmethod
    def objectToJsonStr(cls, object):
        """Serialize *object* to a JSON string."""
        return ujson.dumps(object)

    @classmethod
    def objectToJson(cls, object):
        """Round-trip *object* through JSON, returning plain dict/list data."""
        return ujson.loads(ujson.dumps(object))

    @classmethod
    def getSpecifiedStr(cls, length, char):
        """Return *char* repeated *length* times.

        :param length: repetition count (converted with int())
        :param char: value to repeat (converted with str())
        """
        # String multiplication replaces the old quadratic `+=` loop.
        return str(char) * int(length)

    @classmethod
    def addFix(cls, sourceStr, isPre=False, preStr='', isSuffix=False, suffixStr=''):
        """Attach an optional (stripped) prefix and/or suffix to *sourceStr*.

        :param sourceStr: base string
        :param isPre: whether to prepend *preStr*
        :param preStr: prefix, stripped of surrounding whitespace
        :param isSuffix: whether to append *suffixStr*
        :param suffixStr: suffix, stripped of surrounding whitespace
        """
        preStr = str(preStr).strip()
        suffixStr = str(suffixStr).strip()
        if isPre and isSuffix:
            return '{}{}{}'.format(preStr, sourceStr, suffixStr)
        elif isSuffix:
            return '{}{}'.format(sourceStr, suffixStr)
        elif isPre:
            return '{}{}'.format(preStr, sourceStr)
        else:
            return sourceStr

    @classmethod
    def getRandomChar(cls):
        """Return one random ASCII letter (a-z or A-Z)."""
        # Renamed local: the old name shadowed the builtin `str`.
        pool = string.ascii_letters
        return random.choice(pool)

    @classmethod
    def replaceContentWithLBRB(cls, content, new, lbStr, rbStr, replaceOffset=0):
        """Replace content matched between the given boundaries.

        Supports replacing only the n-th occurrence when several match.

        :param content: string to operate on
        :param new: replacement text
        :param lbStr: left boundary of the text to replace
        :param rbStr: right boundary of the text to replace
        :param replaceOffset: 0-based occurrence to replace; -1 replaces all
        :return: the updated content (None when both boundaries are empty)

        NOTE(review): the boundaries are used both as regex fragments (for
        matching) and as literal strings (for splicing), so boundaries that
        contain regex metacharacters will not round-trip -- confirm callers
        only pass literal delimiters.
        """
        if lbStr == '' and rbStr == '':
            return
        regex = '([\\s\\S]*?)'
        r = re.compile(lbStr + regex + rbStr)
        match_results = r.findall(content)
        if int(replaceOffset) == -1:
            for result in match_results:
                # Include the boundaries so identical text elsewhere stays intact.
                content = content.replace(lbStr + result + rbStr, lbStr + new + rbStr)
        elif len(match_results) >= replaceOffset and len(match_results) != 0:
            # Walk the matches, tracking the position of each occurrence so
            # that only the requested one is spliced out.
            index = None
            for i in range(len(match_results)):
                if i == 0:
                    index = content.find(lbStr + match_results[i] + rbStr)
                else:
                    index = content.find(lbStr + match_results[i] + rbStr, index + 1)
                if i == int(replaceOffset):
                    preContent = content[:index]
                    centerContent = lbStr + new + rbStr
                    suffContent = content[index + len(lbStr + match_results[i] + rbStr):]
                    content = preContent + centerContent + suffContent
                    break
        return content

    @classmethod
    def random_index(cls, percents):
        """Pick an index at random, weighted by the values in *percents*."""
        start = 0
        index = 0
        randnum = random.randint(1, sum(percents))
        for index, scope in enumerate(percents):
            start += scope
            if randnum <= start:
                break
        return index

    @classmethod
    def getRandomText(cls, length, ch_percent=90, en_percent=5, digits_percent=3, punctuation_percent=2,
                      whitespace_percent=0):
        """Generate *length* random characters with the given class weights.

        Returned text may contain CJK characters (Unicode).

        :param length: number of characters to generate
        :param ch_percent: weight of Chinese (CJK) characters
        :param en_percent: weight of ASCII letters
        :param digits_percent: weight of digits
        :param punctuation_percent: weight of punctuation
        :param whitespace_percent: weight of whitespace
        :return: the generated string
        """
        percents = [ch_percent, en_percent, digits_percent, punctuation_percent, whitespace_percent]
        percents_info = ['ch_percent', 'en_percent', 'digits_percent', 'punctuation_percent', 'whitespace_percent']
        result = ''
        for i in range(length):
            info = percents_info[cls.random_index(percents)]
            if info == 'ch_percent':
                result += chr(random.randint(int(cls.ch_start), int(cls.ch_end)))
            elif info == 'en_percent':
                result += random.choice(cls.letters)
            elif info == 'digits_percent':
                result += random.choice(cls.digits)
            elif info == 'punctuation_percent':
                result += random.choice(cls.punctuation)
            elif info == 'whitespace_percent':
                result += random.choice(cls.whitespace)
        return result

    @classmethod
    def contentToDict(cls, content: str, result_enter_type: str = '\r\n'):
        """Parse ``key=value`` lines (with ``#``/``//`` comment lines) to a dict.

        :param content: text whose lines are either comments or ``key=value``
        :param result_enter_type: line separator used when storing comment text
        :return: ``{key: {'value': value, 'desc': preceding_comment_text}}``
        """
        content = content.replace('\r\n', '\n')
        lines = content.split('\n')
        result_dict = {}
        tmp_key_desc = ''
        for i, line in enumerate(lines):
            if not line.startswith('#') and not line.startswith('//') and '=' in line:
                # Bug fix: split on the FIRST '=' only, so values that
                # themselves contain '=' are no longer truncated.
                key_part, value_part = line.split('=', 1)
                result_dict.update({key_part.strip(): {'value': value_part.strip(), 'desc': tmp_key_desc}})
                tmp_key_desc = ''
            else:
                tmp_key_desc += line
                if not i == len(lines) - 1:
                    tmp_key_desc += result_enter_type
        return result_dict

    @classmethod
    def dictToContent(cls, content_dict: dict, result_enter_type: str = '\r\n'):
        """Inverse of :meth:`contentToDict`: rebuild text, joining keys and
        values with '=' and appending *result_enter_type* after every entry.

        :param content_dict: mapping as produced by ``contentToDict``
        :param result_enter_type: line separator, one of '\\r\\n', '\\n', '\\r'
        :return: the reassembled text
        """
        result_content = ''
        for key in content_dict.keys():
            result_content += content_dict[key]['desc']
            result_content += key
            result_content += '='
            result_content += content_dict[key]['value']
            result_content += result_enter_type
return result_content | 0 | 8,266 | 23 |
d9d38eaef0fbf15713eece46cb6a5a87cab7278c | 542 | py | Python | examples/module_04_measure/numba/classes.py | DSE512/twelve | 89ced1db394e5689c617edb4c819aec4138c48c3 | [
"BSD-3-Clause"
] | 3 | 2021-02-09T15:31:53.000Z | 2021-10-31T15:46:51.000Z | examples/module_04_measure/numba/classes.py | yngtodd/twelve | 89ced1db394e5689c617edb4c819aec4138c48c3 | [
"BSD-3-Clause"
] | null | null | null | examples/module_04_measure/numba/classes.py | yngtodd/twelve | 89ced1db394e5689c617edb4c819aec4138c48c3 | [
"BSD-3-Clause"
] | 1 | 2021-12-16T15:33:50.000Z | 2021-12-16T15:33:50.000Z | import numpy as np
from numba import jitclass
from numba import int32, float32
# numba jitclass field specification: 'value' is an int32 scalar and
# 'array' a 1-D float32 array.
spec = [
    ('value', int32),
    ('array', float32[:]),
]
@jitclass(spec)
| 17.483871 | 54 | 0.586716 | import numpy as np
from numba import jitclass
from numba import int32, float32
# numba jitclass field specification: 'value' is an int32 scalar and
# 'array' a 1-D float32 array.
spec = [
    ('value', int32),
    ('array', float32[:]),
]
@jitclass(spec)
class Bag(object):
    """A jit-compiled container wrapping a fixed-length float32 array."""

    def __init__(self, value):
        # Keep the requested length and allocate the zeroed backing array.
        self.value = value
        self.array = np.zeros(value, dtype=np.float32)

    @property
    def size(self):
        """Number of elements in the backing array."""
        return self.array.size

    def increment(self, val):
        """Add *val* to every element in place and return the array."""
        count = self.size
        idx = 0
        while idx < count:
            self.array[idx] += val
            idx += 1
        return self.array

    @staticmethod
    def add(x, y):
        """Return the sum of two values (independent of any instance)."""
        return x + y
| 224 | 136 | 22 |
c3231233f49b4ef3156e57d32a3fe8fabf402cba | 5,710 | py | Python | quiz/lifeline.py | grombk/millionaire_quiz_terminal | 28ede244acfd50a9443825a074dd40606a9578fc | [
"CC0-1.0"
] | null | null | null | quiz/lifeline.py | grombk/millionaire_quiz_terminal | 28ede244acfd50a9443825a074dd40606a9578fc | [
"CC0-1.0"
] | 2 | 2022-01-25T10:57:58.000Z | 2022-01-27T11:13:55.000Z | quiz/lifeline.py | grombk/millionaire_quiz_terminal | 28ede244acfd50a9443825a074dd40606a9578fc | [
"CC0-1.0"
] | null | null | null | import random
| 60.105263 | 381 | 0.68704 | import random
class Lifeline:
    """The three game lifelines: 50/50, Ask the Audience and Skip Question.

    Each lifeline can be used once; the ``*_ready`` flags flip to False on
    the instance after use (the class attributes stay True as defaults).
    """

    fifty_fifty_ready = True
    ask_the_audience_ready = True
    skip_question_ready = True

    def get_correct_letter_answer(self, question, correct_answer):
        """Return the answer text belonging to the correct letter (A-D).

        Returns '' for an unrecognised letter, matching the old behaviour.
        """
        from .question_bank import QuestionBank
        answers_to_question = QuestionBank.questions_answers[question]
        for letter, answer in zip('ABCD', answers_to_question):
            if letter == correct_answer:
                return answer
        return ""

    def get_wrong_letter_answer(self, question, random_one_wrong):
        """Return the letter (A-D) whose answer text equals *random_one_wrong*.

        Returns '' when no answer matches, matching the old behaviour.
        """
        from .question_bank import QuestionBank
        answers_to_question = QuestionBank.questions_answers[question]
        for letter, answer in zip('ABCD', answers_to_question):
            if answer == random_one_wrong:
                return letter
        return ""

    def fifty_fifty(self, question, correct_answer):
        """Remove two wrong answers, leaving the correct one and one wrong one."""
        from .question_bank import QuestionBank
        if not self.fifty_fifty_ready:
            return "\n=== You've already used your 50/50 lifeline! ===\n"
        print("\nYou've selected 50/50 - Computer, please take away two random wrong answers!")
        answer_list = QuestionBank.questions_answers[question]
        correct_question = self.get_correct_letter_answer(question, correct_answer)
        # Bug fix: the old code re-drew only once when the random pick was the
        # correct answer, so 50/50 could still present the correct answer
        # twice. Draw directly from the wrong answers instead.
        wrong_answers = [answer for answer in answer_list if answer != correct_question]
        random_one_wrong = random.choice(wrong_answers)
        wrong_letter_answer = self.get_wrong_letter_answer(question, random_one_wrong)
        self.fifty_fifty_ready = False
        # Present the two remaining options in letter order.
        if correct_answer < wrong_letter_answer:
            return "{correct_answer}: {correct_question} {wrong_letter_answer}: {random_one_wrong}\n".format(correct_question=correct_question, correct_answer=correct_answer, wrong_letter_answer=wrong_letter_answer, random_one_wrong=random_one_wrong)
        else:
            return "{wrong_letter_answer}: {random_one_wrong} {correct_answer}: {correct_question}\n".format(correct_question=correct_question, correct_answer=correct_answer, wrong_letter_answer=wrong_letter_answer, random_one_wrong=random_one_wrong)

    def ask_the_audience(self, question, correct_answer):
        """Show simulated audience percentages; the correct answer gets 55-96%."""
        from .question_bank import QuestionBank
        if not self.ask_the_audience_ready:
            return "\n=== You've already used your Ask the Audience lifeline! ===\n"
        print("\nYou've selected Ask the Audience - Audience, please choose A, B, C or D.\n")
        answer_list = QuestionBank.questions_answers[question]
        percentage_correct = random.randint(55, 96)
        # Bug fix: the old bounds let the remaining budget reach zero before
        # the later draws, so random.randint(1, 0) raised ValueError. Reserve
        # at least 1% for each of the three wrong answers.
        remaining = 100 - percentage_correct  # >= 4 since percentage_correct <= 96
        first_perc_wrong = random.randint(1, remaining - 2)
        second_perc_wrong = random.randint(1, remaining - first_perc_wrong - 1)
        third_perc_wrong = remaining - first_perc_wrong - second_perc_wrong
        self.ask_the_audience_ready = False
        letters = ['A', 'B', 'C', 'D']
        if correct_answer not in letters:
            # Matches the old implicit fall-through for unknown letters.
            return None
        # The correct letter takes percentage_correct; the other letters take
        # the wrong percentages in order, exactly like the old branches.
        wrong_percs = [first_perc_wrong, second_perc_wrong, third_perc_wrong]
        percs = [percentage_correct if letter == correct_answer else wrong_percs.pop(0)
                 for letter in letters]
        body = ' '.join('{}: {} ({}%)'.format(letter, answer, perc)
                        for letter, answer, perc in zip(letters, answer_list, percs))
        return '=== {} ===\n'.format(body)

    def skip_question(self):
        """Skip the current question; returns a warning string when reused."""
        if not self.skip_question_ready:
            return "\n=== You've already used your Skip Question lifeline! ===\n"
        print("\nYou've selected Skip Question - Let's move on to the next one!")
        self.skip_question_ready = False
| 5,434 | 223 | 23 |
2b655300aa05d17138b9baf22a3ffe07bee5fe1c | 10,053 | py | Python | Patrons_v1tov2.py | bulib/alma_patrons_loader | 7b9881cf303a62db42af86fef855d30e0f78ac8d | [
"MIT"
] | null | null | null | Patrons_v1tov2.py | bulib/alma_patrons_loader | 7b9881cf303a62db42af86fef855d30e0f78ac8d | [
"MIT"
] | null | null | null | Patrons_v1tov2.py | bulib/alma_patrons_loader | 7b9881cf303a62db42af86fef855d30e0f78ac8d | [
"MIT"
] | null | null | null | """
Patrons file incoming from IS&T in a version 1 schema to a version 2 schema
written by J Ammerman [jwacooks] (2015-10-09)
edited by A Sawyer [atla5] (2019-09-04)
"""
# coding: utf-8
# requires python 3.x
# load required modules
import codecs
import os
import xml.etree.ElementTree as ET
import glob
from zipfile import ZipFile
from xml.dom import minidom
import csv
# variables
DEFAULT_XML_ENCODING = "Windows-1252" # should be encoded in the first line of the xml
EXTRANEOUS_XML_LINE = 'xmlns:use="http://com/exlibris/digitool/repository/extsystem/xmlbeans" xsi:schemaLocation="http://com/exlibris/digitool/repository/extsystem/xmlbeans user_012513.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
SYM_BEL = '\u0007' # https://unicode.org/cldr/utility/character.jsp?a=0007
SYM_SYN = '\u0016' # https://unicode.org/cldr/utility/character.jsp?a=0016
SYM_SUB = '\u001a' # https://unicode.org/cldr/utility/character.jsp?a=001a
def prettify(elem):
    """Serialise *elem* and return it as an indented, human-readable XML string."""
    serialized = ET.tostring(elem, 'utf-8')
    return minidom.parseString(serialized).toprettyxml(indent=" ")
if __name__ == "__main__":
    #os.chdir('/Volumes/jwa_drive1/git/patrons')
    file_list = glob.glob('patrons*.xml')
    """get the list of user group codes and descriptions to read into a to enhance the records with the description"""
    reader = csv.DictReader(open('user_groups.csv'))
    user_groups = {}
    for row in reader:
        key = row.pop('Code')
        if key in user_groups:
            # implement your duplicate row handling here
            pass
        user_groups[key] = row['Description']
    # Convert each incoming v1 patron file into a v2 'prep_*' file.
    for f in file_list:
        # create an empty file to write to
        out_file = codecs.open('prep_' + f[len("patrons_"):], 'w', 'utf-8')
        users = ET.Element('users')
        xml_str = codecs.open(f, 'rb', DEFAULT_XML_ENCODING).read()
        # Strip the control characters (BEL/SUB/SYN) that break XML parsing,
        # then drop the 'use:' namespace prefix and the schema boilerplate.
        xml_str = xml_str.replace(SYM_BEL, '').replace(SYM_SUB, '').replace(SYM_SYN, '')
        xml_str = xml_str.replace('use:', '').replace(EXTRANEOUS_XML_LINE, '')
        root = ET.fromstring(xml_str)
        # Rebuild every v1 user record as a v2 <user> element.
        for child in root:
            user = ET.SubElement(users, 'user')
            add_user_details(child, user)
            #add_notes(child,user)
            add_identifiers(child, user)
            add_contacts(child, user)
        out_file.write(prettify(users))
        out_file.close()
    # Bundle the converted files into patrons.zip ('a' appends when the
    # archive already exists).
    file_list = glob.glob('prep*.xml')
    with ZipFile('patrons.zip', 'a') as myzip:
        for f in file_list:
            myzip.write(f)
        myzip.close()
| 41.8875 | 244 | 0.499154 | """
Patrons file incoming from IS&T in a version 1 schema to a version 2 schema
written by J Ammerman [jwacooks] (2015-10-09)
edited by A Sawyer [atla5] (2019-09-04)
"""
# coding: utf-8
# requires python 3.x
# load required modules
import codecs
import os
import xml.etree.ElementTree as ET
import glob
from zipfile import ZipFile
from xml.dom import minidom
import csv
# variables
DEFAULT_XML_ENCODING = "Windows-1252" # should be encoded in the first line of the xml
EXTRANEOUS_XML_LINE = 'xmlns:use="http://com/exlibris/digitool/repository/extsystem/xmlbeans" xsi:schemaLocation="http://com/exlibris/digitool/repository/extsystem/xmlbeans user_012513.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
SYM_BEL = '\u0007' # https://unicode.org/cldr/utility/character.jsp?a=0007
SYM_SYN = '\u0016' # https://unicode.org/cldr/utility/character.jsp?a=0016
SYM_SUB = '\u001a' # https://unicode.org/cldr/utility/character.jsp?a=001a
def prettify(elem):
    """Render *elem* as a pretty-printed (indented) XML string."""
    raw = ET.tostring(elem, 'utf-8')
    dom = minidom.parseString(raw)
    return dom.toprettyxml(indent=" ")
def add_user_details(u, user):
    """Copy the v1 'userDetails' block of *u* onto *user* in the v2 layout.

    Tags listed in u_dict are renamed to their v2 equivalents; record_type,
    status and the expiry/purge dates are normalised; a combined <full_name>
    element is assembled from the first/middle/last name parts; and the
    <user_group> element is annotated with its description from the
    module-level user_groups mapping (loaded from user_groups.csv in main).
    """
    # v1 tag name -> v2 tag name.
    u_dict = {}
    u_dict['recordType'] = 'record_type'
    u_dict['userName'] = 'primary_id'
    u_dict['firstName'] = 'first_name'
    u_dict['middleName'] = 'middle_name'
    u_dict['lastName'] = 'last_name'
    u_dict['userGroup'] = 'user_group'
    u_dict['campusCode'] = 'campus_code'
    u_dict['expiryDate'] = 'expiry_date'
    u_dict['purgeDate'] = 'purge_date'
    u_dict['userType'] = 'account_type'
    u_dict['userTitle'] = 'user_title'
    u_dict['defaultLanguage'] = 'preferred_language'
    full_name = ET.SubElement(user,'full_name')
    fname = mname = lname = '' # initialize each name part to empty string
    for i in u.findall('userDetails'):
        for d in i:
            # Collect the name parts; the middle name is padded with spaces
            # so the concatenation at the bottom stays readable.
            if d.tag == 'firstName':
                fname = d.text if d.text and d.text is not None else ''
            if d.tag == 'middleName':
                mname = ' '+d.text+' ' if d.text and d.text is not None else ' '
            if d.tag == 'lastName':
                lname = d.text + ' ' if d.text and d.text is not None else ''
            # Rename v1 tags in place (mutates the source tree element).
            if d.tag in u_dict:
                d.tag = u_dict[d.tag]
            if d.tag == 'record_type':
                d.text = 'PUBLIC'
                d.set('disc','Public')
            if d.tag == 'status':
                d.text = d.text.upper()
                d.set('disc',d.text.title())
            # Reformat YYYYMMDD into YYYY-MM-DDZ.
            if d.tag == 'expiry_date' or d.tag == 'purge_date':
                date = d.text
                d.text = '{}-{}-{}Z'.format(date[:4], date[4:6], date[-2:])
            # Copy every detail element onto the v2 <user> element.
            e = ET.SubElement(user, d.tag)
            e.text = d.text
            if e.tag == 'user_group':
                e.set('desc', user_groups[e.text])
    name = fname + mname + lname
    full_name.text = name
def add_notes(u, user):
    """Copy every child of each 'userNoteList' element of *u* onto *user*."""
    for note_list in u.findall('userNoteList'):
        for note in note_list:
            copied = ET.SubElement(user, note.tag)
            copied.text = note.text
def add_identifiers(u, user):
    """Rebuild each v1 'userIdentifiers' group of *u* as a v2
    'user_identifiers' element on *user*."""
    for group in u.findall('userIdentifiers'):
        identifiers = ET.SubElement(user, 'user_identifiers')
        for source_id in group:
            target_id = ET.SubElement(identifiers, 'user_identifier')
            for field in source_id:
                # v2 renames 'type' to 'id_type'; other tags pass through.
                copied = ET.SubElement(target_id, field.tag.replace('type', 'id_type'))
                copied.text = field.text
def add_contacts(u, user):
    """Copy the v1 'userAddressList' of *u* onto *user* as a v2 <contact_info>
    block with <addresses>, <emails> and <phones> sections.

    Addresses with an empty line1 are dropped; phones with an empty number
    are dropped; emails with an empty address default to primary_id@bu.edu.
    """
    # v1 tag name -> v2 tag name for address/phone/email child elements.
    u_dict = {}
    u_dict['stateProvince'] = 'state_province'
    u_dict['addressNote'] = 'address_note'
    u_dict['postalCode'] = 'postal_code'
    u_dict['startDate'] = 'start_date'
    u_dict['endDate'] = 'end_date'
    u_dict['phone'] = 'phone_number'
    u_dict['email'] = 'email_address'
    for i in u.findall('userAddressList'):
        contact_info = ET.SubElement(user,'contact_info')
        addresses = ET.SubElement(contact_info,'addresses')
        emails = ET.SubElement(contact_info,'emails')
        phones = ET.SubElement(contact_info,'phones')
        for d in i:
            # --- postal addresses -------------------------------------
            if d.tag == 'userAddress':
                address = ET.SubElement(addresses,'address')
                address.set('segment_type','External')
                address.set('preferred','true')
                for child in d:
                    if child.tag == 'segmentAction':
                        # v1 bookkeeping element; not carried into v2.
                        pass
                    elif child.tag == 'types':
                        address_types = ET.SubElement(address,'address_types')
                        for x in child.findall('userAddressTypes'):
                            address_type = ET.SubElement(address_types,'address_type')
                            address_type.text = x.text
                            if x.text == 'work':
                                address_type.set('desc', 'Work')
                            if x.text == 'home':
                                address_type.set('desc', 'Home')
                            if x.text == 'school':
                                address_type.set('desc', 'School')
                    else:
                        if child.tag in u_dict:
                            child.tag = u_dict[child.tag]
                        f = ET.SubElement(address,child.tag)
                        f.text = child.text
                        # An address without a first line is useless: drop it.
                        if f.tag == 'line1' and f.text is None:
                            addresses.remove(address)
                            break
            # --- phone numbers ----------------------------------------
            if d.tag == 'userPhone':
                phone = ET.SubElement(phones,'phone')
                phone.set('segment_type','External')
                phone.set('preferred','true')
                #phone.set('preferredSMS', 'false')
                for child in d:
                    if child.tag == 'segmentAction':
                        pass
                    elif child.tag == 'types':
                        phone_types = ET.SubElement(phone,'phone_types')
                        for x in child.findall('userPhoneTypes'):
                            phone_type = ET.SubElement(phone_types,'phone_type')
                            phone_type.text = x.text
                            if x.text == 'office':
                                phone_type.set('desc', 'Office')
                            if x.text == 'work':
                                phone_type.set('desc', 'Work')
                            if x.text == 'home':
                                phone_type.set('desc', 'Home')
                            if x.text == 'school':
                                phone_type.set('desc', 'School')
                    else:
                        if child.tag in u_dict:
                            child.tag = u_dict[child.tag]
                        f = ET.SubElement(phone,child.tag)
                        f.text = child.text
                        # Drop phones with no number at all.
                        if f.text is None:
                            phones.remove(phone)
                            #print('No Phone')
            # --- email addresses --------------------------------------
            if d.tag == 'userEmail':
                # NOTE(review): this 'pass' looks like dead leftover code.
                pass
                email = ET.SubElement(emails, 'email')
                email.set('segment_type', 'External')
                email.set('preferred', 'true')
                for child in d:
                    if child.tag == 'segmentAction':
                        pass
                    elif child.tag == 'types':
                        email_types = ET.SubElement(email, 'email_types')
                        for x in child.findall('userEmailTypes'):
                            email_type = ET.SubElement(email_types, 'email_type')
                            email_type.text = x.text
                            if x.text == 'office':
                                email_type.set('desc', 'Office')
                            if x.text == 'work':
                                email_type.set('desc', 'Work')
                            if x.text == 'home':
                                email_type.set('desc', 'Home')
                            if x.text == 'school':
                                email_type.set('desc', 'School')
                    else:
                        if child.tag in u_dict:
                            child.tag = u_dict[child.tag]
                        f = ET.SubElement(email,child.tag)
                        f.text = child.text
                        # Missing email: synthesise one from the primary id
                        # already copied onto <user> by add_user_details.
                        if f.text is None:
                            p_id = user.find('primary_id')
                            f.text = p_id.text+'@bu.edu'
                            #print(p_id.text+'@bu.edu')
if __name__ == "__main__":
    #os.chdir('/Volumes/jwa_drive1/git/patrons')
    file_list = glob.glob('patrons*.xml')
    """get the list of user group codes and descriptions to read into a to enhance the records with the description"""
    reader = csv.DictReader(open('user_groups.csv'))
    user_groups = {}
    for row in reader:
        key = row.pop('Code')
        if key in user_groups:
            # implement your duplicate row handling here
            pass
        user_groups[key] = row['Description']
    # Convert each incoming v1 patron file into a v2 'prep_*' file.
    for f in file_list:
        # create an empty file to write to
        out_file = codecs.open('prep_' + f[len("patrons_"):], 'w', 'utf-8')
        users = ET.Element('users')
        xml_str = codecs.open(f, 'rb', DEFAULT_XML_ENCODING).read()
        # Strip the control characters (BEL/SUB/SYN) that break XML parsing,
        # then drop the 'use:' namespace prefix and the schema boilerplate.
        xml_str = xml_str.replace(SYM_BEL, '').replace(SYM_SUB, '').replace(SYM_SYN, '')
        xml_str = xml_str.replace('use:', '').replace(EXTRANEOUS_XML_LINE, '')
        root = ET.fromstring(xml_str)
        # Rebuild every v1 user record as a v2 <user> element.
        for child in root:
            user = ET.SubElement(users, 'user')
            add_user_details(child, user)
            #add_notes(child,user)
            add_identifiers(child, user)
            add_contacts(child, user)
        out_file.write(prettify(users))
        out_file.close()
    # Bundle the converted files into patrons.zip ('a' appends when the
    # archive already exists).
    file_list = glob.glob('prep*.xml')
    with ZipFile('patrons.zip', 'a') as myzip:
        for f in file_list:
            myzip.write(f)
        myzip.close()
| 7,311 | 0 | 102 |
2b93805938419a556a56e4fdc6969d33a599b84a | 4,430 | py | Python | extract_image_features/VGG19_FC_extract_single_vids.py | schen496/auditory-hallucinations | 31b89df838a9f3c4558c7c3b69dbcd43c7f9de19 | [
"Apache-2.0"
] | 4 | 2018-05-05T10:10:35.000Z | 2021-01-20T22:27:05.000Z | extract_image_features/VGG19_FC_extract_single_vids.py | schen496/auditory-hallucinations | 31b89df838a9f3c4558c7c3b69dbcd43c7f9de19 | [
"Apache-2.0"
] | null | null | null | extract_image_features/VGG19_FC_extract_single_vids.py | schen496/auditory-hallucinations | 31b89df838a9f3c4558c7c3b69dbcd43c7f9de19 | [
"Apache-2.0"
] | 2 | 2018-08-10T02:45:28.000Z | 2018-12-10T05:38:41.000Z | from extract_image_features.video_utils import *
import numpy as np
from extract_image_features.keras_pretrained_models.imagenet_utils import preprocess_input
from keras.models import Model
from keras.preprocessing import image
from extract_image_features.keras_pretrained_models.vgg19 import VGG19
# file saving and loading destinations change whether you are working on laptop or desktop
USE_TITANX = True
### CHANGE THE FILE TO BE READ HERE!!!!
######## LOADING VIDEO FILENAMES
print ("--- Loading video and audio filenames...")
if USE_TITANX:
    video_dir = '/home/zanoi/ZANOI/auditory_hallucination_videos'
else: # Working on MacBook Pro
    video_dir = "/Volumes/SAMSUNG_SSD_256GB/ADV_CV/2-25_VIDAUD/EXPORTS"
# Collect every .mp4 in the video directory.
video_files = [os.path.join(video_dir, file_i)
               for file_i in os.listdir(video_dir)
               if file_i.endswith('.mp4')]
num_videos = len(video_files)
print("num_videos: ", num_videos)
######## LOADING AUDIO FILENAMES
# Pre-extracted audio feature vectors stored as MATLAB .mat files.
audio_feature_dir = "../audio_vectors"
audio_f_files = [os.path.join(audio_feature_dir, file_i)
                 for file_i in os.listdir(audio_feature_dir)
                 if file_i.endswith('.mat')]
num_audio_f = len(audio_f_files)
print (audio_f_files)
print("num_audio_f: ", num_audio_f)
for audio_idx in range(num_audio_f): # Loop over all audio files
    # returnAudioVectors / findMatchingVideos / os / tqdm / h5py presumably
    # come from the star import of extract_image_features.video_utils above
    # -- TODO confirm.
    audio_prefix, audio_vector_length, audio_features = returnAudioVectors(audio_idx, audio_f_files)
    # Find all the linked videos for the given audio vector
    linked_video_f = findMatchingVideos(audio_prefix, video_files)
    print(audio_f_files[audio_idx])
    print(linked_video_f)
    for video_filename in linked_video_f:
        # Return the angle_name to name the file correctly
        angle_name = returnAngleName(video_filename)
        print ("angle_name:", angle_name)
        # Process the videos linked to a particular audio vector
        ######## PROCESS VIDEO TO BLACK AND WHITE
        print("--- Processing video to greyscale...")
        processed_video = processOneVideo(audio_vector_length, video_filename, normalize=False)
        print("processed_video.shape:", processed_video.shape)
        ######### CONCATENATE INTO SPACETIME IMAGE
        print ("--- Concatenating into Spacetime image...")
        window = 3
        space_time_image = createSpaceTimeImagesforOneVideo(processed_video,window) # (1, 8377, 224, 224, 3)
        print ("space_time_image.shape:", space_time_image.shape)
        ########## RUN THE SPACETIME IMAGES THROUGH VGG19
        print ("--- Running through VGG19 FC2 layer...")
        # Build the model
        # NOTE(review): the layer fetched is 'fc1' although the comments and
        # variable names say FC2 -- confirm which layer was intended.
        base_model = VGG19(weights='imagenet')
        model = Model(input=base_model.input, output=base_model.get_layer('fc1').output) # Only take the FC2 layer output
        # Preallocate matrix output
        # NOTE(review): the inline comment above claims a 5-D shape but only
        # four values are unpacked here -- confirm the actual array rank.
        (num_frames, frame_h, frame_w, channels) = space_time_image.shape
        CNN_FC_output = np.zeros((num_frames,1,4096)) # (1,8377,1,4096) -> FC2 outputs dimensions (1,4096)
        for frame_num in tqdm(range(num_frames)):
            img = space_time_image[frame_num]
            x = np.expand_dims(img, axis=0)
            x = preprocess_input(x)
            fc2_features = model.predict(x) # Predict the FC2 features from VGG19, output shape is (1,4096)
            CNN_FC_output[frame_num] = fc2_features # Save the FC2 features to a matrix
        print("CNN_FC_output.shape:", CNN_FC_output.shape) # (1,8377,1,4096)
        ########### CREATE FINAL DATASET, concatenate FC output with audio vectors
        # Normalization of the audio_vectors occurs in this function -> Hanoi forgot to normalize in MATLAB!!!!
        final_audio_vector = createAudioVectorDatasetForOneVid(audio_features, space_time_image.shape) #(8377, 18)
        print ("final_audio_vector.shape:", final_audio_vector.shape)
        ############ PACKAGE AND SAVE THE DATASET
        if USE_TITANX:
            data_extern_dest = '/home/zanoi/ZANOI/auditory_hallucinations_data/FC_2_data/'
        else: # Working on MacBook Pro
            data_extern_dest = '/Volumes/SAMSUNG_SSD_256GB/ADV_CV/data/'
        file_name = data_extern_dest + audio_prefix + angle_name + '_dataX_dataY.h5'
        # Write the paired (image features, audio vectors) dataset to HDF5.
        with h5py.File(file_name, 'w') as hf:
            print ("Writing data to file...")
            hf.create_dataset('dataX', data=CNN_FC_output)
            hf.create_dataset('dataY', data=final_audio_vector)
print ("--- {EVERYTHING COMPLETE HOMIEEEEEEEEE} ---") | 44.3 | 122 | 0.697517 | from extract_image_features.video_utils import *
import numpy as np
from extract_image_features.keras_pretrained_models.imagenet_utils import preprocess_input
from keras.models import Model
from keras.preprocessing import image
from extract_image_features.keras_pretrained_models.vgg19 import VGG19
# file saving and loading destinations change whether you are working on laptop or desktop
USE_TITANX = True
### CHANGE THE FILE TO BE READ HERE!!!!
######## LOADING VIDEO FILENAMES
print ("--- Loading video and audio filenames...")
if USE_TITANX:
    video_dir = '/home/zanoi/ZANOI/auditory_hallucination_videos'
else: # Working on MacBook Pro
    video_dir = "/Volumes/SAMSUNG_SSD_256GB/ADV_CV/2-25_VIDAUD/EXPORTS"
# Collect every .mp4 in the video directory.
video_files = [os.path.join(video_dir, file_i)
               for file_i in os.listdir(video_dir)
               if file_i.endswith('.mp4')]
num_videos = len(video_files)
print("num_videos: ", num_videos)
######## LOADING AUDIO FILENAMES
# Pre-extracted audio feature vectors stored as MATLAB .mat files.
audio_feature_dir = "../audio_vectors"
audio_f_files = [os.path.join(audio_feature_dir, file_i)
                 for file_i in os.listdir(audio_feature_dir)
                 if file_i.endswith('.mat')]
num_audio_f = len(audio_f_files)
print (audio_f_files)
print("num_audio_f: ", num_audio_f)
for audio_idx in range(num_audio_f): # Loop over all audio files
    # returnAudioVectors / findMatchingVideos / os / tqdm / h5py presumably
    # come from the star import of extract_image_features.video_utils above
    # -- TODO confirm.
    audio_prefix, audio_vector_length, audio_features = returnAudioVectors(audio_idx, audio_f_files)
    # Find all the linked videos for the given audio vector
    linked_video_f = findMatchingVideos(audio_prefix, video_files)
    print(audio_f_files[audio_idx])
    print(linked_video_f)
    for video_filename in linked_video_f:
        # Return the angle_name to name the file correctly
        angle_name = returnAngleName(video_filename)
        print ("angle_name:", angle_name)
        # Process the videos linked to a particular audio vector
        ######## PROCESS VIDEO TO BLACK AND WHITE
        print("--- Processing video to greyscale...")
        processed_video = processOneVideo(audio_vector_length, video_filename, normalize=False)
        print("processed_video.shape:", processed_video.shape)
        ######### CONCATENATE INTO SPACETIME IMAGE
        print ("--- Concatenating into Spacetime image...")
        window = 3
        space_time_image = createSpaceTimeImagesforOneVideo(processed_video,window) # (1, 8377, 224, 224, 3)
        print ("space_time_image.shape:", space_time_image.shape)
        ########## RUN THE SPACETIME IMAGES THROUGH VGG19
        print ("--- Running through VGG19 FC2 layer...")
        # Build the model
        # NOTE(review): the layer fetched is 'fc1' although the comments and
        # variable names say FC2 -- confirm which layer was intended.
        base_model = VGG19(weights='imagenet')
        model = Model(input=base_model.input, output=base_model.get_layer('fc1').output) # Only take the FC2 layer output
        # Preallocate matrix output
        # NOTE(review): the inline comment above claims a 5-D shape but only
        # four values are unpacked here -- confirm the actual array rank.
        (num_frames, frame_h, frame_w, channels) = space_time_image.shape
        CNN_FC_output = np.zeros((num_frames,1,4096)) # (1,8377,1,4096) -> FC2 outputs dimensions (1,4096)
        for frame_num in tqdm(range(num_frames)):
            img = space_time_image[frame_num]
            x = np.expand_dims(img, axis=0)
            x = preprocess_input(x)
            fc2_features = model.predict(x) # Predict the FC2 features from VGG19, output shape is (1,4096)
            CNN_FC_output[frame_num] = fc2_features # Save the FC2 features to a matrix
        print("CNN_FC_output.shape:", CNN_FC_output.shape) # (1,8377,1,4096)
        ########### CREATE FINAL DATASET, concatenate FC output with audio vectors
        # Normalization of the audio_vectors occurs in this function -> Hanoi forgot to normalize in MATLAB!!!!
        final_audio_vector = createAudioVectorDatasetForOneVid(audio_features, space_time_image.shape) #(8377, 18)
        print ("final_audio_vector.shape:", final_audio_vector.shape)
        ############ PACKAGE AND SAVE THE DATASET
        if USE_TITANX:
            data_extern_dest = '/home/zanoi/ZANOI/auditory_hallucinations_data/FC_2_data/'
        else: # Working on MacBook Pro
            data_extern_dest = '/Volumes/SAMSUNG_SSD_256GB/ADV_CV/data/'
        file_name = data_extern_dest + audio_prefix + angle_name + '_dataX_dataY.h5'
        # Write the paired (image features, audio vectors) dataset to HDF5.
        with h5py.File(file_name, 'w') as hf:
            print ("Writing data to file...")
            hf.create_dataset('dataX', data=CNN_FC_output)
            hf.create_dataset('dataY', data=final_audio_vector)
print ("--- {EVERYTHING COMPLETE HOMIEEEEEEEEE} ---") | 0 | 0 | 0 |
0dbaa11ed5859d73993510b1194d56c10c209dd7 | 2,392 | py | Python | backend/database.py | Nnadozie/Undergraduate-Project-2020 | 5abc610ed744c2079aadea15fb63a1ea7a8a4a41 | [
"MIT"
] | null | null | null | backend/database.py | Nnadozie/Undergraduate-Project-2020 | 5abc610ed744c2079aadea15fb63a1ea7a8a4a41 | [
"MIT"
] | null | null | null | backend/database.py | Nnadozie/Undergraduate-Project-2020 | 5abc610ed744c2079aadea15fb63a1ea7a8a4a41 | [
"MIT"
] | 1 | 2020-11-12T19:31:55.000Z | 2020-11-12T19:31:55.000Z | import sqlite3 | 36.8 | 125 | 0.641304 | import sqlite3
class Database:
    """SQLite-backed store with one (id, product_name, demand) table per
    product category: fruits, cerials and vegetables."""

    # Whitelist of category tables. Table names are interpolated into the
    # SQL below, so they must only ever come from this tuple; row values
    # always go through '?' placeholders.
    _TABLES = ('fruits', 'cerials', 'vegetables')

    def __init__(self, db):
        """Open (or create) *db* and make sure every category table exists."""
        self.conn = sqlite3.connect(db)
        self.cur = self.conn.cursor()
        for table in self._TABLES:
            self.cur.execute(
                "CREATE TABLE IF NOT EXISTS {} "
                "(id INTEGER PRIMARY KEY, product_name text, demand INTEGER)".format(table))
        self.conn.commit()

    # --- generic helpers shared by the per-category methods ---------------
    def _insert(self, table, product_name, demand):
        """Insert one row; SQLite assigns the id."""
        self.cur.execute("INSERT INTO {} VALUES (NULL,?,?)".format(table), (product_name, demand))
        self.conn.commit()

    def _view(self, table):
        """Return every row as a list of (id, product_name, demand) tuples."""
        self.cur.execute("SELECT * FROM {}".format(table))
        return self.cur.fetchall()

    def _delete(self, table, id):
        """Delete the row with the given id (no-op when absent)."""
        self.cur.execute("DELETE FROM {} WHERE id=?".format(table), (id,))
        self.conn.commit()

    def _update(self, table, id, product_name, demand):
        """Overwrite the name and demand of the row with the given id."""
        self.cur.execute("UPDATE {} SET product_name=?, demand=? WHERE id=?".format(table),
                         (product_name, demand, id))
        self.conn.commit()

    # --- public per-category API (signatures unchanged) -------------------
    def insert_f(self, product_name, demand):
        self._insert('fruits', product_name, demand)

    def insert_c(self, product_name, demand):
        self._insert('cerials', product_name, demand)

    def insert_v(self, product_name, demand):
        self._insert('vegetables', product_name, demand)

    def view_f(self):
        return self._view('fruits')

    def view_c(self):
        return self._view('cerials')

    def view_v(self):
        return self._view('vegetables')

    def delete_f(self, id):
        self._delete('fruits', id)

    def delete_c(self, id):
        self._delete('cerials', id)

    def delete_v(self, id):
        self._delete('vegetables', id)

    def update_f(self, id, product_name, demand):
        self._update('fruits', id, product_name, demand)

    def update_c(self, id, product_name, demand):
        self._update('cerials', id, product_name, demand)

    def update_v(self, id, product_name, demand):
        self._update('vegetables', id, product_name, demand)
def __del__(self):
self.conn.close() | 1,979 | -6 | 405 |
cbd19e46dc730e1067925df857b34d00b8de51bc | 715 | py | Python | 82-remove-duplicates-from-sorted-list-ii/82-remove-duplicates-from-sorted-list-ii.py | MayaScarlet/leetcode-python | 8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a | [
"MIT"
] | null | null | null | 82-remove-duplicates-from-sorted-list-ii/82-remove-duplicates-from-sorted-list-ii.py | MayaScarlet/leetcode-python | 8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a | [
"MIT"
] | null | null | null | 82-remove-duplicates-from-sorted-list-ii/82-remove-duplicates-from-sorted-list-ii.py | MayaScarlet/leetcode-python | 8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next | 34.047619 | 79 | 0.517483 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: "Optional[ListNode]") -> "Optional[ListNode]":
        """Remove every value that appears more than once in a sorted list.

        :param head: head of a sorted singly linked list (nodes expose
            ``.val`` and ``.next``), or None.
        :return: head of the filtered list (may be None).

        Fix: the annotations are now strings so the module imports outside
        the LeetCode harness, where ``Optional``/``ListNode`` are undefined;
        the pre-head sentinel no longer requires the injected ListNode class.
        """
        from types import SimpleNamespace

        # Anonymous pre-head node; only its .next is ever read.
        dummy = SimpleNamespace(val=0, next=head)
        prev = dummy
        curr = head
        while curr and curr.next:
            if curr.val == curr.next.val:
                # Skip the entire run of equal values...
                while curr and curr.next and curr.val == curr.next.val:
                    curr = curr.next
                # ...and the final node of the run as well.
                curr = curr.next
                # prev is NOT advanced: the next node may start another run.
                prev.next = curr
            else:
                prev.next = curr
                prev = prev.next
                curr = curr.next
        return dummy.next
2ac6aab29a56292d86d7087e633f8d550e9012d1 | 224 | py | Python | homeassistant/components/command_line/const.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/command_line/const.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 383 | 2020-03-06T13:01:14.000Z | 2022-03-11T13:14:13.000Z | homeassistant/components/command_line/const.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Allows to configure custom shell commands to turn a value for a sensor."""
CONF_COMMAND_TIMEOUT = "command_timeout"
DEFAULT_TIMEOUT = 15
DOMAIN = "command_line"
PLATFORMS = ["binary_sensor", "cover", "sensor", "switch"]
| 32 | 77 | 0.745536 | """Allows to configure custom shell commands to turn a value for a sensor."""
CONF_COMMAND_TIMEOUT = "command_timeout"
DEFAULT_TIMEOUT = 15
DOMAIN = "command_line"
PLATFORMS = ["binary_sensor", "cover", "sensor", "switch"]
| 0 | 0 | 0 |
855440e9643f0eb06772ef2973ca69d4c22c006a | 1,437 | py | Python | wenku8collector/util.py | lightyears1998/wenku8-collector | 4e167581aea77ef9f8650a1c30abb2bc43407ae0 | [
"Unlicense"
] | null | null | null | wenku8collector/util.py | lightyears1998/wenku8-collector | 4e167581aea77ef9f8650a1c30abb2bc43407ae0 | [
"Unlicense"
] | null | null | null | wenku8collector/util.py | lightyears1998/wenku8-collector | 4e167581aea77ef9f8650a1c30abb2bc43407ae0 | [
"Unlicense"
] | null | null | null | import os
import sys
import hashlib
| 24.775862 | 76 | 0.682672 | import os
import sys
import hashlib
def prepare_catalog_url(url: str):
    """Normalize a catalog URL by trimming surrounding whitespace."""
    cleaned = url.strip()
    return cleaned
def prepare_chapter_url(catalog_url, chapter_url: str):
    """Resolve a (possibly relative) chapter URL against its catalog URL."""
    import urllib.parse
    return urllib.parse.urljoin(catalog_url, chapter_url)
def normalize_filename(filename: str, scheme: str) -> str:
    """Return *filename* guaranteed to end with the suffix for *scheme*.

    :param filename: output file name chosen by the caller.
    :param scheme: one of 'yaml', 'markdown', 'pandoc-markdown'.
    :raises KeyError: for an unknown scheme (unchanged behavior).
    """
    scheme_suffix = {
        'yaml': 'yml',
        'markdown': 'md',
        'pandoc-markdown': 'md'
    }
    suffix = scheme_suffix[scheme]
    # Bug fix: a filename lacking the suffix was previously replaced by the
    # literal '(unknown).<suffix>', discarding the caller's name entirely.
    # Append the suffix to the given filename instead.
    return filename if filename.endswith(suffix) else f'{filename}.{suffix}'
def exit_when_file_exists(output_dir):
    """Abort the whole program if *output_dir* already exists.

    Prints a Chinese notice ("file already exists; pass --override to
    update") and terminates with exit status 1. Returns None when the
    path does not exist.
    """
    if os.path.exists(output_dir):
        print(f"{output_dir}文件已存在。如需更新文件请使用--override参数。")
        sys.exit(1)
def make_output_dir(output_dir):
    """Create *output_dir* (including parents) if it does not already exist.

    Uses the idiomatic ``exist_ok=True`` instead of swallowing
    FileExistsError. Unlike the old try/except, this still raises if the
    path exists but is NOT a directory, surfacing that error early rather
    than letting later writes fail mysteriously.
    """
    os.makedirs(output_dir, exist_ok=True)
def count_volumes_and_chapters(novel):
    """Return (number of volumes, total number of chapters) for *novel*.

    *novel* maps 'volumes' to a list of dicts, each with a 'chapters' list.
    """
    volumes = novel['volumes']
    total_chapters = sum(len(volume['chapters']) for volume in volumes)
    return len(volumes), total_chapters
def get_sha256_hash(stuff: str) -> str:
    """Return the hex-encoded SHA-256 digest of *stuff* (UTF-8 encoded)."""
    return hashlib.sha256(stuff.encode(encoding='utf8')).hexdigest()
def get_local_image_filename(image_url: str) -> str:
    """Map an image URL to a local 'images/<sha256-of-url><suffix>' path."""
    dot_pos = image_url.rfind('.')
    # Keep the extension only when the dot is present and not the first char.
    suffix = image_url[dot_pos:] if dot_pos > 0 else ''
    return 'images/' + get_sha256_hash(image_url) + suffix
| 1,245 | 0 | 184 |
95b7ba2937cd3ef14ebddac91072193e0dc872dc | 625 | py | Python | common/data_refinery_common/migrations/0056_auto_20200529_1230.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 106 | 2018-03-05T16:24:47.000Z | 2022-03-19T19:12:25.000Z | common/data_refinery_common/migrations/0056_auto_20200529_1230.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 1,494 | 2018-02-27T17:02:21.000Z | 2022-03-24T15:10:30.000Z | common/data_refinery_common/migrations/0056_auto_20200529_1230.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 15 | 2019-02-03T01:34:59.000Z | 2022-03-29T01:59:13.000Z | # Generated by Django 2.2.10 on 2020-05-29 12:30
from django.db import migrations, models
| 27.173913 | 79 | 0.6368 | # Generated by Django 2.2.10 on 2020-05-29 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.2.10): adjust DatasetAnnotation
    manager options and set ``is_public`` to default to False.

    Do not edit beyond comments -- Django's migration history depends on
    this file's recorded state.
    """

    # Must apply after migration 0055 of this app.
    dependencies = [
        ("data_refinery_common", "0055_auto_20200528_1946"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="datasetannotation", options={"base_manager_name": "objects"},
        ),
        migrations.AlterModelManagers(name="datasetannotation", managers=[],),
        migrations.AlterField(
            model_name="datasetannotation",
            name="is_public",
            field=models.BooleanField(default=False),
        ),
    ]
| 0 | 510 | 23 |
2a61b2598599a83af65a14cb78ea99ba1dc3b505 | 543 | py | Python | test_nsmc.py | monologg/KoELECTRA-Pipeline | 65f465419d0fffcac2c8df709dc57bf671dc39cd | [
"Apache-2.0"
] | 38 | 2020-05-13T09:34:46.000Z | 2022-01-11T09:04:28.000Z | test_nsmc.py | odus05/KoELECTRA-Pipeline | 65f465419d0fffcac2c8df709dc57bf671dc39cd | [
"Apache-2.0"
] | 2 | 2020-05-14T02:14:43.000Z | 2020-09-20T14:30:14.000Z | test_nsmc.py | odus05/KoELECTRA-Pipeline | 65f465419d0fffcac2c8df709dc57bf671dc39cd | [
"Apache-2.0"
] | 6 | 2020-05-25T07:22:05.000Z | 2022-01-06T05:35:24.000Z | from transformers import ElectraTokenizer, ElectraForSequenceClassification, pipeline
from pprint import pprint
tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-finetuned-nsmc")
model = ElectraForSequenceClassification.from_pretrained("monologg/koelectra-small-finetuned-nsmc")
nsmc = pipeline("sentiment-analysis", tokenizer=tokenizer, model=model)
texts = [
"이 영화는 미쳤다. 넷플릭스가 일상화된 시대에 극장이 존재해야하는 이유를 증명해준다.",
"촬영감독의 영혼까지 갈아넣은 마스터피스",
"보면서 화가날수있습니다.",
"아니 그래서 무슨말이 하고싶은거야 ㅋㅋㅋ",
]
pprint(nsmc(texts))
| 31.941176 | 99 | 0.777164 | from transformers import ElectraTokenizer, ElectraForSequenceClassification, pipeline
from pprint import pprint
# Load the KoELECTRA-small tokenizer and classification model fine-tuned on
# NSMC (Korean movie-review sentiment) from the Hugging Face hub.
tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-finetuned-nsmc")
model = ElectraForSequenceClassification.from_pretrained("monologg/koelectra-small-finetuned-nsmc")
# Bundle tokenizer + model into a ready-to-use sentiment-analysis pipeline.
nsmc = pipeline("sentiment-analysis", tokenizer=tokenizer, model=model)
# Sample Korean movie-review sentences to classify.
texts = [
    "이 영화는 미쳤다. 넷플릭스가 일상화된 시대에 극장이 존재해야하는 이유를 증명해준다.",
    "촬영감독의 영혼까지 갈아넣은 마스터피스",
    "보면서 화가날수있습니다.",
    "아니 그래서 무슨말이 하고싶은거야 ㅋㅋㅋ",
]
# Print the predicted label/score for each sentence.
pprint(nsmc(texts))
| 0 | 0 | 0 |
f99d0196877c84d97b6a4e7ef1eb5f6afaab3c8c | 4,034 | py | Python | experiment_tester.py | GregTheRick/Deep-Learning-for-Fast-Low-Light-Imaging | c2a3f869f9e9a4691900962a0541b41fc17f2f0c | [
"MIT"
] | null | null | null | experiment_tester.py | GregTheRick/Deep-Learning-for-Fast-Low-Light-Imaging | c2a3f869f9e9a4691900962a0541b41fc17f2f0c | [
"MIT"
] | null | null | null | experiment_tester.py | GregTheRick/Deep-Learning-for-Fast-Low-Light-Imaging | c2a3f869f9e9a4691900962a0541b41fc17f2f0c | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
import tensorflow as tf
from SIDLoader import SIDLoader
from ModelBuilder import ModelBuilder
from Experiment import Experiment
import time,datetime,os,glob
path_prefix = '.'
checkpoint_dir = path_prefix+'/chk'
dataset_dir = path_prefix+'/dataset'
black_level = 512
seed = 1337
tensorboard_dir = path_prefix+'/tensorboard/'
#Set initial seed
np.random.seed(seed)
#Load flat matrix
dataset = SIDLoader(dataset_dir, patch_fn=None,keep_raw=False,keep_gt=True, set_id='test')
#Set up experiments
expList = []
expList.append(Experiment(name='Sony',model_fn={'fn':ModelBuilder.build_loadable_cchen},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir='../checkpoint',dataset=dataset))
#expList.append(Experiment(name='cchen_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_noflip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_flip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_self_amp2',model_fn={'fn':ModelBuilder.build_unet_self_scale},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_amp_infer2',model_fn={'fn':ModelBuilder.build_unet_amp_infer},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
epoch = 0
dataset.start()
try:
#test loop
for exp in expList:
exp.create_test_writer()
while(epoch < 1):
#Get batch from batchloader
(x,y,r) = dataset.get_batch()
#start running training step on each GPU
for exp in expList:
exp.test_action(x,y,r)
#Wait for all to finish
for exp in expList:
exp.finish_test_action()
epoch = dataset.readEpoch
if(dataset.readC == 0): #It is the end of the epoch
for exp in expList:
exp.end_of_epoch_test()
except KeyboardInterrupt:
print('Keyboard interrupt accepted')
finally:
print("Stopping dataset")
dataset.stop()
for exp in expList:
exp.model['sess'].close()
| 65.064516 | 220 | 0.789291 | from __future__ import division
import numpy as np
import tensorflow as tf
from SIDLoader import SIDLoader
from ModelBuilder import ModelBuilder
from Experiment import Experiment
import time,datetime,os,glob
path_prefix = '.'
checkpoint_dir = path_prefix+'/chk'
dataset_dir = path_prefix+'/dataset'
black_level = 512
seed = 1337
tensorboard_dir = path_prefix+'/tensorboard/'
#Set initial seed
np.random.seed(seed)
#Load flat matrix
dataset = SIDLoader(dataset_dir, patch_fn=None,keep_raw=False,keep_gt=True, set_id='test')
#Set up experiments
expList = []
expList.append(Experiment(name='Sony',model_fn={'fn':ModelBuilder.build_loadable_cchen},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir='../checkpoint',dataset=dataset))
#expList.append(Experiment(name='cchen_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_noflip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_flip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_self_amp2',model_fn={'fn':ModelBuilder.build_unet_self_scale},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_amp_infer2',model_fn={'fn':ModelBuilder.build_unet_amp_infer},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
epoch = 0
dataset.start()
try:
    #test loop
    # Run exactly one pass over the test set: `epoch` is read back from
    # dataset.readEpoch each iteration and the loop stops once it reaches 1.
    for exp in expList:
        exp.create_test_writer()
    while(epoch < 1):
        #Get batch from batchloader
        (x,y,r) = dataset.get_batch()
        #start running training step on each GPU
        for exp in expList:
            exp.test_action(x,y,r)
        #Wait for all to finish
        for exp in expList:
            exp.finish_test_action()
        epoch = dataset.readEpoch
        if(dataset.readC == 0): #It is the end of the epoch
            for exp in expList:
                exp.end_of_epoch_test()
except KeyboardInterrupt:
    print('Keyboard interrupt accepted')
finally:
    # Always stop the dataset loader and close each experiment's model
    # session, even on error or Ctrl-C.
    print("Stopping dataset")
    dataset.stop()
    for exp in expList:
        exp.model['sess'].close()
| 0 | 0 | 0 |
b16c535a31d4b66caf044c7804a1d432985d6987 | 331 | py | Python | day-05/part2.py | ViljoenJG/aoc-2017 | 771f067c7d4cfb05db6740ce143fde3d275453a8 | [
"MIT"
] | null | null | null | day-05/part2.py | ViljoenJG/aoc-2017 | 771f067c7d4cfb05db6740ce143fde3d275453a8 | [
"MIT"
] | null | null | null | day-05/part2.py | ViljoenJG/aoc-2017 | 771f067c7d4cfb05db6740ce143fde3d275453a8 | [
"MIT"
] | null | null | null | with open('./input.txt') as infile:
jumps = [int(i.rstrip('\n')) for i in infile.readlines()]
steps = 0
idx = 0
while idx < (len(jumps)):
step = jumps[idx]
if step >= 3:
jumps[idx] -= 1
else:
jumps[idx] += 1
idx += step
steps += 1
print(steps)
| 19.470588 | 61 | 0.456193 | with open('./input.txt') as infile:
jumps = [int(i.rstrip('\n')) for i in infile.readlines()]
steps = 0
idx = 0
while idx < (len(jumps)):
step = jumps[idx]
if step >= 3:
jumps[idx] -= 1
else:
jumps[idx] += 1
idx += step
steps += 1
print(steps)
| 0 | 0 | 0 |
4431da9614cf1ad20bd6027a99f9498fc0bb54f3 | 2,179 | py | Python | geneal/applications/template.py | NeveIsa/geneal | 064b0409912088886bf56fe9a729d74dac92a235 | [
"MIT"
] | 47 | 2020-07-10T14:28:52.000Z | 2022-03-25T17:20:52.000Z | geneal/applications/template.py | NeveIsa/geneal | 064b0409912088886bf56fe9a729d74dac92a235 | [
"MIT"
] | 10 | 2020-08-08T16:35:40.000Z | 2022-03-08T00:07:19.000Z | geneal/applications/template.py | NeveIsa/geneal | 064b0409912088886bf56fe9a729d74dac92a235 | [
"MIT"
] | 14 | 2020-08-07T20:49:18.000Z | 2022-03-31T17:55:47.000Z | from geneal.genetic_algorithms import ContinuousGenAlgSolver, BinaryGenAlgSolver
| 38.910714 | 99 | 0.685636 | from geneal.genetic_algorithms import ContinuousGenAlgSolver, BinaryGenAlgSolver
class TemplateChildClass(ContinuousGenAlgSolver, BinaryGenAlgSolver):
    """Template for a user-defined genetic-algorithm solver.

    Subclasses both the continuous and binary solvers; replace the stub
    (``pass``) methods below with problem-specific logic.
    """
    def __init__(self, *args, **kwargs):
        # Both bases are initialized explicitly with the same arguments;
        # cooperative super().__init__() is not used by this template.
        BinaryGenAlgSolver.__init__(self, *args, **kwargs)
        ContinuousGenAlgSolver.__init__(self, *args, **kwargs)
    def fitness_function(self, chromosome):
        """
        Implements the logic that calculates the fitness
        measure of an individual.
        :param chromosome: chromosome of genes representing an individual
        :return: the fitness of the individual
        """
        pass
    def initialize_population(self, pop_size, n_genes):
        """
        Initializes the population of the problem
        :param pop_size: number of individuals in the population
        :param n_genes: number of genes representing the problem. In case of the binary
        solver, it represents the number of genes times the number of bits per gene
        :return: a numpy array with a randomized initialized population
        """
        pass
    def create_offspring(
        self, first_parent, sec_parent, crossover_pt, offspring_number
    ):
        """
        Creates an offspring from 2 parents. It uses the crossover point(s)
        to determine how to perform the crossover
        :param first_parent: first parent's chromosome
        :param sec_parent: second parent's chromosome
        :param crossover_pt: point(s) at which to perform the crossover
        :param offspring_number: whether it's the first or second offspring from a pair of parents.
        Important if there's different logic to be applied to each case.
        :return: the resulting offspring.
        """
        pass
    def mutate_population(self, population, n_mutations):
        """
        Mutates the population according to a given user defined rule.
        :param population: the population at a given iteration
        :param n_mutations: number of mutations to be performed. This number is
        calculated according to mutation_rate, but can be adjusted as needed inside this function
        :return: the mutated population
        """
        pass
| 137 | 1,937 | 23 |
d25e8b5134ec584d57913d2ac0a49ce64bb0e438 | 16,192 | py | Python | trajectories/plot_rnn.py | johannah/trajectories | 282a5bcb5c33e0c75251397f778abac1d5aa1cb6 | [
"MIT"
] | 7 | 2018-07-15T14:17:40.000Z | 2021-05-05T23:46:04.000Z | trajectories/plot_rnn.py | johannah/trajectories | 282a5bcb5c33e0c75251397f778abac1d5aa1cb6 | [
"MIT"
] | null | null | null | trajectories/plot_rnn.py | johannah/trajectories | 282a5bcb5c33e0c75251397f778abac1d5aa1cb6 | [
"MIT"
] | 2 | 2018-07-17T23:20:27.000Z | 2021-05-05T23:46:07.000Z | # from KK
import matplotlib
matplotlib.use('Agg')
from rnn import RNN
from copy import deepcopy
import time
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.clip_grad import clip_grad_norm
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.init as init
from IPython import embed
import shutil
from datasets import EpisodicFroggerDataset, EpisodicDiffFroggerDataset
from collections import OrderedDict
from imageio import imread, imwrite
from glob import glob
from vq_vae_small import AutoEncoder
from conv_vae import Encoder, Decoder, VAE
from utils import discretized_mix_logistic_loss
from utils import sample_from_discretized_mix_logistic
worst_inds = np.load('worst_inds.npz')['arr_0']
all_inds = range(800)
best_inds = np.array([w for w in all_inds if w not in list(worst_inds)])
torch.manual_seed(139)
pcad = np.load('pca_components_vae.npz')
V = pcad['V']
vae_mu_mean = pcad['Xmean']
vae_mu_std = pcad['Xstd']
Xpca_std = pcad['Xpca_std']
dparams = np.load('vae_diff_params.npz')
mu_diff_mean = dparams['mu_diff_mean'][best_inds]
mu_diff_std = dparams['mu_diff_std'][best_inds]
sig_diff_mean = dparams['sig_diff_mean'][best_inds]
sig_diff_std = dparams['sig_diff_std'][best_inds]
if __name__ == '__main__':
    import argparse
    # Default locations for the dataset and for saved model checkpoints.
    default_base_datadir = '/localdata/jhansen/trajectories_frames/dataset/'
    default_base_savedir = '/localdata/jhansen/trajectories_frames/saved/'
    default_vae_model_loadpath = os.path.join(default_base_savedir, 'conv_vae.pkl')
    #default_rnn_model_loadpath = os.path.join(default_base_savedir, 'rnn_vae.pkl')
    default_rnn_model_loadpath = os.path.join(default_base_savedir, 'rnn_model_epoch_000152_loss0.000166.pkl')
    parser = argparse.ArgumentParser(description='train vq-vae for frogger images')
    parser.add_argument('-c', '--cuda', action='store_true', default=False)
    parser.add_argument('-d', '--datadir', default=default_base_datadir)
    parser.add_argument('-v', '--vae_model_loadpath', default=default_vae_model_loadpath)
    parser.add_argument('-t', '--transform', default='std')
    parser.add_argument('-r', '--rnn_model_loadpath', default=default_rnn_model_loadpath)
    parser.add_argument('-dt', '--data_type', default='diff')
    parser.add_argument('-hs', '--hidden_size', default=512, type=int)
    parser.add_argument('-n', '--num_train_limit', default=-1, help='debug flag for limiting number of training images to use. defaults to using all images', type=int)
    parser.add_argument('-g', '--generate_results', action='store_true', default=False, help='generate dataset of codes')
    args = parser.parse_args()
    use_cuda = args.cuda
    dsize = 40
    nr_mix = nr_logistic_mix = 10
    ## mean and scale for each components and weighting bt components (10+2*10)
    probs_size = (2*nr_mix)+nr_mix
    latent_size = 32
    # Build the conv VAE: encoder -> 32-d latent -> decoder emitting
    # discretized-mixture-of-logistics parameters.
    encoder = Encoder(latent_size)
    decoder = Decoder(latent_size, probs_size)
    vae = VAE(encoder, decoder, use_cuda)
    if use_cuda:
        print("using gpu")
        vae = vae.cuda()
        vae.encoder = vae.encoder.cuda()
        vae.decoder = vae.decoder.cuda()
    vae_epoch = 0
    # Restore VAE weights; drop into an IPython shell (embed) on a bad path
    # so the user can investigate interactively.
    if args.vae_model_loadpath is not None:
        if os.path.exists(args.vae_model_loadpath):
            vae_model_dict = torch.load(args.vae_model_loadpath)
            vae.load_state_dict(vae_model_dict['state_dict'])
            vae_epoch = vae_model_dict['epoch']
            print('loaded vae checkpoint at epoch: {} from {}'.format(vae_epoch, args.vae_model_loadpath))
        else:
            print('could not find checkpoint at {}'.format(args.vae_model_loadpath))
            embed()
    else:
        print("no VAE path provided")
    # setup rnn
    hidden_size = args.hidden_size
    # input after only good parts of vae taken
    input_size = 50
    seq_length = 168
    lr = 1e-4
    rnn = RNN(input_size,hidden_size)
    # NOTE(review): this rebinds the name `optim`, shadowing the imported
    # torch.optim module for the rest of the script.
    optim = optim.Adam(rnn.parameters(), lr=lr, weight_decay=1e-6)
    if use_cuda:
        rnn.cuda()
    rnn_epoch = 0
    # Restore RNN weights the same way as the VAE above.
    if args.rnn_model_loadpath is not None:
        if os.path.exists(args.rnn_model_loadpath):
            rnn_model_dict = torch.load(args.rnn_model_loadpath)
            rnn.load_state_dict(rnn_model_dict['state_dict'])
            rnn_epoch = rnn_model_dict['epoch']
            print('loaded rnn checkpoint at epoch: {} from {}'.format(rnn_epoch, args.rnn_model_loadpath))
        else:
            print('could not find rnn checkpoint at {}'.format(args.rnn_model_loadpath))
            embed()
    else:
        print("no RNN path provided")
    #test_dir = 'episodic_vae_test_results'
    #test_dir = 'episodic_vae_test_tiny/'
    test_dir = 'episodic_vae_test_tiny/'
    # Derive the train dir and the rnn-generated output dir names from the
    # test dir name by substring substitution.
    train_dir = test_dir.replace('test', 'train')
    gen_test_dir = test_dir.replace('episodic_', 'episodic_rnn_')
    gen_train_dir = train_dir.replace('episodic_', 'episodic_rnn_')
    test_data_path = os.path.join(args.datadir,test_dir)
    train_data_path = os.path.join(args.datadir,train_dir)
    if args.data_type == 'diff':
        test_data_loader = DataLoader(EpisodicDiffFroggerDataset(test_data_path, transform=args.transform), batch_size=32, shuffle=True)
        #train_data_loader = DataLoader(EpisodicDiffFroggerDataset(train_data_path, transform=args.transform, limit=args.num_train_limit), shuffle=True)
    else:
        test_data_loader = DataLoader(EpisodicFroggerDataset(test_data_path, transform=args.transform), batch_size=32, shuffle=True)
        #train_data_loader = DataLoader(EpisodicFroggerDataset(train_data_path, transform=args.transform, limit=args.num_train_limit), shuffle=True)
    test_true_data_path = os.path.join(args.datadir, 'imgs_test')
    #train_true_data_path = os.path.join(args.datadir, 'imgs_train')
    generate_imgs(test_data_loader,os.path.join(args.datadir, gen_test_dir), test_true_data_path, args.data_type, args.transform)
    #generate_imgs(train_data_loader,os.path.join(args.datadir, gen_train_dir), train_true_data_path)
    embed()
| 48.190476 | 167 | 0.661623 | # from KK
import matplotlib
matplotlib.use('Agg')
from rnn import RNN
from copy import deepcopy
import time
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.clip_grad import clip_grad_norm
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.init as init
from IPython import embed
import shutil
from datasets import EpisodicFroggerDataset, EpisodicDiffFroggerDataset
from collections import OrderedDict
from imageio import imread, imwrite
from glob import glob
from vq_vae_small import AutoEncoder
from conv_vae import Encoder, Decoder, VAE
from utils import discretized_mix_logistic_loss
from utils import sample_from_discretized_mix_logistic
worst_inds = np.load('worst_inds.npz')['arr_0']
all_inds = range(800)
best_inds = np.array([w for w in all_inds if w not in list(worst_inds)])
torch.manual_seed(139)
pcad = np.load('pca_components_vae.npz')
V = pcad['V']
vae_mu_mean = pcad['Xmean']
vae_mu_std = pcad['Xstd']
Xpca_std = pcad['Xpca_std']
dparams = np.load('vae_diff_params.npz')
mu_diff_mean = dparams['mu_diff_mean'][best_inds]
mu_diff_std = dparams['mu_diff_std'][best_inds]
sig_diff_mean = dparams['sig_diff_mean'][best_inds]
sig_diff_std = dparams['sig_diff_std'][best_inds]
def get_cuts(length, window_size):
    """Partition range [0, length) into consecutive (start, end) windows.

    Windows are *window_size* long except possibly the last, which is
    extended to end exactly at *length*. Returns an iterator of
    (start, end) pairs suitable for slicing.

    Fix: the original used ``np.arange(..., dtype=np.int)``; the ``np.int``
    alias was removed in NumPy >= 1.24, so this crashed on modern numpy.
    Plain ``range`` produces the same start points as Python ints.
    """
    assert window_size < length
    st_pts = list(range(0, length, window_size))
    end_pts = st_pts[1:]
    if end_pts[-1] != length:
        end_pts.append(length)
    else:
        # Unreachable for positive length (range() excludes `length`);
        # kept for parity with the original logic.
        print("cutting start")
        st_pts = st_pts[:-1]
    return zip(st_pts, end_pts)
def generate_imgs(dataloader,output_filepath,true_img_path,data_type,transform):
if not os.path.exists(output_filepath):
os.makedirs(output_filepath)
for batch_idx, (data_mu_diff_scaled, data_mu_diff, data_mu_orig, data_sigma_diff_scaled, data_sigma_diff, data_sigma_orig, name) in enumerate(dataloader):
# data_mu_orig will be one longer than the diff versions
batch_size = data_mu_diff_scaled.shape[0]
# predict one less time step than availble (first is input)
n_timesteps = data_mu_diff_scaled.shape[1]
vae_input_size = 800
#######################
# get rnn details
#######################
rnn_data = data_mu_diff_scaled.permute(1,0,2)
seq = Variable(torch.FloatTensor(rnn_data), requires_grad=False)
h1_tm1 = Variable(torch.FloatTensor(np.zeros((batch_size, hidden_size))), requires_grad=False)
c1_tm1 = Variable(torch.FloatTensor(np.zeros((batch_size, hidden_size))), requires_grad=False)
h2_tm1 = Variable(torch.FloatTensor(np.zeros((batch_size, hidden_size))), requires_grad=False)
c2_tm1 = Variable(torch.FloatTensor(np.zeros((batch_size, hidden_size))), requires_grad=False)
if use_cuda:
mus_vae = mus_vae.cuda()
seq = seq.cuda()
out_mu = out_mu.cuda()
h1_tm1 = h1_tm1.cuda()
c1_tm1 = c1_tm1.cuda()
h2_tm1 = h2_tm1.cuda()
c2_tm1 = c2_tm1.cuda()
# get time offsets correct
x = seq[:-1]
# put initial step in
rnn_outputs = [seq[0]]
gt_outputs = [seq[0]]
nrnn_outputs = [seq[0].cpu().data.numpy()]
ngt_outputs = [seq[0].cpu().data.numpy()]
for i in range(len(x)):
# number of frames to start with
#if i < 4:
output, h1_tm1, c1_tm1, h2_tm1, c2_tm1 = rnn(x[i], h1_tm1, c1_tm1, h2_tm1, c2_tm1)
#else:
# output, h1_tm1, c1_tm1, h2_tm1, c2_tm1 = rnn(output, h1_tm1, c1_tm1, h2_tm1, c2_tm1)
nrnn_outputs+=[output.cpu().data.numpy()]
rnn_outputs+=[output]
# put ground truth in to check pipeline
ngt_outputs+=[seq[i+1].cpu().data.numpy()]
gt_outputs+=[seq[i+1]]
print(output.sum().data[0],seq[i+1].sum().data[0])
# vae data shoud be batch,timestep(example),features
# 0th frame is the same here
gt_rnn_pred = torch.stack(gt_outputs, 0)
rnn_pred = torch.stack(rnn_outputs, 0)
# 0th frame is the same here
rnn_mu_diff_scaled = rnn_pred.permute(1,0,2).data.numpy()
gt_rnn_mu_diff_scaled = gt_rnn_pred.permute(1,0,2).data.numpy()
nrnn_mu_diff_scaled = np.swapaxes(np.array(nrnn_outputs),0,1)
ngt_rnn_mu_diff_scaled = np.swapaxes(np.array(ngt_outputs),0,1)
# only use relevant mus
orig_mu_placeholder = Variable(torch.FloatTensor(np.zeros((n_timesteps, vae_input_size))), requires_grad=False)
diff_mu_placeholder = Variable(torch.FloatTensor(np.zeros((n_timesteps, vae_input_size))), requires_grad=False)
diff_mu_unscaled_placeholder = Variable(torch.FloatTensor(np.zeros((n_timesteps, vae_input_size))), requires_grad=False)
diff_mu_unscaled_rnn_placeholder = Variable(torch.FloatTensor(np.zeros((n_timesteps, vae_input_size))), requires_grad=False)
gt_diff_mu_unscaled_rnn_placeholder = Variable(torch.FloatTensor(np.zeros((n_timesteps, vae_input_size))), requires_grad=False)
if transform == "std":
print("removing standard deviation transform")
# convert to numpy so broadcasting works
rnn_mu_diff_unscaled = torch.FloatTensor((rnn_mu_diff_scaled*mu_diff_std)+mu_diff_mean[None])
gt_rnn_mu_diff_unscaled = torch.FloatTensor((gt_rnn_mu_diff_scaled*mu_diff_std)+mu_diff_mean[None])
data_mu_diff_unscaled = torch.FloatTensor((data_mu_diff_scaled.numpy()*mu_diff_std)+mu_diff_mean[None])
else:
print("no transform")
rnn_mu_diff_unscaled = rnn_mu_diff_scaled
gt_rnn_mu_diff_unscaled = gt_rnn_mu_diff_scaled
data_mu_diff_unscaled = data_mu_diff_scaled
# go through each distinct episode (should be length of 167)
for e in range(batch_size):
basename = os.path.split(name[e])[1].replace('.npz', '')
if not e:
print("starting %s"%basename)
basepath = os.path.join(output_filepath, basename)
# reconstruct rnn vae
# now the size going through the decoder is 169x32x5x5
# original data is one longer since there was no diff applied
ep_mu_orig = data_mu_orig[e,1:]
ep_mu_diff = data_mu_diff[e]
ep_mu_diff_unscaled = data_mu_diff_unscaled[e]
ep_mu_diff_unscaled_rnn = rnn_mu_diff_unscaled[e]
gt_ep_mu_diff_unscaled_rnn = gt_rnn_mu_diff_unscaled[e]
primer_frame = data_mu_orig[e,0,:]
# need to reconstruct from original
# get the first frame from the original dataset to add diffs to
# data_mu_orig will be one frame longer
# unscale the scaled version
ep_mu_diff[0] += primer_frame
ep_mu_diff_unscaled[0] += primer_frame
ep_mu_diff_unscaled_rnn[0] += primer_frame
gt_ep_mu_diff_unscaled_rnn[0] += primer_frame
print("before diff add")
for diff_frame in range(1,n_timesteps):
#print("adding diff to %s" %diff_frame)
ep_mu_diff[diff_frame] += ep_mu_diff[diff_frame-1]
ep_mu_diff_unscaled[diff_frame] += ep_mu_diff_unscaled[diff_frame-1]
ep_mu_diff_unscaled_rnn[diff_frame] += ep_mu_diff_unscaled_rnn[diff_frame-1]
gt_ep_mu_diff_unscaled_rnn[diff_frame] += gt_ep_mu_diff_unscaled_rnn[diff_frame-1]
rnn_mu_img = ep_mu_diff_unscaled_rnn.numpy()
gt_rnn_mu_img = gt_ep_mu_diff_unscaled_rnn.numpy()
ff,axf = plt.subplots(1,2, figsize=(5,10))
axf[0].imshow(gt_rnn_mu_img, origin='lower')
axf[0].set_title("gt_rnn_mu")
axf[1].imshow(rnn_mu_img, origin='lower')
axf[1].set_title("rnn_mu")
ff.tight_layout()
fimg_name = basepath+'_rnn_mu_plot.png'
fimg_name = fimg_name.replace('_frame_%05d'%0, '')
print("plotted %s" %fimg_name)
plt.savefig(fimg_name)
plt.close()
orig_mu_placeholder[:,best_inds] = Variable(torch.FloatTensor(ep_mu_orig))
diff_mu_placeholder[:,best_inds] = Variable(torch.FloatTensor(ep_mu_diff))
diff_mu_unscaled_placeholder[:,best_inds] = Variable(torch.FloatTensor(ep_mu_diff_unscaled))
diff_mu_unscaled_rnn_placeholder[:,best_inds] = Variable(torch.FloatTensor(ep_mu_diff_unscaled_rnn))
gt_diff_mu_unscaled_rnn_placeholder[:,best_inds] = Variable(torch.FloatTensor(gt_ep_mu_diff_unscaled_rnn))
#for i in range(1,diff_mu_unscaled_rnn_placeholder.shape[0]):
# diff_mu_unscaled_rnn_placeholder[i] = gt_diff_mu_unscaled_rnn_placeholder[i]
# add a placeholder here if you want to process it
mu_types = OrderedDict([('orig',orig_mu_placeholder),
# ('diff',diff_mu_placeholder),
# ('diff_unscaled',diff_mu_unscaled_placeholder),
('gtrnn',gt_diff_mu_unscaled_rnn_placeholder),
('rnn',diff_mu_unscaled_rnn_placeholder),
])
mu_reconstructed = OrderedDict()
# get reconstructed image for each type
for xx, mu_output_name in enumerate(mu_types.keys()):
mu_output = mu_types[mu_output_name]
cuts = get_cuts(mu_output.shape[0], 1)
print(mu_output_name, mu_output.sum().data[0], mu_output[0].sum().data[0])
x_tildes = []
for (s,e) in cuts:
mu_batch = mu_output[s:e]
# only put part of the episdoe through
x_d = vae.decoder(mu_batch.contiguous().view(mu_batch.shape[0], 32, 5, 5))
x_tilde = sample_from_discretized_mix_logistic(x_d, nr_logistic_mix, deterministic=True)
x_tildes.append(x_tilde.cpu().data.numpy())
nx_tilde = np.array(x_tildes)[:,0,0]
inx_tilde = ((0.5*nx_tilde+0.5)*255).astype(np.uint8)
mu_reconstructed[mu_output_name] = inx_tilde
for frame_num in range(n_timesteps):
true_img_name = os.path.join(true_img_path, basename.replace('_conv_vae', '.png')).replace('frame_%05d'%0, 'frame_%05d'%frame_num)
true_img = imread(true_img_name)
print("true img %s" %true_img_name)
num_imgs = len(mu_reconstructed.keys())+1
f, ax = plt.subplots(1,num_imgs, figsize=(3*num_imgs,3))
ax[0].imshow(true_img, origin='lower')
ax[0].set_title('true frame %04d'%frame_num)
for ii, mu_output_name in enumerate(mu_reconstructed.keys()):
ax[ii+1].imshow(mu_reconstructed[mu_output_name][frame_num], origin='lower')
ax[ii+1].set_title(mu_output_name)
f.tight_layout()
img_name = basepath+'_rnn_plot.png'
img_name = img_name.replace('frame_%05d'%0, 'frame_%05d'%frame_num)
print("plotted %s" %img_name)
plt.savefig(img_name)
plt.close()
if __name__ == '__main__':
    import argparse

    # Default dataset / checkpoint locations on the original training machine.
    default_base_datadir = '/localdata/jhansen/trajectories_frames/dataset/'
    default_base_savedir = '/localdata/jhansen/trajectories_frames/saved/'
    default_vae_model_loadpath = os.path.join(default_base_savedir, 'conv_vae.pkl')
    #default_rnn_model_loadpath = os.path.join(default_base_savedir, 'rnn_vae.pkl')
    default_rnn_model_loadpath = os.path.join(default_base_savedir, 'rnn_model_epoch_000152_loss0.000166.pkl')
    parser = argparse.ArgumentParser(description='train vq-vae for frogger images')
    parser.add_argument('-c', '--cuda', action='store_true', default=False)
    parser.add_argument('-d', '--datadir', default=default_base_datadir)
    parser.add_argument('-v', '--vae_model_loadpath', default=default_vae_model_loadpath)
    parser.add_argument('-t', '--transform', default='std')
    parser.add_argument('-r', '--rnn_model_loadpath', default=default_rnn_model_loadpath)
    parser.add_argument('-dt', '--data_type', default='diff')
    parser.add_argument('-hs', '--hidden_size', default=512, type=int)
    parser.add_argument('-n', '--num_train_limit', default=-1, help='debug flag for limiting number of training images to use. defaults to using all images', type=int)
    parser.add_argument('-g', '--generate_results', action='store_true', default=False, help='generate dataset of codes')
    args = parser.parse_args()
    use_cuda = args.cuda
    # Build the convolutional VAE whose decoder maps latents back to frames.
    dsize = 40
    nr_mix = nr_logistic_mix = 10
    ## mean and scale for each components and weighting bt components (10+2*10)
    probs_size = (2*nr_mix)+nr_mix
    latent_size = 32
    encoder = Encoder(latent_size)
    decoder = Decoder(latent_size, probs_size)
    vae = VAE(encoder, decoder, use_cuda)
    if use_cuda:
        print("using gpu")
        vae = vae.cuda()
        vae.encoder = vae.encoder.cuda()
        vae.decoder = vae.decoder.cuda()
    vae_epoch = 0
    # Restore VAE weights from the checkpoint, dropping into IPython on failure.
    if args.vae_model_loadpath is not None:
        if os.path.exists(args.vae_model_loadpath):
            vae_model_dict = torch.load(args.vae_model_loadpath)
            vae.load_state_dict(vae_model_dict['state_dict'])
            vae_epoch = vae_model_dict['epoch']
            print('loaded vae checkpoint at epoch: {} from {}'.format(vae_epoch, args.vae_model_loadpath))
        else:
            print('could not find checkpoint at {}'.format(args.vae_model_loadpath))
            embed()
    else:
        print("no VAE path provided")
    # setup rnn
    hidden_size = args.hidden_size
    # input after only good parts of vae taken
    input_size = 50
    seq_length = 168
    lr = 1e-4
    rnn = RNN(input_size,hidden_size)
    # BUGFIX: the original assigned the optimizer to the name `optim`, which
    # shadowed the torch.optim module; use a distinct name for the instance.
    optimizer = optim.Adam(rnn.parameters(), lr=lr, weight_decay=1e-6)
    if use_cuda:
        rnn.cuda()
    rnn_epoch = 0
    # Restore RNN weights from the checkpoint, dropping into IPython on failure.
    if args.rnn_model_loadpath is not None:
        if os.path.exists(args.rnn_model_loadpath):
            rnn_model_dict = torch.load(args.rnn_model_loadpath)
            rnn.load_state_dict(rnn_model_dict['state_dict'])
            rnn_epoch = rnn_model_dict['epoch']
            print('loaded rnn checkpoint at epoch: {} from {}'.format(rnn_epoch, args.rnn_model_loadpath))
        else:
            print('could not find rnn checkpoint at {}'.format(args.rnn_model_loadpath))
            embed()
    else:
        print("no RNN path provided")
    #test_dir = 'episodic_vae_test_results'
    #test_dir = 'episodic_vae_test_tiny/'
    test_dir = 'episodic_vae_test_tiny/'
    train_dir = test_dir.replace('test', 'train')
    # Output dirs mirror the input dirs with an 'episodic_rnn_' prefix.
    gen_test_dir = test_dir.replace('episodic_', 'episodic_rnn_')
    gen_train_dir = train_dir.replace('episodic_', 'episodic_rnn_')
    test_data_path = os.path.join(args.datadir,test_dir)
    train_data_path = os.path.join(args.datadir,train_dir)
    if args.data_type == 'diff':
        test_data_loader = DataLoader(EpisodicDiffFroggerDataset(test_data_path, transform=args.transform), batch_size=32, shuffle=True)
        #train_data_loader = DataLoader(EpisodicDiffFroggerDataset(train_data_path, transform=args.transform, limit=args.num_train_limit), shuffle=True)
    else:
        test_data_loader = DataLoader(EpisodicFroggerDataset(test_data_path, transform=args.transform), batch_size=32, shuffle=True)
        #train_data_loader = DataLoader(EpisodicFroggerDataset(train_data_path, transform=args.transform, limit=args.num_train_limit), shuffle=True)
    test_true_data_path = os.path.join(args.datadir, 'imgs_test')
    #train_true_data_path = os.path.join(args.datadir, 'imgs_train')
    generate_imgs(test_data_loader,os.path.join(args.datadir, gen_test_dir), test_true_data_path, args.data_type, args.transform)
    #generate_imgs(train_data_loader,os.path.join(args.datadir, gen_train_dir), train_true_data_path)
    embed()
| 10,025 | 0 | 46 |
ba8d1abbc22eae30181b8f1a8dde0a4a86f63b74 | 3,391 | py | Python | qctests/AOML_climatology_test.py | BillMills/AutoQC | cb56fa5bb2115170ec204edd84e2d69ce84be820 | [
"MIT"
] | 17 | 2015-01-31T00:35:58.000Z | 2020-10-26T19:01:46.000Z | qctests/AOML_climatology_test.py | castelao/AutoQC | eb85422c1a6a5ff965a1ef96b3cb29240a66b506 | [
"MIT"
] | 163 | 2015-01-21T03:44:42.000Z | 2022-01-09T22:03:12.000Z | qctests/AOML_climatology_test.py | BillMills/AutoQC | cb56fa5bb2115170ec204edd84e2d69ce84be820 | [
"MIT"
] | 11 | 2015-06-04T14:32:22.000Z | 2021-04-11T05:18:09.000Z | # climatology test adpated from Patrick Halsall's
# ftp://ftp.aoml.noaa.gov/phod/pub/bringas/XBT/AQC/AOML_AQC_2018/codes/qc_checks/clima_checker.py
import sys, numpy
import util.AOMLinterpolation as interp_helper
import util.AOMLnetcdf as read_netcdf
def climatology_check(temperature, interpMNTemp, interpSDTemp, sigmaFactor=5.0):
    """Grade one temperature against the interpolated climatology.

    temperature: observed temperature (float)
    interpMNTemp: interpolated climatological mean temperature
    interpSDTemp: interpolated climatological standard deviation
    sigmaFactor: tolerated deviation from the mean, in standard deviations

    Returns a QC grade: 0 when the climatology is unusable (fill value or
    non-positive spread), 1 when the observation is within tolerance,
    4 otherwise.
    """
    fill_value = 99999.99
    # Climatology is unusable: fill values or a degenerate spread.
    unusable = (interpMNTemp == fill_value
                or interpSDTemp == fill_value
                or interpSDTemp <= 0.0)
    if unusable:
        return 0
    deviation = abs(temperature - interpMNTemp) / interpSDTemp
    return 1 if deviation <= sigmaFactor else 4
def subset_climatology_data(longitude, latitude, statType, coordRange=1, filePathName='data/woa13_00_025.nc'):
    """Extract a small window of WOA13 climatology data around a position.

    longitude, latitude: floats giving the profile position.
    statType: which statistic to read; either 'analyzed mean' or
        'standard deviations'.
    coordRange: half-width, in degrees, of the lat/lon window to extract.
    filePathName: path (relative to the repo root) of the climatology netCDF.

    Returns three lists: temperatures (one sub-list per grid point, mapping
    one-to-one onto the lat/lon coordinate list), the depth levels, and the
    lat/lon coordinate pairs.  Returns three empty lists when statType is
    not recognised.
    """
    # Translate the human-readable statistic name into its netCDF field name.
    field_for_stat = {"analyzed mean": "t_an", "standard deviations": "t_sd"}
    fieldType = field_for_stat.get(statType)
    if fieldType is None:
        sys.stderr.write("Cannot process climatology file with a statistical "
                         "field as " + statType + "\n")
        return [], [], []
    # The final element of subset_data's return (the time axis) is unused here.
    latLonDepthTempList, depthColumns, latLonList, _ = read_netcdf.subset_data(
        longitude, latitude, filePathName, coordRange, True, fieldType)
    return latLonDepthTempList, depthColumns, latLonList
| 40.855422 | 193 | 0.730168 | # climatology test adpated from Patrick Halsall's
# ftp://ftp.aoml.noaa.gov/phod/pub/bringas/XBT/AQC/AOML_AQC_2018/codes/qc_checks/clima_checker.py
import sys, numpy
import util.AOMLinterpolation as interp_helper
import util.AOMLnetcdf as read_netcdf
def test(p, parameters):
    """Run the AOML climatology QC check on a profile.

    p: profile object exposing n_levels(), t(), z(), longitude() and
       latitude(); t() and z() return masked arrays (presumably a
       wodpy-style profile -- TODO confirm against the caller).
    parameters: unused here; kept for the common QC-test call signature.

    Returns a boolean numpy array with one entry per level; True flags a
    level whose temperature deviates too far from the WOA13 climatology
    (climatology_check grade >= 4).
    """
    qc = numpy.zeros(p.n_levels(), dtype=bool)
    # check for gaps in data
    isTemperature = (p.t().mask==False)
    isDepth = (p.z().mask==False)
    isData = isTemperature & isDepth
    # extract climatology data
    lonlatWithTempsList1, depthColumns1, latLonsList1 = subset_climatology_data(p.longitude(), p.latitude(), "analyzed mean")
    lonlatWithTempsList2, depthColumns2, latLonsList2 = subset_climatology_data(p.longitude(), p.latitude(), "standard deviations")
    for i in range(p.n_levels()):
        # find best interpolated temperature and standard deviation at this depth
        if not isData[i]: continue
        interpTemp = interp_helper.temperature_interpolation_process(p.longitude(), p.latitude(), p.z()[i], depthColumns1, latLonsList1, lonlatWithTempsList1, False, "climaInterpTemperature")
        # 99999.99 is the climatology fill value: no usable mean at this depth
        if interpTemp == 99999.99:
            continue
        interpTempSD = interp_helper.temperature_interpolation_process(p.longitude(), p.latitude(), p.z()[i], depthColumns2, latLonsList2, lonlatWithTempsList2, False, "climaInterpStandardDev")
        if interpTempSD == 99999.99:
            continue
        # check if temperature at this depth is sufficiently close to the climatological expectation
        qc[i] = climatology_check(p.t()[i], interpTemp, interpTempSD) >= 4
    return qc
def climatology_check(temperature, interpMNTemp, interpSDTemp, sigmaFactor=5.0):
    """Return a QC grade for a temperature versus the local climatology.

    temperature: the observed temperature (float).
    interpMNTemp: interpolated climatological mean at this location/depth.
    interpSDTemp: interpolated climatological standard deviation.
    sigmaFactor: how many standard deviations away is still acceptable.

    Grades: 0 -> climatology unusable (fill value 99999.99 or sd <= 0),
    1 -> observation within sigmaFactor standard deviations, 4 -> outlier.
    """
    MISSING = 99999.99
    # Bail out early when the climatology gives us nothing to compare against.
    if interpSDTemp <= 0.0:
        return 0
    if interpMNTemp == MISSING or interpSDTemp == MISSING:
        return 0
    if abs(temperature - interpMNTemp) / interpSDTemp <= sigmaFactor:
        return 1
    return 4
def subset_climatology_data(longitude, latitude, statType, coordRange=1, filePathName='data/woa13_00_025.nc'):
    """
    longitude: float
    latitude: float
    statType: either 'analyzed mean' or 'standard deviations'
    coordRange: degrees plus / minus around longitude and latitude to consider.
    filePathName: relative path from root of climatology file
    Return list of lists with temperatures that maps one to one with list
    of lists with tuples of latitude and longitude coordinates, list for
    depth measurements, and list of lists with tuples of latitude and
    longitude coordinates that maps one to one with list of lists with
    temperature
    Return an empty list, an empty list, and an empty list if exception
    """
    # Map the requested statistic onto its WOA13 netCDF variable name.
    if statType == "analyzed mean":
        fieldType = "t_an"
    elif statType == "standard deviations":
        fieldType = "t_sd"
    else:
        # NOTE(review): despite the docstring, no exception is involved --
        # an unknown statType is reported on stderr and yields empty lists.
        sys.stderr.write("Cannot process climatology file with a statistical "
                         "field as " + statType + "\n")
        return [], [], []
    # The fourth returned value (the time axis) is deliberately discarded.
    latLonDepthTempList, depthColumns, latLonList, time = read_netcdf.subset_data(longitude, latitude, filePathName, coordRange, True, fieldType)
    return latLonDepthTempList, depthColumns, latLonList
| 1,328 | 0 | 23 |
123d1128c16997f0e67d237277bf905f3036c99c | 177 | py | Python | faraday/hub/admin.py | stashito/Faraday | 1cd232f349195454bed32592930e381444b51f71 | [
"MIT"
] | 2 | 2021-02-28T07:34:43.000Z | 2021-02-28T16:29:33.000Z | faraday/hub/admin.py | stashito/Faraday | 1cd232f349195454bed32592930e381444b51f71 | [
"MIT"
] | 1 | 2021-02-28T18:43:15.000Z | 2021-02-28T18:43:15.000Z | faraday/hub/admin.py | stashito/Faraday | 1cd232f349195454bed32592930e381444b51f71 | [
"MIT"
] | 1 | 2021-02-28T16:30:10.000Z | 2021-02-28T16:30:10.000Z | from django.contrib import admin
# NOTE(review): wildcard import pulls every name from hub.models into scope;
# consider importing Scientist, Employer, DataPool explicitly.
from .models import *
# Register the hub models so they are editable in the Django admin site.
admin.site.register(Scientist)
admin.site.register(Employer)
admin.site.register(DataPool)
admin.site.register(DataEntry) | 25.285714 | 32 | 0.824859 | from django.contrib import admin
from .models import *
admin.site.register(Scientist)
admin.site.register(Employer)
admin.site.register(DataPool)
admin.site.register(DataEntry) | 0 | 0 | 0 |
896b11c2b407471293f262e28c59f88385695cda | 989 | py | Python | abcTau/distance_functions.py | roxana-zeraati/abcTau | ce4352062ee7821c80ac1c660641f41fef023e14 | [
"BSD-3-Clause"
] | 8 | 2021-06-29T14:36:56.000Z | 2022-03-27T18:18:10.000Z | abcTau/distance_functions.py | roxana-zeraati/abcTau | ce4352062ee7821c80ac1c660641f41fef023e14 | [
"BSD-3-Clause"
] | null | null | null | abcTau/distance_functions.py | roxana-zeraati/abcTau | ce4352062ee7821c80ac1c660641f41fef023e14 | [
"BSD-3-Clause"
] | 4 | 2021-06-03T13:53:21.000Z | 2022-03-27T18:18:01.000Z | """
Module containing different distance functions.
"""
import numpy as np
from scipy import stats
def linear_distance(data, synth_data):
    """ compute linear distance between autocorrelations.

    Parameters
    -----------
    data : 1d array
        autocorrelation of real data.
    synth_data : 1d array
        autocorrelation of synthetic data.

    Returns
    -------
    d : float
        linear distance between autocorrelations.
    """
    # Mean squared difference; np.nanmean skips NaN lags in either input.
    residual = data - synth_data
    return np.nanmean(residual ** 2)
def logarithmic_distance(data, synth_data):
""" compute logarithmic distance between autocorrelations.
Parameters
-----------
data : 1d array
autocorrelation of real data.
synth_data : 1d array
autocorrelation of synthetic data.
Returns
-------
d : float
logarithmic ditance between autocorrelations.
"""
d = np.nanmean(np.power((np.log(data) - np.log(synth_data)),2))
return d | 22.477273 | 67 | 0.622851 | """
Module containing different distance functions.
"""
import numpy as np
from scipy import stats
def linear_distance(data, synth_data):
    """ compute linear distance between autocorrelations.

    Parameters
    -----------
    data : 1d array
        autocorrelation of real data.
    synth_data : 1d array
        autocorrelation of synthetic data.

    Returns
    -------
    d : float
        linear distance between autocorrelations.
    """
    # mean squared difference between the two curves; nanmean ignores NaN lags
    d = np.nanmean(np.power(((data) - (synth_data)),2))
    return d
def logarithmic_distance(data, synth_data):
""" compute logarithmic distance between autocorrelations.
Parameters
-----------
data : 1d array
autocorrelation of real data.
synth_data : 1d array
autocorrelation of synthetic data.
Returns
-------
d : float
logarithmic ditance between autocorrelations.
"""
d = np.nanmean(np.power((np.log(data) - np.log(synth_data)),2))
return d | 0 | 0 | 0 |
161fe56b92d67e8836143c96825910d5b7527d1f | 8,085 | py | Python | tests/unit/test_sortedset.py | justinsb/python-driver | 418947cd619afcfc541c00403f131a18a17c66c2 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_sortedset.py | justinsb/python-driver | 418947cd619afcfc541c00403f131a18a17c66c2 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_sortedset.py | justinsb/python-driver | 418947cd619afcfc541c00403f131a18a17c66c2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.util import sortedset
from cassandra.cqltypes import EMPTY
| 32.083333 | 85 | 0.592208 | # Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.util import sortedset
from cassandra.cqltypes import EMPTY
class SortedSetTest(unittest.TestCase):
    """Unit tests for cassandra.util.sortedset.

    Exercises construction, membership, set algebra (union / intersection /
    difference / symmetric difference), mutation (clear / pop / remove) and
    the comparison / bitwise operator overloads, checking interoperability
    with the builtin set type throughout.
    """
    def test_init(self):
        """Construction deduplicates and sorts the input."""
        input = [5, 4, 3, 2, 1, 1, 1]
        expected = sorted(set(input))
        ss = sortedset(input)
        self.assertEqual(len(ss), len(expected))
        self.assertEqual(list(ss), expected)
    def test_contains(self):
        """`in` / `not in` work for present and absent values."""
        input = [5, 4, 3, 2, 1, 1, 1]
        expected = sorted(set(input))
        ss = sortedset(input)
        for i in expected:
            self.assertTrue(i in ss)
            self.assertFalse(i not in ss)
        # values just outside the stored range must not be found
        hi = max(expected)+1
        lo = min(expected)-1
        self.assertFalse(hi in ss)
        self.assertFalse(lo in ss)
    def test_mutable_contents(self):
        """Unhashable (mutable) items like bytearray are accepted and deduplicated."""
        ba = bytearray(b'some data here')
        ss = sortedset([ba, ba])
        self.assertEqual(list(ss), [ba])
    def test_clear(self):
        """clear() empties the set."""
        ss = sortedset([1, 2, 3])
        ss.clear()
        self.assertEqual(len(ss), 0)
    def test_equal(self):
        """Equality holds across sortedset/set boundaries and fails on mismatch."""
        s1 = set([1])
        s12 = set([1, 2])
        ss1 = sortedset(s1)
        ss12 = sortedset(s12)
        self.assertEqual(ss1, s1)
        self.assertEqual(ss12, s12)
        self.assertNotEqual(ss1, ss12)
        self.assertNotEqual(ss12, ss1)
        self.assertNotEqual(ss1, s12)
        self.assertNotEqual(ss12, s1)
        self.assertNotEqual(ss1, EMPTY)
    def test_copy(self):
        """copy() yields a distinct object holding the same members."""
        # minimal orderable type: comparison by object identity
        class comparable(object):
            def __lt__(self, other):
                return id(self) < id(other)
        o = comparable()
        ss = sortedset([comparable(), o])
        ss2 = ss.copy()
        self.assertNotEqual(id(ss), id(ss2))
        self.assertTrue(o in ss)
        self.assertTrue(o in ss2)
    def test_isdisjoint(self):
        """isdisjoint() agrees in all set/sortedset pairings."""
        # set, ss
        s12 = set([1, 2])
        s2 = set([2])
        ss1 = sortedset([1])
        ss13 = sortedset([1, 3])
        ss3 = sortedset([3])
        # s ss disjoint
        self.assertTrue(s2.isdisjoint(ss1))
        self.assertTrue(s2.isdisjoint(ss13))
        # s ss not disjoint
        self.assertFalse(s12.isdisjoint(ss1))
        self.assertFalse(s12.isdisjoint(ss13))
        # ss s disjoint
        self.assertTrue(ss1.isdisjoint(s2))
        self.assertTrue(ss13.isdisjoint(s2))
        # ss s not disjoint
        self.assertFalse(ss1.isdisjoint(s12))
        self.assertFalse(ss13.isdisjoint(s12))
        # ss ss disjoint
        self.assertTrue(ss1.isdisjoint(ss3))
        self.assertTrue(ss3.isdisjoint(ss1))
        # ss ss not disjoint
        self.assertFalse(ss1.isdisjoint(ss13))
        self.assertFalse(ss13.isdisjoint(ss1))
        self.assertFalse(ss3.isdisjoint(ss13))
        self.assertFalse(ss13.isdisjoint(ss3))
    def test_issubset(self):
        """issubset() against both set and sortedset arguments."""
        s12 = set([1, 2])
        ss1 = sortedset([1])
        ss13 = sortedset([1, 3])
        ss3 = sortedset([3])
        self.assertTrue(ss1.issubset(s12))
        self.assertTrue(ss1.issubset(ss13))
        self.assertFalse(ss1.issubset(ss3))
        self.assertFalse(ss13.issubset(ss3))
        self.assertFalse(ss13.issubset(ss1))
        self.assertFalse(ss13.issubset(s12))
    def test_issuperset(self):
        """issuperset() against both set and sortedset arguments."""
        s12 = set([1, 2])
        ss1 = sortedset([1])
        ss13 = sortedset([1, 3])
        ss3 = sortedset([3])
        self.assertTrue(s12.issuperset(ss1))
        self.assertTrue(ss13.issuperset(ss3))
        self.assertTrue(ss13.issuperset(ss13))
        self.assertFalse(s12.issuperset(ss13))
        self.assertFalse(ss1.issuperset(ss3))
        self.assertFalse(ss1.issuperset(ss13))
    def test_union(self):
        """union() merges members regardless of operand order or type."""
        s1 = set([1])
        ss12 = sortedset([1, 2])
        ss23 = sortedset([2, 3])
        self.assertEqual(sortedset().union(s1), sortedset([1]))
        self.assertEqual(ss12.union(s1), sortedset([1, 2]))
        self.assertEqual(ss12.union(ss23), sortedset([1, 2, 3]))
        self.assertEqual(ss23.union(ss12), sortedset([1, 2, 3]))
        self.assertEqual(ss23.union(s1), sortedset([1, 2, 3]))
    def test_intersection(self):
        """intersection() supports multiple iterables of mixed types."""
        s12 = set([1, 2])
        ss23 = sortedset([2, 3])
        self.assertEqual(s12.intersection(ss23), set([2]))
        self.assertEqual(ss23.intersection(s12), sortedset([2]))
        self.assertEqual(ss23.intersection(s12, [2], (2,)), sortedset([2]))
        self.assertEqual(ss23.intersection(s12, [900], (2,)), sortedset())
    def test_difference(self):
        """difference() removes members present in the argument."""
        s1 = set([1])
        ss12 = sortedset([1, 2])
        ss23 = sortedset([2, 3])
        self.assertEqual(sortedset().difference(s1), sortedset())
        self.assertEqual(ss12.difference(s1), sortedset([2]))
        self.assertEqual(ss12.difference(ss23), sortedset([1]))
        self.assertEqual(ss23.difference(ss12), sortedset([3]))
        self.assertEqual(ss23.difference(s1), sortedset([2, 3]))
    def test_symmetric_difference(self):
        """symmetric_difference() keeps members in exactly one operand."""
        s = set([1, 3, 5])
        ss = sortedset([2, 3, 4])
        ss2 = sortedset([5, 6, 7])
        self.assertEqual(ss.symmetric_difference(s), sortedset([1, 2, 4, 5]))
        self.assertFalse(ss.symmetric_difference(ss))
        self.assertEqual(ss.symmetric_difference(s), sortedset([1, 2, 4, 5]))
        self.assertEqual(ss2.symmetric_difference(ss), sortedset([2, 3, 4, 5, 6, 7]))
    def test_pop(self):
        """pop() returns elements largest-first and raises once empty."""
        ss = sortedset([2, 1])
        self.assertEqual(ss.pop(), 2)
        self.assertEqual(ss.pop(), 1)
        # NOTE(review): assertRaises((KeyError, IndexError), ss.pop) would be
        # the idiomatic form of this try/fail/except pattern.
        try:
            ss.pop()
            self.fail("Error not thrown")
        except (KeyError, IndexError) as e:
            pass
    def test_remove(self):
        """remove() deletes present members and raises KeyError otherwise."""
        ss = sortedset([2, 1])
        self.assertEqual(len(ss), 2)
        self.assertRaises(KeyError, ss.remove, 3)
        self.assertEqual(len(ss), 2)
        ss.remove(1)
        self.assertEqual(len(ss), 1)
        ss.remove(2)
        self.assertFalse(ss)
        self.assertRaises(KeyError, ss.remove, 2)
        self.assertFalse(ss)
    def test_operators(self):
        """Comparison and bitwise operators mirror the builtin set semantics."""
        ss1 = sortedset([1])
        ss12 = sortedset([1, 2])
        # __ne__
        self.assertFalse(ss12 != ss12)
        self.assertFalse(ss12 != sortedset([1, 2]))
        self.assertTrue(ss12 != sortedset())
        # __le__
        self.assertTrue(ss1 <= ss12)
        self.assertTrue(ss12 <= ss12)
        self.assertFalse(ss12 <= ss1)
        # __lt__
        self.assertTrue(ss1 < ss12)
        self.assertFalse(ss12 < ss12)
        self.assertFalse(ss12 < ss1)
        # __ge__
        self.assertFalse(ss1 >= ss12)
        self.assertTrue(ss12 >= ss12)
        self.assertTrue(ss12 >= ss1)
        # __gt__
        self.assertFalse(ss1 > ss12)
        self.assertFalse(ss12 > ss12)
        self.assertTrue(ss12 > ss1)
        # __and__
        self.assertEqual(ss1 & ss12, ss1)
        self.assertEqual(ss12 & ss12, ss12)
        self.assertEqual(ss12 & set(), sortedset())
        # __or__
        self.assertEqual(ss1 | ss12, ss12)
        self.assertEqual(ss12 | ss12, ss12)
        self.assertEqual(ss12 | set(), ss12)
        self.assertEqual(sortedset() | ss1 | ss12, ss12)
        # __sub__
        self.assertEqual(ss1 - ss12, set())
        self.assertEqual(ss12 - ss12, set())
        self.assertEqual(ss12 - set(), ss12)
        self.assertEqual(ss12 - ss1, sortedset([2]))
        # __xor__
        self.assertEqual(ss1 ^ ss12, set([2]))
        self.assertEqual(ss12 ^ ss1, set([2]))
        self.assertEqual(ss12 ^ ss12, set())
        self.assertEqual(ss12 ^ set(), ss12)
| 6,871 | 18 | 454 |
330a418a735a0acb7fbfef77dc25aeed37b9cc73 | 27,216 | py | Python | env/cards.py | alxwdm/TichuAgent | d498d1050264d13c920018006e3dcc2a04bc61df | [
"MIT"
] | null | null | null | env/cards.py | alxwdm/TichuAgent | d498d1050264d13c920018006e3dcc2a04bc61df | [
"MIT"
] | null | null | null | env/cards.py | alxwdm/TichuAgent | d498d1050264d13c920018006e3dcc2a04bc61df | [
"MIT"
] | null | null | null | """ This module contains a class to represent multiple Tichu Cards. """
BOMBS = ['four_bomb', 'straight_bomb']
class Cards():
"""
A class to represent multiple Tichu Cards.
Can either be a hand (i.e. no specific combination)
or a combination (e.g. pair, straight, ...).
The type is determined automatically when adding or removing cards.
Inspired by the following sources:
- https://github.com/hundredblocks/ticher
- https://github.com/sylee421/TichuRL
Attributes
----------
cards: list of Card
A list containing all Card objects in this Cards instance.
phoenix_flag: bool
Whether this Cards instance contains a Phoenix.
size: int
The number of Cards in this instance.
points: int
The points of the card.
In Tichu, only 5, 10, K, Phoenix and Dragon give points.
type: str
The type of this Cards instance (e.g. hand, pair, straight)
power: float
The power of this Cards instance. It depends on the type
and the highest Card.
For example: A hand has 0 power, a pair of 10s has power 10.
points: int
The aggregated Card points in this instance.
Methods
-------
show:
Prints all the Cards using the Card.image attribute.
get_available_combinations:
Outputs a list of all possible combinations.
contains(other):
Checks whether other (list of Card objects) are contained
in this Cards instance.
remove(card):
Removes a Card from this Cards instance.
"""
size = None
cards = None
phoenix_flag = None
def __init__(self, card_list):
"""
Constructs a Cards instance.
Paramter
--------
card_list: A list of Card objects.
"""
# dispatch table for type checking function
self.dispatch_type = {0: self._typecheck_pass,
1: self._typecheck_solo,
2: self._typecheck_pair,
3: self._typecheck_triple,
4: self._typecheck_four_bomb,
5: self._typecheck_full_straight,
6: self._typecheck_pair_seq}
# set attributes
self.phoenix_flag = False
self.cards = list()
for i in card_list:
self.cards.append(i)
if i.name == 'Phoenix':
self.phoenix_flag = True
self.cards.sort()
self.size = len(self.cards)
self.type = None
self.power = 0
# run init functions
self._set_type_and_power()
self._set_points()
def show(self):
""" A nice visualization of all cards in the set. """
if self.size == 0:
print(' PASS')
else:
for i in range(5):
for crd in range(self.size):
print(self.cards[crd].image[i], end='')
print()
def _set_points(self):
""" Set number of game points of this card set. """
if self.type != 'pass':
self.points = sum([crd.points for crd in self.cards])
else:
self.points = 0
def _set_type_and_power(self):
""" Determines which combination (if any) is this card set. """
self.type = 'unk'
# check for all but pair sequence depending on card length
self.dispatch_type[min(len(self.cards),5)]()
# if type is still unkown, check for pair sequence
if self.type == 'unk':
self.dispatch_type[6]()
# if type is still unkown, it must be a hand
if self.type == 'unk':
self.type = 'hand'
self.power = 0
def get_available_combinations(self):
""" Get all available combinations form this card set. """
solo = self._get_available_solo()
pair = self._get_available_pair()
triple = self._get_available_triple()
four_bomb = self._get_available_four_bomb()
full = self._get_available_full()
straight, straight_bomb = self._get_available_straight()
pair_seq = self._get_available_pair_seq()
return [solo, pair, triple, four_bomb,
full, straight, straight_bomb, pair_seq]
def contains(self, other):
""" Checks if this instance contains all cards from other. """
this_cards = [(crd.name, crd.suit) for crd in self.cards]
other_cards = [(crd.name, crd.suit) for crd in other.cards]
return all([elem in this_cards for elem in other_cards])
def remove(self, card):
""" Remove a single Card and update this Cards instance. """
try:
self.cards.remove(card)
except ValueError: # if card is not in cards, return False
return False
self.cards.sort()
if card.name == 'Phoenix':
self.phoenix_flag = False
self.size = self.size - 1
self._set_type_and_power()
self._set_points()
return True
def _typecheck_pass(self):
""" Checks whether Cards is of type pass. """
if len(self.cards)==0:
self.type = 'pass'
self.power = 0
def _typecheck_solo(self):
""" Checks whether Cards is of type solo. """
if len(self.cards)==1:
self.type = 'solo'
self.power = self.cards[0].power
def _typecheck_pair(self):
""" Checks whether Cards is of type pair. """
if len(self.cards)==2:
# regular pair
if self.cards[0].power == self.cards[1].power:
self.type = 'pair'
self.power = self.cards[0].power
return
# phoenix pair
elif (self.phoenix_flag and
not (self.cards[1].name == 'Dragon' or
self.cards[1].name == 'Dog')):
self.type = 'pair'
self.power = self.cards[1].power
def _typecheck_triple(self):
""" Checks whether Cards is of type triple. """
if len(self.cards)==3:
# regular triple
if (self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power):
self.type = 'triple'
self.power = self.cards[0].power
# phoenix triple
elif self.phoenix_flag and self.cards[1].power == self.cards[2].power:
self.type = 'triple'
self.power = self.cards[1].power
def _typecheck_four_bomb(self):
""" Checks whether Cards is of type four bomb. """
if (len(self.cards)==4 and self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power and
self.cards[2].power == self.cards[3].power):
self.type = 'four_bomb'
self.power = 50 + self.cards[0].power
def _typecheck_full_straight(self):
""" Checks whether Cards is of type full house or straight. """
self._typecheck_full()
self._typecheck_straight()
def _typecheck_full(self):
""" Checks whether Cards is of type full house. """
if len(self.cards)==5:
# regular full house with triple higher than pair
if (self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[0].power
# regular full house with pair higher than triple
elif (self.cards[0].power == self.cards[1].power and
self.cards[2].power == self.cards[3].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[2].power
# phoenix full house with phoenix triple
elif (self.phoenix_flag and
self.cards[1].power == self.cards[2].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[3].power
# phoenix full house with phoenix pair
elif self.phoenix_flag:
if (self.cards[1].power == self.cards[2].power and
self.cards[2].power == self.cards[3].power):
self.type = 'full'
self.power = self.cards[1].power
elif (self.cards[2].power == self.cards[3].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[2].power
def _typecheck_straight(self):
"""
Checks whether Cards is of type straight.
Can be a straight with regular cards, straight with Phoenix,
or straight bomb.
"""
self._typecheck_regular_straight()
self._typecheck_phoenix_straight()
def _typecheck_regular_straight(self):
""" Checks whether Cards is of type straight (w/o Phoenix). """
if len(self.cards)>=5:
is_straight = True
is_flush = True
for i in range(len(self.cards)-1):
if self.cards[i].power + 1 == self.cards[i+1].power:
if self.cards[i].suit == self.cards[i+1].suit:
pass
else:
is_flush = False
else:
is_straight = False
break
# if it is a straight and all suits are equal, it is a bomb
if is_straight and is_flush:
self.type = 'straight_bomb'
self.power = 100 + self.cards[-1].power
return
if is_straight:
self.type = 'straight'
self.power = self.cards[-1].power
    def _typecheck_phoenix_straight(self):
        """ Checks whether Cards is of type straight (with Phoenix). """
        if len(self.cards)>=5 and self.phoenix_flag:
            phoenix_used = False
            phoenix_idx = -1
            is_straight = True
            # only cards[1:] are scanned (the Phoenix is expected at index 0,
            # as in the other phoenix helpers); one gap of 2 in the power
            # sequence may be bridged by the Phoenix
            for i in range(len(self.cards)-2):
                if self.cards[i+1].power+1 == self.cards[i+2].power:
                    pass
                elif (not(phoenix_used) and
                        (self.cards[i+1].power+2 == self.cards[i+2].power)):
                    phoenix_used = True
                    phoenix_idx = i+1
                else:
                    is_straight = False
            if is_straight:
                self.type = 'straight'
                # phoenix is last card of straight: power is last card + 1
                # NOTE(review): phoenix_idx can be at most len(self.cards)-2
                # here, so the second operand looks unreachable — confirm.
                if not(phoenix_used) or (phoenix_idx == len(self.cards)):
                    self.power = self.cards[-1].power+1
                # phoenix is not last card of straight: power is last card
                else:
                    self.power = self.cards[-1].power
def _typecheck_pair_seq(self):
""" Checks whether Cards is of type pair sequence. """
self._typecheck_regular_pair_seq()
self._typecheck_phoenix_pair_seq()
    def _typecheck_regular_pair_seq(self):
        """ Checks whether Cards is of type pair_seq (w/o Phoenix). """
        # needs an even number of at least 4 cards; Dog and Dragon can
        # never be part of a pair sequence
        if (len(self.cards)>=4 and len(self.cards)%2==0 and
                not(any((crd.name == 'Dog' or crd.name == 'Dragon')
                        for crd in self.cards))):
            is_pair_regular = True
            # sorted cards must alternate: equal power inside a pair
            # (even index), +1 power step between pairs (odd index)
            for i in range(len(self.cards)-1):
                if i%2 == 0 and self.cards[i].power == self.cards[i+1].power:
                    pass
                elif i%2 == 1 and self.cards[i].power+1 == self.cards[i+1].power:
                    pass
                else:
                    is_pair_regular = False
                    break
            if is_pair_regular:
                self.type = 'pair_seq'
                self.power = self.cards[-1].power
    def _typecheck_phoenix_pair_seq(self):
        """
        Checks whether Cards is of type pair_seq (with Phoenix).
        For a phoenix pair sequence, the algorithm is quite complicated,
        because there are a lot of possible combinations.
        Phoenix can be used in the first pair, in any middle pair, or in
        the last pair.
        Depending on where the Phoenix is used, either all equal or all
        unequal indices are increments of 1 in a valid pair sequence.
        If the Phoenix is used as a replacement for an equal indexed card,
        then the logic turns around ("toggles") and all subsequent cards
        need to be increments of the previous card in unequal indices.
        """
        # return if pair sequence is not possible
        # (need an even count >= 4, no Dog/Dragon, and the Phoenix in hand)
        if not (len(self.cards)>=4 and len(self.cards)%2==0 and
                not(any((crd.name == 'Dog' or crd.name == 'Dragon')
                        for crd in self.cards)) and
                self.phoenix_flag):
            return
        # return if card sequence (excluding Phoenix) does not increase by 1
        unique_power = sorted({crd.power for crd in self.cards})
        unique_power.pop(0) # remove phoenix from set
        if not (all(x+1==y for x, y in zip(unique_power, unique_power[1:])
                    ) and len(unique_power)>1):
            return
        # continue and prepare local variables if preconditions are met
        phoenix_used = False
        is_pair_equal = True
        is_pair_unequal = True
        # check for phoenix use in equal card list index
        # (scan starts at index 1 — index 0 holds the Phoenix)
        toggle = 1
        antitoggle = 0
        for i in range(1,len(self.cards)-1):
            if (i%2 == toggle and
                    self.cards[i].power == self.cards[i+1].power):
                pass
            elif (i%2 == antitoggle and
                    self.cards[i].power + 1 == self.cards[i+1].power):
                if i+1 >= len(self.cards)-1 and not phoenix_used:
                    # phoenix used as the highest pair of sequence
                    phoenix_used = True
                elif phoenix_used: # phoenix cannot be used twice
                    is_pair_unequal = False
                    break
                else:
                    # if phoenix is used in the middle of the sequence,
                    # change matching behavior of toggle/antitoggle
                    # so that i%2 matches next element
                    phoenix_used = True
                    toggle = 0
                    antitoggle = 1
        # check for phoenix use in equal card list index
        if not is_pair_unequal:
            phoenix_used = False
            for i in range(1,len(self.cards)-1):
                if (i%2 == 0 and
                        self.cards[i].power == self.cards[i+1].power):
                    pass
                elif (i%2 == 1 and
                        self.cards[i].power+1 == self.cards[i+1].power):
                    # check if phoenix is first card in sequence
                    if i == 1:
                        phoenix_used = True
                    elif phoenix_used: # phoenix cannot be used twice
                        is_pair_equal = False
                        break
                    else:
                        phoenix_used = True
        if is_pair_unequal or is_pair_equal:
            self.type = 'pair_seq'
            self.power = self.cards[-1].power
def _get_available_solo(self):
""" Returns a list with all possible solo combinations. """
solo = list()
for i in range(len(self.cards)):
solo_list = self.cards[i]
solo_cards = Cards([solo_list])
if solo_cards.type == 'solo':
solo.append(solo_cards)
return solo
    def _get_available_pair(self):
        """ Returns a list with all possible pair combinations. """
        pair = list()
        for i in range(len(self.cards)-1):
            # regular pairs
            if self.cards[i].power == self.cards[i+1].power:
                pair_list = [self.cards[i], self.cards[i+1]]
                pair_cards = Cards(pair_list)
                if pair_cards.type == 'pair':
                    pair.append(pair_cards)
            # phoenix pairs (relies on the Phoenix sorting to index 0;
            # special cards cannot be paired with the Phoenix)
            if self.phoenix_flag and self.cards[i+1].suit != 'Special':
                pair_list = [self.cards[0], self.cards[i+1]]
                pair_cards = Cards(pair_list)
                if pair_cards.type == 'pair':
                    pair.append(pair_cards)
            # multiple pairs: probe two and three cards ahead so that three
            # or four equal cards yield every distinct pair combination;
            # IndexError just means the probe ran past the end of the hand
            try:
                if self.cards[i].power == self.cards[i+2].power:
                    pair_list = [self.cards[i], self.cards[i+2]]
                    pair_cards = Cards(pair_list)
                    if pair_cards.type == 'pair':
                        pair.append(pair_cards)
                if self.cards[i].power == self.cards[i+3].power:
                    pair_list = [self.cards[i], self.cards[i+3]]
                    pair_cards = Cards(pair_list)
                    if pair_cards.type == 'pair':
                        pair.append(pair_cards)
            except IndexError:
                pass
        return pair
def _get_available_triple(self):
""" Returns a list with all possible triple combinations. """
triple = list()
for i in range(len(self.cards)-2):
# regular triple
if (self.cards[i].power == self.cards[i+1].power and
self.cards[i+1].power == self.cards[i+2].power):
triple_candidate = [self.cards[i], self.cards[i+1],
self.cards[i+2]]
triple = check_and_append_triple(triple_candidate, triple)
# phoenix triple
if (self.phoenix_flag and
self.cards[i+1].power == self.cards[i+2].power):
triple_candidate = [self.cards[0], self.cards[i+1],
self.cards[i+2]]
triple = check_and_append_triple(triple_candidate, triple)
# multiple triples
try:
if (self.cards[i].power == self.cards[i+1].power and
self.cards[i+1].power == self.cards[i+3].power):
triple_candidate = [self.cards[i], self.cards[i+1],
self.cards[i+3]]
triple = check_and_append_triple(triple_candidate, triple)
if (self.cards[i].power == self.cards[i+2].power and
self.cards[i+2].power == self.cards[i+3].power):
triple_candidate = [self.cards[i], self.cards[i+2],
self.cards[i+3]]
triple = check_and_append_triple(triple_candidate, triple)
if (self.phoenix_flag and
self.cards[i+1].power == self.cards[i+3].power):
triple_candidate = [self.cards[0], self.cards[i+1],
self.cards[i+3]]
triple = check_and_append_triple(triple_candidate, triple)
if (self.phoenix_flag and
self.cards[i+1].power == self.cards[i+4].power):
triple_candidate = [self.cards[0], self.cards[i+1],
self.cards[i+4]]
triple = check_and_append_triple(triple_candidate, triple)
except IndexError:
pass
return triple
def _get_available_four_bomb(self):
""" Returns a list with all possible four bomb combinations. """
four_bomb = list()
for i in range(len(self.cards)-3):
if (self.cards[i].power == self.cards[i+1].power and
self.cards[i+1].power == self.cards[i+2].power and
self.cards[i+2].power == self.cards[i+3].power):
four_list = [self.cards[i], self.cards[i+1],
self.cards[i+2], self.cards[i+3]]
four_cards = Cards(four_list)
if four_cards.type == 'four_bomb':
four_bomb.append(four_cards)
return four_bomb
def _get_available_full(self):
""" Returns a list with all possible full house combinations. """
full = list()
pair = self._get_available_pair()
triple = self._get_available_triple()
for i in pair:
for j in triple:
if i.power != j.power:
full_list = list()
full_list.extend(i.cards)
full_list.extend(j.cards)
full_cards = Cards(full_list)
if full_cards.type == 'full':
full.append(full_cards)
return full
def _get_available_straight(self):
""" Returns a list with all possible straight combinations. """
straight = list()
straight_bomb = list()
for i in range(len(self.cards)-4):
candidate_list = list()
phoenix_available = self.phoenix_flag
for j in range(i,len(self.cards)):
# add first card of possible straight
if len(candidate_list)==0:
candidate_list.append(self.cards[j])
if self.cards[j].name == 'Phoenix':
phoenix_available = False
# no check if Phoenix is last entry
elif candidate_list[-1].name == 'Phoenix':
candidate_list.append(self.cards[j])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
# add subsequent cards
elif candidate_list[-1].power+1 == self.cards[j].power:
candidate_list.append(self.cards[j])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
# skip pairs
elif candidate_list[-1].power == self.cards[j].power:
pass
# use phoenix mid straight if available
elif (phoenix_available and
candidate_list[-1].power+2 == self.cards[j].power):
candidate_list.append(self.cards[0])
candidate_list.append(self.cards[j])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
phoenix_available = False
# use phoenix as first/last card if available
elif phoenix_available:
candidate_list.append(self.cards[0])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
phoenix_available = False
# no straight possible
else:
break
return straight, straight_bomb
    def _get_available_pair_seq(self):
        """ Returns a list with all possible pair sequence combinations. """
        pair_seq = list()
        pair = self._get_available_pair()
        # grow a candidate sequence from each starting pair; every valid
        # prefix of length >= 2 pairs is recorded as its own combination
        for i in range(len(pair)-1):
            candidate_list = list()
            for j in range(i,len(pair)):
                # add first element to candidate list
                if len(candidate_list) == 0:
                    candidate_list.extend(pair[j].cards)
                # add subsequent pairs
                elif candidate_list[-1].power+1 == pair[j].power:
                    candidate_list.extend(pair[j].cards)
                    if len(candidate_list) > 1:
                        pair_seq_cards = Cards(candidate_list)
                        if pair_seq_cards.type == 'pair_seq':
                            pair_seq.append(pair_seq_cards)
                # skip double pairs
                elif candidate_list[-1].power == pair[j].power:
                    pass
                # break if no pair_seq possible
                else:
                    break
        return pair_seq
| 41.298938 | 82 | 0.532113 | """ This module contains a class to represent multiple Tichu Cards. """
BOMBS = ['four_bomb', 'straight_bomb']
class Cards():
"""
A class to represent multiple Tichu Cards.
Can either be a hand (i.e. no specific combination)
or a combination (e.g. pair, straight, ...).
The type is determined automatically when adding or removing cards.
Inspired by the following sources:
- https://github.com/hundredblocks/ticher
- https://github.com/sylee421/TichuRL
Attributes
----------
cards: list of Card
A list containing all Card objects in this Cards instance.
phoenix_flag: bool
Whether this Cards instance contains a Phoenix.
size: int
The number of Cards in this instance.
points: int
The points of the card.
In Tichu, only 5, 10, K, Phoenix and Dragon give points.
type: str
The type of this Cards instance (e.g. hand, pair, straight)
power: float
The power of this Cards instance. It depends on the type
and the highest Card.
For example: A hand has 0 power, a pair of 10s has power 10.
points: int
The aggregated Card points in this instance.
Methods
-------
show:
Prints all the Cards using the Card.image attribute.
get_available_combinations:
Outputs a list of all possible combinations.
contains(other):
Checks whether other (list of Card objects) are contained
in this Cards instance.
remove(card):
Removes a Card from this Cards instance.
"""
size = None
cards = None
phoenix_flag = None
def __init__(self, card_list):
"""
Constructs a Cards instance.
Paramter
--------
card_list: A list of Card objects.
"""
# dispatch table for type checking function
self.dispatch_type = {0: self._typecheck_pass,
1: self._typecheck_solo,
2: self._typecheck_pair,
3: self._typecheck_triple,
4: self._typecheck_four_bomb,
5: self._typecheck_full_straight,
6: self._typecheck_pair_seq}
# set attributes
self.phoenix_flag = False
self.cards = list()
for i in card_list:
self.cards.append(i)
if i.name == 'Phoenix':
self.phoenix_flag = True
self.cards.sort()
self.size = len(self.cards)
self.type = None
self.power = 0
# run init functions
self._set_type_and_power()
self._set_points()
def show(self):
""" A nice visualization of all cards in the set. """
if self.size == 0:
print(' PASS')
else:
for i in range(5):
for crd in range(self.size):
print(self.cards[crd].image[i], end='')
print()
def _set_points(self):
""" Set number of game points of this card set. """
if self.type != 'pass':
self.points = sum([crd.points for crd in self.cards])
else:
self.points = 0
def _set_type_and_power(self):
""" Determines which combination (if any) is this card set. """
self.type = 'unk'
# check for all but pair sequence depending on card length
self.dispatch_type[min(len(self.cards),5)]()
# if type is still unkown, check for pair sequence
if self.type == 'unk':
self.dispatch_type[6]()
# if type is still unkown, it must be a hand
if self.type == 'unk':
self.type = 'hand'
self.power = 0
def get_available_combinations(self):
""" Get all available combinations form this card set. """
solo = self._get_available_solo()
pair = self._get_available_pair()
triple = self._get_available_triple()
four_bomb = self._get_available_four_bomb()
full = self._get_available_full()
straight, straight_bomb = self._get_available_straight()
pair_seq = self._get_available_pair_seq()
return [solo, pair, triple, four_bomb,
full, straight, straight_bomb, pair_seq]
def contains(self, other):
""" Checks if this instance contains all cards from other. """
this_cards = [(crd.name, crd.suit) for crd in self.cards]
other_cards = [(crd.name, crd.suit) for crd in other.cards]
return all([elem in this_cards for elem in other_cards])
def remove(self, card):
""" Remove a single Card and update this Cards instance. """
try:
self.cards.remove(card)
except ValueError: # if card is not in cards, return False
return False
self.cards.sort()
if card.name == 'Phoenix':
self.phoenix_flag = False
self.size = self.size - 1
self._set_type_and_power()
self._set_points()
return True
def _typecheck_pass(self):
""" Checks whether Cards is of type pass. """
if len(self.cards)==0:
self.type = 'pass'
self.power = 0
def _typecheck_solo(self):
""" Checks whether Cards is of type solo. """
if len(self.cards)==1:
self.type = 'solo'
self.power = self.cards[0].power
def _typecheck_pair(self):
""" Checks whether Cards is of type pair. """
if len(self.cards)==2:
# regular pair
if self.cards[0].power == self.cards[1].power:
self.type = 'pair'
self.power = self.cards[0].power
return
# phoenix pair
elif (self.phoenix_flag and
not (self.cards[1].name == 'Dragon' or
self.cards[1].name == 'Dog')):
self.type = 'pair'
self.power = self.cards[1].power
def _typecheck_triple(self):
""" Checks whether Cards is of type triple. """
if len(self.cards)==3:
# regular triple
if (self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power):
self.type = 'triple'
self.power = self.cards[0].power
# phoenix triple
elif self.phoenix_flag and self.cards[1].power == self.cards[2].power:
self.type = 'triple'
self.power = self.cards[1].power
def _typecheck_four_bomb(self):
""" Checks whether Cards is of type four bomb. """
if (len(self.cards)==4 and self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power and
self.cards[2].power == self.cards[3].power):
self.type = 'four_bomb'
self.power = 50 + self.cards[0].power
def _typecheck_full_straight(self):
""" Checks whether Cards is of type full house or straight. """
self._typecheck_full()
self._typecheck_straight()
def _typecheck_full(self):
""" Checks whether Cards is of type full house. """
if len(self.cards)==5:
# regular full house with triple higher than pair
if (self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[0].power
# regular full house with pair higher than triple
elif (self.cards[0].power == self.cards[1].power and
self.cards[2].power == self.cards[3].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[2].power
# phoenix full house with phoenix triple
elif (self.phoenix_flag and
self.cards[1].power == self.cards[2].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[3].power
# phoenix full house with phoenix pair
elif self.phoenix_flag:
if (self.cards[1].power == self.cards[2].power and
self.cards[2].power == self.cards[3].power):
self.type = 'full'
self.power = self.cards[1].power
elif (self.cards[2].power == self.cards[3].power and
self.cards[3].power == self.cards[4].power):
self.type = 'full'
self.power = self.cards[2].power
def _typecheck_straight(self):
"""
Checks whether Cards is of type straight.
Can be a straight with regular cards, straight with Phoenix,
or straight bomb.
"""
self._typecheck_regular_straight()
self._typecheck_phoenix_straight()
def _typecheck_regular_straight(self):
""" Checks whether Cards is of type straight (w/o Phoenix). """
if len(self.cards)>=5:
is_straight = True
is_flush = True
for i in range(len(self.cards)-1):
if self.cards[i].power + 1 == self.cards[i+1].power:
if self.cards[i].suit == self.cards[i+1].suit:
pass
else:
is_flush = False
else:
is_straight = False
break
# if it is a straight and all suits are equal, it is a bomb
if is_straight and is_flush:
self.type = 'straight_bomb'
self.power = 100 + self.cards[-1].power
return
if is_straight:
self.type = 'straight'
self.power = self.cards[-1].power
def _typecheck_phoenix_straight(self):
""" Checks whether Cards is of type straight (with Phoenix). """
if len(self.cards)>=5 and self.phoenix_flag:
phoenix_used = False
phoenix_idx = -1
is_straight = True
for i in range(len(self.cards)-2):
if self.cards[i+1].power+1 == self.cards[i+2].power:
pass
elif (not(phoenix_used) and
(self.cards[i+1].power+2 == self.cards[i+2].power)):
phoenix_used = True
phoenix_idx = i+1
else:
is_straight = False
if is_straight:
self.type = 'straight'
# phoenix is last card of straight: power is last card + 1
if not(phoenix_used) or (phoenix_idx == len(self.cards)):
self.power = self.cards[-1].power+1
# phoenix is not last card of straight: power is last card
else:
self.power = self.cards[-1].power
def _typecheck_pair_seq(self):
""" Checks whether Cards is of type pair sequence. """
self._typecheck_regular_pair_seq()
self._typecheck_phoenix_pair_seq()
def _typecheck_regular_pair_seq(self):
""" Checks whether Cards is of type pair_seq (w/o Phoenix). """
if (len(self.cards)>=4 and len(self.cards)%2==0 and
not(any((crd.name == 'Dog' or crd.name == 'Dragon')
for crd in self.cards))):
is_pair_regular = True
for i in range(len(self.cards)-1):
if i%2 == 0 and self.cards[i].power == self.cards[i+1].power:
pass
elif i%2 == 1 and self.cards[i].power+1 == self.cards[i+1].power:
pass
else:
is_pair_regular = False
break
if is_pair_regular:
self.type = 'pair_seq'
self.power = self.cards[-1].power
def _typecheck_phoenix_pair_seq(self):
"""
Checks whether Cards is of type pair_seq (with Phoenix).
For a phoenix pair sequence, the algorithm is quite complicated,
because there are a lot of possible combinations.
Phoenix can be used in the first pair, in any middle pair, or in
the last pair.
Depending on where the Phoenix is used, either all equal or all
unequal indices are increments of 1 in a valid pair sequence.
If the Phoenix is used as a replacement for an equal indexed card,
then the logic turns around ("toggles") and all subsequent cards
need to be increments of the previous card in unequal indices.
"""
# return if pair sequence is not possible
if not (len(self.cards)>=4 and len(self.cards)%2==0 and
not(any((crd.name == 'Dog' or crd.name == 'Dragon')
for crd in self.cards)) and
self.phoenix_flag):
return
# return if card sequence (excluding Phoenix) does not increase by 1
unique_power = sorted({crd.power for crd in self.cards})
unique_power.pop(0) # remove phoenix from set
if not (all(x+1==y for x, y in zip(unique_power, unique_power[1:])
) and len(unique_power)>1):
return
# continue and prepare local variables if preconditions are met
phoenix_used = False
is_pair_equal = True
is_pair_unequal = True
# check for phoenix use in equal card list index
toggle = 1
antitoggle = 0
for i in range(1,len(self.cards)-1):
if (i%2 == toggle and
self.cards[i].power == self.cards[i+1].power):
pass
elif (i%2 == antitoggle and
self.cards[i].power + 1 == self.cards[i+1].power):
if i+1 >= len(self.cards)-1 and not phoenix_used:
# phoenix used as the highest pair of sequence
phoenix_used = True
elif phoenix_used: # phoenix cannot be used twice
is_pair_unequal = False
break
else:
# if phoenix is used in the middle of the sequence,
# change matching behavior of toggle/antitoggle
# so that i%2 matches next element
phoenix_used = True
toggle = 0
antitoggle = 1
# check for phoenix use in equal card list index
if not is_pair_unequal:
phoenix_used = False
for i in range(1,len(self.cards)-1):
if (i%2 == 0 and
self.cards[i].power == self.cards[i+1].power):
pass
elif (i%2 == 1 and
self.cards[i].power+1 == self.cards[i+1].power):
# check if phoenix is first card in sequence
if i == 1:
phoenix_used = True
elif phoenix_used: # phoenix cannot be used twice
is_pair_equal = False
break
else:
phoenix_used = True
if is_pair_unequal or is_pair_equal:
self.type = 'pair_seq'
self.power = self.cards[-1].power
def _get_available_solo(self):
""" Returns a list with all possible solo combinations. """
solo = list()
for i in range(len(self.cards)):
solo_list = self.cards[i]
solo_cards = Cards([solo_list])
if solo_cards.type == 'solo':
solo.append(solo_cards)
return solo
def _get_available_pair(self):
""" Returns a list with all possible pair combinations. """
pair = list()
for i in range(len(self.cards)-1):
# regular pairs
if self.cards[i].power == self.cards[i+1].power:
pair_list = [self.cards[i], self.cards[i+1]]
pair_cards = Cards(pair_list)
if pair_cards.type == 'pair':
pair.append(pair_cards)
# phoenix pairs
if self.phoenix_flag and self.cards[i+1].suit != 'Special':
pair_list = [self.cards[0], self.cards[i+1]]
pair_cards = Cards(pair_list)
if pair_cards.type == 'pair':
pair.append(pair_cards)
# multiple pairs
try:
if self.cards[i].power == self.cards[i+2].power:
pair_list = [self.cards[i], self.cards[i+2]]
pair_cards = Cards(pair_list)
if pair_cards.type == 'pair':
pair.append(pair_cards)
if self.cards[i].power == self.cards[i+3].power:
pair_list = [self.cards[i], self.cards[i+3]]
pair_cards = Cards(pair_list)
if pair_cards.type == 'pair':
pair.append(pair_cards)
except IndexError:
pass
return pair
def _get_available_triple(self):
""" Returns a list with all possible triple combinations. """
def check_and_append_triple(cards_list, triple):
triple_cards = Cards(cards_list)
if triple_cards.type == 'triple':
triple.append(triple_cards)
return triple
triple = list()
for i in range(len(self.cards)-2):
# regular triple
if (self.cards[i].power == self.cards[i+1].power and
self.cards[i+1].power == self.cards[i+2].power):
triple_candidate = [self.cards[i], self.cards[i+1],
self.cards[i+2]]
triple = check_and_append_triple(triple_candidate, triple)
# phoenix triple
if (self.phoenix_flag and
self.cards[i+1].power == self.cards[i+2].power):
triple_candidate = [self.cards[0], self.cards[i+1],
self.cards[i+2]]
triple = check_and_append_triple(triple_candidate, triple)
# multiple triples
try:
if (self.cards[i].power == self.cards[i+1].power and
self.cards[i+1].power == self.cards[i+3].power):
triple_candidate = [self.cards[i], self.cards[i+1],
self.cards[i+3]]
triple = check_and_append_triple(triple_candidate, triple)
if (self.cards[i].power == self.cards[i+2].power and
self.cards[i+2].power == self.cards[i+3].power):
triple_candidate = [self.cards[i], self.cards[i+2],
self.cards[i+3]]
triple = check_and_append_triple(triple_candidate, triple)
if (self.phoenix_flag and
self.cards[i+1].power == self.cards[i+3].power):
triple_candidate = [self.cards[0], self.cards[i+1],
self.cards[i+3]]
triple = check_and_append_triple(triple_candidate, triple)
if (self.phoenix_flag and
self.cards[i+1].power == self.cards[i+4].power):
triple_candidate = [self.cards[0], self.cards[i+1],
self.cards[i+4]]
triple = check_and_append_triple(triple_candidate, triple)
except IndexError:
pass
return triple
def _get_available_four_bomb(self):
""" Returns a list with all possible four bomb combinations. """
four_bomb = list()
for i in range(len(self.cards)-3):
if (self.cards[i].power == self.cards[i+1].power and
self.cards[i+1].power == self.cards[i+2].power and
self.cards[i+2].power == self.cards[i+3].power):
four_list = [self.cards[i], self.cards[i+1],
self.cards[i+2], self.cards[i+3]]
four_cards = Cards(four_list)
if four_cards.type == 'four_bomb':
four_bomb.append(four_cards)
return four_bomb
def _get_available_full(self):
""" Returns a list with all possible full house combinations. """
full = list()
pair = self._get_available_pair()
triple = self._get_available_triple()
for i in pair:
for j in triple:
if i.power != j.power:
full_list = list()
full_list.extend(i.cards)
full_list.extend(j.cards)
full_cards = Cards(full_list)
if full_cards.type == 'full':
full.append(full_cards)
return full
def _get_available_straight(self):
""" Returns a list with all possible straight combinations. """
def check_candidate(candidate_list, straight, straight_bomb):
if len(candidate_list) > 4:
straight_cards = Cards(candidate_list)
if straight_cards.type == 'straight':
straight.append(straight_cards)
elif straight_cards.type == 'straight_bomb':
straight_bomb.append(straight_cards)
else:
pass
return straight, straight_bomb
straight = list()
straight_bomb = list()
for i in range(len(self.cards)-4):
candidate_list = list()
phoenix_available = self.phoenix_flag
for j in range(i,len(self.cards)):
# add first card of possible straight
if len(candidate_list)==0:
candidate_list.append(self.cards[j])
if self.cards[j].name == 'Phoenix':
phoenix_available = False
# no check if Phoenix is last entry
elif candidate_list[-1].name == 'Phoenix':
candidate_list.append(self.cards[j])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
# add subsequent cards
elif candidate_list[-1].power+1 == self.cards[j].power:
candidate_list.append(self.cards[j])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
# skip pairs
elif candidate_list[-1].power == self.cards[j].power:
pass
# use phoenix mid straight if available
elif (phoenix_available and
candidate_list[-1].power+2 == self.cards[j].power):
candidate_list.append(self.cards[0])
candidate_list.append(self.cards[j])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
phoenix_available = False
# use phoenix as first/last card if available
elif phoenix_available:
candidate_list.append(self.cards[0])
straight, straight_bomb = check_candidate(candidate_list,
straight, straight_bomb)
phoenix_available = False
# no straight possible
else:
break
return straight, straight_bomb
def _get_available_pair_seq(self):
""" Returns a list with all possible pair sequence combinations. """
pair_seq = list()
pair = self._get_available_pair()
for i in range(len(pair)-1):
candidate_list = list()
for j in range(i,len(pair)):
# add first element to candidate list
if len(candidate_list) == 0:
candidate_list.extend(pair[j].cards)
# add subsequent pairs
elif candidate_list[-1].power+1 == pair[j].power:
candidate_list.extend(pair[j].cards)
if len(candidate_list) > 1:
pair_seq_cards = Cards(candidate_list)
if pair_seq_cards.type == 'pair_seq':
pair_seq.append(pair_seq_cards)
# skip double pairs
elif candidate_list[-1].power == pair[j].power:
pass
# break if no pair_seq possible
else:
break
return pair_seq
def __add__(self, card_list_to_add):
this_card_list = self.cards
this_card_list.append(card_list_to_add)
new_cards = Cards(card_list=this_card_list)
return new_cards
def __sub__(self, cards):
this_card_list = self.cards
for crd in cards:
this_card_list.remove(crd)
new_cards = Cards(card_list=this_card_list)
return new_cards
def __ge__(self, other):
# equal types or bombs, compare power
if ((self.type == other.type and
self.size == other.size) or
self.type in BOMBS or
other.type in BOMBS):
return self.power >= other.power
# unequal types, return False (opt: raise error)
else:
return False
def __le__(self, other):
# equal types or bombs, compare power
if ((self.type == other.type and
self.size == other.size) or
self.type in BOMBS or
other.type in BOMBS):
return self.power <= other.power
# unequal types, return False (opt: raise error)
else:
return False
def __gt__(self, other):
# equal types or bombs, compare power
if ((self.type == other.type and
self.size == other.size) or
self.type in BOMBS or
other.type in BOMBS):
return self.power > other.power
# unequal types, return False (opt: raise error)
else:
return False
def __lt__(self, other):
# equal types or bombs, compare power
if ((self.type == other.type and
self.size == other.size) or
self.type in BOMBS or
other.type in BOMBS):
return self.power < other.power
# unequal types, return False (opt: raise error)
else:
return False
def __eq__(self, other):
return (self.type == other.type and
self.size == other.size and
self.power == other.power)
def __ne__(self, other):
return (self.type != other.type and
self.size != other.size and
self.power != other.power)
def __repr__(self):
card_str = ''
for crd in self.cards:
card_str = card_str + str(crd.name) + ' ' + str(crd.suit) + ', '
return str({'type': self.type,
'size': self.size,
'cards': card_str})
| 2,863 | 0 | 303 |
94ec6bb8a553e341c64bb05758629ca5d89785fd | 1,397 | py | Python | misc/python/materialize/feature_benchmark/executor.py | bobbyiliev/materialize | 44e3bcae151179075232ad436ae72f5883361fd1 | [
"MIT"
] | 1 | 2022-03-19T21:08:19.000Z | 2022-03-19T21:08:19.000Z | misc/python/materialize/feature_benchmark/executor.py | bobbyiliev/materialize | 44e3bcae151179075232ad436ae72f5883361fd1 | [
"MIT"
] | 203 | 2022-01-04T00:16:23.000Z | 2022-03-30T17:34:01.000Z | misc/python/materialize/feature_benchmark/executor.py | guswynn/materialize | f433173ed71f511d91311769ec58c2d427dd6c3b | [
"MIT"
] | null | null | null | # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from typing import Any, Callable, List
from materialize.mzcompose import Composition
| 28.510204 | 70 | 0.621331 | # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from typing import Any, Callable, List
from materialize.mzcompose import Composition
class Executor:
    """Base class for benchmark executors: applies a measurement lambda to itself."""

    def Lambda(self, _lambda: Callable[["Executor"], float]) -> float:
        """Invoke *_lambda* with this executor and return its float result."""
        result = _lambda(self)
        return result
class Docker(Executor):
    """Executor that runs benchmark steps against an mzcompose Composition."""

    def __init__(
        self,
        composition: Composition,
        seed: int,
    ) -> None:
        # composition drives the docker services; seed scopes topic names
        # so concurrent runs do not collide
        self._composition = composition
        self._seed = seed

    def RestartMz(self) -> None:
        """Kill and restart the materialized service."""
        self._composition.kill("materialized")
        self._composition.up("materialized")
        return None

    def Td(self, input: str) -> Any:
        """Run a testdrive script (passed on stdin) and return its stdout."""
        return self._composition.exec(
            "testdrive",
            "--no-reset",
            f"--seed={self._seed}",
            "--initial-backoff=10ms",
            "--backoff-factor=0",
            stdin=input,
            capture=True,
        ).stdout

    def Kgen(self, topic: str, args: List[str]) -> Any:
        """Run the kgen load generator against the seed-scoped topic."""
        return self._composition.run(
            "kgen", f"--topic=testdrive-{topic}-{self._seed}", *args
        )
| 757 | -4 | 179 |
d046624d689e6a494b82bd433005a9d13e3ee823 | 1,456 | py | Python | Table.py | emulhall/RSA | cef434464c101002a3ed5a19f7f7a97d35500338 | [
"Python-2.0"
] | null | null | null | Table.py | emulhall/RSA | cef434464c101002a3ed5a19f7f7a97d35500338 | [
"Python-2.0"
] | null | null | null | Table.py | emulhall/RSA | cef434464c101002a3ed5a19f7f7a97d35500338 | [
"Python-2.0"
] | null | null | null | import itertools
import Utterance
import PossibleWorld
#this table contains all the possible worlds
#this adds up all of the possible world probabilities in the rows and columns of a table
#re-adds up all of the columns and rows so that normalization is accurate
#important function for normalizing so that we can look at probability distributions
| 28.54902 | 89 | 0.744505 | import itertools
import Utterance
import PossibleWorld
#this table contains all the possible worlds
class Table:
    """Probability table over possible worlds, indexed by (row, column).

    Keeps running row/column probability totals so the table can be
    normalized into per-column or per-row distributions.
    """

    def __init__(self, possible_worlds, U, W):
        self.size = len(possible_worlds)
        self.possible_worlds = possible_worlds
        self.column_sums = [0] * self.size
        self.row_sums = [0] * self.size
        self.utterances = U
        self.worlds = W
        self.fill_sums(possible_worlds)

    def fill_sums(self, possible_worlds):
        """Accumulate each world's probability into its row and column totals."""
        for world in possible_worlds:
            self.column_sums[world.column] += world.probability
            self.row_sums[world.row] += world.probability

    def re_sum(self):
        """Reset and recompute the totals so normalization sees fresh sums."""
        self.column_sums = [0] * self.size
        self.row_sums = [0] * self.size
        self.fill_sums(self.possible_worlds)

    def normalize(self, column):
        """Divide each world's probability by its column (or row) total.

        Worlds with zero probability, or whose total is zero, are left
        unchanged to avoid division by zero.
        """
        self.re_sum()
        sums = self.column_sums if column else self.row_sums
        for world in self.possible_worlds:
            idx = world.column if column else world.row
            if world.probability != 0 and sums[idx] != 0:
                world.probability = world.probability / sums[idx]

    def copy(self):
        """Return a new Table over the same world/utterance lists.

        NOTE(review): this is shallow — the new table shares the same
        world objects and lists with the original.
        """
        return Table(self.possible_worlds, self.utterances, self.worlds)
| 971 | -9 | 138 |
1f86c9dc378f8854106823557cba547f2ca36664 | 5,013 | py | Python | dataloaders/datasets/pascal.py | mzhaoshuai/RMI | 10a40cdbeb58bdd1bd7125fde73b48b12f9452c7 | [
"MIT"
] | 242 | 2019-10-25T08:06:41.000Z | 2022-03-11T08:44:17.000Z | dataloaders/datasets/pascal.py | mzhaoshuai/RMI | 10a40cdbeb58bdd1bd7125fde73b48b12f9452c7 | [
"MIT"
] | 32 | 2019-11-10T15:34:54.000Z | 2022-03-16T16:17:08.000Z | dataloaders/datasets/pascal.py | mzhaoshuai/RMI | 10a40cdbeb58bdd1bd7125fde73b48b12f9452c7 | [
"MIT"
] | 39 | 2019-10-29T02:55:55.000Z | 2022-02-25T07:15:22.000Z | # coding=utf-8
"""
dataloader for PASCAL VOC 2012 dataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset
from RMI.dataloaders import custom_transforms as tr
# PASCAL VOC 2012 dataset statistics
_PASCAL_R_MEAN = 116
_PASCAL_G_MEAN = 113
_PASCAL_B_MEAN = 104
_PASCAL_R_STD = 69.58
_PASCAL_G_STD = 68.68
_PASCAL_B_STD = 72.67
class VOCSegmentation(Dataset):
    """PASCAL VOC 2012 semantic-segmentation dataset.

    Expects the VOC layout under `data_dir`: JPEGImages/,
    SegmentationClassAug/ and ImageSets/Segmentation/<split>.txt.
    """
    NUM_CLASSES = 21
    def __init__(self,
                 data_dir,
                 crop_size=513,
                 split='train',
                 min_scale=0.5,
                 max_scale=2.0,
                 step_size=0.25):
        """
        Args:
            data_dir: path to VOC dataset directory.
            crop_size: the crop size.
            split: ["trainaug", "train", "trainval", "val", "test"].
        """
        super().__init__()
        # dataset dir
        self.data_dir = data_dir
        # NOTE(review): attribute name is misspelled ("iamge_dir"); kept
        # byte-identical here for compatibility — consider fixing with an alias.
        self.iamge_dir = os.path.join(self.data_dir, 'JPEGImages')
        self.label_dir = os.path.join(self.data_dir, 'SegmentationClassAug')
        assert split in ["trainaug", "train", "trainval", "val", "test"]
        self.split = split
        # txt lists of images
        list_file_dir = os.path.join(self.data_dir, 'ImageSets/Segmentation')
        # crop size and scales
        self.crop_size = crop_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.step_size = step_size
        # dataset statistics (0-255 scale) and the ignore label for masks
        self.mean = (_PASCAL_R_MEAN, _PASCAL_G_MEAN, _PASCAL_B_MEAN)
        self.std = (_PASCAL_R_STD, _PASCAL_G_STD, _PASCAL_B_STD)
        self.ignore_label = 255
        self.image_ids = []
        self.image_lists = []
        self.label_lists = []
        # read the split file and resolve image/label paths
        with open(os.path.join(os.path.join(list_file_dir, self.split + '.txt')), "r") as f:
            lines = f.read().splitlines()
        for line in lines:
            image_filename = os.path.join(self.iamge_dir, line + ".jpg")
            label_filename = os.path.join(self.label_dir, line + ".png")
            assert os.path.isfile(image_filename)
            if 'test' not in self.split:
                assert os.path.isfile(label_filename)
            self.image_ids.append(line)
            self.image_lists.append(image_filename)
            self.label_lists.append(label_filename)
        assert (len(self.image_lists) == len(self.label_lists))
        # print the dataset info
        print('Number of image_lists in {}: {:d}'.format(split, len(self.image_lists)))
    def __len__(self):
        """Number of images in this split."""
        return len(self.image_lists)
    def __getitem__(self, index):
        """Return the transformed sample at `index`; transform depends on the split."""
        _image, _label = self._make_img_gt_point_pair(index)
        # different transforms for different splits
        if 'train' in self.split:
            sample = {'image': _image, 'label': _label}
            return self.transform_train(sample)
        elif 'val' in self.split:
            sample = {'image': _image, 'label': _label}
            return self.transform_val(sample)
        elif 'test' in self.split:
            sample = {'image': _image}
            return self.transform_test(sample)
        else:
            raise NotImplementedError
    def _make_img_gt_point_pair(self, index):
        """Open the image and, except for test splits, its ground-truth mask."""
        _image = Image.open(self.image_lists[index]).convert('RGB')
        if 'test' not in self.split:
            _label = Image.open(self.label_lists[index])
        else:
            _label = None
        return _image, _label
    def transform_val(self, sample):
        """Validation transform: normalize and convert to tensors."""
        composed_transforms = transforms.Compose([
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor()])
        return composed_transforms(sample)
    def transform_test(self, sample):
        """Test transform: image-only normalization and tensor conversion."""
        composed_transforms = transforms.Compose([
            tr.Normalize_Image(mean=self.mean, std=self.std),
            tr.ToTensor_Image()])
        return composed_transforms(sample)
if __name__ == '__main__':
    # data dir
    data_dir = os.path.join("/home/zhaoshuai/dataset/VOCdevkit/VOC2012")
    print(data_dir)
    dataset = VOCSegmentation(data_dir)
    #print(dataset.image_lists)
    # Accumulators for per-channel (RGB) statistics, in 0-255 scale.
    image_mean = np.array([0.0, 0.0, 0.0])
    cov_sum = np.array([0.0, 0.0, 0.0])
    pixel_nums = 0.0
    # First pass: per-channel mean over every pixel of every image.
    for filename in dataset.image_lists:
        image = Image.open(filename).convert('RGB')
        image = np.array(image).astype(np.float32)
        pixel_nums += image.shape[0] * image.shape[1]
        image_mean += np.sum(image, axis=(0, 1))
    image_mean = image_mean / pixel_nums
    print(image_mean)
    # Second pass: per-channel standard deviation (sqrt of unbiased variance).
    for filename in dataset.image_lists:
        image = Image.open(filename).convert('RGB')
        image = np.array(image).astype(np.float32)
        cov_sum += np.sum(np.square(image - image_mean), axis=(0, 1))
    image_cov = np.sqrt(cov_sum / (pixel_nums - 1))
    print(image_cov)
| 28.322034 | 86 | 0.712946 | # coding=utf-8
"""
dataloader for PASCAL VOC 2012 dataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset
from RMI.dataloaders import custom_transforms as tr
# PASCAL VOC 2012 dataset statistics
_PASCAL_R_MEAN = 116
_PASCAL_G_MEAN = 113
_PASCAL_B_MEAN = 104
_PASCAL_R_STD = 69.58
_PASCAL_G_STD = 68.68
_PASCAL_B_STD = 72.67
class VOCSegmentation(Dataset):
    """PASCAL VOC 2012 semantic-segmentation dataset.

    Expects the VOC layout under `data_dir`: JPEGImages/,
    SegmentationClassAug/ and ImageSets/Segmentation/<split>.txt.
    """
    NUM_CLASSES = 21
    def __init__(self,
                 data_dir,
                 crop_size=513,
                 split='train',
                 min_scale=0.5,
                 max_scale=2.0,
                 step_size=0.25):
        """
        Args:
            data_dir: path to VOC dataset directory.
            crop_size: the crop size used by the training transform.
            split: one of ["trainaug", "train", "trainval", "val", "test"].
            min_scale, max_scale, step_size: random-rescale range for training.
        """
        super().__init__()
        # dataset dirs
        self.data_dir = data_dir
        self.image_dir = os.path.join(self.data_dir, 'JPEGImages')
        # BUG FIX: attribute was misspelled "iamge_dir"; keep the old name as
        # an alias so any external code reading it keeps working.
        self.iamge_dir = self.image_dir
        self.label_dir = os.path.join(self.data_dir, 'SegmentationClassAug')
        assert split in ["trainaug", "train", "trainval", "val", "test"]
        self.split = split
        # txt lists of images
        list_file_dir = os.path.join(self.data_dir, 'ImageSets/Segmentation')
        # crop size and scales
        self.crop_size = crop_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.step_size = step_size
        # dataset statistics (0-255 scale) and the ignore label for masks
        self.mean = (_PASCAL_R_MEAN, _PASCAL_G_MEAN, _PASCAL_B_MEAN)
        self.std = (_PASCAL_R_STD, _PASCAL_G_STD, _PASCAL_B_STD)
        self.ignore_label = 255
        self.image_ids = []
        self.image_lists = []
        self.label_lists = []
        # read the split file and resolve image/label paths
        with open(os.path.join(os.path.join(list_file_dir, self.split + '.txt')), "r") as f:
            lines = f.read().splitlines()
        for line in lines:
            image_filename = os.path.join(self.image_dir, line + ".jpg")
            label_filename = os.path.join(self.label_dir, line + ".png")
            assert os.path.isfile(image_filename)
            if 'test' not in self.split:
                assert os.path.isfile(label_filename)
            self.image_ids.append(line)
            self.image_lists.append(image_filename)
            self.label_lists.append(label_filename)
        assert (len(self.image_lists) == len(self.label_lists))
        # print the dataset info
        print('Number of image_lists in {}: {:d}'.format(split, len(self.image_lists)))
    def __len__(self):
        """Number of images in this split."""
        return len(self.image_lists)
    def __getitem__(self, index):
        """Return the transformed sample at `index`; transform depends on the split."""
        _image, _label = self._make_img_gt_point_pair(index)
        # different transforms for different splits
        if 'train' in self.split:
            sample = {'image': _image, 'label': _label}
            return self.transform_train(sample)
        elif 'val' in self.split:
            sample = {'image': _image, 'label': _label}
            return self.transform_val(sample)
        elif 'test' in self.split:
            sample = {'image': _image}
            return self.transform_test(sample)
        else:
            raise NotImplementedError
    def _make_img_gt_point_pair(self, index):
        """Open the image and, except for test splits, its ground-truth mask."""
        _image = Image.open(self.image_lists[index]).convert('RGB')
        if 'test' not in self.split:
            _label = Image.open(self.label_lists[index])
        else:
            _label = None
        return _image, _label
    def transform_train(self, sample):
        """Training transform: random rescale, pad/crop, flip, normalize, tensorize."""
        composed_transforms = transforms.Compose([
            tr.RandomRescale(self.min_scale, self.max_scale, self.step_size),
            tr.RandomPadOrCrop(crop_height=self.crop_size, crop_width=self.crop_size,
                               ignore_label=self.ignore_label, mean=self.mean),
            tr.RandomHorizontalFlip(),
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor()])
        return composed_transforms(sample)
    def transform_val(self, sample):
        """Validation transform: normalize and convert to tensors."""
        composed_transforms = transforms.Compose([
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor()])
        return composed_transforms(sample)
    def transform_test(self, sample):
        """Test transform: image-only normalization and tensor conversion."""
        composed_transforms = transforms.Compose([
            tr.Normalize_Image(mean=self.mean, std=self.std),
            tr.ToTensor_Image()])
        return composed_transforms(sample)
    def __str__(self):
        return 'VOC2012(split=' + str(self.split) + ')'
if __name__ == '__main__':
    # data dir
    data_dir = os.path.join("/home/zhaoshuai/dataset/VOCdevkit/VOC2012")
    print(data_dir)
    dataset = VOCSegmentation(data_dir)
    #print(dataset.image_lists)
    # Accumulators for per-channel (RGB) statistics, in 0-255 scale.
    image_mean = np.array([0.0, 0.0, 0.0])
    cov_sum = np.array([0.0, 0.0, 0.0])
    pixel_nums = 0.0
    # First pass: per-channel mean over every pixel of every image.
    for filename in dataset.image_lists:
        image = Image.open(filename).convert('RGB')
        image = np.array(image).astype(np.float32)
        pixel_nums += image.shape[0] * image.shape[1]
        image_mean += np.sum(image, axis=(0, 1))
    image_mean = image_mean / pixel_nums
    print(image_mean)
    # Second pass: per-channel standard deviation (sqrt of unbiased variance).
    for filename in dataset.image_lists:
        image = Image.open(filename).convert('RGB')
        image = np.array(image).astype(np.float32)
        cov_sum += np.sum(np.square(image - image_mean), axis=(0, 1))
    image_cov = np.sqrt(cov_sum / (pixel_nums - 1))
    print(image_cov)
| 442 | 0 | 48 |
7b468c16e48409845d92e2a11b5db7befc6b212c | 2,057 | py | Python | source/dataload.py | eda-ricercatore/dal-bhat-cv | e9b05afa11dbb749afd5fc957e829290e04b7331 | [
"MIT"
] | 1 | 2019-02-06T02:20:35.000Z | 2019-02-06T02:20:35.000Z | source/dataload.py | eda-ricercatore/dal-bhat-cv | e9b05afa11dbb749afd5fc957e829290e04b7331 | [
"MIT"
] | null | null | null | source/dataload.py | eda-ricercatore/dal-bhat-cv | e9b05afa11dbb749afd5fc957e829290e04b7331 | [
"MIT"
] | null | null | null | '''
Dataloader.py
'''
import cv2
import sys,os
import xml.etree.ElementTree as ET
import numpy as np
print(os.listdir())
'''
Gets the coordinates of the bounding box of the object
returns the bounding box
'''
'''
Returns the one hot encoded label list as a numpy array
'''
'''
This is the function that should be called to extract the data
Returns bounding box coordinates, labels, and actual images of all
data points in that order
'''
if __name__ == '__main__':
proc()
| 28.971831 | 67 | 0.619835 | '''
Dataloader.py
'''
import cv2
import sys,os
import xml.etree.ElementTree as ET
import numpy as np
print(os.listdir())
'''
Gets the coordinates of the bounding box of the object
returns the bounding box
'''
def get_coords(path):
    """Parse a Pascal-VOC style XML annotation and return the object's
    bounding box as ``[xmax, xmin, ymax, ymin]`` (ints)."""
    root = ET.parse(path).getroot()
    # Order matters: downstream consumers expect xmax, xmin, ymax, ymin.
    return [
        int(root.find('object/bndbox/' + tag).text)
        for tag in ('xmax', 'xmin', 'ymax', 'ymin')
    ]
'''
Returns the one hot encoded label list as a numpy array
'''
def get_label(cat, lbl_list):
    """One-hot encode a folder name against ``lbl_list``.

    Non-alphabetic characters (e.g. trailing digits) are stripped from
    ``cat`` before lookup.  Returns a numpy float array of len(lbl_list).
    """
    letters_only = ''.join(ch for ch in cat if ch.isalpha())
    encoded = np.zeros(len(lbl_list))
    # list.index raises ValueError if the cleaned name is not a known label
    encoded[lbl_list.index(letters_only)] = 1
    return encoded
'''
This is the function that should be called to extract the data
Returns bounding box coordinates, labels, and actual images of all
data points in that order
'''
def proc():
    """Scan the current directory's class folders and collect the dataset.

    Returns a tuple ``(bbs, labels, images)``: bounding boxes, one-hot
    labels and the loaded images, aligned by index.
    """
    # Derive the set of text labels from folder names (digits stripped).
    cat = []
    dirs = os.listdir()
    for name in os.listdir():
        cat.append(''.join(filter(lambda ch: ch.isalpha(), name)))
    # De-duplicate and drop this script's own entries.
    # NOTE(review): .remove raises ValueError if the entry is absent.
    cat = list(set(cat))
    cat.remove('dataloadpy')
    dirs.remove('dataload.py')
    # Parallel storage for images, labels and bounding boxes.
    images = []
    labels = []
    bbs = []
    # Every remaining folder is one class; every .xml has a matching .jpg.
    for folder in dirs:
        files = os.listdir(folder)
        files = [f[:-4] for f in files if f.endswith('.xml')]
        for file in files:
            images.append(cv2.imread(folder + '/' + file + '.jpg'))
            labels.append(get_label(folder, cat))
            bbs.append(get_coords(folder + '/' + file + '.xml'))
    # BUG FIX: previously returned the undefined name `imgs` (NameError).
    return bbs, labels, images
if __name__ == '__main__':
proc()
| 1,487 | 0 | 69 |
12161282f00bf925135425e32f4e1581147a1bd1 | 643 | py | Python | dictionary/migrations/0009_auto_20160410_2232.py | nirvaris/nirvaris-dictionary | f9ccf0376c9581c25fd0be8b24167d9e17dee133 | [
"MIT"
] | 3 | 2016-03-06T15:41:18.000Z | 2021-04-02T04:17:31.000Z | dictionary/migrations/0009_auto_20160410_2232.py | nirvaris/nirvaris-dictionary | f9ccf0376c9581c25fd0be8b24167d9e17dee133 | [
"MIT"
] | null | null | null | dictionary/migrations/0009_auto_20160410_2232.py | nirvaris/nirvaris-dictionary | f9ccf0376c9581c25fd0be8b24167d9e17dee133 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-10 22:32
from __future__ import unicode_literals
from django.db import migrations, models
| 25.72 | 123 | 0.628305 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-10 22:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop and re-create WordContent.references as a M2M field."""
    dependencies = [
        ('dictionary', '0008_auto_20160410_2229'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='wordcontent',
            name='references',
        ),
        migrations.AddField(
            model_name='wordcontent',
            name='references',
            # NOTE(review): `null=True` has no effect on ManyToManyField
            # (Django ignores it and warns) — candidate for a cleanup migration.
            field=models.ManyToManyField(null=True, related_name='words_referenced', to='dictionary.WordContentReference'),
        ),
    ]
| 0 | 467 | 23 |
8d8a913a80b603df3b4322fab1851f5e978bf7dd | 594 | py | Python | Diena_1_4_thonny/d2_u1_d10.py | edzya/Python_RTU_08_20 | d2921d998c611c18328dd523daf976a27ce858c1 | [
"MIT"
] | 8 | 2020-08-31T16:10:54.000Z | 2021-11-24T06:37:37.000Z | Diena_1_4_thonny/d2_u1_d10.py | edzya/Python_RTU_08_20 | d2921d998c611c18328dd523daf976a27ce858c1 | [
"MIT"
] | 8 | 2021-06-08T22:30:29.000Z | 2022-03-12T00:48:55.000Z | Diena_1_4_thonny/d2_u1_d10.py | edzya/Python_RTU_08_20 | d2921d998c611c18328dd523daf976a27ce858c1 | [
"MIT"
] | 12 | 2020-09-28T17:06:52.000Z | 2022-02-17T12:12:46.000Z | # # 1 uzdevums
# Prompt for the user's name, then their age (must parse as an int).
name = input("Enter your name: ")
age = int(input(name + ", how old are you?"))
# NOTE(review): import placed mid-script; conventionally belongs at the top.
import datetime
currentYear = datetime.datetime.now().year
# Project the calendar year in which the user turns 100.
print("You will be 100 in", 100-age, "years and that will be year", currentYear+(100-age))
# name = input("What is your name?")
# age = input (f"What is your age {name}?")
# age_till_100 = 100 - int(age)
#
# import datetime
# current_year = datetime.datetime.now().year
# # current_year = 2020
#
# year_with_100 = current_year + age_till_100
# print(f"{name}, after {age_till_100} years in {year_with_100} you will be 100 years old!") | 33 | 92 | 0.688552 | # # 1 uzdevums
# Prompt for the user's name, then their age (must parse as an int).
name = input("Enter your name: ")
age = int(input(name + ", how old are you?"))
# NOTE(review): import placed mid-script; conventionally belongs at the top.
import datetime
currentYear = datetime.datetime.now().year
# Project the calendar year in which the user turns 100.
print("You will be 100 in", 100-age, "years and that will be year", currentYear+(100-age))
# name = input("What is your name?")
# age = input (f"What is your age {name}?")
# age_till_100 = 100 - int(age)
#
# import datetime
# current_year = datetime.datetime.now().year
# # current_year = 2020
#
# year_with_100 = current_year + age_till_100
# print(f"{name}, after {age_till_100} years in {year_with_100} you will be 100 years old!") | 0 | 0 | 0 |
915d4d7fd550cf5f1822021026cb06a518cfce2b | 5,350 | py | Python | code/lecture6-styletransfer.py | pengxj/DeepLearningCourse | 107b50fafd873e58a302a81f9f0107a0e7cf5e09 | [
"Apache-2.0"
] | 1 | 2022-03-06T06:46:07.000Z | 2022-03-06T06:46:07.000Z | code/lecture6-styletransfer.py | pengxj/DeepLearningCourse | 107b50fafd873e58a302a81f9f0107a0e7cf5e09 | [
"Apache-2.0"
] | null | null | null | code/lecture6-styletransfer.py | pengxj/DeepLearningCourse | 107b50fafd873e58a302a81f9f0107a0e7cf5e09 | [
"Apache-2.0"
] | 1 | 2022-03-06T02:20:32.000Z | 2022-03-06T02:20:32.000Z | # coding: utf-8
import time
import torch
import torch.nn.functional as F
import torchvision
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import sys
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 均已测试
print(device, torch.__version__)
# 读取内容图像和样式图像
content_img = Image.open('data/rainier.jpg')
plt.imshow(content_img);
plt.show()
style_img = Image.open('data/autumn_oak.jpg')
plt.imshow(style_img);
plt.show()
# 预处理和后处理图像
rgb_mean = np.array([0.485, 0.456, 0.406])
rgb_std = np.array([0.229, 0.224, 0.225])
# 抽取特征
pretrained_net = torchvision.models.vgg19(pretrained=True, progress=True)
style_layers, content_layers = [0, 5, 10, 19, 28], [25]
net_list = []
for i in range(max(content_layers + style_layers) + 1):
net_list.append(pretrained_net.features[i])
net = torch.nn.Sequential(*net_list)
# 定义损失函数
# 内容损失
# 样式损失
# 总变差损失
# 损失函数
content_weight, style_weight, tv_weight = 1, 1e3, 10
# #创建和初始化合成图像
# 训练
image_shape = (150, 225)
# image_shape = (50, 75)
net = net.to(device)
content_X, contents_Y = get_contents(image_shape, device)
style_X, styles_Y = get_styles(image_shape, device)
output = train(content_X, contents_Y, styles_Y, device, 0.01, 500, 200)
plt.imshow(postprocess(output))
plt.show()
# image_shape = (300, 450)
# _, content_Y = get_contents(image_shape, device)
# _, style_Y = get_styles(image_shape, device)
# X = preprocess(postprocess(output), image_shape).to(device)
# big_output = train(X, content_Y, style_Y, device, 0.01, 500, 200)
# d2l.set_figsize((7, 5))
# d2l.plt.imshow(postprocess(big_output)); | 31.104651 | 85 | 0.671776 | # coding: utf-8
import time
import torch
import torch.nn.functional as F
import torchvision
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import sys
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 均已测试
print(device, torch.__version__)
# 读取内容图像和样式图像
content_img = Image.open('data/rainier.jpg')
plt.imshow(content_img);
plt.show()
style_img = Image.open('data/autumn_oak.jpg')
plt.imshow(style_img);
plt.show()
# 预处理和后处理图像
rgb_mean = np.array([0.485, 0.456, 0.406])
rgb_std = np.array([0.229, 0.224, 0.225])
def preprocess(PIL_img, image_shape):
    """Resize, tensorize and normalize a PIL image; returns a (1, 3, H, W) tensor."""
    process = torchvision.transforms.Compose([
        torchvision.transforms.Resize(image_shape),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=rgb_mean, std=rgb_std)])
    return process(PIL_img).unsqueeze(dim = 0) # (batch_size, 3, H, W)
def postprocess(img_tensor):
    """Invert `preprocess`: de-normalize, clamp to [0, 1] and convert back to PIL."""
    inv_normalize = torchvision.transforms.Normalize(
        mean= -rgb_mean / rgb_std,
        std= 1/rgb_std)
    to_PIL_image = torchvision.transforms.ToPILImage()
    return to_PIL_image(inv_normalize(img_tensor[0].cpu()).clamp(0, 1))
# 抽取特征
pretrained_net = torchvision.models.vgg19(pretrained=True, progress=True)
style_layers, content_layers = [0, 5, 10, 19, 28], [25]
net_list = []
for i in range(max(content_layers + style_layers) + 1):
net_list.append(pretrained_net.features[i])
net = torch.nn.Sequential(*net_list)
def extract_features(X, content_layers, style_layers):
    """Run X through the module-level `net`, collecting the activations
    at the requested content and style layer indices.

    Returns (contents, styles): two lists of intermediate tensors.
    """
    contents = []
    styles = []
    for i in range(len(net)):
        X = net[i](X)
        if i in style_layers:
            styles.append(X)
        if i in content_layers:
            contents.append(X)
    return contents, styles
def get_contents(image_shape, device):
    """Preprocess the module-level content image and extract its content features."""
    content_X = preprocess(content_img, image_shape).to(device)
    contents_Y, _ = extract_features(content_X, content_layers, style_layers)
    return content_X, contents_Y
def get_styles(image_shape, device):
    """Preprocess the module-level style image and extract its style features."""
    style_X = preprocess(style_img, image_shape).to(device)
    _, styles_Y = extract_features(style_X, content_layers, style_layers)
    return style_X, styles_Y
# 定义损失函数
# 内容损失
def content_loss(Y_hat, Y):
    """Mean-squared error between synthesized and target content features."""
    return torch.nn.functional.mse_loss(Y_hat, Y)
# 样式损失
def gram(X):
    """Gram matrix of a (1, C, H, W) feature map, normalized by C*H*W."""
    channels = X.shape[1]
    pixels = X.shape[2] * X.shape[3]
    # view() requires batch size 1 so that C * pixels == X.numel()
    flat = X.view(channels, pixels)
    return flat @ flat.t() / (channels * pixels)
def style_loss(Y_hat, gram_Y):
    """MSE between the Gram matrix of Y_hat and a pre-computed target Gram matrix."""
    return F.mse_loss(gram(Y_hat), gram_Y)
# 总变差损失
def tv_loss(Y_hat):
    """Total-variation loss: mean absolute difference between vertically and
    horizontally adjacent pixels, averaged over both directions."""
    vertical = F.l1_loss(Y_hat[:, :, 1:, :], Y_hat[:, :, :-1, :])
    horizontal = F.l1_loss(Y_hat[:, :, :, 1:], Y_hat[:, :, :, :-1])
    return 0.5 * (vertical + horizontal)
# 损失函数
content_weight, style_weight, tv_weight = 1, 1e3, 10
def compute_loss(X, contents_Y_hat, styles_Y_hat, contents_Y, styles_Y_gram):
    """Weighted content/style/TV losses and their sum.

    Uses the module-level content_weight / style_weight / tv_weight.
    Returns (contents_l, styles_l, tv_l, total).
    """
    # Compute the content, style and total-variation terms, each pre-weighted.
    contents_l = [content_loss(Y_hat, Y) * content_weight for Y_hat, Y in zip(
        contents_Y_hat, contents_Y)]
    styles_l = [style_loss(Y_hat, Y) * style_weight for Y_hat, Y in zip(
        styles_Y_hat, styles_Y_gram)]
    tv_l = tv_loss(X) * tv_weight
    # Total loss is the sum of all terms.
    l = sum(styles_l) + sum(contents_l) + tv_l
    return contents_l, styles_l, tv_l, l
# #创建和初始化合成图像
class GeneratedImage(torch.nn.Module):
    """Trainable image: the module's only parameter IS the image tensor."""

    def __init__(self, img_shape):
        super().__init__()
        # Randomly initialized; get_inits overwrites it with the content image.
        self.weight = torch.nn.Parameter(torch.rand(*img_shape))

    def forward(self):
        """Return the current image tensor."""
        return self.weight
def get_inits(X, device, lr, styles_Y):
    """Build the trainable image, its Adam optimizer, and the style Gram targets.

    Returns (image_tensor, styles_Y_gram, optimizer).
    """
    gen_img = GeneratedImage(X.shape).to(device)
    # Start optimization from X (the content image) rather than random noise.
    gen_img.weight.data = X.data
    optimizer = torch.optim.Adam(gen_img.parameters(), lr=lr)
    # Gram targets are fixed, so pre-compute them once.
    styles_Y_gram = [gram(Y) for Y in styles_Y]
    return gen_img(), styles_Y_gram, optimizer
# 训练
def train(X, contents_Y, styles_Y, device, lr, max_epochs, lr_decay_epoch):
    """Optimize the synthesized image against content and style targets.

    Relies on the module-level `content_layers`/`style_layers` and the
    helpers defined above.  Returns the detached final image tensor.
    """
    print("training on ", device)
    # Wrap X in a trainable module; pre-compute the style Gram targets.
    X, styles_Y_gram, optimizer = get_inits(X, device, lr, styles_Y)
    # Decay the learning rate by 10x every `lr_decay_epoch` steps.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_decay_epoch, gamma=0.1)
    for i in range(max_epochs):
        start = time.time()
        contents_Y_hat, styles_Y_hat = extract_features(
            X, content_layers, style_layers)
        contents_l, styles_l, tv_l, l = compute_loss(
            X, contents_Y_hat, styles_Y_hat, contents_Y, styles_Y_gram)
        optimizer.zero_grad()
        l.backward(retain_graph = True)
        optimizer.step()
        scheduler.step()
        # Periodic progress report (skips epoch 0).
        if i % 50 == 0 and i != 0:
            print('epoch %3d, content loss %.2f, style loss %.2f, '
                  'TV loss %.2f, %.2f sec'
                  % (i, sum(contents_l).item(), sum(styles_l).item(), tv_l.item(),
                     time.time() - start))
    return X.detach()
image_shape = (150, 225)
# image_shape = (50, 75)
net = net.to(device)
content_X, contents_Y = get_contents(image_shape, device)
style_X, styles_Y = get_styles(image_shape, device)
output = train(content_X, contents_Y, styles_Y, device, 0.01, 500, 200)
plt.imshow(postprocess(output))
plt.show()
# image_shape = (300, 450)
# _, content_Y = get_contents(image_shape, device)
# _, style_Y = get_styles(image_shape, device)
# X = preprocess(postprocess(output), image_shape).to(device)
# big_output = train(X, content_Y, style_Y, device, 0.01, 500, 200)
# d2l.set_figsize((7, 5))
# d2l.plt.imshow(postprocess(big_output)); | 3,432 | 17 | 347 |
0b03c062274666832f931322ecf1ed34060108de | 819 | py | Python | web_app/erbap/review_scrapper/__init__.py | onurtunali/erbap | 53c2598bf7025642c41935949b41e3f9e8b0f4f1 | [
"MIT"
] | null | null | null | web_app/erbap/review_scrapper/__init__.py | onurtunali/erbap | 53c2598bf7025642c41935949b41e3f9e8b0f4f1 | [
"MIT"
] | null | null | null | web_app/erbap/review_scrapper/__init__.py | onurtunali/erbap | 53c2598bf7025642c41935949b41e3f9e8b0f4f1 | [
"MIT"
] | null | null | null | """Scraping reviews and ratings from goodreads.com
DESCRIPTION:
Scraping the newest reviews from a given goodreads book url. Script works as follows:
1. Get the given url and open with webdriver of selenium.
2. Sort the reviews by newest.
3. Parse the returned web page using BeautifulSoup4 to isolate reviews.
4. Append the reviews to global mutable list object `reviews`.
5. Move to the next page until none is left.
DEPENDENCIES:
- selenium==3.11.0
- beautifulsoup4==4.10.0
- geckodriver-v0.30.0-linux64
SCRAPING ELEMENTS MAPPING:
- rating stars `<span class=" staticStars notranslate" title="liked it">`
- 5: "it was amazing"
- 4: "really liked it"
- 3: "liked it"
- 2: "it was ok"
- 1: "did not like it"
"""
| 30.333333 | 89 | 0.64591 | """Scraping reviews and ratings from goodreads.com
DESCRIPTION:
Scraping the newest reviews from a given goodreads book url. Script works as follows:
1. Get the given url and open with webdriver of selenium.
2. Sort the reviews by newest.
3. Parse the returned web page using BeautifulSoup4 to isolate reviews.
4. Append the reviews to global mutable list object `reviews`.
5. Move to the next page until none is left.
DEPENDENCIES:
- selenium==3.11.0
- beautifulsoup4==4.10.0
- geckodriver-v0.30.0-linux64
SCRAPING ELEMENTS MAPPING:
- rating stars `<span class=" staticStars notranslate" title="liked it">`
- 5: "it was amazing"
- 4: "really liked it"
- 3: "liked it"
- 2: "it was ok"
- 1: "did not like it"
"""
| 0 | 0 | 0 |
3b5dd7a6a0595ca80d545ae9fb1b889f5f586fb1 | 942 | py | Python | wingline/files/formats/_base.py | HappyEinara/wingline | 08d67ad9f58c869c385f954def6af5fa92e968ff | [
"MIT"
] | null | null | null | wingline/files/formats/_base.py | HappyEinara/wingline | 08d67ad9f58c869c385f954def6af5fa92e968ff | [
"MIT"
] | null | null | null | wingline/files/formats/_base.py | HappyEinara/wingline | 08d67ad9f58c869c385f954def6af5fa92e968ff | [
"MIT"
] | null | null | null | """Format base class"""
import abc
from typing import Any, BinaryIO, Iterable, Iterator
from wingline.types import Payload
class Format(metaclass=abc.ABCMeta):
    """Base class for a file format."""
    # MIME type of the concrete format (declared here, set by subclasses).
    mime_type: str
    # Filename suffixes recognized for this format.
    suffixes: Iterable[str] = set()
    # NOTE(review): self._handle is never assigned in this fragment — an
    # __init__ taking the handle appears to be missing from this copy.
    @property
    def reader(self) -> Iterator[dict[str, Any]]:
        """Iterator of dicts read from the bound handle."""
        return self.read(self._handle)
    def writer(self, payload: Payload) -> None:
        """Write one payload to the bound handle."""
        self.write(self._handle, payload)
    @abc.abstractmethod
    def read(self, handle: BinaryIO) -> Iterator[dict[str, Any]]:
        """Yields dicts from a file handle."""
        raise NotImplementedError
    @abc.abstractmethod
    def write(self, handle: BinaryIO, payload: Payload) -> None:
        """Writes a payload dict to a file handle."""
        raise NotImplementedError
| 23.55 | 65 | 0.643312 | """Format base class"""
import abc
from typing import Any, BinaryIO, Iterable, Iterator
from wingline.types import Payload
class Format(metaclass=abc.ABCMeta):
    """Base class for a file format.

    Concrete formats implement `read`/`write`; the `reader`/`writer`
    conveniences bind them to the handle given at construction time.
    """
    # MIME type of the concrete format (declared here, set by subclasses).
    mime_type: str
    # Filename suffixes recognized for this format.
    # NOTE(review): mutable class-level default (set()); subclasses should
    # override it rather than mutate it in place.
    suffixes: Iterable[str] = set()
    def __init__(self, handle: BinaryIO):
        """Bind this format to an open binary file handle."""
        self._handle = handle
    @property
    def reader(self) -> Iterator[dict[str, Any]]:
        """Iterator of dicts read from the bound handle."""
        return self.read(self._handle)
    def writer(self, payload: Payload) -> None:
        """Write one payload to the bound handle."""
        self.write(self._handle, payload)
    @abc.abstractmethod
    def read(self, handle: BinaryIO) -> Iterator[dict[str, Any]]:
        """Yields dicts from a file handle."""
        raise NotImplementedError
    @abc.abstractmethod
    def write(self, handle: BinaryIO, payload: Payload) -> None:
        """Writes a payload dict to a file handle."""
        raise NotImplementedError
| 46 | 0 | 27 |
ca13cb6c1b3cfefc6b76abb76f1ea51ffe41b4f1 | 445 | py | Python | formal_iac/playbooks_parser/migrations/0004_package_package_version.py | m0nt3cr1st0/Formal_IaC | 02b39e58cea82d9eb83f08e576c5ecec4e04fb14 | [
"MIT"
] | 1 | 2020-06-22T11:46:00.000Z | 2020-06-22T11:46:00.000Z | formal_iac/playbooks_parser/migrations/0004_package_package_version.py | m0nt3cr1st0/Formal_IaC | 02b39e58cea82d9eb83f08e576c5ecec4e04fb14 | [
"MIT"
] | 1 | 2020-06-14T10:16:20.000Z | 2020-06-14T10:16:20.000Z | formal_iac/playbooks_parser/migrations/0004_package_package_version.py | m0nt3cr1st0/Formal_IaC | 02b39e58cea82d9eb83f08e576c5ecec4e04fb14 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-03-20 11:48
from django.db import migrations, models
| 22.25 | 63 | 0.61573 | # Generated by Django 3.0.2 on 2020-03-20 11:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add Package.package_version (one-off default 1.0)."""
    dependencies = [
        ('playbooks_parser', '0003_playbook_playbook_content'),
    ]
    operations = [
        migrations.AddField(
            model_name='package',
            name='package_version',
            # preserve_default=False: 1.0 only backfills existing rows; the
            # field has no default afterwards.
            field=models.FloatField(default=1.0),
            preserve_default=False,
        ),
    ]
| 0 | 331 | 23 |
7b0c5950ae382d0eb6ac76a3acd2177eba6c670d | 4,173 | py | Python | api/v1/views/__init__.py | Davidodari/POLITICO-API | 479560f7accc3a6e46a8cec34c4f435ae9284138 | [
"MIT"
] | 1 | 2019-09-05T23:20:21.000Z | 2019-09-05T23:20:21.000Z | api/v1/views/__init__.py | Davidodari/POLITICO-API | 479560f7accc3a6e46a8cec34c4f435ae9284138 | [
"MIT"
] | 4 | 2019-02-12T10:06:12.000Z | 2019-02-20T05:00:40.000Z | api/v1/views/__init__.py | Davidodari/POLITICO-API | 479560f7accc3a6e46a8cec34c4f435ae9284138 | [
"MIT"
] | 4 | 2019-02-08T23:54:24.000Z | 2019-02-19T16:26:59.000Z | from flask import jsonify, make_response
from api.v1.models.office_model import OfficesModel
from api.v1.models.party_model import PartiesModel
| 42.581633 | 112 | 0.607716 | from flask import jsonify, make_response
from api.v1.models.office_model import OfficesModel
from api.v1.models.party_model import PartiesModel
def generate_response(model_result):
    """Build the 404 JSON response for a failed model lookup.

    'Invalid Id' in the model result means the id itself was bad (0 or
    negative); anything else means the item simply does not exist —
    including a second delete of the same item.
    """
    if 'Invalid Id' in model_result:
        error = "Invalid Id Not Found"
    else:
        error = "Item Not Found"
    return make_response(jsonify({"status": 404, "error": error}), 404)
class Methods:
    """Shared GET/PATCH/DELETE request handling for party and office resources.

    `model_type` selects the backing model ('office' -> OfficesModel,
    anything else -> PartiesModel); `item_id` is the raw id from the URL
    and `item` is the parsed request body.
    """
    def __init__(self, item_id, item, model_type):
        self.item_id = item_id
        self.item = item
        self.model_type = model_type
    @staticmethod
    def id_conversion(item_id):
        """Convert the raw id to int; return a 400 error dict if not numeric."""
        try:
            return int(item_id)
        except ValueError:
            # Use of letters as ids edge case
            return {"status": 400, "error": "Invalid Id"}
    # Channel for method requests
    def method_requests(self, option):
        """Dispatch by option: 1 -> get, 2 -> delete, 0 -> patch; 400 otherwise."""
        oid = self.id_conversion(self.item_id)
        # Only proceed when the id converted cleanly to an int.
        if isinstance(oid, int) and option == 1:
            # Option 1 for get
            return self.get(oid)
        elif isinstance(oid, int) and option == 2:
            # Option 2 for delete
            return self.delete(oid)
        elif isinstance(oid, int) and option == 0:
            # Option 0 for update
            return self.patch(oid)
        else:
            # oid is the error dict produced by id_conversion
            return make_response(jsonify(oid), 400)
    def patch(self, oid):
        """Update an item's name; 404 if missing, 400 on bad payload."""
        model_result = self.model_result_get_specific(oid)
        if 'Invalid Id' in model_result:
            # id == 0 or negatives edge case
            return make_response(jsonify({"status": 404, "error": "Invalid Id Not Found"}), 404)
        elif 'Doesnt Exist' in model_result or 'Error' in model_result:
            # Id greater than 0 but not found
            return make_response(jsonify({"status": 404, "error": "Item Not Found"}), 404)
        else:
            # Require a 'name' key with at least 3 characters
            if {'name'} <= set(self.item) and len(self.item['name']) >= 3:
                model_result['name'] = self.item['name']
                # BUG FIX: the 200 status code was previously passed to
                # jsonify() (producing a JSON array body) instead of
                # make_response(), matching the other responses here.
                return make_response(
                    jsonify({"status": 200, "data": [{"id": self.item_id, "name": model_result['name']}]}), 200)
            return make_response(jsonify({"status": 400, "error": "Incorrect Data Received,Bad request"}), 400)
    def get(self, oid):
        """Fetch a specific item; 200 with data when its keys match a known shape."""
        model_result = self.model_result_get_specific(oid)
        if isinstance(model_result, dict):
            # Checks keys for party
            if {'id', 'name', 'hqAddress', 'logoUrl'} <= set(model_result):
                return make_response(jsonify({"status": 200, "data": [model_result]}), 200)
            # Checks keys for office
            elif {'id', 'type', 'name'} <= set(model_result):
                return make_response(jsonify({"status": 200, "data": [model_result]}), 200)
        return generate_response(model_result)
    def delete(self, oid):
        """Delete an item; 200 on success, 404 via generate_response otherwise."""
        if self.model_type == 'office':
            # Delete office
            model_result = OfficesModel(office_id=oid).remove_item()
        else:
            # Delete party
            model_result = PartiesModel(party_id=oid).remove_item()
        if model_result is None:
            return make_response(
                jsonify({"status": 200, "message": "Deleted Successfully"}), 200)
        return generate_response(model_result)
    def model_result_get_specific(self, oid):
        """Fetch a specific office or party depending on model_type."""
        if self.model_type == 'office':
            model_result = OfficesModel(office_id=oid).get_specific_item()
        else:
            model_result = PartiesModel(party_id=oid).get_specific_item()
        return model_result
| 1,173 | 2,808 | 46 |
984e473bcfc4191b0134c7d4f9460750a44abb67 | 390 | py | Python | AoC20/day_16/b.py | a-recknagel/AoC20 | 7aa0013dc745bdc0ad357e1168b212bd065fd092 | [
"MIT"
] | null | null | null | AoC20/day_16/b.py | a-recknagel/AoC20 | 7aa0013dc745bdc0ad357e1168b212bd065fd092 | [
"MIT"
] | null | null | null | AoC20/day_16/b.py | a-recknagel/AoC20 | 7aa0013dc745bdc0ad357e1168b212bd065fd092 | [
"MIT"
] | null | null | null | from functools import reduce
from operator import mul
from AoC20.day_16 import data as data, parse
# Parse the raw puzzle input into the rule set, our ticket and the nearby tickets.
rules, my_ticket, other_tickets = parse(data)
# Keep only tickets for which ticket_violation() reports None -- presumably
# None means "no rule violated"; confirm against the day_16 package.
other_tickets = [ticket for ticket in other_tickets if rules.ticket_violation(ticket) is None]
# Deduce which field name corresponds to which position on a ticket.
fields = rules.field_deduction(other_tickets)
# Answer: product of our ticket's values for all "departure" fields.
print(reduce(mul, [my_ticket[idx] for name, idx in fields.items() if name.startswith("departure")]))
| 35.454545 | 100 | 0.792308 | from functools import reduce
from operator import mul
from AoC20.day_16 import data as data, parse
# Parse the raw puzzle input into the rule set, our ticket and the nearby tickets.
rules, my_ticket, other_tickets = parse(data)
# Keep only tickets for which ticket_violation() reports None -- presumably
# None means "no rule violated"; confirm against the day_16 package.
other_tickets = [ticket for ticket in other_tickets if rules.ticket_violation(ticket) is None]
# Deduce which field name corresponds to which position on a ticket.
fields = rules.field_deduction(other_tickets)
# Answer: product of our ticket's values for all "departure" fields.
print(reduce(mul, [my_ticket[idx] for name, idx in fields.items() if name.startswith("departure")]))
| 0 | 0 | 0 |
ba2f8b88c64e7ff01707867eca8253f36f35d1e5 | 761 | py | Python | cms/templatetags/cms_tags.py | noxan/django-mini-cms | c833e62571fd232ca5c6bc8278a5629c2886e9f1 | [
"BSD-3-Clause"
] | 1 | 2015-09-14T23:14:22.000Z | 2015-09-14T23:14:22.000Z | cms/templatetags/cms_tags.py | noxan/django-mini-cms | c833e62571fd232ca5c6bc8278a5629c2886e9f1 | [
"BSD-3-Clause"
] | null | null | null | cms/templatetags/cms_tags.py | noxan/django-mini-cms | c833e62571fd232ca5c6bc8278a5629c2886e9f1 | [
"BSD-3-Clause"
] | null | null | null | from django import template
from django.core.urlresolvers import reverse
register = template.Library()
@register.tag
| 29.269231 | 151 | 0.633377 | from django import template
from django.core.urlresolvers import reverse
register = template.Library()
class BreadcrumbsListNode(template.Node):
    """Template node rendering a breadcrumb <ul> for ``context['object']``.

    Walks the page's ancestor chain via ``.parent``, emitting one linked
    <li> per ancestor (in the order visited: nearest parent first) and a
    final active entry for the page itself.
    """
    def render(self, context):
        page = context['object']
        parts = ['<ul class="breadcrumb">']
        ancestor = page.parent
        while ancestor is not None:
            link = reverse('cms:page', args=[ancestor.slug])
            parts.append(u'<li><a href="%s">%s</a> <span class="divider">/</span></li>' % (link, ancestor.headline))
            ancestor = ancestor.parent
        parts.append(u'<li class="active">%s</li>' % (page.headline))
        parts.append(u'</ul>')
        return u''.join(parts)
@register.tag
def render_breadcrumbs(parser, token):
    """Template tag ``{% render_breadcrumbs %}``; takes no arguments and
    returns a node that renders breadcrumbs for ``context['object']``."""
    return BreadcrumbsListNode()
| 548 | 20 | 71 |
a7ebd2efd1e19c0bcb7f63e769b558e909041d3f | 2,560 | py | Python | tests/osinfo.py | iacopy/coveragepy | 2f4b4431cbb561aed3ade025da2720a670ba2dd2 | [
"Apache-2.0"
] | 2 | 2021-03-29T19:55:15.000Z | 2021-11-15T12:30:19.000Z | tests/osinfo.py | iacopy/coveragepy | 2f4b4431cbb561aed3ade025da2720a670ba2dd2 | [
"Apache-2.0"
] | null | null | null | tests/osinfo.py | iacopy/coveragepy | 2f4b4431cbb561aed3ade025da2720a670ba2dd2 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""OS information for testing."""
from coverage import env
if env.WINDOWS:
# Windows implementation
def process_ram():
"""How much RAM is this process using? (Windows)"""
import ctypes
# From: http://lists.ubuntu.com/archives/bazaar-commits/2009-February/011990.html
class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
"""Used by GetProcessMemoryInfo"""
_fields_ = [
('cb', ctypes.c_ulong),
('PageFaultCount', ctypes.c_ulong),
('PeakWorkingSetSize', ctypes.c_size_t),
('WorkingSetSize', ctypes.c_size_t),
('QuotaPeakPagedPoolUsage', ctypes.c_size_t),
('QuotaPagedPoolUsage', ctypes.c_size_t),
('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t),
('QuotaNonPagedPoolUsage', ctypes.c_size_t),
('PagefileUsage', ctypes.c_size_t),
('PeakPagefileUsage', ctypes.c_size_t),
('PrivateUsage', ctypes.c_size_t),
]
mem_struct = PROCESS_MEMORY_COUNTERS_EX()
ret = ctypes.windll.psapi.GetProcessMemoryInfo(
ctypes.windll.kernel32.GetCurrentProcess(),
ctypes.byref(mem_struct),
ctypes.sizeof(mem_struct)
)
if not ret:
return 0
return mem_struct.PrivateUsage
elif env.LINUX:
# Linux implementation
import os
_scale = {'kb': 1024, 'mb': 1024*1024}
def _VmB(key):
"""Read the /proc/PID/status file to find memory use."""
try:
# Get pseudo file /proc/<pid>/status
with open('/proc/%d/status' % os.getpid()) as t:
v = t.read()
except IOError:
return 0 # non-Linux?
# Get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(key)
v = v[i:].split(None, 3)
if len(v) < 3:
return 0 # Invalid format?
# Convert Vm value to bytes.
return int(float(v[1]) * _scale[v[2].lower()])
def process_ram():
"""How much RAM is this process using? (Linux implementation)"""
return _VmB('VmRSS')
else:
# Generic implementation.
def process_ram():
"""How much RAM is this process using? (stdlib implementation)"""
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
| 35.068493 | 89 | 0.582813 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""OS information for testing."""
from coverage import env
if env.WINDOWS:
# Windows implementation
def process_ram():
"""How much RAM is this process using? (Windows)"""
import ctypes
# From: http://lists.ubuntu.com/archives/bazaar-commits/2009-February/011990.html
class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
"""Used by GetProcessMemoryInfo"""
_fields_ = [
('cb', ctypes.c_ulong),
('PageFaultCount', ctypes.c_ulong),
('PeakWorkingSetSize', ctypes.c_size_t),
('WorkingSetSize', ctypes.c_size_t),
('QuotaPeakPagedPoolUsage', ctypes.c_size_t),
('QuotaPagedPoolUsage', ctypes.c_size_t),
('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t),
('QuotaNonPagedPoolUsage', ctypes.c_size_t),
('PagefileUsage', ctypes.c_size_t),
('PeakPagefileUsage', ctypes.c_size_t),
('PrivateUsage', ctypes.c_size_t),
]
mem_struct = PROCESS_MEMORY_COUNTERS_EX()
ret = ctypes.windll.psapi.GetProcessMemoryInfo(
ctypes.windll.kernel32.GetCurrentProcess(),
ctypes.byref(mem_struct),
ctypes.sizeof(mem_struct)
)
if not ret:
return 0
return mem_struct.PrivateUsage
elif env.LINUX:
# Linux implementation
import os
_scale = {'kb': 1024, 'mb': 1024*1024}
def _VmB(key):
"""Read the /proc/PID/status file to find memory use."""
try:
# Get pseudo file /proc/<pid>/status
with open('/proc/%d/status' % os.getpid()) as t:
v = t.read()
except IOError:
return 0 # non-Linux?
# Get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(key)
v = v[i:].split(None, 3)
if len(v) < 3:
return 0 # Invalid format?
# Convert Vm value to bytes.
return int(float(v[1]) * _scale[v[2].lower()])
def process_ram():
"""How much RAM is this process using? (Linux implementation)"""
return _VmB('VmRSS')
else:
# Generic implementation.
def process_ram():
"""How much RAM is this process using? (stdlib implementation)"""
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
| 0 | 0 | 0 |
05615ade95950c107306f3261fffd69d684621d2 | 1,990 | py | Python | python/basic_course_python/function.py | ademilsoncarvalho/estudos | ae7b73a6154c29d54be367066803323c6eb52907 | [
"MIT"
] | null | null | null | python/basic_course_python/function.py | ademilsoncarvalho/estudos | ae7b73a6154c29d54be367066803323c6eb52907 | [
"MIT"
] | null | null | null | python/basic_course_python/function.py | ademilsoncarvalho/estudos | ae7b73a6154c29d54be367066803323c6eb52907 | [
"MIT"
] | null | null | null | import random
# Functions are declared with the `def` keyword.
# NOTE(review): test_function, test_function_parameter and Person are called
# below but not defined in this excerpt -- presumably defined elsewhere;
# confirm before running this file on its own.
test_function()
test_function_parameter("teste parameter")
# type() returns the type of a variable.
list = ["ade"]
print(type(list))
# int() converts a numeric string to an integer.
string = "10"
print(int(string))
# input() reads a value typed by the user (Python 3.x behaviour).
age = input("Whats is your age?")
print(int(age))
# range() returns an iterable sequence of numbers, typically used in for loops.
print(range(5))
# help() followed by a name shows its documentation.
# Formatting examples -- float: 7 is the total field width, 2 the digits
# after the decimal point, and `f` means fixed-point float.
print("R$ {:7.2f}".format(1234.50))
# Integer zero-padded to 7 digits, using `d`.
print("R$ {:07d}".format(4))
# Date-style zero padding.
print("Data {:02d}/{:02d}".format(9, 4))
# Pseudo-random number scaled into [0, 100).
print(int(random.random() * 100))
# Random integer drawn from range(1, 101).
print(random.randrange(1, 101))
# Absolute value: abs().
print(abs(10))
print(abs(-10))
# __name__ equals "__main__" when the file is run directly (not imported).
if __name__ == "__main__":
    print("file run directly not imported !!")
# bool() truthiness examples.
bool(0)
bool("")
bool(None)
bool(1)
bool(-100)
bool(13.5)
bool("test")
bool(True)
# str.find returns the position of a substring, or -1 if not found.
string = "test"
print(string.find("t"))
# Iterating over a string yields its characters.
for letter in string:
    print(letter)
# Lower and upper case.
print(string.lower())
print(string.upper())
# title() capitalises the first letter of each word.
print(string.title())
# split() breaks the string on whitespace (discarding the leading space).
string = " test"
print(string.split())
# __file__ holds this module's path.
import os
print(__file__)
# Directory containing the current file.
print(os.path.dirname(__file__))
# hasattr() checks whether an object has a given attribute.
person = Person()
print('Person has age?:', hasattr(person, 'age'))
# Conditional (ternary) expression: a if condition else b.
print('True' if bool(1) else 'False')
| 18.773585 | 79 | 0.707538 | import random
# for declaring function using def
def test_function():
    """Print a fixed greeting; demonstrates a zero-argument function."""
    message = "hello function"
    print(message)
# Call the zero-argument function defined above.
test_function()
def test_function_parameter(parameter):
print("hello function " + parameter)
test_function_parameter("teste parameter")
# type() returns the type of a variable.
list = ["ade"]
print(type(list))
# int() converts a numeric string to an integer.
string = "10"
print(int(string))
# input() reads a value typed by the user (Python 3.x behaviour).
age = input("Whats is your age?")
print(int(age))
# range() returns an iterable sequence of numbers, typically used in for loops.
print(range(5))
# help() followed by a name shows its documentation.
# Formatting examples -- float: 7 is the total field width, 2 the digits
# after the decimal point, and `f` means fixed-point float.
print("R$ {:7.2f}".format(1234.50))
# Integer zero-padded to 7 digits, using `d`.
print("R$ {:07d}".format(4))
# Date-style zero padding.
print("Data {:02d}/{:02d}".format(9, 4))
# Pseudo-random number scaled into [0, 100).
print(int(random.random() * 100))
# Random integer drawn from range(1, 101).
print(random.randrange(1, 101))
# Absolute value: abs().
print(abs(10))
print(abs(-10))
# __name__ equals "__main__" when the file is run directly (not imported).
if __name__ == "__main__":
    print("file run directly not imported !!")
# bool() truthiness examples.
bool(0)
bool("")
bool(None)
bool(1)
bool(-100)
bool(13.5)
bool("test")
bool(True)
# str.find returns the position of a substring, or -1 if not found.
string = "test"
print(string.find("t"))
# Iterating over a string yields its characters.
for letter in string:
    print(letter)
# Lower and upper case.
print(string.lower())
print(string.upper())
# title() capitalises the first letter of each word.
print(string.title())
# split() breaks the string on whitespace (discarding the leading space).
string = " test"
print(string.split())
# __file__ holds this module's path.
import os
print(__file__)
# Directory containing the current file.
print(os.path.dirname(__file__))
# hasattr() checks whether an object has a given attribute.
class Person:
    """Minimal example class (class attributes only) used to demonstrate hasattr()."""
    age = 23
    name = 'Adam'
person = Person()
print('Person has age?:', hasattr(person, 'age'))
# Conditional (ternary) expression: a if condition else b.
print('True' if bool(1) else 'False')
| 86 | 23 | 67 |
4ee2487c8ef55b167304be1e06c008d16bac440a | 3,340 | py | Python | test/base_test_context.py | aspose-tasks-cloud/aspose-tasks-cloud-python | d1852a02fb1aa2591501a34d5e56079f8aac43f0 | [
"MIT"
] | 2 | 2021-08-16T09:25:51.000Z | 2022-01-27T20:20:41.000Z | test/base_test_context.py | aspose-tasks-cloud/aspose-tasks-cloud-python | d1852a02fb1aa2591501a34d5e56079f8aac43f0 | [
"MIT"
] | null | null | null | test/base_test_context.py | aspose-tasks-cloud/aspose-tasks-cloud-python | d1852a02fb1aa2591501a34d5e56079f8aac43f0 | [
"MIT"
] | null | null | null | #
# --------------------------------------------------------------------------------------------------------------------
# <copyright company="Aspose" file="base_test_context.py">
# Copyright (c) 2020 Aspose.Tasks Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# --------------------------------------------------------------------------------------------------------------------
#
import os
import json
import unittest
import warnings
import six
from asposetaskscloud import ApiClient, TasksApi, UploadFileRequest, DeleteFileRequest, DeleteFolderRequest
| 45.135135 | 118 | 0.656886 | #
# --------------------------------------------------------------------------------------------------------------------
# <copyright company="Aspose" file="base_test_context.py">
# Copyright (c) 2020 Aspose.Tasks Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# --------------------------------------------------------------------------------------------------------------------
#
import os
import json
import unittest
import warnings
import six
from asposetaskscloud import ApiClient, TasksApi, UploadFileRequest, DeleteFileRequest, DeleteFolderRequest
class BaseTestContext(unittest.TestCase):
    """Base class for SDK tests: builds a configured TasksApi in setUp()
    and removes everything uploaded during the test in tearDown()."""
    def setUp(self):
        """Load credentials from testConfig.json and create the API client."""
        # Repository root: one level above this test package.
        root_path = os.path.abspath(os.path.realpath(os.path.dirname(__file__)) + "/..")
        self.local_test_folder = os.path.join(root_path, 'testData')
        self.remote_test_folder = os.path.join('Temp', 'SdkTests', 'python')
        self.remote_test_out = os.path.join('Temp', 'SdkTests', 'python', 'TestOut')
        # The credentials file lives one directory above the repository root.
        creds_path = os.path.join(root_path, '..', 'testConfig.json')
        if not os.path.exists(creds_path):
            raise IOError('Credential file testConfig.json is not found')
        with open(os.path.join(root_path, '..', 'testConfig.json')) as f:
            creds = json.loads(f.read())
        api_client = ApiClient()
        api_client.configuration.host = creds['BaseUrl']
        api_client.configuration.api_key['api_key'] = creds['AppKey']
        api_client.configuration.api_key['app_sid'] = creds['AppSid']
        # AuthUrl is optional in the config file.
        if 'AuthUrl' in creds:
            api_client.configuration.auth_url = creds['AuthUrl']
        self.tasks_api = TasksApi(api_client)
        # Names of files uploaded via upload_file(); deleted in tearDown().
        self.uploaded_files = []
        if six.PY3:
            # Silence ResourceWarnings (Python 3 only) from the HTTP layer.
            warnings.simplefilter("ignore", ResourceWarning)
    def upload_file(self, filename):
        """Upload *filename* from the local test folder and record it for cleanup."""
        file = os.path.join(self.local_test_folder, filename)
        request = UploadFileRequest(filename, file)
        self.tasks_api.upload_file(request)
        self.uploaded_files.append(filename)
    def tearDown(self):
        """Delete the remote storage test folder and every uploaded file."""
        request = DeleteFolderRequest('Temp/SdkTests/TestData/Storage', recursive=True)
        self.tasks_api.delete_folder(request)
        for file in self.uploaded_files:
            request = DeleteFileRequest(file)
            self.tasks_api.delete_file(request)
| 1,586 | 20 | 104 |
f72897ac35d6d93b6020380f7e88be2a60683e88 | 3,957 | py | Python | katsdpdisp/test/test_data.py | ska-sa/katsdpdisp | 3fd2f5878c0bd3ae56815568446593b876881e3f | [
"BSD-3-Clause"
] | null | null | null | katsdpdisp/test/test_data.py | ska-sa/katsdpdisp | 3fd2f5878c0bd3ae56815568446593b876881e3f | [
"BSD-3-Clause"
] | 6 | 2020-03-13T08:17:49.000Z | 2021-05-04T14:43:01.000Z | katsdpdisp/test/test_data.py | ska-sa/katsdpdisp | 3fd2f5878c0bd3ae56815568446593b876881e3f | [
"BSD-3-Clause"
] | null | null | null | """Tests for :py:mod:`katsdpdisp.data`."""
import numpy as np
from numpy.testing import assert_array_equal
from katsdpdisp.data import SparseArray
def test_sparsearray(fullslots=100, fullbls=10, fullchan=5, nslots=10, maxbaselines=6, islot_new_bls=6):
    """Simulates the assignment and retrieval of data as it happens in the signal displays when
    it receives different sets of baseline data at different timestamps, with some time continuity.
    (fullslots,fullbls,fullchan) is the dimensions of the full/complete dataset
    (nslots,maxbaselines,fullchan) is the true size of the sparse array, representing a size of (nslots,fullbls,fullchan)
    where maxbaselines<fullbls
    islot_new_bls is the number of time stamps that passes before there is a new baseline product selected/chosen in the test sequence"""
    mx = SparseArray(nslots, fullbls, fullchan, maxbaselines, dtype=np.int32)
    rs = np.random.RandomState(seed=0)
    # randint's upper bound is exclusive, so 0..10 inclusive needs high=11.
    # Replaces the deprecated RandomState.random_integers(0, 10, ...) while
    # drawing the identical value stream.
    fulldata = rs.randint(0, 11, [fullslots, fullbls, fullchan])
    histbaselines = []
    for it in range(fullslots):
        if it % islot_new_bls == 0:  # add a new baseline, remove old, every so often
            while True:
                # random_integers(0, fullbls-1) inclusive == randint(0, fullbls)
                newbaseline = rs.randint(0, fullbls, [1])
                if len(histbaselines) == 0 or (newbaseline not in histbaselines[-1]):
                    break
            if (len(histbaselines) == 0):
                newbaselines = np.r_[newbaseline]
            elif (len(histbaselines[-1]) < islot_new_bls):
                newbaselines = np.r_[histbaselines[-1], newbaseline]
            else:
                # Rotate: drop the oldest baseline, append the new one.
                newbaselines = np.r_[histbaselines[-1][1:], newbaseline]
            histbaselines.append(newbaselines)
        mx[it % nslots, histbaselines[-1], :] = fulldata[it, histbaselines[-1], :]
        for cit in range(islot_new_bls):
            if (cit >= len(histbaselines)):
                break
            # Baselines still selected now that were also selected cit steps ago.
            hasthesebaselines = list(set(histbaselines[-1 - cit]) & set(histbaselines[-1]))
            missingbaselines = list(set(histbaselines[-1 - cit]) - set(histbaselines[-1]))
            retrieved = mx[(it - cit) % nslots, hasthesebaselines, :]
            assert_array_equal(retrieved, fulldata[it - cit, hasthesebaselines, :], 'SparseArray getitem test failed')
            # Deselected baselines must read back as zeros.
            missingretrieved = mx[(it - cit) % nslots, missingbaselines, :]
            assert_array_equal(missingretrieved, np.zeros(missingretrieved.shape, dtype=np.int32), 'SparseArray missing baseline test failed')
| 56.528571 | 228 | 0.700531 | """Tests for :py:mod:`katsdpdisp.data`."""
import numpy as np
from numpy.testing import assert_array_equal
from katsdpdisp.data import SparseArray
def test_sparsearray(fullslots=100, fullbls=10, fullchan=5, nslots=10, maxbaselines=6, islot_new_bls=6):
    """Simulates the assignment and retrieval of data as it happens in the signal displays when
    it receives different sets of baseline data at different timestamps, with some time continuity.
    (fullslots,fullbls,fullchan) is the dimensions of the full/complete dataset
    (nslots,maxbaselines,fullchan) is the true size of the sparse array, representing a size of (nslots,fullbls,fullchan)
    where maxbaselines<fullbls
    islot_new_bls is the number of time stamps that passes before there is a new baseline product selected/chosen in the test sequence"""
    mx = SparseArray(nslots, fullbls, fullchan, maxbaselines, dtype=np.int32)
    rs = np.random.RandomState(seed=0)
    # randint's upper bound is exclusive, so 0..10 inclusive needs high=11.
    # Replaces the deprecated RandomState.random_integers(0, 10, ...) while
    # drawing the identical value stream.
    fulldata = rs.randint(0, 11, [fullslots, fullbls, fullchan])
    histbaselines = []
    for it in range(fullslots):
        if it % islot_new_bls == 0:  # add a new baseline, remove old, every so often
            while True:
                # random_integers(0, fullbls-1) inclusive == randint(0, fullbls)
                newbaseline = rs.randint(0, fullbls, [1])
                if len(histbaselines) == 0 or (newbaseline not in histbaselines[-1]):
                    break
            if (len(histbaselines) == 0):
                newbaselines = np.r_[newbaseline]
            elif (len(histbaselines[-1]) < islot_new_bls):
                newbaselines = np.r_[histbaselines[-1], newbaseline]
            else:
                # Rotate: drop the oldest baseline, append the new one.
                newbaselines = np.r_[histbaselines[-1][1:], newbaseline]
            histbaselines.append(newbaselines)
        mx[it % nslots, histbaselines[-1], :] = fulldata[it, histbaselines[-1], :]
        for cit in range(islot_new_bls):
            if (cit >= len(histbaselines)):
                break
            # Baselines still selected now that were also selected cit steps ago.
            hasthesebaselines = list(set(histbaselines[-1 - cit]) & set(histbaselines[-1]))
            missingbaselines = list(set(histbaselines[-1 - cit]) - set(histbaselines[-1]))
            retrieved = mx[(it - cit) % nslots, hasthesebaselines, :]
            assert_array_equal(retrieved, fulldata[it - cit, hasthesebaselines, :], 'SparseArray getitem test failed')
            # Deselected baselines must read back as zeros.
            missingretrieved = mx[(it - cit) % nslots, missingbaselines, :]
            assert_array_equal(missingretrieved, np.zeros(missingretrieved.shape, dtype=np.int32), 'SparseArray missing baseline test failed')
def test_sparsearray_indexing(fullslots=100, fullbls=10, fullchan=5, nslots=10, maxbaselines=6):
    """Exercises scalar/slice index combinations on every axis for both
    assignment and retrieval on a SparseArray."""
    mx = SparseArray(nslots, fullbls, fullchan, maxbaselines, dtype=np.int32)
    rs = np.random.RandomState(seed=0)
    # randint's upper bound is exclusive; replaces the deprecated
    # random_integers(0, 10, ...) with the identical value stream.
    fulldata = rs.randint(0, 11, [fullslots, fullbls, fullchan])
    mx[0, 0, 0] = fulldata[0, 0, 0]
    assert_array_equal(mx[0, 0, 0], fulldata[0, 0, 0], 'SparseArray [scalar,scalar,scalar] index test failed')
    mx[1, 1, :] = fulldata[1, 1, :]
    # baseline change so previous assignment purged (in future may retain until running out of memory and necessary to purge)
    assert_array_equal(mx[1, 1, :], fulldata[1, 1, :], 'SparseArray [scalar,scalar,slice] index test 2 failed')
    mx[2, 1, :] = fulldata[2, 1, :]
    # assign to same baseline so previous slot value remain
    assert_array_equal(mx[1:3, 1, :], fulldata[1:3, 1, :], 'SparseArray retain old value test failed')
    mx[3, :maxbaselines, 0] = fulldata[3, :maxbaselines, 0]
    assert_array_equal(mx[3, :maxbaselines, 0], fulldata[3, :maxbaselines, 0], 'SparseArray [scalar,slice,scalar] index test failed')
    mx[:, 1, 3] = fulldata[:nslots, 1, 3]
    assert_array_equal(mx[:, 1, 3], fulldata[:nslots, 1, 3], 'SparseArray [slice,scalar,scalar] index test failed')
    mx[:, 1, :] = fulldata[:nslots, 1, :]
    assert_array_equal(mx[:, 1, :], fulldata[:nslots, 1, :], 'SparseArray [slice,scalar,slice] index test failed')
    mx[:, 1:maxbaselines, :] = fulldata[2:nslots + 2, 1:maxbaselines, :]
    assert_array_equal(mx[:, 1:maxbaselines, :], fulldata[2:nslots + 2, 1:maxbaselines, :], 'SparseArray [slice,slice,slice] index test failed')
| 1,524 | 0 | 23 |
80b324e3881506d24e20d29a50b71b14f0a18219 | 744 | py | Python | main.py | cccfr/mete-migrate | 9c1df5da8f7d2579e6cb47ebb9d38dad237d9f4f | [
"MIT"
] | null | null | null | main.py | cccfr/mete-migrate | 9c1df5da8f7d2579e6cb47ebb9d38dad237d9f4f | [
"MIT"
] | null | null | null | main.py | cccfr/mete-migrate | 9c1df5da8f7d2579e6cb47ebb9d38dad237d9f4f | [
"MIT"
] | null | null | null | import requests
from urllib.parse import urlencode
# Source (from_mate) and destination (to_mate) Mete instances.
from_mate = "http://172.16.0.69:3000"
to_mate = "http://mete.cloud.cccfr"
# Copy every user and drink record from the source to the destination.
# NOTE(review): get_items/set_item are not defined in this excerpt --
# presumably provided elsewhere in the module; confirm before running.
for category in ("users", "drinks"):
    items = get_items(category)
    for item in items:
        set_item(item, category)
| 28.615385 | 122 | 0.662634 | import requests
from urllib.parse import urlencode
from_mate = "http://172.16.0.69:3000"
to_mate = "http://mete.cloud.cccfr"
def get_items(category):
    """Return all records of *category* from the source Mete instance as parsed JSON."""
    url = "%s/api/v1/%s" % (from_mate, category)
    return requests.get(url).json()
def set_item(item, category):
    """POST one record to the destination instance.

    ``category.strip("s")`` removes the plural 's' ('users' -> 'user') to
    form the parameter prefix used by prepare_params.
    NOTE(review): the data is sent as url-encoded query params while the
    Content-Type header claims application/json -- confirm the destination
    server accepts this combination.
    """
    params = prepare_params(item, category.strip("s"))
    print(params)
    print(requests.post("%s/api/v1/%s" %(to_mate, category), params=params, headers={'Content-Type': 'application/json'}))
def prepare_params(item, kind):
    """URL-encode *item*'s fields as Rails-style nested params, e.g. ``kind[key]=value``."""
    wrapped = {"%s[%s]" % (kind, key): value for key, value in item.items()}
    return urlencode(wrapped)
# Migrate every user and drink record from the source to the destination.
for category in ("users", "drinks"):
    items = get_items(category)
    for item in items:
        set_item(item, category)
| 423 | 0 | 69 |
111726ee2ad9068e4a2603de2d0ec82ab6d2c372 | 3,892 | py | Python | github.py | miketheredherring/py-github-checks | 6e1b2a516c5e97ca922819140c098c7f52ac5586 | [
"MIT"
] | null | null | null | github.py | miketheredherring/py-github-checks | 6e1b2a516c5e97ca922819140c098c7f52ac5586 | [
"MIT"
] | null | null | null | github.py | miketheredherring/py-github-checks | 6e1b2a516c5e97ca922819140c098c7f52ac5586 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import fire
import os
import re
import requests
from configparser import ConfigParser
from datetime import datetime
# HTTP status codes used when validating GitHub API responses.
HTTP_OK_200 = 200
HTTP_CREATED_201 = 201
HTTP_AUTHORIZATION_401 = 401
HTTP_NOT_FOUND_404 = 404
class Github(object):
    '''Base class to interface with Github.com.

    Credentials are read from the GITHUB_USERNAME / GITHUB_TOKEN
    environment variables at import time.
    '''
    username = os.environ.get('GITHUB_USERNAME')
    token = os.environ.get('GITHUB_TOKEN')
    class Checks(object):
        '''Abstraction of PR checks (GitHub "check runs" preview API).
        '''
        def _request(self, method, path, payload=None, expected_status=None):
            '''Issue an authenticated request against the repository API.

            ``method`` is a requests callable (get/post/...), ``path`` is
            appended to the repository base URL, ``payload`` (if given) is
            sent as a JSON body, and ``expected_status`` is an int or tuple
            of acceptable status codes (defaults to 200).
            '''
            client = Github()
            url = client.info()['url']
            kwargs = {
                'headers': {
                    'Accept': 'application/vnd.github.antiope-preview+json',
                    # GitHub's token scheme; the previous 'user:token' value
                    # was not a valid Authorization header.
                    'Authorization': 'token %s' % client.token,
                }
            }
            # Bug fix: payload used to be accepted and then silently ignored.
            if payload is not None:
                kwargs['json'] = payload
            response = method('%s%s' % (url, path), **kwargs)
            # Validate potential responses
            if response.status_code in (HTTP_AUTHORIZATION_401, HTTP_NOT_FOUND_404):
                raise Exception('Invalid credentials provided for auth')
            # Validate expected status codes for a given action
            if expected_status is None:
                expected_status = (HTTP_OK_200, )
            elif isinstance(expected_status, int):
                expected_status = (expected_status, )
            if response.status_code not in expected_status:
                raise Exception('Unexpected response [%s] for `%s`' % (response.status_code, path))
            return response
        def create(self, name, branch, sha):
            '''Create new checks for a given commit.
            '''
            response = self._request(
                requests.post,
                '/check-runs',
                payload={
                    'name': name,
                    'branch': branch,
                    'head_sha': sha,
                    'status': 'completed',
                    'conclusion': 'success',
                    'completed_at': datetime.now().isoformat()
                },
                expected_status=(HTTP_CREATED_201, )
            )
            # Bug fix: call .json() -- the bare attribute is a bound method.
            return response.json()
        def list(self, commit_hash):
            '''Lists the checks for a given commit.
            '''
            response = self._request(
                requests.get,
                '/commits/%s/check-runs' % commit_hash
            )
            # Bug fix: call .json() -- the bare attribute is a bound method.
            return response.json()
    @staticmethod
    def info():
        '''Returns info about the current repository.

        Reads .git/config and .git/HEAD in the working directory; returns
        {'url': <repository API base URL>, 'branch': <current branch>}.
        '''
        info = {}
        config = ConfigParser()
        config.read('.git/config')
        # Validate that this is hosted on remote
        try:
            remote_url = config['remote "origin"']['url']
        except KeyError:
            raise ValueError('Git repository does not have remote origin')
        # Retrieve the information we need
        m = re.match(
            r'git@(?P<host>github\.com):(?P<username>[a-zA-Z0-9]+)/(?P<repo_name>[a-zA-Z0-9_-]+)\.git',
            remote_url
        )
        # Bug fix: re.match returns None when nothing matches; calling
        # .group() on None raised AttributeError instead of this ValueError.
        if m is None:
            raise ValueError('Git repository origin is not Github.com')
        # Build the URL
        info['url'] = 'https://api.github.com/repos/%(owner)s/%(repo)s' % {
            'owner': m.group('username'),
            'repo': m.group('repo_name'),
        }
        # Determine where HEAD points.  A symbolic HEAD reads
        # 'ref: refs/heads/<branch>'.  Bug fixes: the pattern said
        # 'ref/heads', and the handle was bound to `file` but read as `f`
        # (a NameError at runtime).
        with open('.git/HEAD') as head_file:
            m = re.match(r'ref: refs/heads/(?P<branch>[a-zA-Z0-9_-]+)', head_file.read())
        if m is None:
            raise ValueError('Unable to find current branch name')
        info['branch'] = m.group('branch')
        return info
if __name__ == '__main__':
    # Expose the Github class as a command line interface via python-fire.
    fire.Fire(Github)
| 31.901639 | 103 | 0.535714 | #!/usr/bin/python
import fire
import os
import re
import requests
from configparser import ConfigParser
from datetime import datetime
# HTTP status codes used when validating GitHub API responses.
HTTP_OK_200 = 200
HTTP_CREATED_201 = 201
HTTP_AUTHORIZATION_401 = 401
HTTP_NOT_FOUND_404 = 404
class Github(object):
    '''Base class to interface with Github.com.

    Credentials are read from the GITHUB_USERNAME / GITHUB_TOKEN
    environment variables at import time.
    '''
    username = os.environ.get('GITHUB_USERNAME')
    token = os.environ.get('GITHUB_TOKEN')
    class Checks(object):
        '''Abstraction of PR checks (GitHub "check runs" preview API).
        '''
        def _request(self, method, path, payload=None, expected_status=None):
            '''Issue an authenticated request against the repository API.

            ``method`` is a requests callable (get/post/...), ``path`` is
            appended to the repository base URL, ``payload`` (if given) is
            sent as a JSON body, and ``expected_status`` is an int or tuple
            of acceptable status codes (defaults to 200).
            '''
            client = Github()
            url = client.info()['url']
            kwargs = {
                'headers': {
                    'Accept': 'application/vnd.github.antiope-preview+json',
                    # GitHub's token scheme; the previous 'user:token' value
                    # was not a valid Authorization header.
                    'Authorization': 'token %s' % client.token,
                }
            }
            # Bug fix: payload used to be accepted and then silently ignored.
            if payload is not None:
                kwargs['json'] = payload
            response = method('%s%s' % (url, path), **kwargs)
            # Validate potential responses
            if response.status_code in (HTTP_AUTHORIZATION_401, HTTP_NOT_FOUND_404):
                raise Exception('Invalid credentials provided for auth')
            # Validate expected status codes for a given action
            if expected_status is None:
                expected_status = (HTTP_OK_200, )
            elif isinstance(expected_status, int):
                expected_status = (expected_status, )
            if response.status_code not in expected_status:
                raise Exception('Unexpected response [%s] for `%s`' % (response.status_code, path))
            return response
        def create(self, name, branch, sha):
            '''Create new checks for a given commit.
            '''
            response = self._request(
                requests.post,
                '/check-runs',
                payload={
                    'name': name,
                    'branch': branch,
                    'head_sha': sha,
                    'status': 'completed',
                    'conclusion': 'success',
                    'completed_at': datetime.now().isoformat()
                },
                expected_status=(HTTP_CREATED_201, )
            )
            # Bug fix: call .json() -- the bare attribute is a bound method.
            return response.json()
        def list(self, commit_hash):
            '''Lists the checks for a given commit.
            '''
            response = self._request(
                requests.get,
                '/commits/%s/check-runs' % commit_hash
            )
            # Bug fix: call .json() -- the bare attribute is a bound method.
            return response.json()
    @staticmethod
    def info():
        '''Returns info about the current repository.

        Reads .git/config and .git/HEAD in the working directory; returns
        {'url': <repository API base URL>, 'branch': <current branch>}.
        '''
        info = {}
        config = ConfigParser()
        config.read('.git/config')
        # Validate that this is hosted on remote
        try:
            remote_url = config['remote "origin"']['url']
        except KeyError:
            raise ValueError('Git repository does not have remote origin')
        # Retrieve the information we need
        m = re.match(
            r'git@(?P<host>github\.com):(?P<username>[a-zA-Z0-9]+)/(?P<repo_name>[a-zA-Z0-9_-]+)\.git',
            remote_url
        )
        # Bug fix: re.match returns None when nothing matches; calling
        # .group() on None raised AttributeError instead of this ValueError.
        if m is None:
            raise ValueError('Git repository origin is not Github.com')
        # Build the URL
        info['url'] = 'https://api.github.com/repos/%(owner)s/%(repo)s' % {
            'owner': m.group('username'),
            'repo': m.group('repo_name'),
        }
        # Determine where HEAD points.  A symbolic HEAD reads
        # 'ref: refs/heads/<branch>'.  Bug fixes: the pattern said
        # 'ref/heads', and the handle was bound to `file` but read as `f`
        # (a NameError at runtime).
        with open('.git/HEAD') as head_file:
            m = re.match(r'ref: refs/heads/(?P<branch>[a-zA-Z0-9_-]+)', head_file.read())
        if m is None:
            raise ValueError('Unable to find current branch name')
        info['branch'] = m.group('branch')
        return info
if __name__ == '__main__':
    # Expose the Github class as a command line interface via python-fire.
    fire.Fire(Github)
| 0 | 0 | 0 |
ee89eb0e2998dca274835718c84f1f1c2cd53fe1 | 897 | py | Python | custom_components/racelandshop/helpers/functions/is_safe_to_remove.py | racelandshop/integration | 424057dcad30f20ed0276aec07d28b48b2b187be | [
"MIT"
] | null | null | null | custom_components/racelandshop/helpers/functions/is_safe_to_remove.py | racelandshop/integration | 424057dcad30f20ed0276aec07d28b48b2b187be | [
"MIT"
] | null | null | null | custom_components/racelandshop/helpers/functions/is_safe_to_remove.py | racelandshop/integration | 424057dcad30f20ed0276aec07d28b48b2b187be | [
"MIT"
] | null | null | null | """Helper to check if path is safe to remove."""
from pathlib import Path
from custom_components.racelandshop.share import get_racelandshop
def is_safe_to_remove(path: str) -> bool:
    """Helper to check if path is safe to remove."""
    racelandshop = get_racelandshop()
    root = racelandshop.core.config_path
    cfg = racelandshop.configuration
    # Directories managed by the integration (plus custom_components
    # itself) must never be deleted wholesale.
    protected = {
        Path(f"{root}/{cfg.appdaemon_path}"),
        Path(f"{root}/{cfg.netdaemon_path}"),
        Path(f"{root}/{cfg.plugin_path}"),
        Path(f"{root}/{cfg.python_script_path}"),
        Path(f"{root}/{cfg.theme_path}"),
        Path(f"{root}/custom_components/"),
    }
    return Path(path) not in protected
| 42.714286 | 97 | 0.724638 | """Helper to check if path is safe to remove."""
from pathlib import Path
from custom_components.racelandshop.share import get_racelandshop
def is_safe_to_remove(path: str) -> bool:
    """Helper to check if path is safe to remove."""
    racelandshop = get_racelandshop()
    # Paths owned/managed by the integration (plus the custom_components
    # directory itself); removing any of these would break the install.
    paths = [
        Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.appdaemon_path}"),
        Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.netdaemon_path}"),
        Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.plugin_path}"),
        Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.python_script_path}"),
        Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.theme_path}"),
        Path(f"{racelandshop.core.config_path}/custom_components/"),
    ]
    # Only paths outside the protected list may be removed.
    if Path(path) in paths:
        return False
    return True
| 0 | 0 | 0 |
a7d2cd9477a196d03b18e262d995e2e9a05e9ae4 | 591 | py | Python | Interesting_Python/gotchas/mutable_args_inside_func.py | bhishanpdl/Fun_Repos | b2ceed8cce0b05288774ed17c1450f64807e90cc | [
"MIT"
] | null | null | null | Interesting_Python/gotchas/mutable_args_inside_func.py | bhishanpdl/Fun_Repos | b2ceed8cce0b05288774ed17c1450f64807e90cc | [
"MIT"
] | null | null | null | Interesting_Python/gotchas/mutable_args_inside_func.py | bhishanpdl/Fun_Repos | b2ceed8cce0b05288774ed17c1450f64807e90cc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# Ref:
# https://www.reddit.com/r/learnpython/comments/9oc0mu/just_an_interesting_thing_i_found/
# https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
# NOTE(review): this copy of the snippet is missing the definitions of f()
# and append_to() (they appear in the full version further down the file);
# as written, the first call below raises NameError.
a = f()
b = f()
a.append(3)
b.append(4)
print(b)
# Solution
# Ref: https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
print('\nSolving mutable argument to function gotchas')
a = append_to(3)
b = append_to(4)
print(b)
| 19.7 | 89 | 0.686971 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# Ref:
# https://www.reddit.com/r/learnpython/comments/9oc0mu/just_an_interesting_thing_i_found/
# https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
# The mutable default argument here is INTENTIONAL: this snippet exists to
# demonstrate the classic gotcha where one list object is shared across all
# calls that omit ``x``. Do not "fix" it to ``x=None``.
def f(x=[]):
    return x
# Both calls return the SAME default list object, so the two appends
# accumulate on one list:
a = f()
b = f()
a.append(3)
b.append(4)
# Prints [3, 4], not [4] -- demonstrating the gotcha.
print(b)
# Solution
# Ref: https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
print('\nSolving mutable argument to function gotchas')
def append_to(element, to=None):
    """Append ``element`` to ``to``, creating a fresh list when omitted.

    Using ``None`` as the sentinel (instead of a mutable ``[]`` default)
    guarantees each call without ``to`` gets its own brand-new list.
    """
    target = [] if to is None else to
    target.append(element)
    return target
# With the None-sentinel fix each call gets its own list, so this prints [4].
a = append_to(3)
b = append_to(4)
print(b)
| 84 | 0 | 45 |
1591d43ba00250badb5c4fb0808383cba8b16c8b | 8,238 | py | Python | cnn_visualization/generate_class_specific_samples.py | tamlhp/dfd_benchmark | 15cc5c4708a5414c6309ea1f20a5dfa3428409fa | [
"MIT"
] | 7 | 2020-03-20T18:46:29.000Z | 2022-03-22T03:06:17.000Z | cnn_visualization/generate_class_specific_samples.py | tamlhp/dfd_benchmark | 15cc5c4708a5414c6309ea1f20a5dfa3428409fa | [
"MIT"
] | 1 | 2021-12-03T06:49:04.000Z | 2021-12-03T06:49:04.000Z | cnn_visualization/generate_class_specific_samples.py | tamlhp/dfd_benchmark | 15cc5c4708a5414c6309ea1f20a5dfa3428409fa | [
"MIT"
] | 2 | 2021-08-23T08:54:09.000Z | 2022-02-07T10:04:23.000Z | """
Created on Thu Oct 26 14:19:44 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from cnn_visualization.misc_functions import preprocess_image, recreate_image, save_image
import argparse
import torch.nn as nn
class ClassSpecificImageGeneration():
    """
    Produces an image that maximizes a certain class with gradient ascent
    """
    # NOTE(review): this copy of the class is missing __init__ (present in
    # the full version later in the file); generate() relies on attributes
    # set there: model, target_class, created_image, device.
    def generate(self, iterations=150):
        """Generates class specific image
        Keyword Arguments:
            iterations {int} -- Total iterations for gradient ascent (default: {150})
        Returns:
            np.ndarray -- Final maximally activated class image
        """
        print("bat dau generate xong ... ")
        initial_learning_rate = 200
        for i in range(1, iterations):
            print(i)
            # Process image and return variable. A fresh leaf tensor (and a
            # fresh SGD optimizer below) is rebuilt every iteration from the
            # image recreated at the end of the previous pass.
            self.processed_image = preprocess_image(self.created_image, False)
            # Define optimizer for the image
            optimizer = SGD([self.processed_image], lr=initial_learning_rate)
            # Forward
            output = self.model(self.processed_image.to(self.device))
            # Target specific class: minimizing the negative logit performs
            # gradient ASCENT on the target class score.
            print(output)
            class_loss = -output[0, self.target_class]
            # ``i % 1 == 0`` is always true, so this logs every iteration.
            if i % 1 == 0 or i == iterations-1:
                print('Iteration:', str(i), 'Loss',
                      "{0:.2f}".format(class_loss.cpu().data.numpy()))
            # Zero grads
            self.model.zero_grad()
            # Backward
            class_loss.backward()
            # Update image
            optimizer.step()
            # Recreate image
            self.created_image = recreate_image(self.processed_image)
            print(self.created_image.size)
            # ``i % 1 == 0`` is always true, so the image is saved -- and the
            # learning rate halved (fast exponential decay) -- every pass.
            if i % 1 == 0 or i == iterations-1:
                # Save image
                initial_learning_rate /=2
                im_path = 'generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
                save_image(self.created_image, im_path)
        return self.processed_image
# Script entry point: build the selected detector architecture, load its
# weights, and run class-specific input-image generation against it.
# NOTE(review): parse_args() is not defined in this copy of the file (it
# exists in the full version below), so this block raises NameError as-is.
if __name__ == '__main__':
    target_class = 0 # Flamingo
    # pretrained_model = models.alexnet(pretrained=True)
    args = parse_args()
    print(args)
    model = args.model
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    gpu_id = 0 if int(args.gpu_id) >=0 else -1
    image_size = args.image_size
    iterations= args.iterations
    # Dispatch on the chosen architecture name; each branch imports its
    # implementation lazily so unused backends are never loaded.
    if model== "capsule":
        # Capsule backend is not wired up here; bail out immediately.
        exit(0)
        pass
    elif model == "drn" :
        from pytorch_model.drn.drn_seg import DRNSub
        model = DRNSub(1)
        pass
    elif model == "local_nn" :
        from pytorch_model.local_nn import local_nn
        model = local_nn()
    elif model == "self_attention":
        from pytorch_model.self_attention import self_attention
        model = self_attention()
    elif model == "resnext50":
        from pytorch_model.model_cnn_pytorch import resnext50
        model = resnext50(False)
    elif model == "resnext101":
        from pytorch_model.model_cnn_pytorch import resnext101
        model = resnext101(False)
    elif model == "myresnext":
        from pytorch_model.model_cnn_pytorch import MyResNetX
        model = MyResNetX()
    elif model == "mnasnet":
        from pytorch_model.model_cnn_pytorch import mnasnet
        model = mnasnet(False)
    elif model == "xception_torch":
        from pytorch_model.xception import xception
        model = xception(pretrained=False)
    elif model == "xception2_torch":
        from pytorch_model.xception import xception2
        model = xception2(pretrained=False)
    elif model == "dsp_fwa":
        from pytorch_model.DSP_FWA.models.classifier import SPPNet
        model = SPPNet(backbone=50, num_class=1)
    elif model == "siamese_torch":
        # NOTE(review): parse_args() never declares --length_embed, so this
        # branch would raise AttributeError -- confirm against the CLI.
        from pytorch_model.siamese import SiameseNetworkResnet
        model = SiameseNetworkResnet(length_embed = args.length_embed,pretrained=True)
    elif model == "efficient":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b'+args.type,num_classes=1)
        model = nn.Sequential(model,nn.Sigmoid())
    elif model == "efft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=1)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "e4dfft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=4)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "efficientdual":
        # NOTE(review): this branch falls through to an xception build --
        # verify the two lines below are meant to be scoped to this branch
        # rather than running unconditionally after the chain.
        pass
        from pytorch_model.xception import xception
        model = xception(pretrained=False)
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")
    model = model.to(device)
    # Weights are loaded onto CPU first, then the model is moved to `device`.
    model.load_state_dict(torch.load(args.model_path,map_location=torch.device('cpu')))
    print("Load xong ... ")
    model.eval()
    csig = ClassSpecificImageGeneration(model, target_class,image_size)
    csig.generate(iterations = iterations)
| 44.053476 | 138 | 0.666181 | """
Created on Thu Oct 26 14:19:44 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from cnn_visualization.misc_functions import preprocess_image, recreate_image, save_image
import argparse
import torch.nn as nn
class ClassSpecificImageGeneration():
    """
    Produces an image that maximizes a certain class with gradient ascent
    """
    def __init__(self, model, target_class,image_size):
        # Inverse ImageNet-style normalization constants; presumably consumed
        # by preprocess_image/recreate_image -- confirm in misc_functions.
        self.mean = [-0.485, -0.456, -0.406]
        self.std = [1/0.229, 1/0.224, 1/0.225]
        self.model = model
        self.model.eval()
        self.target_class = target_class
        self.image_size = image_size
        # Generate a random image
        self.created_image = np.uint8(np.random.uniform(0, 255, (image_size, image_size, 3)))
        # Create the folder to export images if not exists
        if not os.path.exists('generated/class_'+str(self.target_class)):
            os.makedirs('generated/class_'+str(self.target_class))
        print("init xong ... ")
        self.device = torch.device("cuda" if torch.cuda.is_available()
                                   else "cpu")
    def generate(self, iterations=150):
        """Generates class specific image
        Keyword Arguments:
            iterations {int} -- Total iterations for gradient ascent (default: {150})
        Returns:
            np.ndarray -- Final maximally activated class image
        """
        print("bat dau generate xong ... ")
        initial_learning_rate = 200
        for i in range(1, iterations):
            print(i)
            # Process image and return variable. A fresh leaf tensor (and a
            # fresh SGD optimizer below) is rebuilt every iteration from the
            # image recreated at the end of the previous pass.
            self.processed_image = preprocess_image(self.created_image, False)
            # Define optimizer for the image
            optimizer = SGD([self.processed_image], lr=initial_learning_rate)
            # Forward
            output = self.model(self.processed_image.to(self.device))
            # Target specific class: minimizing the negative logit performs
            # gradient ASCENT on the target class score.
            print(output)
            class_loss = -output[0, self.target_class]
            # ``i % 1 == 0`` is always true, so this logs every iteration.
            if i % 1 == 0 or i == iterations-1:
                print('Iteration:', str(i), 'Loss',
                      "{0:.2f}".format(class_loss.cpu().data.numpy()))
            # Zero grads
            self.model.zero_grad()
            # Backward
            class_loss.backward()
            # Update image
            optimizer.step()
            # Recreate image
            self.created_image = recreate_image(self.processed_image)
            print(self.created_image.size)
            # ``i % 1 == 0`` is always true, so the image is saved -- and the
            # learning rate halved (fast exponential decay) -- every pass.
            if i % 1 == 0 or i == iterations-1:
                # Save image
                initial_learning_rate /=2
                im_path = 'generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
                save_image(self.created_image, im_path)
        return self.processed_image
def parse_args():
    """Build and parse the CLI arguments for the deepfake-detection script.

    The architecture is selected via a required subcommand (stored in
    ``args.model``); a few subcommands add their own extra options.
    """
    parser = argparse.ArgumentParser(description="Deepfake detection")
    parser.add_argument('--model_path', default="../../../model/xception/model_pytorch_4.pt", help='path to model ')
    parser.add_argument('--gpu_id',type=int, default=-1, help='path to model ')
    parser.add_argument('--image_size',type=int, default=256, help='path to model ')
    parser.add_argument('--iterations',type=int, default=256, help='iterations random number')
    subparsers = parser.add_subparsers(dest="model", help='Choose 1 of the model from: capsule,drn,resnext50, resnext ,gan,meso,xception')
    ## torch
    # Most subparsers only register the architecture name; the handle
    # variables are used further down only where extra options are added.
    parser_capsule = subparsers.add_parser('capsule', help='Capsule')
    parser_drn = subparsers.add_parser('drn', help='DRN ')
    parser_local_nn = subparsers.add_parser('local_nn', help='Local NN ')
    parser_self_attention = subparsers.add_parser('self_attention', help='Self Attention ')
    parser_resnext50 = subparsers.add_parser('resnext50', help='Resnext50 ')
    parser_resnext101 = subparsers.add_parser('resnext101', help='Resnext101 ')
    parser_myresnext = subparsers.add_parser('myresnext', help='My Resnext ')
    parser_mnasnet = subparsers.add_parser('mnasnet', help='mnasnet pytorch ')
    parser_xception_torch = subparsers.add_parser('xception_torch', help='Xception pytorch ')
    parser_xception2_torch = subparsers.add_parser('xception2_torch', help='Xception2 pytorch ')
    parser_dsp_fwa = subparsers.add_parser('dsp_fwa', help='DSP_SWA pytorch ')
    parser_xception = subparsers.add_parser('xception', help='Xceptionnet')
    # EfficientNet variants take a --type suffix selecting b0..b8.
    parser_efficient = subparsers.add_parser('efficient', help='Efficient Net')
    parser_efficient.add_argument("--type",type=str,required=False,default="0",help="Type efficient net 0-8")
    parser_efficientdual = subparsers.add_parser('efficientdual', help='Efficient Net')
    parser_efft = subparsers.add_parser('efft', help='Efficient Net fft')
    parser_efft.add_argument("--type", type=str, required=False, default="0", help="Type efficient net 0-8")
    parser_e4dfft = subparsers.add_parser('e4dfft', help='Efficient Net 4d fft')
    parser_e4dfft.add_argument("--type", type=str, required=False, default="0", help="Type efficient net 0-8")
    return parser.parse_args()
# Script entry point: build the selected detector architecture, load its
# weights, and run class-specific input-image generation against it.
if __name__ == '__main__':
    target_class = 0 # Flamingo
    # pretrained_model = models.alexnet(pretrained=True)
    args = parse_args()
    print(args)
    model = args.model
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    gpu_id = 0 if int(args.gpu_id) >=0 else -1
    image_size = args.image_size
    iterations= args.iterations
    # Dispatch on the chosen architecture name; each branch imports its
    # implementation lazily so unused backends are never loaded.
    if model== "capsule":
        # Capsule backend is not wired up here; bail out immediately.
        exit(0)
        pass
    elif model == "drn" :
        from pytorch_model.drn.drn_seg import DRNSub
        model = DRNSub(1)
        pass
    elif model == "local_nn" :
        from pytorch_model.local_nn import local_nn
        model = local_nn()
    elif model == "self_attention":
        from pytorch_model.self_attention import self_attention
        model = self_attention()
    elif model == "resnext50":
        from pytorch_model.model_cnn_pytorch import resnext50
        model = resnext50(False)
    elif model == "resnext101":
        from pytorch_model.model_cnn_pytorch import resnext101
        model = resnext101(False)
    elif model == "myresnext":
        from pytorch_model.model_cnn_pytorch import MyResNetX
        model = MyResNetX()
    elif model == "mnasnet":
        from pytorch_model.model_cnn_pytorch import mnasnet
        model = mnasnet(False)
    elif model == "xception_torch":
        from pytorch_model.xception import xception
        model = xception(pretrained=False)
    elif model == "xception2_torch":
        from pytorch_model.xception import xception2
        model = xception2(pretrained=False)
    elif model == "dsp_fwa":
        from pytorch_model.DSP_FWA.models.classifier import SPPNet
        model = SPPNet(backbone=50, num_class=1)
    elif model == "siamese_torch":
        # NOTE(review): parse_args() never declares --length_embed, so this
        # branch would raise AttributeError -- confirm against the CLI.
        from pytorch_model.siamese import SiameseNetworkResnet
        model = SiameseNetworkResnet(length_embed = args.length_embed,pretrained=True)
    elif model == "efficient":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b'+args.type,num_classes=1)
        model = nn.Sequential(model,nn.Sigmoid())
    elif model == "efft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=1)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "e4dfft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=4)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "efficientdual":
        # NOTE(review): this branch falls through to an xception build --
        # verify the two lines below are meant to be scoped to this branch
        # rather than running unconditionally after the chain.
        pass
        from pytorch_model.xception import xception
        model = xception(pretrained=False)
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")
    model = model.to(device)
    # Weights are loaded onto CPU first, then the model is moved to `device`.
    model.load_state_dict(torch.load(args.model_path,map_location=torch.device('cpu')))
    print("Load xong ... ")
    model.eval()
    csig = ClassSpecificImageGeneration(model, target_class,image_size)
    csig.generate(iterations = iterations)
| 2,966 | 0 | 49 |
bba0adc27a75d1f640ff8949c6dd981f33acabbd | 10,011 | bzl | Python | dependency_support/com_google_skywater_pdk/cell_libraries.bzl | kammoh/bazel_rules_hdl | 17dfb5cea5ab58460f5ca55244f3afd0724a8a3a | [
"Apache-2.0"
] | 41 | 2020-12-05T21:46:35.000Z | 2022-03-24T22:22:48.000Z | dependency_support/com_google_skywater_pdk/cell_libraries.bzl | kammoh/bazel_rules_hdl | 17dfb5cea5ab58460f5ca55244f3afd0724a8a3a | [
"Apache-2.0"
] | 63 | 2020-12-05T22:23:36.000Z | 2022-03-28T04:56:10.000Z | dependency_support/com_google_skywater_pdk/cell_libraries.bzl | kammoh/bazel_rules_hdl | 17dfb5cea5ab58460f5ca55244f3afd0724a8a3a | [
"Apache-2.0"
] | 13 | 2020-12-15T10:11:39.000Z | 2022-03-27T20:17:10.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Has metadata about the cell libraries in the PDK.
This is used by the Bazel rules to set up the proper workspaces and targets."""
# The following is a list of cell libraries in the PDK. Each cell library has the
# git commit to use and a list of process corners.
#
# This list is manually curated and needs to be updated when upgrading to newer
# cell library versions.
CELL_LIBRARIES = {
"sky130_fd_io": {
"commit": "7ec511f1a4689e174c63b3964d1ba8da9a3565e5", # v0.2.1, 2020-12-09
"shallow_since": "1606239275 -0800",
"library_type": "ip_library",
},
"sky130_fd_pr": {
"commit": "f62031a1be9aefe902d6d54cddd6f59b57627436", # v0.20.1, 2020-12-09
"shallow_since": "1605038979 -0800",
"library_type": "ip_library",
},
"sky130_fd_sc_hd": {
"commit": "ac7fb61f06e6470b94e8afdf7c25268f62fbd7b1", # v0.0.2, 2020-12-04
"shallow_since": "1605028103 -0800",
"corners": {
"ff_100C_1v65": ["basic"],
"ff_100C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v40": ["basic"],
"ss_100C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v35": ["basic"],
"ss_n40C_1v40": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"ss_n40C_1v76": ["basic"],
"tt_025C_1v80": ["basic"],
"tt_100C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
"open_road_configuration": Label("//dependency_support/com_google_skywater_pdk/sky130_fd_sc_hd:open_road_sky130_fd_sc_hd"),
"patches": [
Label("//dependency_support/com_google_skywater_pdk/sky130_fd_sc_hd:pdk.patch"),
],
},
"sky130_fd_sc_hdll": {
"commit": "0694bd23893de20f5233ef024acf6cca1e750ac6", # v0.1.1, 2020-12-04
"shallow_since": "1604475910 -0800",
"corners": {
"ff_100C_1v65": ["basic"],
"ff_100C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"ss_n40C_1v76": ["basic"],
"tt_025C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_hs": {
"commit": "1d051f49bfe4e2fe9108d702a8bc2e9c081005a4", # v0.0.2, 2020-12-04
"shallow_since": "1605574092 -0800",
"corners": {
"ff_100C_1v95": ["basic"],
"ff_150C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v60": ["basic"],
"ss_150C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"tt_025C_1v20": ["basic"],
"tt_025C_1v35": ["basic"],
"tt_025C_1v44": ["basic"],
"tt_025C_1v50": ["basic"],
"tt_025C_1v62": ["basic"],
"tt_025C_1v68": ["basic"],
"tt_025C_1v80": ["basic", "ccsnoise"],
"tt_025C_1v89": ["basic"],
"tt_025C_2v10": ["basic"],
"tt_100C_1v80": ["basic"],
"tt_150C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_hvl": {
"commit": "4fd4f858d16c558a6a488b200649e909bb4dd800", # v0.0.3, 2020-12-04
"shallow_since": "1604476031 -0800",
"corners": {
"ff_085C_5v50": ["basic"],
"ff_085C_5v50_lv1v95": ["basic"],
"ff_100C_5v50": ["basic"],
"ff_100C_5v50_lowhv1v65_lv1v95": ["basic"],
"ff_100C_5v50_lv1v95": ["basic"],
"ff_150C_5v50": ["basic"],
"ff_150C_5v50_lv1v95": ["basic"],
"ff_n40C_4v40": ["basic"],
"ff_n40C_4v40_lv1v95": ["basic"],
"ff_n40C_4v95": ["basic"],
"ff_n40C_4v95_lv1v95": ["basic"],
"ff_n40C_5v50": ["basic", "ccsnoise"],
"ff_n40C_5v50_lowhv1v65_lv1v95": ["basic"],
"ff_n40C_5v50_lv1v95": ["basic", "ccsnoise"],
"hvff_lvss_100C_5v50_lowhv1v65_lv1v60": ["basic"],
"hvff_lvss_100C_5v50_lv1v40": ["basic"],
"hvff_lvss_100C_5v50_lv1v60": ["basic"],
"hvff_lvss_n40C_5v50_lowhv1v65_lv1v60": ["basic"],
"hvff_lvss_n40C_5v50_lv1v35": ["basic"],
"hvff_lvss_n40C_5v50_lv1v60": ["basic"],
"hvss_lvff_100C_1v65": ["basic"],
"hvss_lvff_100C_1v95": ["basic"],
"hvss_lvff_100C_1v95_lowhv1v65": ["basic"],
"hvss_lvff_100C_5v50_lowhv1v65_lv1v95": ["basic"],
"hvss_lvff_n40C_1v65": ["basic"],
"hvss_lvff_n40C_1v95": ["basic"],
"hvss_lvff_n40C_1v95_lowhv1v65": ["basic"],
"hvss_lvff_n40C_5v50_lowhv1v65_lv1v95": ["basic"],
"ss_100C_1v65": ["basic"],
"ss_100C_1v65_lv1v40": ["basic"],
"ss_100C_1v65_lv1v60": ["basic"],
"ss_100C_1v95": ["basic"],
"ss_100C_2v40_lowhv1v65_lv1v60": ["basic"],
"ss_100C_2v70_lowhv1v65_lv1v60": ["basic"],
"ss_100C_3v00": ["basic"],
"ss_100C_3v00_lowhv1v65_lv1v60": ["basic"],
"ss_100C_5v50_lowhv1v65_lv1v60": ["basic"],
"ss_150C_1v65": ["basic"],
"ss_150C_1v65_lv1v60": ["basic"],
"ss_150C_3v00_lowhv1v65_lv1v60": ["basic"],
"ss_n40C_1v32": ["basic"],
"ss_n40C_1v32_lv1v28": ["basic"],
"ss_n40C_1v49": ["basic"],
"ss_n40C_1v49_lv1v44": ["basic"],
"ss_n40C_1v65": ["basic", "ccsnoise"],
"ss_n40C_1v65_lv1v35": ["basic"],
"ss_n40C_1v65_lv1v40": ["basic"],
"ss_n40C_1v65_lv1v60": ["basic", "ccsnoise"],
"ss_n40C_1v95": ["basic"],
"ss_n40C_5v50_lowhv1v65_lv1v60": ["basic"],
"tt_025C_2v64_lv1v80": ["basic"],
"tt_025C_2v97_lv1v80": ["basic"],
"tt_025C_3v30": ["basic"],
"tt_025C_3v30_lv1v80": ["basic"],
"tt_100C_3v30": ["basic"],
"tt_100C_3v30_lv1v80": ["basic"],
"tt_150C_3v30_lv1v80": ["basic"],
},
"default_corner": "ss_100C_1v95",
},
"sky130_fd_sc_lp": {
"commit": "e2c1e0646999163d35ea7b2521c3ec5c28633e63", # v0.0.2, 2020-12-04
"shallow_since": "1604476084 -0800",
"corners": {
"ff_100C_1v95": ["basic"],
"ff_125C_3v15": ["basic"],
"ff_140C_1v95": ["basic"],
"ff_150C_2v05": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic"],
"ff_n40C_2v05": ["basic"],
"ss_100C_1v60": ["basic"],
"ss_140C_1v65": ["basic"],
"ss_150C_1v65": ["basic"],
"ss_n40C_1v55": ["basic"],
"ss_n40C_1v60": ["basic"],
"ss_n40C_1v65": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_ls": {
"commit": "4f549e30dd91a1c264f8895e07b2872fe410a8c2", # v0.1.1, 2020-12-04
"shallow_since": "1604476021 -0800",
"corners": {
"ff_085C_1v95": ["basic"],
"ff_100C_1v65_dest1v76_destvpb1v76_ka1v76": ["basic"],
"ff_100C_1v95": ["basic"],
"ff_150C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65_dest1v76_destvpb1v76_ka1v76": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v40": ["basic"],
"ss_100C_1v60": ["basic"],
"ss_150C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v35": ["basic"],
"ss_n40C_1v40": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"ss_n40C_1v76": ["basic"],
"tt_025C_1v80": ["basic", "ccsnoise"],
"tt_100C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_ms": {
"commit": "ae1b7f68821505cf2d93d9d44cce5ece22710fad", # v0.0.2, 2020-12-04
"shallow_since": "1605631186 -0800",
"corners": {
"ff_085C_1v95": ["leakage"],
"ff_100C_1v65": ["basic"],
"ff_100C_1v95": ["basic", "leakage"],
"ff_150C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65_ka1v76": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise", "leakage"],
"ss_100C_1v60": ["basic"],
"ss_150C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"tt_025C_1v80": ["basic", "ccsnoise"],
"tt_100C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
}
| 41.367769 | 131 | 0.538807 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Has metadata about the cell libraries in the PDK.
This is used by the Bazel rules to set up the proper workspaces and targets."""
# The following is a list of cell libraries in the PDK. Each cell library has the
# git commit to use and a list of process corners.
#
# This list is manually curated and needs to be updated when upgrading to newer
# cell library versions.
CELL_LIBRARIES = {
"sky130_fd_io": {
"commit": "7ec511f1a4689e174c63b3964d1ba8da9a3565e5", # v0.2.1, 2020-12-09
"shallow_since": "1606239275 -0800",
"library_type": "ip_library",
},
"sky130_fd_pr": {
"commit": "f62031a1be9aefe902d6d54cddd6f59b57627436", # v0.20.1, 2020-12-09
"shallow_since": "1605038979 -0800",
"library_type": "ip_library",
},
"sky130_fd_sc_hd": {
"commit": "ac7fb61f06e6470b94e8afdf7c25268f62fbd7b1", # v0.0.2, 2020-12-04
"shallow_since": "1605028103 -0800",
"corners": {
"ff_100C_1v65": ["basic"],
"ff_100C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v40": ["basic"],
"ss_100C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v35": ["basic"],
"ss_n40C_1v40": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"ss_n40C_1v76": ["basic"],
"tt_025C_1v80": ["basic"],
"tt_100C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
"open_road_configuration": Label("//dependency_support/com_google_skywater_pdk/sky130_fd_sc_hd:open_road_sky130_fd_sc_hd"),
"patches": [
Label("//dependency_support/com_google_skywater_pdk/sky130_fd_sc_hd:pdk.patch"),
],
},
"sky130_fd_sc_hdll": {
"commit": "0694bd23893de20f5233ef024acf6cca1e750ac6", # v0.1.1, 2020-12-04
"shallow_since": "1604475910 -0800",
"corners": {
"ff_100C_1v65": ["basic"],
"ff_100C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"ss_n40C_1v76": ["basic"],
"tt_025C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_hs": {
"commit": "1d051f49bfe4e2fe9108d702a8bc2e9c081005a4", # v0.0.2, 2020-12-04
"shallow_since": "1605574092 -0800",
"corners": {
"ff_100C_1v95": ["basic"],
"ff_150C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v60": ["basic"],
"ss_150C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"tt_025C_1v20": ["basic"],
"tt_025C_1v35": ["basic"],
"tt_025C_1v44": ["basic"],
"tt_025C_1v50": ["basic"],
"tt_025C_1v62": ["basic"],
"tt_025C_1v68": ["basic"],
"tt_025C_1v80": ["basic", "ccsnoise"],
"tt_025C_1v89": ["basic"],
"tt_025C_2v10": ["basic"],
"tt_100C_1v80": ["basic"],
"tt_150C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_hvl": {
"commit": "4fd4f858d16c558a6a488b200649e909bb4dd800", # v0.0.3, 2020-12-04
"shallow_since": "1604476031 -0800",
"corners": {
"ff_085C_5v50": ["basic"],
"ff_085C_5v50_lv1v95": ["basic"],
"ff_100C_5v50": ["basic"],
"ff_100C_5v50_lowhv1v65_lv1v95": ["basic"],
"ff_100C_5v50_lv1v95": ["basic"],
"ff_150C_5v50": ["basic"],
"ff_150C_5v50_lv1v95": ["basic"],
"ff_n40C_4v40": ["basic"],
"ff_n40C_4v40_lv1v95": ["basic"],
"ff_n40C_4v95": ["basic"],
"ff_n40C_4v95_lv1v95": ["basic"],
"ff_n40C_5v50": ["basic", "ccsnoise"],
"ff_n40C_5v50_lowhv1v65_lv1v95": ["basic"],
"ff_n40C_5v50_lv1v95": ["basic", "ccsnoise"],
"hvff_lvss_100C_5v50_lowhv1v65_lv1v60": ["basic"],
"hvff_lvss_100C_5v50_lv1v40": ["basic"],
"hvff_lvss_100C_5v50_lv1v60": ["basic"],
"hvff_lvss_n40C_5v50_lowhv1v65_lv1v60": ["basic"],
"hvff_lvss_n40C_5v50_lv1v35": ["basic"],
"hvff_lvss_n40C_5v50_lv1v60": ["basic"],
"hvss_lvff_100C_1v65": ["basic"],
"hvss_lvff_100C_1v95": ["basic"],
"hvss_lvff_100C_1v95_lowhv1v65": ["basic"],
"hvss_lvff_100C_5v50_lowhv1v65_lv1v95": ["basic"],
"hvss_lvff_n40C_1v65": ["basic"],
"hvss_lvff_n40C_1v95": ["basic"],
"hvss_lvff_n40C_1v95_lowhv1v65": ["basic"],
"hvss_lvff_n40C_5v50_lowhv1v65_lv1v95": ["basic"],
"ss_100C_1v65": ["basic"],
"ss_100C_1v65_lv1v40": ["basic"],
"ss_100C_1v65_lv1v60": ["basic"],
"ss_100C_1v95": ["basic"],
"ss_100C_2v40_lowhv1v65_lv1v60": ["basic"],
"ss_100C_2v70_lowhv1v65_lv1v60": ["basic"],
"ss_100C_3v00": ["basic"],
"ss_100C_3v00_lowhv1v65_lv1v60": ["basic"],
"ss_100C_5v50_lowhv1v65_lv1v60": ["basic"],
"ss_150C_1v65": ["basic"],
"ss_150C_1v65_lv1v60": ["basic"],
"ss_150C_3v00_lowhv1v65_lv1v60": ["basic"],
"ss_n40C_1v32": ["basic"],
"ss_n40C_1v32_lv1v28": ["basic"],
"ss_n40C_1v49": ["basic"],
"ss_n40C_1v49_lv1v44": ["basic"],
"ss_n40C_1v65": ["basic", "ccsnoise"],
"ss_n40C_1v65_lv1v35": ["basic"],
"ss_n40C_1v65_lv1v40": ["basic"],
"ss_n40C_1v65_lv1v60": ["basic", "ccsnoise"],
"ss_n40C_1v95": ["basic"],
"ss_n40C_5v50_lowhv1v65_lv1v60": ["basic"],
"tt_025C_2v64_lv1v80": ["basic"],
"tt_025C_2v97_lv1v80": ["basic"],
"tt_025C_3v30": ["basic"],
"tt_025C_3v30_lv1v80": ["basic"],
"tt_100C_3v30": ["basic"],
"tt_100C_3v30_lv1v80": ["basic"],
"tt_150C_3v30_lv1v80": ["basic"],
},
"default_corner": "ss_100C_1v95",
},
"sky130_fd_sc_lp": {
"commit": "e2c1e0646999163d35ea7b2521c3ec5c28633e63", # v0.0.2, 2020-12-04
"shallow_since": "1604476084 -0800",
"corners": {
"ff_100C_1v95": ["basic"],
"ff_125C_3v15": ["basic"],
"ff_140C_1v95": ["basic"],
"ff_150C_2v05": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic"],
"ff_n40C_2v05": ["basic"],
"ss_100C_1v60": ["basic"],
"ss_140C_1v65": ["basic"],
"ss_150C_1v65": ["basic"],
"ss_n40C_1v55": ["basic"],
"ss_n40C_1v60": ["basic"],
"ss_n40C_1v65": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_ls": {
"commit": "4f549e30dd91a1c264f8895e07b2872fe410a8c2", # v0.1.1, 2020-12-04
"shallow_since": "1604476021 -0800",
"corners": {
"ff_085C_1v95": ["basic"],
"ff_100C_1v65_dest1v76_destvpb1v76_ka1v76": ["basic"],
"ff_100C_1v95": ["basic"],
"ff_150C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65_dest1v76_destvpb1v76_ka1v76": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise"],
"ss_100C_1v40": ["basic"],
"ss_100C_1v60": ["basic"],
"ss_150C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v35": ["basic"],
"ss_n40C_1v40": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"ss_n40C_1v76": ["basic"],
"tt_025C_1v80": ["basic", "ccsnoise"],
"tt_100C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
"sky130_fd_sc_ms": {
"commit": "ae1b7f68821505cf2d93d9d44cce5ece22710fad", # v0.0.2, 2020-12-04
"shallow_since": "1605631186 -0800",
"corners": {
"ff_085C_1v95": ["leakage"],
"ff_100C_1v65": ["basic"],
"ff_100C_1v95": ["basic", "leakage"],
"ff_150C_1v95": ["basic"],
"ff_n40C_1v56": ["basic"],
"ff_n40C_1v65_ka1v76": ["basic"],
"ff_n40C_1v76": ["basic"],
"ff_n40C_1v95": ["basic", "ccsnoise", "leakage"],
"ss_100C_1v60": ["basic"],
"ss_150C_1v60": ["basic"],
"ss_n40C_1v28": ["basic"],
"ss_n40C_1v44": ["basic"],
"ss_n40C_1v60": ["basic", "ccsnoise"],
"tt_025C_1v80": ["basic", "ccsnoise"],
"tt_100C_1v80": ["basic"],
},
"default_corner": "ff_100C_1v95",
},
}
| 0 | 0 | 0 |
83b24771f6d7e88ea19ed0a5aef62adf8f7e158e | 9,430 | py | Python | utils/misc.py | Michael-F-Bryan/mfb_utils | 5d7be24f5cc5eaf4f0ad590e99b1e7607735acd4 | [
"MIT"
] | null | null | null | utils/misc.py | Michael-F-Bryan/mfb_utils | 5d7be24f5cc5eaf4f0ad590e99b1e7607735acd4 | [
"MIT"
] | null | null | null | utils/misc.py | Michael-F-Bryan/mfb_utils | 5d7be24f5cc5eaf4f0ad590e99b1e7607735acd4 | [
"MIT"
] | null | null | null | """
A Python module containing various utility functions, classes, decorators or
whatever.
"""
from collections import namedtuple, Iterable
import sys
import functools
import inspect
from bs4 import BeautifulSoup
import logging
import time
import random
import os
import errno
# Constants
# =========
USER_AGENTS = [
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36 OPR/34.0.2036.25',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1',
'Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01',
]
"""
A bunch of random User-Agent strings.
"""
# Decorators
# ==========
class Hook:
    """
    A decorator that calls a named "hook" method on the same instance after
    (or before) the decorated method runs.
    When decorating your method, make sure to only use keyword arguments in
    the hook.
    The idea is for a developer to implement a specific class which has various
    methods, and on some methods he will add a Hook decorator. Then the user
    can create a subclass of this class and implement the hooks themselves.
    The user is given access to the return value of the decorated function
    through the `self._hook_return_value` variable. The return value is None
    if the hook is called before the decorated function.
    Example
    -------
    Developer::
        class MyClass:
            @Hook('on_do_stuff', arg1='something', arg2=7)
            def do_stuff(self):
                pass
    User::
        class MyNewClass(MyClass):
            def on_do_stuff(self, **kwargs):
                # Do something useful
                pass
    Parameters
    ----------
    hook_name: str
        The name of the hook function to be called.
    call_after: bool
        Whether to call the hook after or before the decorated function runs.
        (default: True)
    skip_exceptions: bool
        When True (the default), exceptions raised inside the hook itself are
        silently ignored.
    Raises
    ------
    TypeError
        When a normal function is decorated instead of a method.
    """
    def __init__(self, hook_name, call_after=True,
                 skip_exceptions=True, **hook_kwargs):
        # Restored: this initializer (documented above and required by
        # call_hook, which reads these attributes) was missing.
        self.hook_name = hook_name
        self.hook_kwargs = hook_kwargs
        self.call_after = call_after
        self.skip_exceptions = skip_exceptions
    def __call__(self, func):
        # Restored decorator protocol: wrap the method so the hook fires
        # around each call.
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            if self.call_after:
                ret = func(*args, **kwargs)
                self.call_hook(func, args, return_value=ret)
            else:
                self.call_hook(func, args)
                ret = func(*args, **kwargs)
            return ret
        return decorated
    def call_hook(self, func, args, return_value=None):
        """
        Get the "self" argument (i.e. the instance of a class that is implicitly
        passed to a method when you call something like "some_class.method()")
        then call our hook.
        Uses inspect to check that a function has this "self" variable passed
        in first. This is a sanity check to ensure that the hook decorator is
        only used on methods.
        By default any exceptions encountered while running the hook will be
        silently ignored.
        """
        # getfullargspec replaces inspect.getargspec, which was deprecated
        # and removed in Python 3.11.
        func_args = inspect.getfullargspec(func).args
        if len(func_args) < 1 or 'self' not in func_args:
            raise TypeError('Only methods can be decorated with "Hook"')
        instance = args[0]
        hook = getattr(instance, self.hook_name, None)
        if hook:
            instance._hook_return_value = return_value
            try:
                hook(**self.hook_kwargs)
            except Exception:
                if not self.skip_exceptions:
                    raise
class Timed:
    """
    Decorator that times a function call and saves its duration (in seconds)
    to `function.duration`, optionally writing a timing message to a stream.

    NOTE(review): only the docstring of this class is visible here; the
    implementation (constructor and __call__) is defined elsewhere.

    Parameters
    ----------
    output_stream: Stream-like object
        A stream to write the timing message to, set to None to disable it
        (default: stderr)
    decimals: int
        The number of decimal places to print the duration to in the output
        stream
    """
# Functions
# =========
def get_logger(name, log_file, log_level=None):
    """
    Get a logger object which is set up properly with the correct formatting,
    logfile, etc.
    Parameters
    ----------
    name: str
        The __name__ of the module calling this function.
    log_file: str
        The filename of the file to log to, or the special values 'stdout' /
        'stderr' to log to the corresponding standard stream.
    log_level: int, optional
        A ``logging`` level; defaults to ``logging.INFO``.
    Returns
    -------
    logging.Logger
        A logging.Logger object that can be used to log to a common file.
    """
    logger = logging.getLogger(name)
    logger.setLevel(log_level or logging.INFO)
    # Only build and attach a handler the first time this logger is seen.
    # The original constructed a handler unconditionally, which opened (and
    # leaked) the log file on every call even when it was then discarded.
    if not logger.handlers:
        if log_file == 'stdout':
            handler = logging.StreamHandler(sys.stdout)
        elif log_file == 'stderr':
            handler = logging.StreamHandler(sys.stderr)
        else:
            handler = logging.FileHandler(log_file)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s: %(message)s',
            datefmt='%Y/%m/%d %I:%M:%S %p'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def flatten(items, ignore_types=(str, bytes)):
    """
    Turn a nested structure (usually a list of lists... of lists of lists of
    lists) into one flat stream of leaf elements.
    Parameters
    ----------
    items: list(list(...))
        A nested list structure.
    ignore_types: tuple(type)
        A tuple of types (usually iterables) that shouldn't be expanded (e.g.
        don't flatten a string into a list of characters, etc).
    Returns
    -------
    generator
        Yields each element of the nested structure in turn.
    """
    # If a string, bytes etc is passed in as the "items" nested function then
    # just yield it back out
    if isinstance(items, ignore_types):
        yield items
    else:
        for x in items:
            if isinstance(x, Iterable) and not isinstance(x, ignore_types):
                # Bug fix: propagate ignore_types into the recursion; the
                # original fell back to the defaults for nested levels.
                yield from flatten(x, ignore_types)
            else:
                yield x
def hidden_fields(soup):
    """
    Collect all the hidden fields of an HTML form.
    Parameters
    ----------
    soup: BeautifulSoup or str
        The form to search. Raw HTML source is parsed here; an
        already-parsed BeautifulSoup document is used as-is.
    Returns
    -------
    dict
        Maps each hidden field's name to its value.
    """
    # Accept raw markup as well as a parsed document.
    if not isinstance(soup, BeautifulSoup):
        soup = BeautifulSoup(soup, 'html.parser')
    return {tag['name']: tag['value']
            for tag in soup.find_all('input', type='hidden')}
_suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def humansize(nbytes, decimals=2):
    """
    Convert a number of bytes into its human readable string using SI
    suffixes.
    Note
    ----
    1 KB = 1024 bytes
    Parameters
    ----------
    nbytes: int
        The total number of bytes
    decimals: int
        The number of decimal places to round to
    Returns
    -------
    string
        The human readable size.
    """
    if nbytes == 0: return '0 B'
    i = 0
    while nbytes >= 1024 and i < len(_suffixes)-1:
        nbytes /= 1024.
        i += 1
    f = ('{}'.format(round(nbytes, decimals)))
    # Bug fix: only strip trailing zeros from a fractional part.  The
    # original stripped unconditionally, mangling whole byte counts ending
    # in zero (e.g. humansize(10) rendered as '1 B').
    if '.' in f:
        f = f.rstrip('0').rstrip('.')
    return '%s %s' % (f, _suffixes[i])
def innerHTML(element):
    """
    Return the inner HTML markup of a BeautifulSoup tag as a string.
    """
    contents = element.decode_contents(formatter="html")
    return contents
| 28.662614 | 128 | 0.600848 | """
A Python module containing various utility functions, classes, decorators or
whatever.
"""
from collections import namedtuple, Iterable
import sys
import functools
import inspect
from bs4 import BeautifulSoup
import logging
import time
import random
import os
import errno
# Constants
# =========
USER_AGENTS = [
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36 OPR/34.0.2036.25',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1',
'Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01',
]
"""
A bunch of random User-Agent strings.
"""
# Decorators
# ==========
class Hook:
    """
    A decorator that calls a named "hook" method on the same instance after
    (or before) the decorated method runs.
    When decorating your method, make sure to only use keyword arguments in
    the hook.
    The idea is for a developer to implement a specific class which has various
    methods, and on some methods he will add a Hook decorator. Then the user
    can create a subclass of this class and implement the hooks themselves.
    The user is given access to the return value of the decorated function
    through the `self._hook_return_value` variable. The return value is None
    if the hook is called before the decorated function.
    Example
    -------
    Developer::
        class MyClass:
            @Hook('on_do_stuff', arg1='something', arg2=7)
            def do_stuff(self):
                pass
    User::
        class MyNewClass(MyClass):
            def on_do_stuff(self, **kwargs):
                # Do something useful
                pass
    Parameters
    ----------
    hook_name: str
        The name of the hook function to be called.
    call_after: bool
        Whether to call the hook after or before the decorated function runs.
        (default: True)
    skip_exceptions: bool
        When True (the default), exceptions raised inside the hook itself are
        silently ignored.
    Raises
    ------
    TypeError
        When a normal function is decorated instead of a method.
    """
    def __init__(self, hook_name, call_after=True,
                 skip_exceptions=True, **hook_kwargs):
        # Any extra keyword arguments are forwarded to the hook on each call.
        self.hook_name = hook_name
        self.hook_kwargs = hook_kwargs
        self.call_after = call_after
        self.skip_exceptions = skip_exceptions
    def __call__(self, func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            if self.call_after:
                ret = func(*args, **kwargs)
                self.call_hook(func, args, return_value=ret)
            else:
                self.call_hook(func, args)
                ret = func(*args, **kwargs)
            return ret
        return decorated
    def call_hook(self, func, args, return_value=None):
        """
        Get the "self" argument (i.e. the instance of a class that is implicitly
        passed to a method when you call something like "some_class.method()")
        then call our hook.
        Uses inspect to check that a function has this "self" variable passed
        in first. This is a sanity check to ensure that the hook decorator is
        only used on methods.
        By default any exceptions encountered while running the hook will be
        silently ignored.
        """
        # getfullargspec replaces inspect.getargspec, which was deprecated
        # and removed in Python 3.11.
        func_args = inspect.getfullargspec(func).args
        if len(func_args) < 1 or 'self' not in func_args:
            raise TypeError('Only methods can be decorated with "Hook"')
        instance = args[0]
        hook = getattr(instance, self.hook_name, None)
        if hook:
            instance._hook_return_value = return_value
            try:
                hook(**self.hook_kwargs)
            except Exception:
                if not self.skip_exceptions:
                    raise
class Timed:
    """
    Decorator that times a function call and saves its duration (in seconds)
    to `function.duration`.
    Parameters
    ----------
    output_stream: Stream-like object
        A stream to write the timing message to, set to None to disable it
        (default: stderr)
    decimals: int
        The number of decimal places to print the duration to in the output
        stream
    """
    def __init__(self, output_stream=sys.stderr, decimals=3):
        if output_stream is None or hasattr(output_stream, 'write'):
            self.output_stream = output_stream
        else:
            raise TypeError('output_stream should be a Stream (i.e. has a '
                            '"write()" method)')
        if not isinstance(decimals, int):
            raise TypeError('decimals must be an integer')
        else:
            self.decimals = decimals
    def __call__(self, func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            start = time.time()
            ret = func(*args, **kwargs)
            decorated.duration = time.time() - start
            if self.output_stream:
                func_args = []
                # Bug fix: stringify positional arguments; the original
                # extended with the raw values, so ', '.join below raised
                # TypeError whenever a positional argument wasn't a string.
                func_args.extend(str(arg) for arg in args)
                func_args.extend('{}={}'.format(key, value) for key, value in kwargs.items())
                func_arguments = ', '.join(func_args)
                function_call = '{}({})'.format(func.__name__, func_arguments)
                duration = round(decorated.duration, self.decimals)
                self.output_stream.write(
                    '{} took {} seconds'.format(function_call, duration))
            return ret
        return decorated
# Functions
# =========
def get_logger(name, log_file, log_level=None):
    """
    Get a logger object which is set up properly with the correct formatting,
    logfile, etc.
    Parameters
    ----------
    name: str
        The __name__ of the module calling this function.
    log_file: str
        The filename of the file to log to, or the special values 'stdout' /
        'stderr' to log to the corresponding standard stream.
    log_level: int, optional
        A ``logging`` level; defaults to ``logging.INFO``.
    Returns
    -------
    logging.Logger
        A logging.Logger object that can be used to log to a common file.
    """
    logger = logging.getLogger(name)
    logger.setLevel(log_level or logging.INFO)
    # Only build and attach a handler the first time this logger is seen.
    # The original constructed a handler unconditionally, which opened (and
    # leaked) the log file on every call even when it was then discarded.
    if not logger.handlers:
        if log_file == 'stdout':
            handler = logging.StreamHandler(sys.stdout)
        elif log_file == 'stderr':
            handler = logging.StreamHandler(sys.stderr)
        else:
            handler = logging.FileHandler(log_file)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s: %(message)s',
            datefmt='%Y/%m/%d %I:%M:%S %p'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def flatten(items, ignore_types=(str, bytes)):
    """
    Turn a nested structure (usually a list of lists... of lists of lists of
    lists) into one flat stream of leaf elements.
    Parameters
    ----------
    items: list(list(...))
        A nested list structure.
    ignore_types: tuple(type)
        A tuple of types (usually iterables) that shouldn't be expanded (e.g.
        don't flatten a string into a list of characters, etc).
    Returns
    -------
    generator
        Yields each element of the nested structure in turn.
    """
    # If a string, bytes etc is passed in as the "items" nested function then
    # just yield it back out
    if isinstance(items, ignore_types):
        yield items
    else:
        for x in items:
            if isinstance(x, Iterable) and not isinstance(x, ignore_types):
                # Bug fix: propagate ignore_types into the recursion; the
                # original fell back to the defaults for nested levels.
                yield from flatten(x, ignore_types)
            else:
                yield x
def hidden_fields(soup):
    """
    Collect all the hidden fields of an HTML form.
    Parameters
    ----------
    soup: BeautifulSoup or str
        The form to search. Raw HTML source is parsed here; an
        already-parsed BeautifulSoup document is used as-is.
    Returns
    -------
    dict
        Maps each hidden field's name to its value.
    """
    # Accept raw markup as well as a parsed document.
    if not isinstance(soup, BeautifulSoup):
        soup = BeautifulSoup(soup, 'html.parser')
    return {tag['name']: tag['value']
            for tag in soup.find_all('input', type='hidden')}
_suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def humansize(nbytes, decimals=2):
    """
    Convert a number of bytes into its human readable string using SI
    suffixes.
    Note
    ----
    1 KB = 1024 bytes
    Parameters
    ----------
    nbytes: int
        The total number of bytes
    decimals: int
        The number of decimal places to round to
    Returns
    -------
    string
        The human readable size.
    """
    if nbytes == 0: return '0 B'
    i = 0
    while nbytes >= 1024 and i < len(_suffixes)-1:
        nbytes /= 1024.
        i += 1
    f = ('{}'.format(round(nbytes, decimals)))
    # Bug fix: only strip trailing zeros from a fractional part.  The
    # original stripped unconditionally, mangling whole byte counts ending
    # in zero (e.g. humansize(10) rendered as '1 B').
    if '.' in f:
        f = f.rstrip('0').rstrip('.')
    return '%s %s' % (f, _suffixes[i])
def random_user_agent():
    """Return one of the canned USER_AGENTS strings, chosen at random."""
    return random.choice(USER_AGENTS)
def mkdir(path):
    """
    Create *path* (and any missing parents), ignoring the error if the
    directory already exists.
    Raises
    ------
    OSError
        If creation fails for any reason other than the directory already
        existing (e.g. the path exists but is a regular file).
    """
    try:
        os.makedirs(path)
        # Bug fix: the original referenced an undefined module-level name
        # `logger`, raising NameError on every successful creation.
        logging.getLogger(__name__).debug('Made directory: {}'.format(path))
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def innerHTML(element):
    """
    Return the inner HTML markup of a BeautifulSoup tag as a string.
    """
    contents = element.decode_contents(formatter="html")
    return contents
| 2,077 | 0 | 160 |
11e537c7d8a4810c18a0f7a1e03cbb583f5c278c | 3,454 | py | Python | run_exp_opennet.py | clojia/DTAE | 9cfe89c47cdb7e9796900a00efb202593095d990 | [
"MIT"
] | null | null | null | run_exp_opennet.py | clojia/DTAE | 9cfe89c47cdb7e9796900a00efb202593095d990 | [
"MIT"
] | null | null | null | run_exp_opennet.py | clojia/DTAE | 9cfe89c47cdb7e9796900a00efb202593095d990 | [
"MIT"
] | null | null | null | import argparse
import subprocess
import random
import os
import tensorflow as tf
import sys
#os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3,4,5,6,7"
from tensorflow.python.client import device_lib
# Script entry point: all work is delegated to main().
if __name__ == '__main__':
    main()
| 47.315068 | 311 | 0.590041 | import argparse
import subprocess
import random
import os
import tensorflow as tf
import sys
#os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3,4,5,6,7"
from tensorflow.python.client import device_lib
def main():
    """Drive a batch of OpenNet experiments.

    Parses the CLI options, reads the training-class list file (one
    whitespace-separated row of class ids per line), then launches one
    `exp_opennet.py` subprocess per (class-row, model-name) combination,
    waiting for each to finish before starting the next.
    """
    parser = argparse.ArgumentParser(description='OpenNetFlat experiments.')
    parser.add_argument('-eid', '--exp_id', required=False, dest='exp_id',
                        default=None, help='path to output directory.')
    parser.add_argument('-n','--network', required=True, dest='network',
                        choices=['flat', 'cnn'], help='dataset name.')
    parser.add_argument('-ds','--datasets', required=True, dest='dataset_name',
                        choices=['mnist', 'fashion-mnist', 'cifar10'], help='dataset name.')
    parser.add_argument('-m','--models', required=True, dest='model_names', nargs='+',
                        default=['ii', 'ce', 'ceii', 'openmax', 'g_openmax', 'central','triplet',], help='model name.')
    parser.add_argument('-trc_file', '--tr_classes_list_file', required=True, dest='trc_file',
                        help='list of training classes.')
    parser.add_argument('-o', '--outdir', required=False, dest='output_dir',
                        default='./exp_result/cnn', help='path to output directory.')
    parser.add_argument('-s', '--seed', required=False, dest='seed', type=int,
                        default=1, help='path to output directory.')
    parser.add_argument('--closed', dest='closed', action='store_true',
                        help='Run closed world experiments.')
    parser.add_argument('--no-closed', dest='closed', action='store_false',
                        help='Run open world experiments.')
    parser.add_argument('-p','--pre-trained', required=False, default='false', dest='pre_trained', choices=['false','recon','trans'], help='Use self-supervision pre-trained model: True/False')
    parser.add_argument('-t','--transformation', required=False, dest='transformation', choices=['none','random','shift','ae-shift','ae-swap','random1d','ae-affine','ae-gaussian','ae-rotation','shift-ae-rotation','ae-random','rotation','affine', 'crop', 'gaussian', 'offset', 'misc'], help='Tranformation type')
    parser.set_defaults(closed=False)
    args = parser.parse_args()
    # Pick a random experiment id when the caller didn't supply one.
    if args.exp_id is None:
        args.exp_id = random.randint(0, 10000)
    # Each non-empty line of the list file becomes one list of class ids.
    tr_classes_list = []
    with open(args.trc_file) as fin:
        for line in fin:
            if line.strip() == '':
                continue
            cols = line.strip().split()
            tr_classes_list.append([int(float(c)) for c in cols])
    # One sequential subprocess per (class-row, model) pair; the child's CLI
    # is built flag by flag from the parsed arguments.
    for tr_classes in tr_classes_list:
        for mname in args.model_names:
            exp_args = []
            exp_args += ['python', 'exp_opennet.py']
            exp_args += ['-e', str(args.exp_id)]
            exp_args += ['-n', args.network]
            exp_args += ['-m', mname]
            exp_args += ['-ds', args.dataset_name]
            exp_args += ['-trc']
            # At most the first 10 class ids are forwarded to the child.
            exp_args += [str(c) for c in tr_classes[:10]]
            exp_args += ['-o', args.output_dir]
            exp_args += ['-s', str(args.seed)]
            exp_args += ['-p', str(args.pre_trained)]
            exp_args += ['--transformation', args.transformation]
            if args.closed:
                exp_args += ['--closed']
            print(exp_args)
            proc = subprocess.Popen(exp_args)
            proc.wait()
if __name__ == '__main__':
    main()
| 3,195 | 0 | 23 |
f4be202ce9833da0c4fdbb92c0405a985d7b3416 | 589 | py | Python | goldbox_detector_environment/discrete_space.py | kalimuthu-selvaraj/find-goldbox | b34065c4a4ce2ece2d9069319380793516dcbd5d | [
"MIT"
] | null | null | null | goldbox_detector_environment/discrete_space.py | kalimuthu-selvaraj/find-goldbox | b34065c4a4ce2ece2d9069319380793516dcbd5d | [
"MIT"
] | null | null | null | goldbox_detector_environment/discrete_space.py | kalimuthu-selvaraj/find-goldbox | b34065c4a4ce2ece2d9069319380793516dcbd5d | [
"MIT"
] | null | null | null | from typing import Any
import numpy as np
| 23.56 | 69 | 0.590832 | from typing import Any
import numpy as np
class DiscreteSpace:
    """A finite space of integer actions {0, 1, ..., n - 1}."""

    def __init__(self, n: int):
        assert n > 0, "Argument must be a positive integer"
        self.n = n

    def sample(self) -> int:
        """Draw a uniformly random element of the space."""
        return np.random.randint(self.n)

    def __contains__(self, item: Any) -> bool:
        # Only integers that fall inside [0, n) belong to the space.
        return isinstance(item, int) and 0 <= item < self.n

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, DiscreteSpace):
            return False
        return self.n == other.n

    def __repr__(self) -> str:
        return f"Discrete({self.n})"
| 389 | -1 | 157 |
54c65c13b158252ff43536ff832fe4579a13dcd7 | 480 | py | Python | python/beautifulSoupCarModels.py | nsdeo12/pyspark | 6cfa6f4afdc756454cf05902ad6492ab45a30589 | [
"MIT"
] | null | null | null | python/beautifulSoupCarModels.py | nsdeo12/pyspark | 6cfa6f4afdc756454cf05902ad6492ab45a30589 | [
"MIT"
] | null | null | null | python/beautifulSoupCarModels.py | nsdeo12/pyspark | 6cfa6f4afdc756454cf05902ad6492ab45a30589 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
# Request headers; the User-Agent makes the request look like a browser.
headers = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': 'GET',
    'Access-Control-Allow-Headers': 'Content-Type',
    'Access-Control-Max-Age': '3600',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
url = "https://gomechanic.in/hyderabad"
# Bug fix: requests.get's second positional parameter is `params`, not
# headers, so the dict above was being sent as query parameters; it must be
# passed via the `headers` keyword to be sent as HTTP headers.
req = requests.get(url, headers=headers)
soup = BeautifulSoup(req.content, 'html.parser')
print(soup.prettify()) | 32 | 96 | 0.689583 | import requests
from bs4 import BeautifulSoup
# Request headers; the User-Agent makes the request look like a browser.
headers = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': 'GET',
    'Access-Control-Allow-Headers': 'Content-Type',
    'Access-Control-Max-Age': '3600',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
url = "https://gomechanic.in/hyderabad"
# Bug fix: requests.get's second positional parameter is `params`, not
# headers, so the dict above was being sent as query parameters; it must be
# passed via the `headers` keyword to be sent as HTTP headers.
req = requests.get(url, headers=headers)
soup = BeautifulSoup(req.content, 'html.parser')
print(soup.prettify()) | 0 | 0 | 0 |
a9ff23d7b0e5ce21634cc071d7e31b33160711e6 | 3,626 | py | Python | experiments/utils/mnist_cnn.py | elian204/melime | aef885fa4b6b02f7bf7294140d78a85fe546b622 | [
"MIT"
] | 48 | 2020-09-15T02:26:46.000Z | 2021-09-03T17:08:53.000Z | experiments/utils/mnist_cnn.py | elian204/melime | aef885fa4b6b02f7bf7294140d78a85fe546b622 | [
"MIT"
] | 1 | 2020-11-03T04:14:27.000Z | 2020-11-05T16:32:25.000Z | experiments/utils/mnist_cnn.py | elian204/melime | aef885fa4b6b02f7bf7294140d78a85fe546b622 | [
"MIT"
] | 3 | 2020-09-20T16:52:11.000Z | 2021-09-25T10:04:27.000Z | """
Modified example from:
https://github.com/pytorch/examples
"""
from __future__ import print_function
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
| 31.530435 | 112 | 0.591009 | """
Modified example from:
https://github.com/pytorch/examples
"""
from __future__ import print_function
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
    """Small convolutional classifier for 28x28 single-channel images.

    Two 3x3 convolutions, a 2x2 max-pool, dropout, and two fully connected
    layers producing log-probabilities over 10 classes.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are kept so saved state_dicts remain loadable.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # Feature extractor: 28x28 -> 26x26 -> 24x24 -> (pool) 12x12.
        features = F.relu(self.conv1(x))
        features = F.max_pool2d(F.relu(self.conv2(features)), 2)
        features = self.dropout1(features)
        # Classifier head over the flattened 64 * 12 * 12 = 9216 features.
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
def train(model, device, train_loader, optimizer, epoch, log_interval=10):
    """Run one training epoch, logging the loss every *log_interval* batches."""
    model.train()
    dataset_size = len(train_loader.dataset)
    num_batches = len(train_loader)
    for step, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        log_probs = model(inputs)
        loss = F.nll_loss(log_probs, labels)
        loss.backward()
        optimizer.step()
        if step % log_interval == 0:
            seen = step * len(inputs)
            progress = 100.0 * step / num_batches
            message = "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                epoch, seen, dataset_size, progress, loss.item()
            )
            print(message)
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction="sum").item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset)
)
)
def train_model(
    train_loader, test_loader, device="cpu", gamma=0.7, seed=1.0, log_interval=100, lr=0.5, epochs=10, path=None
):
    """
    Train a Net classifier and optionally save its weights.

    Parameters
    ----------
    train_loader, test_loader : torch.utils.data.DataLoader
        Training and evaluation batches.
    device : str
        "cpu" or "cuda"; falls back to CPU (with a warning) when CUDA is
        requested but unavailable.
    gamma : float
        Per-epoch multiplicative learning-rate decay for StepLR.
    seed : int
        RNG seed for reproducibility.
    log_interval : int
        Number of batches between training log lines.
    lr : float
        Initial Adadelta learning rate.
    epochs : int
        Number of passes over train_loader.
    path : str, optional
        If given, the trained state_dict is saved to this path.

    Returns
    -------
    Net
        The trained model.

    Raises
    ------
    Exception
        If *device* is neither "cpu" nor "cuda".
    """
    if device not in ["cuda", "cpu"]:
        raise Exception("Please choose one valid device. The options are: cuda and cpu")
    # Bug fix: the seed parameter was previously accepted but never applied.
    torch.manual_seed(int(seed))
    if device == "cuda":
        use_cuda = torch.cuda.is_available()
        if use_cuda is False:
            warnings.warn("Cuda is not available. Using the cpu device instead")
        device = torch.device("cuda" if use_cuda else "cpu")
    print("device:", device)
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
    for epoch in range(1, epochs + 1):
        train(model, device, train_loader, optimizer, epoch, log_interval=log_interval)
        scheduler.step()  # decay the learning rate once per epoch
        test(model, device, test_loader)
    if path is not None:
        torch.save(model.state_dict(), path)
    return model
def model_load(device, path):
    """
    Instantiate a Net on *device*, load weights from *path*, and return the
    model in eval mode.

    The checkpoint is mapped onto *device* at load time so a state_dict saved
    from a GPU can still be restored on a CPU-only machine.
    """
    model = Net().to(device)
    model.load_state_dict(torch.load(path, map_location=device))
    model.eval()
    return model
| 3,145 | 0 | 168 |
d1b365331d86c08aef366a840521292fc43d7e44 | 356 | py | Python | problems/pe7.py | tgetzoya/project-euler-python | c459dc0f853c27006db6865be731ad53ee2cd778 | [
"BSD-2-Clause"
] | null | null | null | problems/pe7.py | tgetzoya/project-euler-python | c459dc0f853c27006db6865be731ad53ee2cd778 | [
"BSD-2-Clause"
] | null | null | null | problems/pe7.py | tgetzoya/project-euler-python | c459dc0f853c27006db6865be731ad53ee2cd778 | [
"BSD-2-Clause"
] | null | null | null | from utils.primes import is_prime
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#
# What is the 10 001st prime number?
#
# Answer: 104743
| 19.777778 | 102 | 0.589888 | from utils.primes import is_prime
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#
# What is the 10 001st prime number?
#
# Answer: 104743
def run():
list = []
idx = 1
while len(list) < 10001:
if is_prime(idx):
list.append(idx)
idx += 1
return list[-1]
| 136 | 0 | 23 |
1cda5e5a69716c6d3852145f0291ba2dd75da620 | 10,936 | py | Python | yolo_data.py | leokale/yolo_v1 | ddafb5b06e0dc80b61d9271e4c1d4f48a9f050fc | [
"MIT"
] | 1 | 2019-12-18T03:45:45.000Z | 2019-12-18T03:45:45.000Z | yolo_data.py | leokale/yolo_v1 | ddafb5b06e0dc80b61d9271e4c1d4f48a9f050fc | [
"MIT"
] | null | null | null | yolo_data.py | leokale/yolo_v1 | ddafb5b06e0dc80b61d9271e4c1d4f48a9f050fc | [
"MIT"
] | 1 | 2019-09-14T07:49:54.000Z | 2019-09-14T07:49:54.000Z | # -*- coding:utf-8 -*-
__author__ = 'Leo.Z'
'''
image_name.jpg x y x2 y2 c x y x2 y2 c xy为左上角坐标,x2y2为右下角坐标
'''
import os
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
| 39.197133 | 120 | 0.51207 | # -*- coding:utf-8 -*-
__author__ = 'Leo.Z'
'''
image_name.jpg x y x2 y2 c x y x2 y2 c xy为左上角坐标,x2y2为右下角坐标
'''
import os
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
class yoloDataset(data.Dataset):
# 输入图片大小为448
image_size = 448
def __init__(self, root, list_file, train, transform):
print('data init')
self.root = root
self.train = train
self.transform = transform
self.fnames = []
self.boxes = []
self.labels = []
self.mean = (123, 117, 104) # RGB
# if isinstance(list_file, list):
# # Cat multiple list files together.
# # This is especially useful for voc07/voc12 combination.
# tmp_file = '/tmp/listfile.txt'
# os.system('cat %s > %s' % (' '.join(list_file), tmp_file))
# list_file = tmp_file
with open(list_file) as f:
lines = f.readlines()
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
num_boxes = (len(splited) - 1) // 5
box = []
label = []
for i in range(num_boxes):
x = float(splited[1 + 5 * i])
y = float(splited[2 + 5 * i])
x2 = float(splited[3 + 5 * i])
y2 = float(splited[4 + 5 * i])
c = splited[5 + 5 * i]
box.append([x, y, x2, y2])
label.append(int(c) + 1)
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
self.num_samples = len(self.boxes)
def __getitem__(self, idx):
fname = self.fnames[idx]
img = cv2.imread(os.path.join(self.root + fname))
boxes = self.boxes[idx].clone()
labels = self.labels[idx].clone()
if self.train:
# # img = self.random_bright(img)
img, boxes = self.random_flip(img, boxes)
img, boxes = self.randomScale(img, boxes)
img = self.randomBlur(img)
img = self.RandomBrightness(img)
img = self.RandomHue(img)
img = self.RandomSaturation(img)
img, boxes, labels = self.randomShift(img, boxes, labels)
img, boxes, labels = self.randomCrop(img, boxes, labels)
h, w, _ = img.shape
boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes)
img = self.BGR2RGB(img) # because pytorch pretrained model use RGB
img = self.subMean(img, self.mean) # 减去均值
img = cv2.resize(img, (self.image_size, self.image_size))
target = self.encoder(boxes, labels) # 14x14x30
for t in self.transform:
img = t(img)
return img, target
def __len__(self):
return self.num_samples
def encoder(self, boxes, labels):
'''
boxes (tensor) [[x1,y1,x2,y2],[]] 这里的x1,y1,x2,y2都是坐标相对于图片wh的比例
labels (tensor) [...]
return 14x14x30
'''
grid_num = 14
target = torch.zeros((grid_num, grid_num, 30))
cell_size = 1. / grid_num
wh = boxes[:, 2:] - boxes[:, :2]
# print('wh:',wh)
cxcy = (boxes[:, 2:] + boxes[:, :2]) / 2
# print('cxcy:', cxcy)
for i in range(cxcy.size()[0]): # 问题?为什么有一个box处于ij cell,就将4,9都设置为1了。万一一个cell中有两个不同的框呢?
cxcy_sample = cxcy[i]
# 这里得到的ij就是[cell_x,cell_y] ij[1]代表垂直第n个cell,ij[0]代表水平第n个cell
ij = (cxcy_sample / cell_size).ceil() - 1
# print(ij)
# target[cell_y,cell_x,4] 表示在14x14的cell中位于[cell_y,cell_x]的cell的第一个box的confidence
target[int(ij[1]), int(ij[0]), 4] = 1
# target[cell_y,cell_x,4] 表示在14x14的cell中位于[cell_y,cell_x]的cell的第二个box的confidence
target[int(ij[1]), int(ij[0]), 9] = 1
# labels范围在1-20,所以target[cell_y,cell_x,10-29]表示classes分类的onehot编码
# print('labels[i]:',labels[i])
target[int(ij[1]), int(ij[0]), int(labels[i]) + 9] = 1
# 先获取ij网格的左上角坐标
xy = ij * cell_size # 匹配到的网格的左上角相对坐标(比例)
# 计相对于cell的中心点坐标比例(相对于cell)和宽高比例(相对于整张图片),这里为什么两个box设置为一样的?是为了方便计算IoU么?
# 实际上根本不是说一个cell中支持两个不同的box,而是一个cell中只能支持一个box,用两个xywhc,是为了pred多个框,然后选择最大IoU??
delta_xy = (cxcy_sample - xy) / cell_size
target[int(ij[1]), int(ij[0]), 2:4] = wh[i]
target[int(ij[1]), int(ij[0]), :2] = delta_xy
target[int(ij[1]), int(ij[0]), 7:9] = wh[i]
target[int(ij[1]), int(ij[0]), 5:7] = delta_xy
return target
def BGR2RGB(self, img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def BGR2HSV(self, img):
return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
def HSV2BGR(self, img):
return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
def RandomBrightness(self, bgr):
if random.random() < 0.5:
hsv = self.BGR2HSV(bgr)
h, s, v = cv2.split(hsv)
adjust = random.choice([0.5, 1.5])
v = v * adjust
v = np.clip(v, 0, 255).astype(hsv.dtype)
hsv = cv2.merge((h, s, v))
bgr = self.HSV2BGR(hsv)
return bgr
def RandomSaturation(self, bgr):
if random.random() < 0.5:
hsv = self.BGR2HSV(bgr)
h, s, v = cv2.split(hsv)
adjust = random.choice([0.5, 1.5])
s = s * adjust
s = np.clip(s, 0, 255).astype(hsv.dtype)
hsv = cv2.merge((h, s, v))
bgr = self.HSV2BGR(hsv)
return bgr
def RandomHue(self, bgr):
if random.random() < 0.5:
hsv = self.BGR2HSV(bgr)
h, s, v = cv2.split(hsv)
adjust = random.choice([0.5, 1.5])
h = h * adjust
h = np.clip(h, 0, 255).astype(hsv.dtype)
hsv = cv2.merge((h, s, v))
bgr = self.HSV2BGR(hsv)
return bgr
def randomBlur(self, bgr):
if random.random() < 0.5:
bgr = cv2.blur(bgr, (5, 5))
return bgr
def randomShift(self, bgr, boxes, labels):
# 平移变换
center = (boxes[:, 2:] + boxes[:, :2]) / 2
if random.random() < 0.5:
height, width, c = bgr.shape
after_shfit_image = np.zeros((height, width, c), dtype=bgr.dtype)
after_shfit_image[:, :, :] = (104, 117, 123) # bgr
shift_x = random.uniform(-width * 0.2, width * 0.2)
shift_y = random.uniform(-height * 0.2, height * 0.2)
# print(bgr.shape,shift_x,shift_y)
# 原图像的平移
if shift_x >= 0 and shift_y >= 0:
after_shfit_image[int(shift_y):, int(shift_x):, :] = bgr[:height - int(shift_y), :width - int(shift_x),
:]
elif shift_x >= 0 and shift_y < 0:
after_shfit_image[:height + int(shift_y), int(shift_x):, :] = bgr[-int(shift_y):, :width - int(shift_x),
:]
elif shift_x < 0 and shift_y >= 0:
after_shfit_image[int(shift_y):, :width + int(shift_x), :] = bgr[:height - int(shift_y), -int(shift_x):,
:]
elif shift_x < 0 and shift_y < 0:
after_shfit_image[:height + int(shift_y), :width + int(shift_x), :] = bgr[-int(shift_y):,
-int(shift_x):, :]
shift_xy = torch.FloatTensor([[int(shift_x), int(shift_y)]]).expand_as(center)
center = center + shift_xy
mask1 = (center[:, 0] > 0) & (center[:, 0] < width)
mask2 = (center[:, 1] > 0) & (center[:, 1] < height)
mask = (mask1 & mask2).view(-1, 1)
boxes_in = boxes[mask.expand_as(boxes)].view(-1, 4)
if len(boxes_in) == 0:
return bgr, boxes, labels
box_shift = torch.FloatTensor([[int(shift_x), int(shift_y), int(shift_x), int(shift_y)]]).expand_as(
boxes_in)
boxes_in = boxes_in + box_shift
labels_in = labels[mask.view(-1)]
return after_shfit_image, boxes_in, labels_in
return bgr, boxes, labels
def randomScale(self, bgr, boxes):
# 固定住高度,以0.8-1.2伸缩宽度,做图像形变
if random.random() < 0.5:
scale = random.uniform(0.8, 1.2)
height, width, c = bgr.shape
bgr = cv2.resize(bgr, (int(width * scale), height))
scale_tensor = torch.FloatTensor([[scale, 1, scale, 1]]).expand_as(boxes)
boxes = boxes * scale_tensor
return bgr, boxes
return bgr, boxes
    def randomCrop(self, bgr, boxes, labels):
        """With probability 0.5, crop a random window spanning 60-100% of each
        image dimension.

        Boxes whose centers fall outside the window are dropped (with their
        labels); surviving boxes are translated into window coordinates and
        clamped to the window. If no box survives, the original inputs are
        returned unchanged.
        """
        if random.random() < 0.5:
            center = (boxes[:, 2:] + boxes[:, :2]) / 2
            height, width, c = bgr.shape
            # window size (>= 60% of each dimension) and top-left corner
            h = random.uniform(0.6 * height, height)
            w = random.uniform(0.6 * width, width)
            x = random.uniform(0, width - w)
            y = random.uniform(0, height - h)
            x, y, h, w = int(x), int(y), int(h), int(w)
            # centers in window coordinates; keep boxes centered inside it
            center = center - torch.FloatTensor([[x, y]]).expand_as(center)
            mask1 = (center[:, 0] > 0) & (center[:, 0] < w)
            mask2 = (center[:, 1] > 0) & (center[:, 1] < h)
            mask = (mask1 & mask2).view(-1, 1)
            boxes_in = boxes[mask.expand_as(boxes)].view(-1, 4)
            if (len(boxes_in) == 0):
                # nothing left to train on -- abandon the crop
                return bgr, boxes, labels
            box_shift = torch.FloatTensor([[x, y, x, y]]).expand_as(boxes_in)
            boxes_in = boxes_in - box_shift
            # clamp surviving boxes to the crop window (in-place clamp_)
            boxes_in[:, 0] = boxes_in[:, 0].clamp_(min=0, max=w)
            boxes_in[:, 2] = boxes_in[:, 2].clamp_(min=0, max=w)
            boxes_in[:, 1] = boxes_in[:, 1].clamp_(min=0, max=h)
            boxes_in[:, 3] = boxes_in[:, 3].clamp_(min=0, max=h)
            labels_in = labels[mask.view(-1)]
            img_croped = bgr[y:y + h, x:x + w, :]
            return img_croped, boxes_in, labels_in
        return bgr, boxes, labels
def subMean(self, bgr, mean):
mean = np.array(mean, dtype=np.float32)
bgr = bgr - mean
return bgr
def random_flip(self, im, boxes):
if random.random() < 0.5:
im_lr = np.fliplr(im).copy()
h, w, _ = im.shape
xmin = w - boxes[:, 2]
xmax = w - boxes[:, 0]
boxes[:, 0] = xmin
boxes[:, 2] = xmax
return im_lr, boxes
return im, boxes
def random_bright(self, im, delta=16):
alpha = random.random()
if alpha > 0.3:
im = im * alpha + random.randrange(-delta, delta)
im = im.clip(min=0, max=255).astype(np.uint8)
return im
| 8,453 | 2,744 | 23 |
1b28979dc7c8122af1f82b0c2f31220add3905d4 | 25 | py | Python | pathogen/version.py | clockhart/pathogen | 1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36 | [
"MIT"
] | null | null | null | pathogen/version.py | clockhart/pathogen | 1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36 | [
"MIT"
] | null | null | null | pathogen/version.py | clockhart/pathogen | 1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36 | [
"MIT"
] | null | null | null | __version__ = '0.0.dev5'
| 12.5 | 24 | 0.68 | __version__ = '0.0.dev5'
| 0 | 0 | 0 |
87a6274f943ad7d10be8030e80714479b4b05768 | 2,117 | py | Python | bot.py | maharishidao/musconvbot | 187ccbb53d25e2d42c4179e9d2720ea4b2bf9dca | [
"MIT"
] | null | null | null | bot.py | maharishidao/musconvbot | 187ccbb53d25e2d42c4179e9d2720ea4b2bf9dca | [
"MIT"
] | null | null | null | bot.py | maharishidao/musconvbot | 187ccbb53d25e2d42c4179e9d2720ea4b2bf9dca | [
"MIT"
] | null | null | null |
# from config import conf
#import telegram
#
# tg_token=conf['telegram_token']
# bot = telegram.Bot(token=tg_token)
# print(tg_token)
#
# #proxy list: https://50na50.net/ru/proxy/socks5list
#
# proxy_url='socks5://66.33.210.203:24475'
#
# pp = telegram.utils.request.Request(proxy_url=proxy_url)
# bot = telegram.Bot(token=tg_token, request=pp)
# print(bot.get_me())
#
# REQUEST_KWARGS={'proxy_url'=proxy_url}
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from config import conf
import logging
proxy_url='socks5://104.248.63.49:30588'
REQUEST_KWARGS={'proxy_url':proxy_url}
tg_token=conf['telegram_token']
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
import os
server_url='https://hello-world-delete-234.nw.r.appspot.com/'
PORT = int(os.environ.get('PORT', '8443'))
updater = Updater(tg_token, use_context=True, request_kwargs=REQUEST_KWARGS)
dispatcher = updater.dispatcher
# add handlers
updater.start_webhook(listen="0.0.0.0",
port=PORT,
url_path=tg_token)
updater.bot.set_webhook("server_url" + tg_token)
updater.idle()
# updater = Updater(token=tg_token, use_context=True,request_kwargs=REQUEST_KWARGS)
# dispatcher = updater.dispatcher
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
echo_handler = MessageHandler(Filters.text & (~Filters.command), echo)
dispatcher.add_handler(echo_handler)
caps_handler = CommandHandler('caps', caps)
dispatcher.add_handler(caps_handler)
# updater.start_polling()
| 26.135802 | 88 | 0.731696 |
# from config import conf
#import telegram
#
# tg_token=conf['telegram_token']
# bot = telegram.Bot(token=tg_token)
# print(tg_token)
#
# #proxy list: https://50na50.net/ru/proxy/socks5list
#
# proxy_url='socks5://66.33.210.203:24475'
#
# pp = telegram.utils.request.Request(proxy_url=proxy_url)
# bot = telegram.Bot(token=tg_token, request=pp)
# print(bot.get_me())
#
# REQUEST_KWARGS={'proxy_url'=proxy_url}
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from config import conf
import logging
proxy_url='socks5://104.248.63.49:30588'
REQUEST_KWARGS={'proxy_url':proxy_url}
tg_token=conf['telegram_token']
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
import os
server_url='https://hello-world-delete-234.nw.r.appspot.com/'
PORT = int(os.environ.get('PORT', '8443'))
updater = Updater(tg_token, use_context=True, request_kwargs=REQUEST_KWARGS)
dispatcher = updater.dispatcher
# add handlers
updater.start_webhook(listen="0.0.0.0",
port=PORT,
url_path=tg_token)
updater.bot.set_webhook("server_url" + tg_token)
updater.idle()
# updater = Updater(token=tg_token, use_context=True,request_kwargs=REQUEST_KWARGS)
# dispatcher = updater.dispatcher
def start(update, context):
    """Handle /start: reply with a usage prompt (Russian: "Just send me a
    link to a track and I'll convert it!")."""
    context.bot.send_message(chat_id=update.effective_chat.id,
                             text="Просто кинь мне ссылку на трек, и я ее конвертирую!")
def echo(update, context):
    """Echo any non-command text message back to the same chat."""
    context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
def caps(update, context):
    """Handle /caps: reply with the command's arguments joined and upper-cased."""
    text_caps = ' '.join(context.args).upper()
    context.bot.send_message(chat_id=update.effective_chat.id, text=text_caps)
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
echo_handler = MessageHandler(Filters.text & (~Filters.command), echo)
dispatcher.add_handler(echo_handler)
caps_handler = CommandHandler('caps', caps)
dispatcher.add_handler(caps_handler)
# updater.start_polling()
| 423 | 0 | 69 |
0f909150a3a7a5b03deffdb379525a9f9ad1c95c | 2,427 | py | Python | etl/parsers/etw/Microsoft_Windows_Forwarding.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Windows_Forwarding.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Windows_Forwarding.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Forwarding
GUID : 699e309c-e782-4400-98c8-e21d162d7b7b
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=100, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=101, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=102, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=102, version=1)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=103, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=104, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=104, version=1)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=105, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=107, version=0)
| 28.552941 | 123 | 0.692213 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Forwarding
GUID : 699e309c-e782-4400-98c8-e21d162d7b7b
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=100, version=0)
class Microsoft_Windows_Forwarding_100_0(Etw):
    """Event 100 v0 payload: Id and Query wide strings."""
    pattern = Struct(
        "Id" / WString,
        "Query" / WString
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=101, version=0)
class Microsoft_Windows_Forwarding_101_0(Etw):
    """Event 101 v0 payload: Id, Query and Status wide strings."""
    pattern = Struct(
        "Id" / WString,
        "Query" / WString,
        "Status" / WString
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=102, version=0)
class Microsoft_Windows_Forwarding_102_0(Etw):
    """Event 102 v0 payload: Query wide string and a 32-bit ErrorCode."""
    pattern = Struct(
        "Query" / WString,
        "ErrorCode" / Int32ul
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=102, version=1)
class Microsoft_Windows_Forwarding_102_1(Etw):
    """Event 102 v1 payload: adds an Id wide string before Query/ErrorCode."""
    pattern = Struct(
        "Id" / WString,
        "Query" / WString,
        "ErrorCode" / Int32ul
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=103, version=0)
class Microsoft_Windows_Forwarding_103_0(Etw):
    """Event 103 v0 payload: a single Id wide string."""
    pattern = Struct(
        "Id" / WString
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=104, version=0)
class Microsoft_Windows_Forwarding_104_0(Etw):
    """Event 104 v0 payload: Id wide string and a 32-bit ErrorCode."""
    pattern = Struct(
        "Id" / WString,
        "ErrorCode" / Int32ul
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=104, version=1)
class Microsoft_Windows_Forwarding_104_1(Etw):
    """Event 104 v1 payload: SubscriptionManagerAddress, ErrorCode, ErrorMessage."""
    pattern = Struct(
        "SubscriptionManagerAddress" / WString,
        "ErrorCode" / Int32ul,
        "ErrorMessage" / WString
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=105, version=0)
class Microsoft_Windows_Forwarding_105_0(Etw):
    """Event 105 v0 payload: SubscriptionManagerAddress, ErrorCode, ErrorMessage."""
    pattern = Struct(
        "SubscriptionManagerAddress" / WString,
        "ErrorCode" / Int32ul,
        "ErrorMessage" / WString
    )
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=107, version=0)
class Microsoft_Windows_Forwarding_107_0(Etw):
    """Event 107 v0 payload: a single PolicyDescription wide string."""
    pattern = Struct(
        "PolicyDescription" / WString
    )
| 0 | 1,082 | 198 |
9fa7761b172e6d6bf7711e64ff8fe15c22205fd1 | 17,260 | py | Python | data_formatting.py | andytaylor823/euchre-ML | 691d5dba9a72af201e004308782c9c429dbeba51 | [
"MIT"
] | null | null | null | data_formatting.py | andytaylor823/euchre-ML | 691d5dba9a72af201e004308782c9c429dbeba51 | [
"MIT"
] | null | null | null | data_formatting.py | andytaylor823/euchre-ML | 691d5dba9a72af201e004308782c9c429dbeba51 | [
"MIT"
] | null | null | null | import numpy as np
from tqdm.auto import tqdm
COLS_GROUP1 = 24
COLS_GROUP2 = 47
COLS_GROUP3 = 24*13
COLS_GROUP4 = 55
COLS_TOTAL = COLS_GROUP1 + COLS_GROUP2 + COLS_GROUP3 + COLS_GROUP4
same_color_suit = {'C':'S', 'D':'H', 'H':'D', 'S':'C'}
COLS_TARGET = 24
def format_data(data, usetqdm=True, start=0, stop=None, count=None):
"""
Here is all the data that needs to be fed to the ML algorithm, grouped by phase of the game.
I have also tried to include an estimate of how many columns each will need to take up.
If a categorical feature has N options, I will OHE it as N columns, instead of using N-1.
A card will be OHEncoded as [9-A] + [C/D/H/S] (6+4), and possibly tagged as Y/N trump.
#######
DATA GROUP 1: Calling trump
#######
(4) 1.) Who dealt (position relative to "me")
(4) 2.) Who called trump (position relative to "me")
(1) 3.) Which round was trump called in
(1) 4.) Going alone?
(4) 5.) Which suit is trump (not sure about this one)
(10) 6.) What is the turn card
Total: 24 columns
#######
DATA GROUP 2: Other misc. information
#######
(4) 1.) Who is leading right now
(4) 2.) Who is winning right now
(11) 3.) What card was led (is it trump)
(11) 4.) What card is winning (is it trump)
(5) 5.) Which team won each trick so far (+1 for "me", 0 for no one (yet), -1 for opponents)
(12) 6.) Any players confirmed short in any suits
Total: 47 columns
#######
DATA GROUP 3: All card locations (constant order: 9C, 10C, ..., (D), (H), ..., KS, AS)
#######
For each card (24):
(4) 1.) Confirmed in anyone's hand (my hand + ordered up turn card?)
(4) 2.) Played in a previous trick by someone (maybe later expand this to which prev trick?)
(3) 3.) Played in CURRENT trick by someone
(1) 4.) Confirmed buried
(1) 5.) Is trump
Total: 312 columns
#######
DATA GROUP 4: My remaining hand, again
#######
(11) 1.) Card #1 (is it trump)
(11) 2.) Card #2 (is it trump)
(11) 3.) Card #3 (is it trump)
(11) 4.) Card #4 (is it trump)
(11) 5.) Card #5 (is it trump)
Total: 55 columns
SUPER-TOTAL: 414 columns. Yeesh.
"""
counter = 0
stop = len(data) if stop is None else stop
count = len(data) if count is None else count
formatted = np.zeros((20*(stop-start), COLS_TOTAL), dtype=np.int8)
target = np.zeros((20*(stop-start), COLS_TARGET), dtype=np.int8)
for i in tqdm(data.index) if usetqdm else data.index:
i = int(i)
if i < start: continue
elif i >= stop: break
elif counter >= count: break
game = data.iloc[i]
formatted[20*counter:20*(counter+1)] = format_game(game)
target[20*counter:20*(counter+1)] = get_target(game)
counter += 1
mask = ~np.all(target==0, axis=1)
return formatted[mask], target[mask]
def get_group1_info(game, tricknum, playernum):
"""
#######
DATA GROUP 1: Calling trump
#######
(4) 1.) Who dealt (position relative to "me")
(4) 2.) Who called trump (position relative to "me")
(1) 3.) Which round was trump called in
(1) 4.) Going alone?
(4) 5.) Which suit is trump (not sure if this one needs to be here)
(10) 6.) What is the turn card
Total: 24 columns
"""
group1_info = np.zeros(COLS_GROUP1, dtype=np.int8)
current_player = get_current_player(game, tricknum, playernum)
# who dealt
group1_info[get_relative_position(game, tricknum, playernum, '3')] = 1
# who called
group1_info[4+get_relative_position(game, tricknum, playernum, game['caller'])] = 1
# was it called first round
group1_info[8] = 2-int(game['round'])
# did they go alone
group1_info[9] = int(game['alone'])
# which suit is trump
group1_info[10+{'C':0, 'D':1, 'H':2, 'S':3}[get_trump_suit(game)]] = 1
# what is the turn card
turn_card = get_turn_card(game)
group1_info[14+{n:i for n,i in zip(list('9TJQKA'), range(6))}[turn_card[0]]] = 1
group1_info[20+{s:i for s,i in zip(list('CDHS'), range(4))}[turn_card[1]]] = 1
return group1_info
def get_group2_info(game, tricknum, playernum):
"""
#######
DATA GROUP 2: Other misc. information
#######
(4) 1.) Who is leading right now
(4) 2.) Who is winning right now
(11) 3.) What card was led (is it trump)
(11) 4.) What card is winning (is it trump)
(5) 5.) Which team won each trick so far (+1 for "me", 0 for no one (yet), -1 for opponents)
(12) 6.) Any players confirmed short in any suits
Total: 47 columns
"""
group2_info = np.zeros(COLS_GROUP2, dtype=np.int8)
current_trick = game[['played'+str(i+1) for i in range(4*tricknum, 4*tricknum+playernum)]]
trump_suit = get_trump_suit(game)
# who leads
group2_info[get_relative_position(game, tricknum, playernum, current_trick[0][-1]) if len(current_trick) > 0 else 3] = 1
# who's winning
if len(current_trick) > 0:
winner, winning_card = get_winner(current_trick, trump_suit)
group2_info[4+get_relative_position(game, tricknum, playernum, winner)] = 1
# what card was led
if len(current_trick) > 0:
group2_info[8+{n:i for n,i in zip(list('9TJQKA'), range(6))}[current_trick[0][0]]] = 1
group2_info[14+{s:i for s,i in zip(list('CDHS'), range(4))}[current_trick[0][1]]] = 1
group2_info[18] = (current_trick[0][1]==trump_suit) or (current_trick[0][0]=='J' and current_trick[0][1]==same_color_suit[trump_suit])
# what card is winning
if len(current_trick) > 0:
group2_info[19+{n:i for n,i in zip(list('9TJQKA'), range(6))}[winning_card[0]]] = 1
group2_info[25+{s:i for s,i in zip(list('CDHS'), range(4))}[winning_card[1]]] = 1
group2_info[29] = (winning_card[1]==trump_suit) or (winning_card[0]=='J' and winning_card[1]==same_color_suit[trump_suit])
# what team won each trick so far
for tnum in range(5):
if tnum >= tricknum:
continue
# return +1 if relative_position % 2 == 1, return -1 if relative_position % 2 == 0 (self is always 3)
group2_info[30+tnum] = -1+2*(get_relative_position(game, tricknum, playernum, game['winner'+str(tnum+1)])%2)
# any players confirmed short in suits
# list it like [opp1 short in clubs, opp1 short in diamonds, ..., opp2 short in spades]
for opp_pos in range(3):
for i, s in enumerate(list('CDHS')):
group2_info[35+4*opp_pos + i] = get_short_suitedness(game, tricknum, playernum, opp_pos, s)
return group2_info
card_ix = {**{n:i for n,i in zip(list('9TJQKA'), range(6))},\
**{s:6*i for s,i in zip(list('CDHS'), range(4))}}
def get_group3_info(game, tricknum, playernum):
"""
#######
DATA GROUP 3: All card locations (constant order: 9C, 10C, ..., (D), (H), ..., KS, AS)
#######
For each card (24):
(4) 1.) Confirmed in anyone's hand (my hand + ordered up turn card?)
(4) 2.) Played in a previous trick by someone (maybe later expand this to which prev trick?)
(3) 3.) Played in CURRENT trick by someone
(1) 4.) Confirmed buried
(1) 5.) Is trump
Total: 312 columns
"""
COLS_PER_CARD = 13
group3_info = np.zeros(24*COLS_PER_CARD, dtype=np.int8)
trump_suit = get_trump_suit(game)
# cards played in a previous trick
if tricknum > 0:
prev_played_cards = game[['played'+str(i+1) for i in range(4*tricknum)]]
for c in prev_played_cards:
if '-' in c:
continue
group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 4 + get_relative_position(game, tricknum, playernum, c[-1])] = 1
# cards played THIS trick
if playernum > 0:
current_played_cards = game[['played'+str(i+1) for i in range(4*tricknum, 4*tricknum+playernum)]]
for c in current_played_cards:
if c.startswith('-'):
continue
group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 8 + get_relative_position(game, tricknum, playernum, c[-1])] = 1
# cards in my hand
my_remaining_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(4*tricknum+playernum, 20)]]\
if c[-1] == get_current_player(game, tricknum, playernum)]
for c in my_remaining_cards:
# position of self wrt self is always 3
group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 3] = 1
# confirmed turn card location
if game['round']==2:
turn_card = get_turn_card(game)
group3_info[COLS_PER_CARD*(card_ix[turn_card[0]] + card_ix[turn_card[1]]) + COLS_PER_CARD-2] = 1
elif get_current_player(game, tricknum, playernum) == '3':
original_cards = get_original_hand(game, tricknum, playernum)
played_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(20)]] if c[-1]=='3']
buried_card = [c for c in original_cards if c not in played_cards][0]
group3_info[COLS_PER_CARD*(card_ix[buried_card[0]]+card_ix[buried_card[1]]) + COLS_PER_CARD-2] = 1
else:
turn_card = get_turn_card(game)
all_played_cards = game[['played'+str(i+1) for i in range(4*tricknum+playernum)]]
if turn_card+'3' not in list(all_played_cards):
group3_info[COLS_PER_CARD*(card_ix[turn_card[0]]+card_ix[turn_card[1]]) + get_relative_position(game, tricknum, playernum, 3)] = 1
# Mark trump
for s in list('CDHS'):
if s == trump_suit:
for name in list('9TJQKA'):
group3_info[COLS_PER_CARD*(card_ix[name]+card_ix[s]) + COLS_PER_CARD-1] = 1
group3_info[COLS_PER_CARD*(card_ix['J']+card_ix[same_color_suit[s]]) + COLS_PER_CARD-1] = 1
return group3_info
def get_group4_info(game, tricknum, playernum):
"""
#######
DATA GROUP 4: My remaining hand, again
#######
(11) 1.) Card #1 (is it trump)
(11) 2.) Card #2 (is it trump)
(11) 3.) Card #3 (is it trump)
(11) 4.) Card #4 (is it trump)
(11) 5.) Card #5 (is it trump)
Total: 55 columns
"""
"""
my_cards = [c for c in game[['played'+str(i) for i in range(1,21)]] if c[-1] == str(playernum)]
trump_suit = get_trump_suit(game)
np.random.shuffle(my_cards)
my_cards = [c[:-1] if c not in game[['played'+str(i) for i in range(1,4*tricknum+playernum+1)]] else '00' for c in my_cards]
"""
# slightly more efficient
trump_suit = get_trump_suit(game)
my_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(4*tricknum+playernum, 20)]]\
if c[-1] == get_current_player(game, tricknum, playernum)]
my_cards += ['00']*(5-len(my_cards))
np.random.shuffle(my_cards)
group4_info = []
for c in my_cards:
group4_info += card_to_ohe(c[0], c[1], trump_suit==c[1] or (c[0]=='J' and c[1]==same_color_suit[trump_suit]))
return group4_info
power_to_name = {power:n for power,n in zip([1,2,3,4,5,10,12,15,20,25,30,31,35], list('9TJQKA9TQKAJJ'))}
oldstyle=False
card_ix = {**{n:i for n,i in zip(list('9TJQKA'), range(6))},\
**{s:6*i for s,i in zip(list('CDHS'), range(4))}} | 43.918575 | 142 | 0.606199 | import numpy as np
from tqdm.auto import tqdm
COLS_GROUP1 = 24
COLS_GROUP2 = 47
COLS_GROUP3 = 24*13
COLS_GROUP4 = 55
COLS_TOTAL = COLS_GROUP1 + COLS_GROUP2 + COLS_GROUP3 + COLS_GROUP4
same_color_suit = {'C':'S', 'D':'H', 'H':'D', 'S':'C'}
COLS_TARGET = 24
def format_data(data, usetqdm=True, start=0, stop=None, count=None):
    """
    Here is all the data that needs to be fed to the ML algorithm, grouped by phase of the game.
    I have also tried to include an estimate of how many columns each will need to take up.
    If a categorical feature has N options, I will OHE it as N columns, instead of using N-1.
    A card will be OHEncoded as [9-A] + [C/D/H/S] (6+4), and possibly tagged as Y/N trump.
    #######
    DATA GROUP 1: Calling trump
    #######
    (4) 1.) Who dealt (position relative to "me")
    (4) 2.) Who called trump (position relative to "me")
    (1) 3.) Which round was trump called in
    (1) 4.) Going alone?
    (4) 5.) Which suit is trump (not sure about this one)
    (10) 6.) What is the turn card
    Total: 24 columns
    #######
    DATA GROUP 2: Other misc. information
    #######
    (4) 1.) Who is leading right now
    (4) 2.) Who is winning right now
    (11) 3.) What card was led (is it trump)
    (11) 4.) What card is winning (is it trump)
    (5) 5.) Which team won each trick so far (+1 for "me", 0 for no one (yet), -1 for opponents)
    (12) 6.) Any players confirmed short in any suits
    Total: 47 columns
    #######
    DATA GROUP 3: All card locations (constant order: 9C, 10C, ..., (D), (H), ..., KS, AS)
    #######
    For each card (24):
    (4) 1.) Confirmed in anyone's hand (my hand + ordered up turn card?)
    (4) 2.) Played in a previous trick by someone (maybe later expand this to which prev trick?)
    (3) 3.) Played in CURRENT trick by someone
    (1) 4.) Confirmed buried
    (1) 5.) Is trump
    Total: 312 columns
    #######
    DATA GROUP 4: My remaining hand, again
    #######
    (11) 1.) Card #1 (is it trump)
    (11) 2.) Card #2 (is it trump)
    (11) 3.) Card #3 (is it trump)
    (11) 4.) Card #4 (is it trump)
    (11) 5.) Card #5 (is it trump)
    Total: 55 columns
    SUPER-TOTAL: 414 columns. Yeesh.
    """
    # counter = number of games actually formatted (enforces the `count` cap)
    counter = 0
    # default the [start, stop) window and cap to the whole dataframe
    stop = len(data) if stop is None else stop
    count = len(data) if count is None else count
    # 20 feature rows per game: 5 tricks x 4 player decisions
    formatted = np.zeros((20*(stop-start), COLS_TOTAL), dtype=np.int8)
    target = np.zeros((20*(stop-start), COLS_TARGET), dtype=np.int8)
    for i in tqdm(data.index) if usetqdm else data.index:
        i = int(i)
        if i < start: continue
        elif i >= stop: break
        elif counter >= count: break
        game = data.iloc[i]
        formatted[20*counter:20*(counter+1)] = format_game(game)
        target[20*counter:20*(counter+1)] = get_target(game)
        counter += 1
    # drop rows whose target was never filled (all-zero) -- presumably turns
    # skipped when a lone caller's partner sits out; TODO confirm with get_target
    mask = ~np.all(target==0, axis=1)
    return formatted[mask], target[mask]
def format_game(game):
    """Build the (20, COLS_TOTAL) int8 feature matrix for one game.

    One row per (trick, player) decision point, in play order. The row for
    the lone caller's partner (seated two positions from the caller) is left
    all-zero, since that player sits out the hand.
    """
    formatted_game = np.zeros((20, COLS_TOTAL), dtype=np.int8)
    for tricknum in range(5):
        for playernum in range(4):
            # skip the partner of a lone caller (caller + 2 mod 4)
            if game['alone'] and int(game['caller'])==(int(get_current_player(game, tricknum, playernum))+2)%4:
                continue
            group1_info = get_group1_info(game, tricknum, playernum)
            group2_info = get_group2_info(game, tricknum, playernum)
            group3_info = get_group3_info(game, tricknum, playernum)
            group4_info = get_group4_info(game, tricknum, playernum)
            # concatenate the four feature groups into this decision's row
            formatted_game[4*tricknum+playernum, :len(group1_info)] = group1_info
            formatted_game[4*tricknum+playernum, len(group1_info):len(group1_info)+len(group2_info)] = group2_info
            formatted_game[4*tricknum+playernum, len(group1_info)+len(group2_info):\
                           len(group1_info)+len(group2_info)+len(group3_info)] = group3_info
            formatted_game[4*tricknum+playernum, len(group1_info)+len(group2_info)+len(group3_info):\
                           len(group1_info)+len(group2_info)+len(group3_info)+len(group4_info)] = group4_info
    return formatted_game
def get_group1_info(game, tricknum, playernum):
    """
    #######
    DATA GROUP 1: Calling trump
    #######
    (4) 1.) Who dealt (position relative to "me")
    (4) 2.) Who called trump (position relative to "me")
    (1) 3.) Which round was trump called in
    (1) 4.) Going alone?
    (4) 5.) Which suit is trump (not sure if this one needs to be here)
    (10) 6.) What is the turn card
    Total: 24 columns
    """
    group1_info = np.zeros(COLS_GROUP1, dtype=np.int8)
    # who dealt -- seat '3' is treated as the dealer, encoded relative to
    # the current player (fix: removed an unused `current_player` local)
    group1_info[get_relative_position(game, tricknum, playernum, '3')] = 1
    # who called trump, again relative to the current player
    group1_info[4+get_relative_position(game, tricknum, playernum, game['caller'])] = 1
    # was it called first round (1 for round 1, 0 for round 2)
    group1_info[8] = 2-int(game['round'])
    # did the caller go alone
    group1_info[9] = int(game['alone'])
    # which suit is trump (one-hot over C/D/H/S)
    group1_info[10+{'C':0, 'D':1, 'H':2, 'S':3}[get_trump_suit(game)]] = 1
    # turn card: one-hot rank (9TJQKA) then one-hot suit (CDHS)
    turn_card = get_turn_card(game)
    group1_info[14+{n:i for n,i in zip(list('9TJQKA'), range(6))}[turn_card[0]]] = 1
    group1_info[20+{s:i for s,i in zip(list('CDHS'), range(4))}[turn_card[1]]] = 1
    return group1_info
def get_group2_info(game, tricknum, playernum):
    """
    #######
    DATA GROUP 2: Other misc. information
    #######
    (4) 1.) Who is leading right now
    (4) 2.) Who is winning right now
    (11) 3.) What card was led (is it trump)
    (11) 4.) What card is winning (is it trump)
    (5) 5.) Which team won each trick so far (+1 for "me", 0 for no one (yet), -1 for opponents)
    (12) 6.) Any players confirmed short in any suits
    Total: 47 columns
    """
    group2_info = np.zeros(COLS_GROUP2, dtype=np.int8)
    # cards already played in the trick in progress (empty if I lead)
    current_trick = game[['played'+str(i+1) for i in range(4*tricknum, 4*tricknum+playernum)]]
    trump_suit = get_trump_suit(game)
    # who leads -- relative position of the first card's seat; index 3 (self)
    # when nothing has been played yet, i.e. "I lead"
    group2_info[get_relative_position(game, tricknum, playernum, current_trick[0][-1]) if len(current_trick) > 0 else 3] = 1
    # who's winning (only defined once at least one card is down)
    if len(current_trick) > 0:
        winner, winning_card = get_winner(current_trick, trump_suit)
        group2_info[4+get_relative_position(game, tricknum, playernum, winner)] = 1
    # what card was led: one-hot rank, one-hot suit, plus an is-trump flag
    # (the left bower counts as trump)
    if len(current_trick) > 0:
        group2_info[8+{n:i for n,i in zip(list('9TJQKA'), range(6))}[current_trick[0][0]]] = 1
        group2_info[14+{s:i for s,i in zip(list('CDHS'), range(4))}[current_trick[0][1]]] = 1
        group2_info[18] = (current_trick[0][1]==trump_suit) or (current_trick[0][0]=='J' and current_trick[0][1]==same_color_suit[trump_suit])
    # what card is winning, encoded the same way
    if len(current_trick) > 0:
        group2_info[19+{n:i for n,i in zip(list('9TJQKA'), range(6))}[winning_card[0]]] = 1
        group2_info[25+{s:i for s,i in zip(list('CDHS'), range(4))}[winning_card[1]]] = 1
        group2_info[29] = (winning_card[1]==trump_suit) or (winning_card[0]=='J' and winning_card[1]==same_color_suit[trump_suit])
    # what team won each trick so far
    for tnum in range(5):
        if tnum >= tricknum:
            continue
        # return +1 if relative_position % 2 == 1, return -1 if relative_position % 2 == 0 (self is always 3)
        group2_info[30+tnum] = -1+2*(get_relative_position(game, tricknum, playernum, game['winner'+str(tnum+1)])%2)
    # any players confirmed short in suits
    # list it like [opp1 short in clubs, opp1 short in diamonds, ..., opp2 short in spades]
    for opp_pos in range(3):
        for i, s in enumerate(list('CDHS')):
            group2_info[35+4*opp_pos + i] = get_short_suitedness(game, tricknum, playernum, opp_pos, s)
    return group2_info
card_ix = {**{n:i for n,i in zip(list('9TJQKA'), range(6))},\
**{s:6*i for s,i in zip(list('CDHS'), range(4))}}
def get_group3_info(game, tricknum, playernum):
    """
    #######
    DATA GROUP 3: All card locations (constant order: 9C, 10C, ..., (D), (H), ..., KS, AS)
    #######
    For each card (24):
    (4) 1.) Confirmed in anyone's hand (my hand + ordered up turn card?)
    (4) 2.) Played in a previous trick by someone (maybe later expand this to which prev trick?)
    (3) 3.) Played in CURRENT trick by someone
    (1) 4.) Confirmed buried
    (1) 5.) Is trump
    Total: 312 columns
    """
    # layout per card: [0-3] confirmed in hand, [4-7] played in a previous
    # trick, [8-10] played this trick, [11] confirmed buried, [12] is trump
    COLS_PER_CARD = 13
    group3_info = np.zeros(24*COLS_PER_CARD, dtype=np.int8)
    trump_suit = get_trump_suit(game)
    # cards played in a previous trick ('-' entries are skipped turns)
    if tricknum > 0:
        prev_played_cards = game[['played'+str(i+1) for i in range(4*tricknum)]]
        for c in prev_played_cards:
            if '-' in c:
                continue
            group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 4 + get_relative_position(game, tricknum, playernum, c[-1])] = 1
    # cards played THIS trick
    if playernum > 0:
        current_played_cards = game[['played'+str(i+1) for i in range(4*tricknum, 4*tricknum+playernum)]]
        for c in current_played_cards:
            if c.startswith('-'):
                continue
            group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 8 + get_relative_position(game, tricknum, playernum, c[-1])] = 1
    # cards in my hand: cards not yet played that are tagged with my seat
    my_remaining_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(4*tricknum+playernum, 20)]]\
                          if c[-1] == get_current_player(game, tricknum, playernum)]
    for c in my_remaining_cards:
        # position of self wrt self is always 3
        group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 3] = 1
    # confirmed turn card location
    if game['round']==2:
        # round-2 call: the turn card was turned down, so it is buried
        turn_card = get_turn_card(game)
        group3_info[COLS_PER_CARD*(card_ix[turn_card[0]] + card_ix[turn_card[1]]) + COLS_PER_CARD-2] = 1
    elif get_current_player(game, tricknum, playernum) == '3':
        # I am the dealer: I picked up the turn card and buried one card --
        # the buried card is whichever original card I never play
        original_cards = get_original_hand(game, tricknum, playernum)
        played_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(20)]] if c[-1]=='3']
        buried_card = [c for c in original_cards if c not in played_cards][0]
        group3_info[COLS_PER_CARD*(card_ix[buried_card[0]]+card_ix[buried_card[1]]) + COLS_PER_CARD-2] = 1
    else:
        # dealer ('3') picked up the turn card; until it is seen on the
        # table, mark it as confirmed in the dealer's hand
        turn_card = get_turn_card(game)
        all_played_cards = game[['played'+str(i+1) for i in range(4*tricknum+playernum)]]
        if turn_card+'3' not in list(all_played_cards):
            group3_info[COLS_PER_CARD*(card_ix[turn_card[0]]+card_ix[turn_card[1]]) + get_relative_position(game, tricknum, playernum, 3)] = 1
    # Mark trump (all six trump-suit cards plus the left bower)
    for s in list('CDHS'):
        if s == trump_suit:
            for name in list('9TJQKA'):
                group3_info[COLS_PER_CARD*(card_ix[name]+card_ix[s]) + COLS_PER_CARD-1] = 1
            group3_info[COLS_PER_CARD*(card_ix['J']+card_ix[same_color_suit[s]]) + COLS_PER_CARD-1] = 1
    return group3_info
def get_group4_info(game, tricknum, playernum):
    """
    #######
    DATA GROUP 4: My remaining hand, again
    #######
    (11) 1.) Card #1 (is it trump)
    (11) 2.) Card #2 (is it trump)
    (11) 3.) Card #3 (is it trump)
    (11) 4.) Card #4 (is it trump)
    (11) 5.) Card #5 (is it trump)
    Total: 55 columns
    """
    # (fix: removed a dead, commented-out earlier implementation that was
    # left behind as a no-op string literal evaluated on every call)
    trump_suit = get_trump_suit(game)
    # cards not yet played that are tagged with the current player's seat
    my_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(4*tricknum+playernum, 20)]]\
                if c[-1] == get_current_player(game, tricknum, playernum)]
    # pad to five slots with the '00' placeholder, then shuffle so the model
    # cannot learn anything from hand ordering
    my_cards += ['00']*(5-len(my_cards))
    np.random.shuffle(my_cards)
    group4_info = []
    for c in my_cards:
        # 11 columns per card: rank + suit one-hots plus an is-trump flag
        # (left bower counts as trump)
        group4_info += card_to_ohe(c[0], c[1], trump_suit==c[1] or (c[0]=='J' and c[1]==same_color_suit[trump_suit]))
    return group4_info
def get_winner(current_trick, trump_suit):
    """Return (seat, 'RS') for the card currently winning the partial trick.

    Each entry of `current_trick` is a string like '9C2' (rank, suit, seat);
    entries starting with '-' (skipped turns) are ignored. Off-trump cards
    score 1-10 by rank; trump scores 12-35, with the right bower (J of
    trump) at 35 and the left bower (J of the same-color suit) at 31, so
    any trump beats any off-suit card. A card that neither is trump nor
    follows the currently-winning suit can never take the trick.
    """
    winning_card = current_trick[0]
    # power table: plain suits first, then override the left and right bowers
    # and the rest of the trump suit
    powers = {n+s:p for n,p in zip(list('9TJQKA'), [1,2,3,4,5,10]) for s in list('CDHS') if s != trump_suit}
    powers['J'+same_color_suit[trump_suit]] = 31
    powers.update({n+trump_suit:p for n,p in zip(list('9TQKAJ'), [12,15,20,25,30,35])})
    for i in range(1,len(current_trick)):
        c = current_trick[i]
        if c.startswith('-'):
            continue
        # if winning card is trump, just compare powers
        if winning_card[1] == trump_suit or (winning_card[0]=='J' and winning_card[1]==same_color_suit[trump_suit]):
            if powers[c[:2]] > powers[winning_card[:2]]:
                winning_card = c
        else:
            # first, check if card is trump (all trump powers exceed 10)
            if powers[c[:2]] > 10:
                winning_card = c
            # next, check if some random offsuit
            elif c[1] != winning_card[1]:
                continue
            # by now, determined neither are trump, and both have the same suit
            else:
                if powers[c[:2]] > powers[winning_card[:2]]:
                    winning_card = c
    return int(winning_card[-1]), winning_card[:2]
def get_short_suitedness(game, tricknum, playernum, opp_pos, short_suit):
    """Return 1 if the opponent at relative position *opp_pos* has revealed
    (by failing to follow suit) that they are void in *short_suit*, else 0.
    Only completed plays in tricks led with *short_suit* are informative.
    """
    # Every card that led a trick so far (every 4th played slot).
    led_cards = [c for c in game[['played'+str(i+1) for i in range(4*tricknum+playernum)]][::4]]
    trump_suit = get_trump_suit(game)
    for i, c in enumerate(led_cards):
        # skip if they lead
        if get_relative_position(game, tricknum, playernum, c[-1]) == opp_pos:
            continue
        # checking against a specific suit, so make sure the led suit is that suit
        # (or else if we're checking against trump and the left is led)
        # NOTE(review): the second clause tests short_suit against
        # same_color_suit[trump_suit]; the comment above suggests it was meant
        # for short_suit == trump_suit when the left bower leads -- confirm.
        if c[1] != short_suit or (c[0]=='J' and c[1]==same_color_suit[trump_suit] and short_suit==same_color_suit[trump_suit]):
            continue
        associated_trick = game[['played'+str(ix+1) for ix in range(4*i, min(4*(i+1), 4*tricknum+playernum))]]
        # skip if they haven't played yet
        if opp_pos not in [get_relative_position(game, tricknum, playernum, c[-1]) for c in associated_trick]:
            continue
        opp_played_card = [c for c in associated_trick if get_relative_position(game, tricknum, playernum, c[-1])==opp_pos][0]
        if c[1] == trump_suit or (c[0]=='J' and c[1] == same_color_suit[trump_suit]):
            # "if not trump suit and also is not left"
            if opp_played_card[1] != trump_suit and not (opp_played_card[0]=='J' and opp_played_card[1]==same_color_suit[trump_suit]):
                return 1
        else:
            # "if not same suit or is left"
            if opp_played_card[1] != c[1] or (opp_played_card[0]=='J' and opp_played_card[1]==same_color_suit[trump_suit]):
                return 1
    return 0
def get_current_player(game, tricknum, playernum):
    """Return the player id (the trailing character) of the card in play
    slot 4*tricknum + playernum + 1."""
    slot = 4 * tricknum + playernum + 1
    return game['played%d' % slot][-1]
def get_relative_position(game, tricknum, playernum, pos):
    """Map absolute player id *pos* onto a position relative to the player
    currently to act: self maps to 3, and other players advance positively
    (mod 4) from the seat after the current one."""
    current = int(get_current_player(game, tricknum, playernum))
    return (int(pos) - current - 1) % 4
power_to_name = {power:n for power,n in zip([1,2,3,4,5,10,12,15,20,25,30,31,35], list('9TJQKA9TQKAJJ'))}
def get_turn_card(game):
name = power_to_name[game['TC_power']]
if game['TC_isD']: return name+'D'
elif game['TC_isH']: return name+'H'
elif game['TC_isS']: return name+'S'
else: return name+'C'
# Legacy layout flag: when True, hands are read from the next player's
# columns. NOTE(review): presumably matches an older data format -- confirm.
oldstyle=False
def get_original_hand(game, tricknum, playernum):
    """Return the 5-card original hand (name+suit strings) of the player
    currently to act, decoded from the one-hot p<player>c<i>* columns."""
    player = get_current_player(game, tricknum, playernum)
    if oldstyle: player = (int(player)+1)%4
    # Each card: name looked up from its power value, suit appended from
    # whichever is-suit flag is set ('C' when no D/H/S flag is set).
    return [power_to_name[game['p'+str(player)+'c'+str(i+1)]] +
            'D'*game['p'+str(player)+'c'+str(i+1)+'isD'] + \
            'H'*game['p'+str(player)+'c'+str(i+1)+'isH'] + \
            'S'*game['p'+str(player)+'c'+str(i+1)+'isS'] + \
            'C'*(1-game['p'+str(player)+'c'+str(i+1)+'isD']-\
            game['p'+str(player)+'c'+str(i+1)+'isH']-\
            game['p'+str(player)+'c'+str(i+1)+'isS'])\
            for i in range(5)]
def get_trump_suit(game):
    """Return the trump suit letter for this deal: 'C', 'D', 'H' or 'S'."""
    # Flags are checked D, H, S in order; clubs is the fall-through default.
    for suit in 'DHS':
        if game['trump_is' + suit]:
            return suit
    return 'C'
def card_to_ohe(name, suit, trump=None):
    """One-hot encode a card.

    Produces 6 name columns ('9TJQKA') followed by 4 suit columns
    ('CDHS'); unknown names/suits (e.g. the '00' placeholder) leave all
    zeros. When *trump* is not None an 11th is-trump column is appended.
    """
    encoding = [1 if n == name else 0 for n in '9TJQKA']
    encoding += [1 if s == suit else 0 for s in 'CDHS']
    if trump is not None:
        encoding.append(int(trump))
    return encoding
# Column offsets for the 24-wide one-hot card encoding:
# names '9TJQKA' -> 0..5, suits 'CDHS' -> 0, 6, 12, 18.
card_ix = {name: i for i, name in enumerate('9TJQKA')}
card_ix.update({suit: 6 * i for i, suit in enumerate('CDHS')})
def get_target(game):
    """Build the (20, 24) int8 one-hot target: row i marks the card played
    in slot i (name offset + suit offset); slots containing '-' stay zero."""
    target = np.zeros((20, 24), dtype=np.int8)
    played = game[['played%d' % (ix + 1) for ix in range(20)]]
    for row, card in enumerate(played):
        if '-' not in card:
            target[row][card_ix[card[0]] + card_ix[card[1]]] = 1
    return target
da84d11e387816b98751779f07c11cd0a7df9915 | 100 | py | Python | db/vectordump/vectordumpConfig.py | rand-projects/fisb-decode | 870f6be8b7e7013fcba0c4f2f894aae425700563 | [
"BSD-2-Clause-Patent"
] | 7 | 2021-05-29T13:12:20.000Z | 2021-12-26T02:38:34.000Z | db/vectordump/vectordumpConfig.py | rand-projects/fisb-decode | 870f6be8b7e7013fcba0c4f2f894aae425700563 | [
"BSD-2-Clause-Patent"
] | null | null | null | db/vectordump/vectordumpConfig.py | rand-projects/fisb-decode | 870f6be8b7e7013fcba0c4f2f894aae425700563 | [
"BSD-2-Clause-Patent"
] | null | null | null | """Vectordump configuration information.
"""
#: MONGO URI -- connection string for the local MongoDB server.
MONGO_URI = 'mongodb://localhost:27017/'
| 16.666667 | 40 | 0.72 | """Vectordump configuration information.
"""
#: MONGO URI -- connection string for the local MongoDB server.
MONGO_URI = 'mongodb://localhost:27017/'
| 0 | 0 | 0 |
b7c392d057e5111056f9d1b54391a80f26144758 | 552 | py | Python | sciNum.py | theloni-monk/chem-py | 216eaf427f4313b1173b712a657a2cd2418a0b5c | [
"MIT"
] | null | null | null | sciNum.py | theloni-monk/chem-py | 216eaf427f4313b1173b712a657a2cd2418a0b5c | [
"MIT"
] | null | null | null | sciNum.py | theloni-monk/chem-py | 216eaf427f4313b1173b712a657a2cd2418a0b5c | [
"MIT"
] | null | null | null | import math
#TODO: WRITEME sciNum | 16.235294 | 40 | 0.572464 | import math
#TODO: WRITEME sciNum
class sciNum:
    """A number in scientific notation (base * 10 ** exp).

    Skeleton only: every operation below is an unimplemented stub, per the
    WRITEME marker in this module.
    """
    def __init__(self, base, exponent):
        self.base = base
        self.exp = exponent
        # NOTE(review): _getSigFigs is itself a stub, so this is always None.
        self.sigfigs = _getSigFigs(base)
    @classmethod
    def fromString(cls, str):
        # TODO: alternate constructor from a string representation.
        pass
    @classmethod
    def fromLong(cls, long):
        # TODO: alternate constructor from a plain integer.
        pass
    def __str__(self):
        # TODO: render e.g. as '1.23e4'.
        pass
    def __add__(self, other):
        # TODO: addition with significant-figure handling.
        pass
    def __sub__(self, other):
        # TODO: subtraction with significant-figure handling.
        pass
    def __mul__(self, other):
        # TODO: multiplication with significant-figure handling.
        pass
    def __div__(self, other):
        # NOTE(review): __div__ is the Python 2 protocol; Python 3 uses
        # __truediv__ -- confirm the target interpreter.
        pass
def _getSigFigs(num):
    # TODO: count the significant figures of *num*; stub returns None.
    pass
b9f6aa816803d0ea0a2aeca40e13f61770a8b0d4 | 2,199 | py | Python | special_math/specialmath.py | BusinessFawn/SpecialMath | 290cb513b8364e7bb35d1a302910bbd81b0e9c57 | [
"Apache-2.0"
] | null | null | null | special_math/specialmath.py | BusinessFawn/SpecialMath | 290cb513b8364e7bb35d1a302910bbd81b0e9c57 | [
"Apache-2.0"
] | null | null | null | special_math/specialmath.py | BusinessFawn/SpecialMath | 290cb513b8364e7bb35d1a302910bbd81b0e9c57 | [
"Apache-2.0"
] | null | null | null | import os
from special_math.common_utilities import SpecialMathCalc, RequestUtils
from special_math import MAX_SPECIAL_NUMBER_ENTRY
import logging
from flask import Blueprint
# Blueprint for the /specialmath endpoint group.
bp = Blueprint('specialmath', __name__, url_prefix='/specialmath')
logger = logging.getLogger(__name__)
# Log level comes from the LOG_LEVEL environment variable (default DEBUG).
logger.setLevel(os.getenv("LOG_LEVEL", logging.DEBUG))
# Single calculator instance shared by all requests.
special_calculator = SpecialMathCalc()
@bp.route('/<int:n>')
def special_math(n: int):
    """
    Takes an integer input and computes the special value for that number
    :param n: The path value given to calculate the special value from
    :return: a dict with context and response and a status code
    """
    ctx = RequestUtils().get_request_context()
    logger.debug(f'Received request for {n}, request_id: {ctx["request-id"]}')
    # Reject values above the configured ceiling before doing any work.
    if n > MAX_SPECIAL_NUMBER_ENTRY:
        msg = (f'Invalid special math request: request {n} exceeds '
               f'maximum value of {MAX_SPECIAL_NUMBER_ENTRY}')
        return {'context': ctx,
                'error': {'message': msg, 'name': 'InvalidRequestParameter'}}, 400
    try:
        special_number = special_calculator.calculate_special_value(n)
    except Exception as exc:
        # Calculation failed: log it and answer with a generic 500 payload.
        logger.error("Experienced error attempting to calculate special number")
        logger.critical(exc)
        msg = ('Unexpected error encountered. Please retry your request. '
               'If this error persists reach out to John because he did '
               'something wrong.')
        return {'context': ctx,
                'error': {'message': msg, 'name': 'InternalServerError'}}, 500
    logger.debug(f'Calculated special number: {special_number}')
    payload = {"context": ctx,
               "response": {"special-calculation": special_number}}
    logger.info(f"Successfully processed request {n}: {payload}")
    return payload
| 43.117647 | 117 | 0.589814 | import os
from special_math.common_utilities import SpecialMathCalc, RequestUtils
from special_math import MAX_SPECIAL_NUMBER_ENTRY
import logging
from flask import Blueprint
# Blueprint for the /specialmath endpoint group.
bp = Blueprint('specialmath', __name__, url_prefix='/specialmath')
logger = logging.getLogger(__name__)
# Log level comes from the LOG_LEVEL environment variable (default DEBUG).
logger.setLevel(os.getenv("LOG_LEVEL", logging.DEBUG))
# Single calculator instance shared by all requests.
special_calculator = SpecialMathCalc()
@bp.route('/<int:n>')
def special_math(n: int):
    """
    Takes an integer input and computes the special value for that number
    :param n: The path value given to calculate the special value from
    :return: a dict with context and response and a status code
    """
    ctx = RequestUtils().get_request_context()
    logger.debug(f'Received request for {n}, request_id: {ctx["request-id"]}')
    # Reject values above the configured ceiling before doing any work.
    if n > MAX_SPECIAL_NUMBER_ENTRY:
        msg = (f'Invalid special math request: request {n} exceeds '
               f'maximum value of {MAX_SPECIAL_NUMBER_ENTRY}')
        return {'context': ctx,
                'error': {'message': msg, 'name': 'InvalidRequestParameter'}}, 400
    try:
        special_number = special_calculator.calculate_special_value(n)
    except Exception as exc:
        # Calculation failed: log it and answer with a generic 500 payload.
        logger.error("Experienced error attempting to calculate special number")
        logger.critical(exc)
        msg = ('Unexpected error encountered. Please retry your request. '
               'If this error persists reach out to John because he did '
               'something wrong.')
        return {'context': ctx,
                'error': {'message': msg, 'name': 'InternalServerError'}}, 500
    logger.debug(f'Calculated special number: {special_number}')
    payload = {"context": ctx,
               "response": {"special-calculation": special_number}}
    logger.info(f"Successfully processed request {n}: {payload}")
    return payload
return response
| 0 | 0 | 0 |
f55062eefa3ab2e808cc8285ab422025123f08d6 | 8,136 | py | Python | python/qtLearn/uiModels.py | david-cattermole/qt-learning | cfbb6b94106c29650b62dbd2c51fb7eb6f811d47 | [
"BSD-3-Clause"
] | 13 | 2017-11-30T09:26:08.000Z | 2021-04-22T04:08:16.000Z | python/qtLearn/uiModels.py | david-cattermole/qt-learning | cfbb6b94106c29650b62dbd2c51fb7eb6f811d47 | [
"BSD-3-Clause"
] | null | null | null | python/qtLearn/uiModels.py | david-cattermole/qt-learning | cfbb6b94106c29650b62dbd2c51fb7eb6f811d47 | [
"BSD-3-Clause"
] | 1 | 2019-09-18T01:31:40.000Z | 2019-09-18T01:31:40.000Z | import Qt as Qt
import Qt.QtGui as QtGui
import Qt.QtCore as QtCore
from qtLearn.nodes import Node
import qtLearn.uiUtils as uiUtils
############################################################################
############################################################################ | 33.344262 | 85 | 0.580138 | import Qt as Qt
import Qt.QtGui as QtGui
import Qt.QtCore as QtCore
from qtLearn.nodes import Node
import qtLearn.uiUtils as uiUtils
class ItemModel(QtCore.QAbstractItemModel, uiUtils.QtInfoMixin):
    """Qt item model exposing a tree of Node objects.

    Column display is table-driven: _column_names maps column index ->
    column title, and _node_attr_key maps column title -> the Node
    accessor name queried for DisplayRole/EditRole data.
    """
    def __init__(self, rootNode, font=None):
        super(ItemModel, self).__init__()
        self._rootNode = None
        self._column_names = {
            0: 'Column',
        }
        self._node_attr_key = {
            'Column': 'name',
        }
        self._font = font
        self.setRootNode(rootNode)
    def rootNode(self):
        return self._rootNode
    def setRootNode(self, rootNode):
        # Older Qt bindings lack begin/endResetModel; fall back to reset().
        cls = super(ItemModel, self)
        useBeginAndEnd = False
        if 'beginResetModel' in cls.__dict__ and 'endResetModel' in cls.__dict__:
            useBeginAndEnd = True
        if useBeginAndEnd is True:
            # super(ItemModel, self).beginResetModel()
            self.beginResetModel()
        del self._rootNode
        self._rootNode = rootNode
        if useBeginAndEnd is False:
            self.reset()
        if useBeginAndEnd is True:
            self.endResetModel()
        topLeft = self.createIndex(0, 0)
        self.dataChanged.emit(topLeft, topLeft)
    def columnCount(self, parent):
        return len(self._column_names.keys())
    def rowCount(self, parent):
        if not parent.isValid():
            parentNode = self._rootNode
        else:
            parentNode = parent.internalPointer()
        return parentNode.childCount()
    def data(self, index, role):
        if not index.isValid():
            return None
        node = index.internalPointer()
        if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
            # Resolve column index -> column title -> Node accessor name.
            column_index = index.column()
            if column_index not in self._column_names:
                msg = '{0} was not in {1}'.format(column_index, self._column_names)
                raise ValueError(msg)
            column_name = self._column_names[column_index]
            if column_name not in self._node_attr_key:
                msg = '{0} was not in {1}'.format(column_name, self._node_attr_key)
                raise ValueError(msg)
            attr_name = self._node_attr_key[column_name]
            value = getattr(node, attr_name, None)
            if value is not None:
                # The attribute is a bound accessor; call it for the value.
                value = value()
            return value
        if role == QtCore.Qt.DecorationRole:
            # TODO: Can we refactor this similar to the DisplayRole above?
            if index.column() == 0:
                return node.icon()
        if role == QtCore.Qt.ToolTipRole:
            return node.toolTip()
        if role == QtCore.Qt.StatusTipRole:
            return node.statusTip()
        if role == QtCore.Qt.FontRole:
            if self._font is not None:
                return self._font
    def setData(self, index, value, role=QtCore.Qt.EditRole):
        # Only the node name is editable; emits dataChanged on success.
        if index.isValid():
            node = index.internalPointer()
            if not node.editable():
                return False
            if role == QtCore.Qt.EditRole:
                node.setName(value)
                self.dataChanged.emit(index, index, [role])
                return True
        return False
    def headerData(self, section, orientation, role):
        if role == QtCore.Qt.DisplayRole:
            return self._column_names.get(section, 'Column')
    def flags(self, index):
        # Build the item-flag bitmask from the node's capability queries.
        v = QtCore.Qt.NoItemFlags
        node = index.internalPointer()
        if node.enabled():
            v = v | QtCore.Qt.ItemIsEnabled
        if node.checkable():
            v = v | QtCore.Qt.ItemIsUserCheckable
        if node.neverHasChildren():
            v = v | QtCore.Qt.ItemNeverHasChildren
        if node.selectable():
            v = v | QtCore.Qt.ItemIsSelectable
        if node.editable():
            v = v | QtCore.Qt.ItemIsEditable
        return v
    def parent(self, index):
        node = self.getNode(index)  # index.internalPointer()
        parentNode = node.parent()
        if parentNode == self._rootNode:
            return QtCore.QModelIndex()
        if parentNode is None:
            return QtCore.QModelIndex()
        row = parentNode.row()
        return self.createIndex(row, 0, parentNode)
    def index(self, row, column, parent):
        parentNode = self.getNode(parent)
        # NOTE(review): `row < 0 and row >= childCount()` can never be true;
        # `or` was probably intended. Note also the Python 2 print statement.
        if row < 0 and row >= parentNode.childCount():
            print 'ItemModel index:', row
        childItem = parentNode.child(row)
        if childItem:
            return self.createIndex(row, column, childItem)
        return QtCore.QModelIndex()
    def getNode(self, index):
        # Resolve a model index to its Node, defaulting to the root node.
        node = None
        if index.isValid():
            node = index.internalPointer()
            if node is not None:
                return node
        # else:
        #     print 'Warning: getNode index is not valid;', index
        return self._rootNode
    def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
        parentNode = self.getNode(parent)
        self.beginInsertRows(parent, position, position + rows - 1)
        success = None
        for row in range(rows):
            childCount = parentNode.childCount()
            childNode = Node("untitled" + str(childCount))
            success = parentNode.insertChild(position, childNode)
        self.endInsertRows()
        return success
    def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
        parentNode = self.getNode(parent)
        self.beginRemoveRows(parent, position, position + rows - 1)
        success = None
        for row in range(rows):
            success = parentNode.removeChild(position)
        self.endRemoveRows()
        return success
class SortFilterProxyModel(QtCore.QSortFilterProxyModel, uiUtils.QtInfoMixin):
    """Proxy model that filters rows by a single named tag on node data.

    A row passes when its node's data dict maps filterTagName to
    filterTagValue (optionally restricted to nodes of filterTagNodeType);
    other node types fall back to a substring match of the plain regexp
    pattern against the node's combined tags.
    """
    def __init__(self):
        super(SortFilterProxyModel, self).__init__()
        self._filterTagName = ''
        self._filterTagValue = ''
        self._filterTagNodeType = ''
        # TODO: Support multiple named tags for filtering, currently only supports 1.
    ############################################################################
    def filterTagName(self):
        return self._filterTagName
    def setFilterTagName(self, value):
        # print('setFilterTagName:', repr(value))
        self._filterTagName = value
        self.invalidateFilter()
    def filterTagValue(self):
        return self._filterTagValue
    def setFilterTagValue(self, value):
        # print('setFilterTagValue:', repr(value))
        self._filterTagValue = value
        self.invalidateFilter()
    def filterTagNodeType(self):
        return self._filterTagNodeType
    def setFilterTagNodeType(self, value):
        # print('setFilterTagNodeType:', repr(value))
        self._filterTagNodeType = value
        self.invalidateFilter()
    ############################################################################
    def filterAcceptsRow(self, sourceRow, sourceParent):
        # print('filterAcceptsRow:', sourceRow, sourceParent)
        result = False
        srcModel = self.sourceModel()
        column = self.filterKeyColumn()
        if column < 0:
            column = 0
        index = srcModel.index(sourceRow, column, sourceParent)
        node = index.internalPointer()
        tagName = self.filterTagName()
        # No tag name configured: accept everything.
        if tagName is None or len(tagName) == 0:
            return True
        filterNodeType = self.filterTagNodeType()
        typeInfo = node.typeInfo
        if filterNodeType is None or typeInfo == filterNodeType:
            # Tag-based filtering: accept when no value is set, or the
            # node's data value for the tag matches exactly.
            tagValue = self.filterTagValue()
            nodeData = node.data()
            nodeDataValue = nodeData.get(tagName)
            if tagValue is None or len(tagValue) == 0:
                result = True
            elif nodeDataValue == tagValue:
                result = True
            else:
                result = False
        else:
            # Other node types: substring-match the regexp pattern text
            # against the node's combined tag string.
            pattern = self.filterRegExp().pattern()
            if pattern is None or len(pattern) == 0:
                result = True
            else:
                path = node.allTags()
                if pattern in path:
                    result = True
        return result
0966823fc55b1c417ff7874904819d1a018a3ebe | 1,241 | py | Python | L6/pytest.py | thebestday/python | 2efb7fbd5c4ee40c03233875c1989ce68aa0fe18 | [
"MIT"
] | null | null | null | L6/pytest.py | thebestday/python | 2efb7fbd5c4ee40c03233875c1989ce68aa0fe18 | [
"MIT"
] | null | null | null | L6/pytest.py | thebestday/python | 2efb7fbd5c4ee40c03233875c1989ce68aa0fe18 | [
"MIT"
] | null | null | null | # Полуавтоматические тесты
#
# list_temp = [1,2,3,'abc']
#
# print(test_function(list_temp))
# теперь пишем полуавтоматическую фун-ю
# Run the semi-automatic test suite, then print one worked example.
function_test()
list_temp = [1, 2, 3,'5', 'abc', 4]
list_out = test_function(list_temp)
print(list_out)
| 25.326531 | 70 | 0.572925 | # Полуавтоматические тесты
def test_function(list_in):
...
# вход лист с числами и строкама
# выход лист с числами
...
list_temp = []
# i = 0
# while (type(list_in[i]) == int):
for i in range(len(list_in)):
if type(list_in[i])== int:
list_temp.append(list_in[i])
elif type(list_in[i]) == str:
if list_in[i].isdigit(): list_temp.append(int(list_in[i]))
# i += 1
return list_temp
#
# list_temp = [1,2,3,'abc']
#
# print(test_function(list_temp))
# теперь пишем полуавтоматическую фун-ю
def function_test():
    """Semi-automatic smoke tests for test_function: prints one
    'TEST <i> IS OK' / 'TEST <i> IS FAILED' line per case."""
    cases = [
        ([1, 2, 3, 'abc'], [1, 2, 3]),
        ([1, 2, 3, 'abc', 4], [1, 2, 3, 4]),
        ([1, 2, 3, '5', 'abc', 4], [1, 2, 3, 5, 4]),
    ]
    for number, (given, expected) in enumerate(cases, start=1):
        if test_function(given) == expected:
            print('TEST %d IS OK' % number)
        else:
            print('TEST %d IS FAILED' % number)
# Run the semi-automatic test suite, then print one worked example.
function_test()
list_temp = [1, 2, 3,'5', 'abc', 4]
list_out = test_function(list_temp)
print(list_out)
| 998 | 0 | 44 |
6b7f8a3a8a495a4db69fe71c98c03266148d2518 | 59 | py | Python | publication_backbone/search/api.py | Excentrics/publication-backbone | 65c9820308b09a6ae1086c265f8d49e36f3724b9 | [
"BSD-3-Clause"
] | 6 | 2016-05-19T14:59:51.000Z | 2020-03-19T10:08:29.000Z | publication_backbone/search/api.py | Excentrics/publication-backbone | 65c9820308b09a6ae1086c265f8d49e36f3724b9 | [
"BSD-3-Clause"
] | null | null | null | publication_backbone/search/api.py | Excentrics/publication-backbone | 65c9820308b09a6ae1086c265f8d49e36f3724b9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
| 11.8 | 24 | 0.576271 | # -*- coding: utf-8 -*-
class SearchAPI(object):
    """Publication search API entry point; currently an empty placeholder."""
    pass
| 0 | 12 | 23 |
72b8e51a548b55fa1cb0d56d3c366cec9c1b25ed | 12,521 | py | Python | laika_repo/laika/downloader.py | FusionFuzz/openpilot | ee9a74d9842808f2af3cb6e2173d75483443f31e | [
"MIT"
] | null | null | null | laika_repo/laika/downloader.py | FusionFuzz/openpilot | ee9a74d9842808f2af3cb6e2173d75483443f31e | [
"MIT"
] | null | null | null | laika_repo/laika/downloader.py | FusionFuzz/openpilot | ee9a74d9842808f2af3cb6e2173d75483443f31e | [
"MIT"
] | null | null | null | import certifi
import ftplib
import hatanaka
import os
import urllib.request
import pycurl
import time
import tempfile
from datetime import datetime
from urllib.parse import urlparse
from io import BytesIO
from .constants import SECS_IN_HR, SECS_IN_DAY, SECS_IN_WEEK
from .gps_time import GPSTime
dir_path = os.path.dirname(os.path.realpath(__file__))
def retryable(f):
  """
  Decorator to allow us to pass multiple URLs from which to download.
  Automatically retry the request with the next URL on failure
  """
  # Bug fix: `wrapped` was returned without ever being defined, so applying
  # the decorator raised NameError. Define the retrying wrapper.
  def wrapped(url_bases, *args, **kwargs):
    if isinstance(url_bases, str):
      # Only one url passed: call through directly, no retry loop.
      return f(url_bases, *args, **kwargs)
    # Otherwise try each base URL in turn until one succeeds.
    for url_base in url_bases:
      try:
        return f(url_base, *args, **kwargs)
      except IOError as e:
        print(e)
    # None of them succeeded.
    raise IOError("Multiple URL failures attempting to pull file(s)")
  return wrapped
# NOTE(review): this decorator appears orphaned from a removed function;
# the real download_files wrapper applies @retryable itself -- confirm.
@retryable
def ftp_download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
  """
  Like download file, but more of them. Keeps a persistent FTP connection open
  to be more efficient.
  """
  folder_path_abs = os.path.join(cacheDir, folder_path)
  ftp = ftp_connect(url_base + folder_path)
  filepaths = []
  for filename in filenames:
    filename_zipped = filename + compression
    # Path the decompressed file will land at (hatanaka maps the name).
    filepath = str(hatanaka.get_decompressed_path(os.path.join(folder_path_abs, filename)))
    filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
    print("pulling from", url_base, "to", filepath)
    if not os.path.isfile(filepath) or overwrite:
      if not os.path.exists(folder_path_abs):
        os.makedirs(folder_path_abs)
      try:
        # NOTE(review): the file handle opened inline here is never
        # explicitly closed (relies on GC).
        ftp.retrbinary('RETR ' + filename_zipped, open(filepath_zipped, 'wb').write)
      except (ftplib.error_perm):
        raise IOError("Could not download file from: " + url_base + folder_path + filename_zipped)
      filepaths.append(str(hatanaka.decompress_on_disk(filepath_zipped)))
    else:
      filepaths.append(filepath)
  return filepaths
@retryable
@retryable
| 34.30411 | 144 | 0.692277 | import certifi
import ftplib
import hatanaka
import os
import urllib.request
import pycurl
import time
import tempfile
from datetime import datetime
from urllib.parse import urlparse
from io import BytesIO
from .constants import SECS_IN_HR, SECS_IN_DAY, SECS_IN_WEEK
from .gps_time import GPSTime
dir_path = os.path.dirname(os.path.realpath(__file__))
def retryable(f):
  """
  Decorator to allow us to pass multiple URLs from which to download.
  Automatically retry the request with the next URL on failure
  """
  def wrapped(url_bases, *args, **kwargs):
    # A single URL string is forwarded directly -- no retry loop.
    if isinstance(url_bases, str):
      return f(url_bases, *args, **kwargs)
    # Otherwise walk the sequence of base URLs until one succeeds.
    for base in url_bases:
      try:
        return f(base, *args, **kwargs)
      except IOError as err:
        print(err)
    # Every base URL failed.
    raise IOError("Multiple URL failures attempting to pull file(s)")
  return wrapped
def ftp_connect(url):
  """Open an anonymous FTP connection to *url* and cd into its path.

  Raises IOError on connection, authentication or chdir failure.
  """
  parsed = urlparse(url)
  assert parsed.scheme == 'ftp'
  try:
    domain = parsed.netloc
    ftp = ftplib.FTP(domain)
    ftp.login()
  except (OSError, ftplib.error_perm):
    raise IOError("Could not connect/auth to: " + domain)
  try:
    ftp.cwd(parsed.path)
  except ftplib.error_perm:
    raise IOError("Permission failure with folder: " + url)
  return ftp
@retryable
def list_dir(url):
  """List the file names in the FTP directory *url* (retryable over
  multiple base URLs)."""
  try:
    ftp = ftp_connect(url)
    return ftp.nlst()
  except ftplib.error_perm:
    raise IOError("Permission failure listing folder: " + url)
def ftp_download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
  """
  Like download file, but more of them. Keeps a persistent FTP connection open
  to be more efficient.

  Returns the list of decompressed local paths; raises IOError when a file
  cannot be retrieved. Already-cached files are skipped unless *overwrite*.
  """
  folder_path_abs = os.path.join(cacheDir, folder_path)
  ftp = ftp_connect(url_base + folder_path)
  filepaths = []
  for filename in filenames:
    filename_zipped = filename + compression
    # Path the decompressed file will land at (hatanaka maps the name).
    filepath = str(hatanaka.get_decompressed_path(os.path.join(folder_path_abs, filename)))
    filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
    print("pulling from", url_base, "to", filepath)
    if not os.path.isfile(filepath) or overwrite:
      if not os.path.exists(folder_path_abs):
        os.makedirs(folder_path_abs)
      try:
        # Bug fix: the output handle was opened inline and never closed;
        # a context manager guarantees it is released even on error.
        with open(filepath_zipped, 'wb') as wf:
          ftp.retrbinary('RETR ' + filename_zipped, wf.write)
      except ftplib.error_perm:
        raise IOError("Could not download file from: " + url_base + folder_path + filename_zipped)
      filepaths.append(str(hatanaka.decompress_on_disk(filepath_zipped)))
    else:
      filepaths.append(filepath)
  return filepaths
def https_download_file(url):
  """Download *url* over HTTPS with NASA Earthdata authentication.

  Credentials come from a .netrc file next to this module or, failing
  that, from the NASA_USERNAME / NASA_PASSWORD environment variables
  (written to a temporary netrc for curl). Returns the raw response
  bytes; raises IOError on missing credentials or non-200 status.
  """
  if os.path.isfile(dir_path + '/.netrc'):
    netrc_path = dir_path + '/.netrc'
    f = None
  else:
    try:
      username = os.environ['NASA_USERNAME']
      password = os.environ['NASA_PASSWORD']
      # Build a throwaway netrc file for curl from the env credentials.
      f = tempfile.NamedTemporaryFile()
      netrc = f"machine urs.earthdata.nasa.gov login {username} password {password}"
      f.write(netrc.encode())
      f.flush()
      netrc_path = f.name
    except KeyError:
      raise IOError('Could not find .netrc file and no NASA_USERNAME and NASA_PASSWORD in enviroment for urs.earthdata.nasa.gov authentication')
  crl = pycurl.Curl()
  crl.setopt(crl.CAINFO, certifi.where())
  crl.setopt(crl.URL, url)
  crl.setopt(crl.FOLLOWLOCATION, True)
  crl.setopt(crl.NETRC_FILE, netrc_path)
  crl.setopt(crl.NETRC, 2)
  # Relaxed TLS level accepted by the CDDIS servers.
  crl.setopt(crl.SSL_CIPHER_LIST, 'DEFAULT@SECLEVEL=1')
  # Earthdata auth is cookie-based across the redirect chain.
  crl.setopt(crl.COOKIEJAR, '/tmp/cddis_cookies')
  crl.setopt(pycurl.CONNECTTIMEOUT, 10)
  buf = BytesIO()
  crl.setopt(crl.WRITEDATA, buf)
  crl.perform()
  response = crl.getinfo(pycurl.RESPONSE_CODE)
  crl.close()
  if f is not None:
    f.close()
  if response != 200:
    raise IOError('HTTPS error ' + str(response))
  return buf.getvalue()
def ftp_download_file(url):
  """Fetch *url* over FTP and return the raw (possibly compressed) bytes."""
  remote = urllib.request.urlopen(url)
  payload = remote.read()
  remote.close()
  return payload
@retryable
def download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
  """Retryable (multi-URL) front-end for ftp_download_files; same signature."""
  return ftp_download_files(
    url_base, folder_path, cacheDir, filenames, compression=compression, overwrite=overwrite
  )
@retryable
def download_file(url_base, folder_path, filename_zipped):
  """Download one zipped file, dispatching on the URL scheme.

  Uses the NASA-authenticated HTTPS path for https URLs and plain FTP
  for ftp URLs; raises NotImplementedError for anything else.
  """
  url = url_base + folder_path + filename_zipped
  print('Downloading ' + url)
  if 'https' in url:
    data_zipped = https_download_file(url)
  # Bug fix: this was `elif 'ftp':` -- a constant truthy string literal --
  # which made the NotImplementedError branch below unreachable.
  elif 'ftp' in url:
    data_zipped = ftp_download_file(url)
  else:
    raise NotImplementedError('Did find ftp or https preamble')
  return data_zipped
def download_and_cache_file(url_base, folder_path, cacheDir, filename, compression='', overwrite=False):
  """Download *filename* (with *compression* suffix), cache and decompress it.

  A '<file>.attempt_time' marker throttles re-attempts of failed
  downloads to at most once per hour. Returns the decompressed local
  path; raises IOError on throttle or download failure.
  """
  folder_path_abs = os.path.join(cacheDir, folder_path)
  filename_zipped = filename + compression
  filepath = str(hatanaka.get_decompressed_path(os.path.join(folder_path_abs, filename)))
  filepath_attempt = filepath + '.attempt_time'
  filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
  if os.path.exists(filepath_attempt):
    with open(filepath_attempt, 'rb') as rf:
      last_attempt_time = float(rf.read().decode())
    # Don't hammer the server: honour the one-hour back-off window.
    if time.time() - last_attempt_time < SECS_IN_HR:
      raise IOError(f"Too soon to try {folder_path + filename_zipped} from {url_base} ")
  if not os.path.isfile(filepath) or overwrite:
    if not os.path.exists(folder_path_abs):
      os.makedirs(folder_path_abs)
    try:
      data_zipped = download_file(url_base, folder_path, filename_zipped)
    except (IOError, pycurl.error):
      # Record the failed attempt time atomically (tempfile + os.replace).
      # NOTE(review): 'tmp/' vs '/tmp' paths only coincide because the
      # double slash collapses; confirm cacheDir always ends with '/'.
      unix_time = time.time()
      if not os.path.exists(cacheDir + 'tmp/'):
        os.makedirs(cacheDir + '/tmp')
      with tempfile.NamedTemporaryFile(delete=False, dir=cacheDir+'tmp/') as fout:
        fout.write(str.encode(str(unix_time)))
      os.replace(fout.name, filepath + '.attempt_time')
      raise IOError(f"Could not download {folder_path + filename_zipped} from {url_base} ")
    with open(filepath_zipped, 'wb') as wf:
      wf.write(data_zipped)
    filepath = str(hatanaka.decompress_on_disk(filepath_zipped))
  return filepath
def download_nav(time, cache_dir, constellation='GPS'):
  """Download a broadcast navigation (ephemeris) file for *time*.

  Uses the daily CDDIS archive for times older than one day, otherwise
  the hourly archive (GPS only). Returns the decompressed local path,
  or None when the download fails (IOError is swallowed) or no branch
  matches.
  """
  t = time.as_datetime()
  try:
    if GPSTime.from_datetime(datetime.utcnow()) - time > SECS_IN_DAY:
      url_base = 'https://cddis.nasa.gov/archive/gnss/data/daily/'
      cache_subdir = cache_dir + 'daily_nav/'
      if constellation =='GPS':
        filename = t.strftime("brdc%j0.%yn")
        folder_path = t.strftime('%Y/%j/%yn/')
      elif constellation =='GLONASS':
        filename = t.strftime("brdc%j0.%yg")
        folder_path = t.strftime('%Y/%j/%yg/')
      # NOTE(review): any other constellation reaches this point with
      # filename/folder_path unbound and raises NameError -- confirm
      # callers only pass GPS/GLONASS.
      # Archive switched to gzip at day-of-year 335 of 2020.
      compression = '.gz' if folder_path >= '2020/335/' else '.Z'
      return download_and_cache_file(url_base, folder_path, cache_subdir, filename, compression=compression)
    else:
      url_base = 'https://cddis.nasa.gov/archive/gnss/data/hourly/'
      cache_subdir = cache_dir + 'hourly_nav/'
      if constellation =='GPS':
        filename = t.strftime("hour%j0.%yn")
        folder_path = t.strftime('%Y/%j/')
        compression = '.gz' if folder_path >= '2020/336/' else '.Z'
        # Hourly files grow through the day, so always re-download.
        return download_and_cache_file(url_base, folder_path, cache_subdir, filename, compression=compression, overwrite=True)
  except IOError:
    pass
def download_orbits(time, cache_dir):
  """Fetch precise GPS orbit (sp3) products for the days around *time*.

  For each of the day before, of, and after *time*, candidate products
  are tried in decreasing quality order -- final (igs, only published
  ~3 weeks after the fact), rapid (igr), then the ultra-rapid (igu)
  18/12/06/00-hour variants -- keeping the first that downloads.
  Returns the list of downloaded (decompressed) file paths.
  """
  cache_subdir = cache_dir + 'cddis_products/'
  url_bases = (
    'https://cddis.nasa.gov/archive/gnss/products/',
    'ftp://igs.ign.fr/pub/igs/products/',
  )
  downloaded_files = []
  # Refactor: the original repeated six near-identical try/except blocks;
  # build the ordered candidate list once and loop over it instead.
  for t in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
    folder_path = "%i/" % (t.week)
    candidates = []
    # Final (igs) products only exist for sufficiently old data.
    if GPSTime.from_datetime(datetime.utcnow()) - t > 3*SECS_IN_WEEK:
      candidates.append("igs%i%i.sp3" % (t.week, t.day))
    candidates.append("igr%i%i.sp3" % (t.week, t.day))
    candidates += ["igu%i%i_%s.sp3" % (t.week, t.day, hour)
                   for hour in ('18', '12', '06', '00')]
    for filename in candidates:
      try:
        downloaded_files.append(download_and_cache_file(
          url_bases, folder_path, cache_subdir, filename, compression='.Z'))
        break
      except IOError:
        pass
  return downloaded_files
def download_orbits_russia(time, cache_dir):
  """Fetch GLONASS-IAC sp3 orbit products for the days around *time*.

  Tries final (only for data older than two weeks), then rapid, then
  ultra-rapid products for the day before, of and after *time*.
  Returns the list of downloaded file paths.
  """
  cache_subdir = cache_dir + 'russian_products/'
  url_base = 'ftp://ftp.glonass-iac.ru/MCC/PRODUCTS/'
  downloaded_files = []
  for time in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
    t = time.as_datetime()
    if GPSTime.from_datetime(datetime.utcnow()) - time > 2*SECS_IN_WEEK:
      try:
        folder_path = t.strftime('%y%j/final/')
        filename = "Sta%i%i.sp3" % (time.week, time.day)
        downloaded_files.append(download_and_cache_file(url_base, folder_path, cache_subdir, filename))
        continue
      except IOError:
        pass
    try:
      folder_path = t.strftime('%y%j/rapid/')
      filename = "Sta%i%i.sp3" % (time.week, time.day)
      downloaded_files.append(download_and_cache_file(url_base, folder_path, cache_subdir, filename))
      # NOTE(review): unlike the final branch there is no `continue` here,
      # so a successful rapid download still attempts ultra -- confirm
      # whether both products are wanted.
    except IOError:
      pass
    try:
      folder_path = t.strftime('%y%j/ultra/')
      filename = "Sta%i%i.sp3" % (time.week, time.day)
      downloaded_files.append(download_and_cache_file(url_base, folder_path, cache_subdir, filename))
    except IOError:
      pass
  return downloaded_files
def download_ionex(time, cache_dir):
  """Download an IONEX ionosphere map for *time*.

  Tries the codg (final), c1pg and c2pg (predicted) products in order
  across several mirrors; returns the first decompressed path, or
  re-raises the last IOError when every candidate fails.
  """
  cache_subdir = cache_dir + 'ionex/'
  t = time.as_datetime()
  url_bases = (
    'https://cddis.nasa.gov/archive/gnss/products/ionex/',
    'ftp://igs.ensg.ign.fr/pub/igs/products/ionosphere/',
    'ftp://gssc.esa.int/gnss/products/ionex/',
  )
  for folder_path in [t.strftime('%Y/%j/')]:
    for filename in [t.strftime("codg%j0.%yi"), t.strftime("c1pg%j0.%yi"), t.strftime("c2pg%j0.%yi")]:
      try:
        filepath = download_and_cache_file(url_bases, folder_path, cache_subdir, filename, compression='.Z')
        return filepath
      except IOError as e:
        last_err = e
  raise last_err
def download_dcb(time, cache_dir):
  """Download the CAS daily differential code bias (DCB) file for *time*.

  The archive is frequently missing days, so up to 14 consecutive days
  (walking backwards from *time*) are attempted; the last IOError is
  re-raised if none succeeds.
  """
  cache_subdir = cache_dir + 'dcb/'
  url_bases = (
    'https://cddis.nasa.gov/archive/gnss/products/bias/',
    'ftp://igs.ign.fr/pub/igs/products/mgex/dcb/',
  )
  last_err = None
  for day_offset in range(14):
    t = (time - day_offset*SECS_IN_DAY).as_datetime()
    try:
      return download_and_cache_file(url_bases, t.strftime('%Y/'), cache_subdir,
                                     t.strftime("CAS0MGXRAP_%Y%j0000_01D_01D_DCB.BSX"),
                                     compression='.gz')
    except IOError as e:
      last_err = e
  raise last_err
def download_cors_coords(cache_dir):
  """Download every CORS station coordinate file ('*coord.txt') into the
  cache and return the list of local file paths."""
  cache_subdir = cache_dir + 'cors_coord/'
  url_bases = (
    'ftp://geodesy.noaa.gov/cors/coord/coord_14/',
    'ftp://alt.ngs.noaa.gov/cors/coord/coord_14/'
  )
  coord_files = [name for name in list_dir(url_bases) if name.endswith('coord.txt')]
  return download_files(url_bases, '', cache_subdir, coord_files)
def download_cors_station(time, station_name, cache_dir):
  """Download one day's Hatanaka-compressed RINEX observation file for a
  CORS station; returns the decompressed path, or None on failure."""
  cache_subdir = cache_dir + 'cors_obs/'
  t = time.as_datetime()
  folder_path = t.strftime('%Y/%j/') + station_name + '/'
  filename = station_name + t.strftime("%j0.%yd")
  url_bases = (
    'ftp://geodesy.noaa.gov/cors/rinex/',
    'ftp://alt.ngs.noaa.gov/cors/rinex/'
  )
  try:
    return download_and_cache_file(url_bases, folder_path, cache_subdir,
                                   filename, compression='.gz')
  except IOError:
    print("File not downloaded, check availability on server.")
    return None
| 10,471 | 0 | 343 |
981cfd0c2953af13140dfb14bc761fbe6da762c7 | 1,798 | py | Python | bounty_programs_scrapper.py | JoaquinRMtz/H1-reports-offline | 907104eb1789da35a5acf490d8854f31365050c2 | [
"MIT"
] | 2 | 2017-10-09T00:43:50.000Z | 2018-05-02T18:00:34.000Z | bounty_programs_scrapper.py | JoaquinRMtz/H1-reports-offline | 907104eb1789da35a5acf490d8854f31365050c2 | [
"MIT"
] | null | null | null | bounty_programs_scrapper.py | JoaquinRMtz/H1-reports-offline | 907104eb1789da35a5acf490d8854f31365050c2 | [
"MIT"
] | 1 | 2021-06-24T04:27:30.000Z | 2021-06-24T04:27:30.000Z |
import urllib2
import json
import MySQLdb
# Scrape the public HackerOne bounty-program directory (up to 1000 entries)
# and mirror it into the local MySQL table hackerone_reports.hackerone_programs.
# NOTE: this is Python 2 code (print statements, urllib2).
conn = MySQLdb.connect(host= "localhost", user="root", passwd="", db="hackerone_reports")
x = conn.cursor()
hackerone = "https://hackerone.com/programs/search?query=bounties%3Ayes&sort=name%3Aascending&limit=1000"
opener = urllib2.build_opener()
# The endpoint only answers with JSON when these AJAX-style headers are present.
opener.addheaders = [('Accept','application/json, text/javascript, */*; q=0.01'),('content-type','application/json'),('x-requested-with','XMLHttpRequest')]
response = opener.open(hackerone)
print "Read the response..."
json_string = response.read()
print "Loading json..."
data = json.loads(json_string, encoding='latin-1')
print "Total programs: " + str(data['total'])
programs = data['results']
for program in programs:
    about = program['about']
    # Optional API fields: fall back to '' / '0' when a key is missing.
    disclosure_email = ''
    if 'disclosure_email' in program:
        disclosure_email = program['disclosure_email']
    disclosure_url = ''
    if 'disclosure_url' in program:
        disclosure_url = program['disclosure_url']
    handle = program['handle']
    name = program['name']
    offers_rewards = '0'
    if 'offers_rewards' in program:
        offers_rewards = program['offers_rewards']
    offers_thanks = '0'
    if 'offers_thanks' in program:
        offers_thanks = program['offers_thanks']
    stripped_policy = program['stripped_policy']
    url = program['url']
    try:
        x.execute("""INSERT INTO hackerone_programs(about, disclosure_email, disclosure_url, handle, name, offers_rewards, offers_thanks, stripped_policy, url) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""",(about, disclosure_email, disclosure_url, handle, name, offers_rewards, offers_thanks, stripped_policy, url))
        conn.commit()
        print "Bounty program: " + handle.encode('latin-1') + " added to database."
    except Exception as ex:
        # Any failed insert (e.g. a duplicate row) is rolled back and skipped.
        conn.rollback()
        # print "Problems saving: " + str(ex) + ", skipping..."
        pass
conn.close()
| 32.107143 | 302 | 0.721913 |
import urllib2
import json
import MySQLdb
# Scrape the public HackerOne bounty-program directory (up to 1000 entries)
# and mirror it into the local MySQL table hackerone_reports.hackerone_programs.
# NOTE: this is Python 2 code (print statements, urllib2).
conn = MySQLdb.connect(host= "localhost", user="root", passwd="", db="hackerone_reports")
x = conn.cursor()
hackerone = "https://hackerone.com/programs/search?query=bounties%3Ayes&sort=name%3Aascending&limit=1000"
opener = urllib2.build_opener()
# The endpoint only answers with JSON when these AJAX-style headers are present.
opener.addheaders = [('Accept','application/json, text/javascript, */*; q=0.01'),('content-type','application/json'),('x-requested-with','XMLHttpRequest')]
response = opener.open(hackerone)
print "Read the response..."
json_string = response.read()
print "Loading json..."
data = json.loads(json_string, encoding='latin-1')
print "Total programs: " + str(data['total'])
programs = data['results']
for program in programs:
    about = program['about']
    # Optional API fields: fall back to '' / '0' when a key is missing.
    disclosure_email = ''
    if 'disclosure_email' in program:
        disclosure_email = program['disclosure_email']
    disclosure_url = ''
    if 'disclosure_url' in program:
        disclosure_url = program['disclosure_url']
    handle = program['handle']
    name = program['name']
    offers_rewards = '0'
    if 'offers_rewards' in program:
        offers_rewards = program['offers_rewards']
    offers_thanks = '0'
    if 'offers_thanks' in program:
        offers_thanks = program['offers_thanks']
    stripped_policy = program['stripped_policy']
    url = program['url']
    try:
        x.execute("""INSERT INTO hackerone_programs(about, disclosure_email, disclosure_url, handle, name, offers_rewards, offers_thanks, stripped_policy, url) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""",(about, disclosure_email, disclosure_url, handle, name, offers_rewards, offers_thanks, stripped_policy, url))
        conn.commit()
        print "Bounty program: " + handle.encode('latin-1') + " added to database."
    except Exception as ex:
        # Any failed insert (e.g. a duplicate row) is rolled back and skipped.
        conn.rollback()
        # print "Problems saving: " + str(ex) + ", skipping..."
        pass
conn.close()
| 0 | 0 | 0 |
5809a3605415d756227502e11798f293d6c9000f | 3,615 | py | Python | common_helper_yara/yara_scan.py | fkie-cad/common_helper_yara | 73b69646a0f05340d8ecd874efe8592ff5cd960f | [
"MIT"
] | 2 | 2019-08-21T05:52:09.000Z | 2020-09-14T09:05:08.000Z | common_helper_yara/yara_scan.py | fkie-cad/common_helper_yara | 73b69646a0f05340d8ecd874efe8592ff5cd960f | [
"MIT"
] | 1 | 2021-08-28T16:41:05.000Z | 2021-09-07T09:38:54.000Z | common_helper_yara/yara_scan.py | fkie-cad/common_helper_yara | 73b69646a0f05340d8ecd874efe8592ff5cd960f | [
"MIT"
] | 1 | 2021-08-28T16:41:11.000Z | 2021-08-28T16:41:11.000Z | import logging
import re
from pathlib import Path
from subprocess import check_output, CalledProcessError, STDOUT
from typing import Any, Dict, List, Optional, Tuple, Union
from .common import convert_external_variables
# One block of `yara -m -s` output: "<rule> [meta,...] <scanned_file>" on the
# first line, followed by one or more "0x<offset>..." match lines.
_RULE_BLOCK_REGEX = re.compile(r'^(?P<rule>\w+)\s+\[(?P<raw_meta>.*)\]\s+(?P<scanned_file>.*)\n(?P<raw_matches>(?:0x[a-f0-9]+.*(?:[\n]|$))+)', flags=re.MULTILINE)
# A single string match line: "0x<offset>:<tag>: <matched data>".
_YARA_MATCH_REGEX = re.compile(r'^(?P<offset>0x[a-f0-9]+):(?P<tag>\S+):\s(?P<string>.+)$', flags=re.MULTILINE)
def scan(
    signature_path: Union[str, Path],
    file_path: Union[str, Path],
    external_variables: Optional[Dict[str, Any]] = None,
    recursive: bool = False,
    compiled: bool = False
) -> dict:
    '''
    Run the ``yara`` command line tool and return its parsed matches.

    :param signature_path: path to the signature (rule) file
    :param file_path: file or directory to scan
    :param external_variables: external variables, converted via
        ``convert_external_variables`` and spliced into the command line
    :param recursive: scan directories recursively
    :param compiled: the rule file is pre-compiled (Yara >= 4 only!)
    :return: dict with the scan results; empty dict on any error
    '''
    if external_variables is None:
        external_variables = {}
    variables = convert_external_variables(external_variables)
    rec_opt = '-r' if recursive else ''
    comp_opt = '-C' if compiled else ''
    try:
        # NOTE(review): shell=True with interpolated paths -- callers must not
        # pass untrusted input here.
        command = f'yara {variables} {rec_opt} {comp_opt} -m -s {signature_path} {file_path}'
        raw_output = check_output(command, shell=True, stderr=STDOUT)
        return _parse_yara_output(raw_output.decode())
    except CalledProcessError as error:
        logging.error(f'There seems to be an error in the rule file:\n{error.output.decode()}', exc_info=True)
        return {}
    except Exception as error:
        logging.error(f'Could not parse yara result: {error}', exc_info=True)
        return {}
def _parse_meta_data(block: dict) -> Dict[str, str]:
'''
Will be of form 'item0=lowercaseboolean0,item1="value1",item2=value2,..'
'''
meta_data = dict()
for item in block['raw_meta'].split(','):
if '=' in item:
key, value = item.split('=', maxsplit=1)
value = value == 'true' if value in ['true', 'false'] else value.strip('"')
meta_data[key] = value
else:
logging.warning(f'Malformed meta string \'{block["raw_meta"]}\'')
return meta_data
| 38.870968 | 162 | 0.674965 | import logging
import re
from pathlib import Path
from subprocess import check_output, CalledProcessError, STDOUT
from typing import Any, Dict, List, Optional, Tuple, Union
from .common import convert_external_variables
# One block of `yara -m -s` output: "<rule> [meta,...] <scanned_file>" on the
# first line, followed by one or more "0x<offset>..." match lines.
_RULE_BLOCK_REGEX = re.compile(r'^(?P<rule>\w+)\s+\[(?P<raw_meta>.*)\]\s+(?P<scanned_file>.*)\n(?P<raw_matches>(?:0x[a-f0-9]+.*(?:[\n]|$))+)', flags=re.MULTILINE)
# A single string match line: "0x<offset>:<tag>: <matched data>".
_YARA_MATCH_REGEX = re.compile(r'^(?P<offset>0x[a-f0-9]+):(?P<tag>\S+):\s(?P<string>.+)$', flags=re.MULTILINE)
def scan(
    signature_path: Union[str, Path],
    file_path: Union[str, Path],
    external_variables: Optional[Dict[str, Any]] = None,
    recursive: bool = False,
    compiled: bool = False
) -> dict:
    '''
    Invoke the ``yara`` binary on ``file_path`` and parse the textual output.

    :param signature_path: path to the signature (rule) file
    :param file_path: file or directory to scan
    :param external_variables: external variables handed to yara (converted
        by ``convert_external_variables``)
    :param recursive: scan directories recursively
    :param compiled: rule is in compiled form (Yara >= 4 only!)
    :return: dict containing the scan results; empty dict on any error
    '''
    if external_variables is None:
        external_variables = {}
    variables = convert_external_variables(external_variables)
    rec_opt = '-r' if recursive else ''
    comp_opt = '-C' if compiled else ''
    try:
        # NOTE(review): the command is run through the shell with interpolated
        # paths; do not pass untrusted input.
        command = f'yara {variables} {rec_opt} {comp_opt} -m -s {signature_path} {file_path}'
        raw_output = check_output(command, shell=True, stderr=STDOUT)
        return _parse_yara_output(raw_output.decode())
    except CalledProcessError as error:
        logging.error(f'There seems to be an error in the rule file:\n{error.output.decode()}', exc_info=True)
        return {}
    except Exception as error:
        logging.error(f'Could not parse yara result: {error}', exc_info=True)
        return {}
def _add_yara_rule_match(rule_block: dict, block: dict):
    '''Append all string matches parsed from ``block`` to the rule entry's 'strings' list.'''
    # FIXME: the file path that is scanned does not reflect in the result set.
    # rule_block['strings'] += [(*yara_match, block['scanned_file']) for yara_match in _parse_matches(block['raw_matches'])]
    # _parse_matches already returns a list, so extend directly instead of
    # copying it element-by-element through an identity comprehension.
    rule_block['strings'].extend(_parse_matches(block['raw_matches']))
def _parse_yara_output(output: str) -> dict:
    '''Build a {rule_name: result_dict} mapping from the raw stdout of a yara run.'''
    results = {}
    for raw_block in _find_rule_blocks(output):
        entry = _init_rule_block_entry(results, raw_block)
        _add_yara_rule_match(entry, raw_block)
    return results
def _find_rule_blocks(output: str) -> List[Dict[str, str]]:
    '''Split raw yara stdout into one groupdict per matched rule block.'''
    return [hit.groupdict() for hit in _RULE_BLOCK_REGEX.finditer(output)]
def _init_rule_block_entry(results: dict, block: dict) -> dict:
    '''Get or create the result entry for the rule named in ``block``.

    Metadata is parsed only on first sight of a rule; subsequent blocks for
    the same rule reuse the existing entry.
    '''
    rule_name = block['rule']
    if rule_name not in results:
        results[rule_name] = {
            'rule': rule_name,
            'matches': True,
            'meta': _parse_meta_data(block),
            'strings': [],
        }
    return results[rule_name]
def _parse_matches(raw_matches: str) -> List[Tuple[int, str, bytes]]:
    '''Convert raw "0x<offset>:<tag>: <data>" lines into (offset, tag, bytes) tuples.'''
    parsed = []
    for hit in _YARA_MATCH_REGEX.finditer(raw_matches):
        fields = hit.groupdict()
        parsed.append((int(fields['offset'], 16), fields['tag'], fields['string'].encode()))
    return parsed
def _parse_meta_data(block: dict) -> Dict[str, str]:
'''
Will be of form 'item0=lowercaseboolean0,item1="value1",item2=value2,..'
'''
meta_data = dict()
for item in block['raw_meta'].split(','):
if '=' in item:
key, value = item.split('=', maxsplit=1)
value = value == 'true' if value in ['true', 'false'] else value.strip('"')
meta_data[key] = value
else:
logging.warning(f'Malformed meta string \'{block["raw_meta"]}\'')
return meta_data
| 1,171 | 0 | 115 |
a4195d3d25cf47e92f095627e0042289f5bb4069 | 11 | py | Python | BOJ/divide_and_conquer_boj/star_11.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/divide_and_conquer_boj/star_11.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/divide_and_conquer_boj/star_11.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | # BOJ 2448
| 5.5 | 10 | 0.636364 | # BOJ 2448
| 0 | 0 | 0 |
13e526bdd0bb99cfacb001124c4be8a206cd5b1b | 2,791 | py | Python | src/complexity/partition.py | jacione/phys513 | a8e1d1de800b0372d013d69543e1619b0fb8e4e9 | [
"MIT"
] | null | null | null | src/complexity/partition.py | jacione/phys513 | a8e1d1de800b0372d013d69543e1619b0fb8e4e9 | [
"MIT"
] | null | null | null | src/complexity/partition.py | jacione/phys513 | a8e1d1de800b0372d013d69543e1619b0fb8e4e9 | [
"MIT"
] | null | null | null | """
A script for finding equal or near-equal partitions in a group.
Do parts a, b, and g
"""
from itertools import combinations
import random
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
from progressbar import progressbar as pbar
DIR = Path(__file__).parent
group1 = [10, 13, 23, 6, 20]
group2 = [6, 4, 9, 14, 12, 3, 15, 15]
group3 = [93, 58, 141, 209, 179, 48, 225, 228]
group4 = [2474, 1129, 1388, 3752, 821, 2082, 201, 739]
if __name__ == '__main__':
    # The data-generation step is disabled; only plot previously saved results.
    # frac_perfect(1000)
    plot_perfect()
| 28.773196 | 96 | 0.611609 | """
A script for finding equal or near-equal partitions in a group.
Do parts a, b, and g
"""
from itertools import combinations
import random
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
from progressbar import progressbar as pbar
DIR = Path(__file__).parent
group1 = [10, 13, 23, 6, 20]
group2 = [6, 4, 9, 14, 12, 3, 15, 15]
group3 = [93, 58, 141, 209, 179, 48, 225, 228]
group4 = [2474, 1129, 1388, 3752, 821, 2082, 201, 739]
def random_partition_problem(num_vars, max_power):
    """Draw ``num_vars`` random integers in [1, 2**max_power] with an even total.

    If the sum comes out odd, the first value is bumped by one so that a
    perfect (zero-difference) partition is at least arithmetically possible.
    """
    values = [random.randint(1, 2 ** max_power) for _ in range(num_vars)]
    if sum(values) % 2 == 1:
        values[0] += 1
    return values
def find_partition(group_vals):
    """Return the smallest achievable |sum(A) - sum(B)| over all two-way splits.

    Enumerates every index subset up to half the group size (the complement
    covers the larger halves) and stops early once a perfect partition
    (cost 0) is found.
    """
    n = len(group_vals)
    all_indices = frozenset(range(n))
    candidates = {frozenset([i]) for i in all_indices}
    for size in range(1, n // 2 + 1):
        candidates |= set(map(frozenset, combinations(all_indices, size)))
    best = sum(group_vals)
    for chosen in candidates:
        left = sum(group_vals[i] for i in chosen)
        right = sum(group_vals[i] for i in all_indices - chosen)
        best = min(best, abs(left - right))
        if best == 0:
            break
    return best
def frac_perfect(sample_size=100):
    """Monte-Carlo estimate of the fraction of problems with a perfect partition.

    For each problem size N in {3, 5, 7, 9, 11}, sweeps the bits-to-variables
    ratio M/N, samples ``sample_size`` random problems per point, and saves the
    (ratios, fractions) rows to <DIR>/perfect_<N>vars.npy.
    """
    for num_vars in [3, 5, 7, 9, 11]:
        powers = np.arange(1, 2 * num_vars)
        ratios = powers / num_vars
        perfect = np.zeros_like(ratios)
        for idx, power in enumerate(pbar(powers)):
            hits = [find_partition(random_partition_problem(num_vars, power)) == 0
                    for _ in range(sample_size)]
            perfect[idx] = np.mean(hits)
        np.save(f'{DIR}/perfect_{num_vars}vars.npy', np.row_stack((ratios, perfect)))
    return
def p_scaling(x):
    """Scaling-theory probability of a perfect partition as a function of the collapse variable x."""
    rate = np.sqrt(3 / (2 * np.pi))
    return 1 - np.exp(-rate * 2.0 ** (-x))
def p_perfect(ratio, num_vars):
    """Finite-size probability of a perfect partition at a given M/N ratio."""
    r_crit = 1  # critical ratio of the partition phase transition
    exponent = -num_vars * (ratio - r_crit)
    return 1 - np.exp(-np.sqrt(3 / (2 * np.pi * num_vars)) * 2 ** exponent)
def sub_x(ratio, num_vars):
    """Collapse variable x = N*(ratio - r_crit) + log2(N)/2 used in the scaling plot."""
    r_crit = 1
    shift = 0.5 * np.log2(num_vars)
    return num_vars * (ratio - r_crit) + shift
def plot_perfect():
    """Plot the Monte-Carlo results previously saved by ``frac_perfect``.

    Figure 1: fraction of perfect partitions vs the M/N ratio for each N.
    Figure 2: the same data against the collapse variable ``sub_x(ratio, N)``,
    overlaid with the analytic ``p_scaling`` curve.

    NOTE(review): requires the ``perfect_<N>vars.npy`` files to exist in DIR;
    ``np.load`` will raise otherwise.
    """
    _, ax1 = plt.subplots()
    ax1.set_xlabel('M/N ratio')
    ax1.set_ylabel('Fraction of perfect partitions')
    _, ax2 = plt.subplots()
    ax2.set_xlabel('x')
    ax2.set_ylabel('p(x)')
    for num_vars in [3, 5, 7, 9, 11]:
        # each saved file holds two rows: [0] = M/N ratios, [1] = perfect fraction
        data = np.load(f'{DIR}/perfect_{num_vars}vars.npy')
        ax1.plot(data[0], data[1], label=f'N={num_vars}')
        ax2.plot(sub_x(data[0], num_vars), data[1], label=f'N={num_vars}')
    x = np.linspace(-7.5, 11, 100)
    ax2.plot(x, p_scaling(x), c='k', lw=2, label='scaling')
    ax1.legend()
    ax2.legend()
    plt.show()
if __name__ == '__main__':
    # The data-generation step is disabled; only plot previously saved results.
    # frac_perfect(1000)
    plot_perfect()
| 2,083 | 0 | 161 |
bb433bdd617e52976c0e8ede054660f586b245db | 2,374 | py | Python | object_detection/practice/object_detection.py | yoonhero/nova | b811f8992588785233e93ec39cb20869ea74b4f4 | [
"MIT"
] | null | null | null | object_detection/practice/object_detection.py | yoonhero/nova | b811f8992588785233e93ec39cb20869ea74b4f4 | [
"MIT"
] | null | null | null | object_detection/practice/object_detection.py | yoonhero/nova | b811f8992588785233e93ec39cb20869ea74b4f4 | [
"MIT"
] | 1 | 2022-02-24T08:51:55.000Z | 2022-02-24T08:51:55.000Z | from matplotlib import pyplot as plt
import io
from PIL import Image
import cv2
import torch
import os
WIDTH = 1280
HEIGHT = 760
model = torch.hub.load("ultralytics/yolov5", "custom", path="./best.pt")
# results_pandas structure
# xmin ymin xmax ymax confidence class name
# Main loop: read frames from the demo driving video, run detection, and show
# annotated frames until 'q' is pressed; the clip is rewound when it ends.
cap = cv2.VideoCapture("./driving_video/driving3.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        # OpenCV delivers BGR; convert to RGB for the model, and back for imshow.
        img = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (WIDTH,HEIGHT))
        results = get_prediction(img, model)
        results.render()  # annotates results.imgs in place (yolov5 API)
        processed_img = cv2.cvtColor(results.imgs[0], cv2.COLOR_BGR2RGB)
        stop, processed_prediction = process_prediction(results.pandas().xyxy[0])
        if stop:
            print("#### PLEASE STOP ####")
        cv2.imshow('Result', processed_img)
        # 'q' quits the viewer
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        print('video is ended')
        # rewind to the first frame so the clip loops
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
cap.release()
cv2.destroyAllWindows() | 26.977273 | 256 | 0.592249 | from matplotlib import pyplot as plt
import io
from PIL import Image
import cv2
import torch
import os
WIDTH = 1280
HEIGHT = 760
model = torch.hub.load("ultralytics/yolov5", "custom", path="./best.pt")
def get_prediction(img_bytes, model):
    """Run ``model`` on one image and return the raw inference results.

    ``size=640`` is the inference resolution handed to the model.
    """
    return model(img_bytes, size=640)
def isExistInDf(df, column, label):
    """Return True when at least one row of ``df`` has ``label`` in ``column``."""
    matches = df.loc[df[column] == label]
    return not matches.empty
def existDf(df, column, label):
    """Return the subset of rows of ``df`` whose ``column`` equals ``label``."""
    mask = df[column] == label
    return df.loc[mask]
# results_pandas structure
# xmin ymin xmax ymax confidence class name
def process_prediction(results_pandas):
    """Filter detections to confidence > 0.4 and group box corners by class name.

    :param results_pandas: detection DataFrame with at least the columns
        name, confidence, xmin, xmax, ymin, ymax
    :return: (any_detections, {class_name: [[xmin, xmax, ymin, ymax], ...]})
    """
    labels = {"0":"biker", "1":"car", "2":"pedestrian", "3":"trafficLight", "4": "trafficLight-Green", "5":"trafficLight-GreenLeft", "6":"trafficLight-Red", "7":"trafficLight-RedLeft", "8":"trafficLight-Yellow", "9":"trafficLight-YellowLeft", "10":"truck"}
    confident = results_pandas[results_pandas["confidence"]> 0.4]
    box_columns = ["xmin", 'xmax', 'ymin', 'ymax']
    results = {}
    for label in labels.values():
        if not isExistInDf(confident, "name", label):
            continue
        # keep only the box-corner columns, one row list per detection
        boxes = existDf(confident, "name", label).loc[:, box_columns]
        results[label] = boxes.values.tolist()
    return bool(results), results
# Main loop: read frames from the demo driving video, run detection, and show
# annotated frames until 'q' is pressed; the clip is rewound when it ends.
cap = cv2.VideoCapture("./driving_video/driving3.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        # OpenCV delivers BGR; convert to RGB for the model, and back for imshow.
        img = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (WIDTH,HEIGHT))
        results = get_prediction(img, model)
        results.render()  # annotates results.imgs in place (yolov5 API)
        processed_img = cv2.cvtColor(results.imgs[0], cv2.COLOR_BGR2RGB)
        stop, processed_prediction = process_prediction(results.pandas().xyxy[0])
        if stop:
            print("#### PLEASE STOP ####")
        cv2.imshow('Result', processed_img)
        # 'q' quits the viewer
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        print('video is ended')
        # rewind to the first frame so the clip loops
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
cap.release()
cv2.destroyAllWindows() | 1,211 | 0 | 92 |
18e703903ceb392bd36a5dee344aca973912c7ae | 1,287 | py | Python | controller.py | dhill2522/ChE436-project | 7ebdc1642a0dc0a25be09affbcdbb0a63099ea25 | [
"MIT"
] | null | null | null | controller.py | dhill2522/ChE436-project | 7ebdc1642a0dc0a25be09affbcdbb0a63099ea25 | [
"MIT"
] | null | null | null | controller.py | dhill2522/ChE436-project | 7ebdc1642a0dc0a25be09affbcdbb0a63099ea25 | [
"MIT"
] | null | null | null | import runs
import optimization as opt
| 33 | 88 | 0.614608 | import runs
import optimization as opt
class Controller(object):
    """PID temperature controller with FOPDT-based auto-tuning.

    Holds the PID gains (K_c, Tau_I, Tau_D) and the FOPDT model parameters
    (K_p, Tau_p, Theta_P); ``auto_tune`` refits both from a doublet test run
    through the ``runs`` and ``optimization`` modules.
    """

    def __init__(self):
        # Initial PID and FOPDT parameters
        self.K_c = 1.44
        self.Tau_I = 221.925
        self.Tau_D = 44.898
        self.K_p = 0.14501294865265488
        self.Tau_p = 159.4251614964272
        self.Theta_P = 124.9997

    def auto_tune(self):
        """Re-identify the FOPDT model from a doublet test and retune the PID gains."""
        # Run a doublet test on the plant and record it to CSV
        print('Running a doublet test on the system...')
        runs.doublet_test(data_file='tuning_step_test.csv', show_plot=False)
        # Fit the FOPDT model parameters to the recorded data
        print('Fitting FOPDT parameters to the data...')
        fopdt = opt.optimize_parameters('tuning_step_test.csv')
        self.K_p = fopdt['Kp']
        self.Tau_p = fopdt['tauP']
        self.Theta_P = fopdt['thetaP']
        # Derive PID gains from the fitted model (IMC-style tuning rules)
        print('Determining initial PID tuning parameters')
        tau_c = max(self.Tau_p, 8*self.Theta_P)
        self.K_c = 1/self.K_p * (self.Tau_p + 0.5*self.Theta_P) / (tau_c + self.Theta_P)
        self.Tau_I = self.Tau_p + 0.5*self.Theta_P
        self.Tau_D = self.Tau_p*self.Theta_P / (2*self.Tau_p + self.Theta_P)
        return

    def run(self, run_time):
        """Run the controller for ``run_time`` with the current PID gains."""
        runs.run_controller(run_time, (self.K_c, self.Tau_I, self.Tau_D))
        return
| 1,140 | 4 | 103 |