hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e22c1cc67e30dea2f2f744cddeec749b92661095 | 268 | py | Python | src/tests/add_block.py | TimmMoetz/blockchain-lab | 02bb55cc201586dbdc8fdc252a32381f525e83ff | [
"RSA-MD"
] | 2 | 2021-11-08T12:00:02.000Z | 2021-11-12T18:37:52.000Z | src/tests/add_block.py | TimmMoetz/blockchain-lab | 02bb55cc201586dbdc8fdc252a32381f525e83ff | [
"RSA-MD"
] | null | null | null | src/tests/add_block.py | TimmMoetz/blockchain-lab | 02bb55cc201586dbdc8fdc252a32381f525e83ff | [
"RSA-MD"
] | 1 | 2022-03-28T13:49:37.000Z | 2022-03-28T13:49:37.000Z | from src.blockchain.block import Transaction
from src.blockchain.blockchain import Blockchain
transactions = [Transaction("Justus", "Jonas", 10.0),
Transaction("Bernd", "Harald", 5.0)]
blockchain = Blockchain()
blockchain.add_block(transactions) | 38.285714 | 54 | 0.723881 | from src.blockchain.block import Transaction
from src.blockchain.blockchain import Blockchain
transactions = [Transaction("Justus", "Jonas", 10.0),
Transaction("Bernd", "Harald", 5.0)]
blockchain = Blockchain()
blockchain.add_block(transactions) | 0 | 0 | 0 |
ccd8c566aab41230d7340859b48f4b0424036776 | 1,225 | py | Python | kafka_test/producer2.py | bihire/Kafka_real_time_maps | f5a141fc17d2f4acfa324ec4be7f1b829872d4bd | [
"Apache-2.0"
] | null | null | null | kafka_test/producer2.py | bihire/Kafka_real_time_maps | f5a141fc17d2f4acfa324ec4be7f1b829872d4bd | [
"Apache-2.0"
] | null | null | null | kafka_test/producer2.py | bihire/Kafka_real_time_maps | f5a141fc17d2f4acfa324ec4be7f1b829872d4bd | [
"Apache-2.0"
] | null | null | null | from pykafka import KafkaClient
import json
from datetime import datetime
import uuid
import time
input_file = open('./data/bus2.json')
json_array = json.load(input_file)
coordinates = json_array['features'][0]['geometry']['coordinates']
# Generate uuid
# Kafaka producer
client = KafkaClient(hosts="localhost:9092")
topic = client.topics['bus-lines']
producer = topic.get_sync_producer()
# Generate all coordinates
generate_coordinates(coordinates)
print('every thing works fine')
| 22.685185 | 71 | 0.639184 | from pykafka import KafkaClient
import json
from datetime import datetime
import uuid
import time
input_file = open('./data/bus2.json')
json_array = json.load(input_file)
coordinates = json_array['features'][0]['geometry']['coordinates']
# Generate uuid
def generate_uuid():
return uuid.uuid4()
# Kafaka producer
client = KafkaClient(hosts="localhost:9092")
topic = client.topics['bus-lines']
producer = topic.get_sync_producer()
# Generate all coordinates
def generate_coordinates(coordinates):
# new_coordinates = []
i = 0
while i < len(coordinates):
data = {}
data['busline'] = 202
data['key'] = str(data['busline']) + '_' + str(generate_uuid())
data['time_stamp'] = str(datetime.utcnow())
data['longitude'] = coordinates[i][0]
data['latitude'] = coordinates[i][1]
message = json.dumps(data)
producer.produce(message.encode('ascii'))
time.sleep(1)
# If buses reaches last coordinaates
if i == len(coordinates)-1:
coordinates = coordinates[::-1]
i = 0
else:
i += 1
# return new_coordinates
generate_coordinates(coordinates)
print('every thing works fine')
| 685 | 0 | 45 |
7634bfdb722bfce166fb7f04830a5a2633da23c7 | 947 | py | Python | FINAL/controllers/ControllerStock.py | nan0te/Python-Algorithm-And-DataStructure | 7b7802b56d397c38f230f5efb687cedc6cc263f3 | [
"MIT"
] | null | null | null | FINAL/controllers/ControllerStock.py | nan0te/Python-Algorithm-And-DataStructure | 7b7802b56d397c38f230f5efb687cedc6cc263f3 | [
"MIT"
] | null | null | null | FINAL/controllers/ControllerStock.py | nan0te/Python-Algorithm-And-DataStructure | 7b7802b56d397c38f230f5efb687cedc6cc263f3 | [
"MIT"
] | null | null | null | from classes.DatabaseConnection import DatabaseConnection | 43.045455 | 126 | 0.571278 | from classes.DatabaseConnection import DatabaseConnection
class ControllerStock:
def addStock(self, stock):
conn = DatabaseConnection('localhost', 'root', 'qweqwe1', 'final')
try:
with conn.connection.cursor() as cursor:
query = "INSERT INTO stock VALUES (%s, %s)"
cursor.execute(query, (0, stock))
conn.connection.commit()
finally:
conn.connection.close()
def modificarStock(self, id, stock):
conn = DatabaseConnection('localhost', 'root', 'qweqwe1', 'final')
try:
with conn.connection.cursor() as cursor:
query = "UPDATE stock SET cantidad=%s WHERE idstock=(SELECT stock_idstock from producto WHERE idproducto=%s);"
cursor.execute(query, (stock, id))
conn.connection.commit()
finally:
conn.connection.close() | 813 | 1 | 76 |
dcc5a1848aff8b2230f7660468370debf714b44c | 1,272 | py | Python | taegis_sdk_python/services/investigations/investigations.py | secureworks/taegis-sdk-python | 195bbc2ca9738c9fad572ca2d22e6e9297fa9b35 | [
"Apache-2.0"
] | 1 | 2021-02-25T23:00:46.000Z | 2021-02-25T23:00:46.000Z | taegis_sdk_python/services/investigations/investigations.py | secureworks/taegis-sdk-python | 195bbc2ca9738c9fad572ca2d22e6e9297fa9b35 | [
"Apache-2.0"
] | null | null | null | taegis_sdk_python/services/investigations/investigations.py | secureworks/taegis-sdk-python | 195bbc2ca9738c9fad572ca2d22e6e9297fa9b35 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from taegis_sdk_python import ServiceCore
from taegis_sdk_python.services.investigations.mutations import InvestigationMutation
from taegis_sdk_python.services.investigations.queries import InvestigationQuery
| 35.333333 | 87 | 0.695755 | from datetime import datetime
from taegis_sdk_python import ServiceCore
from taegis_sdk_python.services.investigations.mutations import InvestigationMutation
from taegis_sdk_python.services.investigations.queries import InvestigationQuery
class InvestigationsService:
def __init__(self, query_builder: ServiceCore):
# --- Setting investigation endpoint
query_builder.service_endpoint = "/investigations/query"
self._queries = InvestigationQuery(query_builder)
self._mutations = InvestigationMutation(query_builder)
@staticmethod
def get_default_time_based_name(
prefix: str = "service-investigation",
date_pattern: str = '%Y-%m-%d %H:%M:%S.%f'
) -> str:
"""
Service method to create an item name based on time
:param prefix: a string that specifies the start of the name e.g. "taegis_auto"
:param date_pattern: optional desired date pattern
:return: the name of the item
"""
dt = datetime.utcnow().strftime(date_pattern)[:-3]
return f"{prefix} {dt}"
@property
def query(self) -> InvestigationQuery:
return self._queries
@property
def mutation(self) -> InvestigationMutation:
return self._mutations
| 357 | 651 | 23 |
fcc15f3e399e6eb7f92500a718e326ebe8f1235e | 8,292 | py | Python | src/python/algorithms/grasp.py | dvbuntu/cspy | f5d7fcfaebbc2cc7830f44c84b2aacb31a91c093 | [
"MIT"
] | null | null | null | src/python/algorithms/grasp.py | dvbuntu/cspy | f5d7fcfaebbc2cc7830f44c84b2aacb31a91c093 | [
"MIT"
] | null | null | null | src/python/algorithms/grasp.py | dvbuntu/cspy | f5d7fcfaebbc2cc7830f44c84b2aacb31a91c093 | [
"MIT"
] | null | null | null | from time import time
from math import factorial
from logging import getLogger
from collections import deque
from itertools import permutations, repeat
from random import sample, randint
from typing import List, Optional, Callable
from networkx import DiGraph
from numpy.random import choice
import numpy as np
# Local imports
from cspy.algorithms.path_base import PathBase
from cspy.checking import check_time_limit_breached
log = getLogger(__name__)
class GRASP(PathBase):
"""
Greedy Randomised Adaptive Search Procedure for the (resource) constrained
shortest path problem. Adapted from `Ferone et al 2019`_.
Parameters
----------
G : object instance :class:`nx.Digraph()`
must have ``n_res`` graph attribute and all edges must have
``res_cost`` attribute. Also, the number of nodes must be
:math:`\geq 5`.
max_res : list of floats
:math:`[M_1, M_2, ..., M_{n\_res}]` upper bounds for resource
usage.
min_res : list of floats
:math:`[L_1, L_2, ..., L_{n\_res}]` lower bounds for resource
usage.
preprocess : bool, optional
enables preprocessing routine. Default : False.
max_iter : int, optional
Maximum number of iterations for algorithm. Default : 100.
max_localiter : int, optional
Maximum number of local search iterations. Default : 10.
time_limit : int, optional
time limit in seconds.
Default: None
threshold : float, optional
specify a threshold for a an acceptable resource feasible path with
total cost <= threshold.
Note this typically causes the search to terminate early.
Default: None
alpha : float, optional
Greediness factor 0 (random) --> 1 (greedy). Default : 0.2.
REF_callback : REFCallback, optional
Custom resource extension callback. See `REFs`_ for more details.
Default : None
.. _REFs : https://cspy.readthedocs.io/en/latest/ref.html
.. _Ferone et al 2019: https://www.tandfonline.com/doi/full/10.1080/10556788.2018.1548015
Raises
------
Exception
if no resource feasible path is found
"""
def run(self):
"""
Calculate shortest path with resource constraints.
"""
start = time()
while (self.it < self.max_iter and not self.stop and
not check_time_limit_breached(start, self.time_limit)):
self._algorithm()
self.it += 1
if not self.best_solution.path:
raise Exception("No resource feasible path has been found")
def _check_path(self, solution=None):
"""
Returns True if solution.path is valid and resource feasible,
False otherwise
"""
if solution:
path, cost = solution.path, solution.cost
if (len(path) > 2 and cost < 1e10 and path[0] == 'Source' and
path[-1] == 'Sink'):
self.st_path = path
return self.check_feasibility(return_edge=False)
else:
return False
else:
return False
@staticmethod
def _find_alternative_paths(G, path, rng=None):
"""
Static Method used in local search to randomly generate valid paths.
Using a subset of edges, it generates a connected path starting at
the source node.
"""
# get all edges involving only these nodes
poss_edges = G.subgraph(path).edges()
if poss_edges:
sample_size = randint(1, len(poss_edges))
if rng:
tmp = np.empty(len(poss_edges), dtype='object')
tmp[:] = poss_edges
selection = rng.choice(tmp,
replace=False,
size=sample_size).tolist()
else:
selection = sample(deque(poss_edges), sample_size)
# will use last value tried with given key
path_edges = dict([edge for edge in selection if edge in G.edges()])
elem = 'Source' # start point in the new list
new_list = []
for _ in path_edges:
try:
new_list.append((elem, path_edges[elem]))
elem = path_edges[elem]
except KeyError:
pass
if new_list:
nodes_to_keep = [t[0] for t in new_list]
nodes_to_keep.append(new_list[-1][1])
else:
nodes_to_keep = []
else:
nodes_to_keep = []
return nodes_to_keep
class Solution(object):
"""
Object for solutions and candidates during GRASP iterations.
Parameters
----------
path : list
list of nodes in current path
cost : float
cost of solution
"""
| 33.844898 | 93 | 0.581404 | from time import time
from math import factorial
from logging import getLogger
from collections import deque
from itertools import permutations, repeat
from random import sample, randint
from typing import List, Optional, Callable
from networkx import DiGraph
from numpy.random import choice
import numpy as np
# Local imports
from cspy.algorithms.path_base import PathBase
from cspy.checking import check_time_limit_breached
log = getLogger(__name__)
class GRASP(PathBase):
"""
Greedy Randomised Adaptive Search Procedure for the (resource) constrained
shortest path problem. Adapted from `Ferone et al 2019`_.
Parameters
----------
G : object instance :class:`nx.Digraph()`
must have ``n_res`` graph attribute and all edges must have
``res_cost`` attribute. Also, the number of nodes must be
:math:`\geq 5`.
max_res : list of floats
:math:`[M_1, M_2, ..., M_{n\_res}]` upper bounds for resource
usage.
min_res : list of floats
:math:`[L_1, L_2, ..., L_{n\_res}]` lower bounds for resource
usage.
preprocess : bool, optional
enables preprocessing routine. Default : False.
max_iter : int, optional
Maximum number of iterations for algorithm. Default : 100.
max_localiter : int, optional
Maximum number of local search iterations. Default : 10.
time_limit : int, optional
time limit in seconds.
Default: None
threshold : float, optional
specify a threshold for a an acceptable resource feasible path with
total cost <= threshold.
Note this typically causes the search to terminate early.
Default: None
alpha : float, optional
Greediness factor 0 (random) --> 1 (greedy). Default : 0.2.
REF_callback : REFCallback, optional
Custom resource extension callback. See `REFs`_ for more details.
Default : None
.. _REFs : https://cspy.readthedocs.io/en/latest/ref.html
.. _Ferone et al 2019: https://www.tandfonline.com/doi/full/10.1080/10556788.2018.1548015
Raises
------
Exception
if no resource feasible path is found
"""
def __init__(self,
G: DiGraph,
max_res: List[float],
min_res: List[float],
preprocess: Optional[bool] = False,
max_iter: Optional[int] = 100,
max_localiter: Optional[int] = 10,
time_limit: Optional[int] = None,
threshold: Optional[float] = None,
alpha: Optional[float] = 0.2,
REF_callback=None):
# Pass arguments to parent class
super().__init__(G, max_res, min_res, preprocess, threshold,
REF_callback)
# Algorithm specific attributes
self.max_iter = max_iter
self.max_localiter = max_localiter
self.time_limit = time_limit
self.alpha = alpha
# Algorithm specific parameters
self.it = 0
self.stop = False
self.best_path = None
self.best_solution = None
self.nodes = self.G.nodes()
def run(self):
"""
Calculate shortest path with resource constraints.
"""
start = time()
while (self.it < self.max_iter and not self.stop and
not check_time_limit_breached(start, self.time_limit)):
self._algorithm()
self.it += 1
if not self.best_solution.path:
raise Exception("No resource feasible path has been found")
def _algorithm(self):
solution = self._construct()
solution = self._local_search(solution)
self._update_best(solution)
def _construct(self):
solution = Solution(sample(self.nodes, 1), 0) # Init solution
# Construction phase
while len(solution.path) < len(self.nodes):
candidates = [i for i in self.nodes if i not in solution.path]
weights = deque(
map(self._heuristic, repeat(solution.path[-1]), candidates))
# Build Restricted Candidiate List (RCL)
restriced_candidates = [
candidates[i]
for i, c in enumerate(weights)
if c <= (min(weights) + self.alpha *
(max(weights) - min(weights)))
]
# Select random node from RCL to add to the current solution
solution.path.append(choice(restriced_candidates))
solution.cost = self._cost_solution(solution)
return solution
def _local_search(self, solution):
for _ in range(self.max_localiter): # Local search phase
# Init candidate solution using random valid path generator
candidate = Solution(
self._find_alternative_paths(self.G, solution.path), 0)
# evaluate candidate solution
candidate.cost = self._cost_solution(candidate)
# Update solution with candidate if lower cost and resource feasible
if (candidate.path and candidate.cost < solution.cost and
self._check_path(candidate)):
solution = candidate
return solution
def _update_best(self, solution):
if not self.best_solution or solution.cost < self.best_solution.cost:
self.best_solution = solution
def _heuristic(self, i, j):
# Given a node pair returns a weight to apply
if i and j:
if (i, j) not in self.G.edges():
return 1e10
else:
return self.G.get_edge_data(i, j)['weight']
else:
return 1e10
def _cost_solution(self, solution=None):
if solution:
return sum(
self._heuristic(i, j)
for i, j in zip(solution.path, solution.path[1:]))
else:
return 1e11
def _check_path(self, solution=None):
"""
Returns True if solution.path is valid and resource feasible,
False otherwise
"""
if solution:
path, cost = solution.path, solution.cost
if (len(path) > 2 and cost < 1e10 and path[0] == 'Source' and
path[-1] == 'Sink'):
self.st_path = path
return self.check_feasibility(return_edge=False)
else:
return False
else:
return False
@staticmethod
def _find_alternative_paths(G, path, rng=None):
"""
Static Method used in local search to randomly generate valid paths.
Using a subset of edges, it generates a connected path starting at
the source node.
"""
# get all edges involving only these nodes
poss_edges = G.subgraph(path).edges()
if poss_edges:
sample_size = randint(1, len(poss_edges))
if rng:
tmp = np.empty(len(poss_edges), dtype='object')
tmp[:] = poss_edges
selection = rng.choice(tmp,
replace=False,
size=sample_size).tolist()
else:
selection = sample(deque(poss_edges), sample_size)
# will use last value tried with given key
path_edges = dict([edge for edge in selection if edge in G.edges()])
elem = 'Source' # start point in the new list
new_list = []
for _ in path_edges:
try:
new_list.append((elem, path_edges[elem]))
elem = path_edges[elem]
except KeyError:
pass
if new_list:
nodes_to_keep = [t[0] for t in new_list]
nodes_to_keep.append(new_list[-1][1])
else:
nodes_to_keep = []
else:
nodes_to_keep = []
return nodes_to_keep
class Solution(object):
"""
Object for solutions and candidates during GRASP iterations.
Parameters
----------
path : list
list of nodes in current path
cost : float
cost of solution
"""
def __init__(self, path, cost):
self.path = path
self.cost = cost
| 3,210 | 0 | 216 |
9c24168042929e4ba8251725ec7b04988cba41be | 203 | wsgi | Python | dash.wsgi | EljakimHerrewijnen/Project_5-6 | 219893588220eff4004efb09e755d0b864f56392 | [
"MIT"
] | null | null | null | dash.wsgi | EljakimHerrewijnen/Project_5-6 | 219893588220eff4004efb09e755d0b864f56392 | [
"MIT"
] | null | null | null | dash.wsgi | EljakimHerrewijnen/Project_5-6 | 219893588220eff4004efb09e755d0b864f56392 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/dash/")
from dash import app as application
application.secret_key = 'Add your secret key'
| 22.555556 | 46 | 0.778325 | #!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/dash/")
from dash import app as application
application.secret_key = 'Add your secret key'
| 0 | 0 | 0 |
ecb89e0fd7e71a14ad7d87305eac1673798be061 | 7,357 | py | Python | src/sima/post/iso19901_7filter.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/post/iso19901_7filter.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/post/iso19901_7filter.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | # This an autogenerated file
#
# Generated with ISO19901_7Filter
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.iso19901_7filter import ISO19901_7FilterBlueprint
from typing import Dict
from sima.post.consequenceclass import ConsequenceClass
from sima.post.controlsignalinputslot import ControlSignalInputSlot
from sima.post.inputslot import InputSlot
from sima.post.iso19901_7_analysis import ISO19901_7_analysis
from sima.post.mooringtype import MooringType
from sima.post.operationnode import OperationNode
from sima.post.outputslot import OutputSlot
from sima.sima.scriptablevalue import ScriptableValue
class ISO19901_7Filter(OperationNode):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
x : int
(default 0)
y : int
(default 0)
h : int
(default 0)
w : int
(default 0)
controlSignalInputSlots : List[ControlSignalInputSlot]
filterInputSlots : List[InputSlot]
filterOutputSlots : List[OutputSlot]
breakingStrength : float
Breaking strength(default 0.0)
customSafetyFactor : float
Safety factor(default 0.0)
analysis : ISO19901_7_analysis
mooringType : MooringType
consequenceClass : ConsequenceClass
useCustomSafetyFactor : bool
(default False)
"""
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return ISO19901_7FilterBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def x(self) -> int:
""""""
return self.__x
@x.setter
def x(self, value: int):
"""Set x"""
self.__x = int(value)
@property
def y(self) -> int:
""""""
return self.__y
@y.setter
def y(self, value: int):
"""Set y"""
self.__y = int(value)
@property
def h(self) -> int:
""""""
return self.__h
@h.setter
def h(self, value: int):
"""Set h"""
self.__h = int(value)
@property
def w(self) -> int:
""""""
return self.__w
@w.setter
def w(self, value: int):
"""Set w"""
self.__w = int(value)
@property
def controlSignalInputSlots(self) -> List[ControlSignalInputSlot]:
""""""
return self.__controlSignalInputSlots
@controlSignalInputSlots.setter
def controlSignalInputSlots(self, value: List[ControlSignalInputSlot]):
"""Set controlSignalInputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__controlSignalInputSlots = value
@property
def filterInputSlots(self) -> List[InputSlot]:
""""""
return self.__filterInputSlots
@filterInputSlots.setter
def filterInputSlots(self, value: List[InputSlot]):
"""Set filterInputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__filterInputSlots = value
@property
def filterOutputSlots(self) -> List[OutputSlot]:
""""""
return self.__filterOutputSlots
@filterOutputSlots.setter
def filterOutputSlots(self, value: List[OutputSlot]):
"""Set filterOutputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__filterOutputSlots = value
@property
def breakingStrength(self) -> float:
"""Breaking strength"""
return self.__breakingStrength
@breakingStrength.setter
def breakingStrength(self, value: float):
"""Set breakingStrength"""
self.__breakingStrength = float(value)
@property
def customSafetyFactor(self) -> float:
"""Safety factor"""
return self.__customSafetyFactor
@customSafetyFactor.setter
def customSafetyFactor(self, value: float):
"""Set customSafetyFactor"""
self.__customSafetyFactor = float(value)
@property
def analysis(self) -> ISO19901_7_analysis:
""""""
return self.__analysis
@analysis.setter
def analysis(self, value: ISO19901_7_analysis):
"""Set analysis"""
self.__analysis = value
@property
def mooringType(self) -> MooringType:
""""""
return self.__mooringType
@mooringType.setter
def mooringType(self, value: MooringType):
"""Set mooringType"""
self.__mooringType = value
@property
def consequenceClass(self) -> ConsequenceClass:
""""""
return self.__consequenceClass
@consequenceClass.setter
def consequenceClass(self, value: ConsequenceClass):
"""Set consequenceClass"""
self.__consequenceClass = value
@property
def useCustomSafetyFactor(self) -> bool:
""""""
return self.__useCustomSafetyFactor
@useCustomSafetyFactor.setter
def useCustomSafetyFactor(self, value: bool):
"""Set useCustomSafetyFactor"""
self.__useCustomSafetyFactor = bool(value)
| 28.515504 | 297 | 0.633954 | # This an autogenerated file
#
# Generated with ISO19901_7Filter
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.iso19901_7filter import ISO19901_7FilterBlueprint
from typing import Dict
from sima.post.consequenceclass import ConsequenceClass
from sima.post.controlsignalinputslot import ControlSignalInputSlot
from sima.post.inputslot import InputSlot
from sima.post.iso19901_7_analysis import ISO19901_7_analysis
from sima.post.mooringtype import MooringType
from sima.post.operationnode import OperationNode
from sima.post.outputslot import OutputSlot
from sima.sima.scriptablevalue import ScriptableValue
class ISO19901_7Filter(OperationNode):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
x : int
(default 0)
y : int
(default 0)
h : int
(default 0)
w : int
(default 0)
controlSignalInputSlots : List[ControlSignalInputSlot]
filterInputSlots : List[InputSlot]
filterOutputSlots : List[OutputSlot]
breakingStrength : float
Breaking strength(default 0.0)
customSafetyFactor : float
Safety factor(default 0.0)
analysis : ISO19901_7_analysis
mooringType : MooringType
consequenceClass : ConsequenceClass
useCustomSafetyFactor : bool
(default False)
"""
def __init__(self , name="", description="", _id="", x=0, y=0, h=0, w=0, breakingStrength=0.0, customSafetyFactor=0.0, analysis=ISO19901_7_analysis.INTACT_CONDITION, mooringType=MooringType.PERMANENT_MOORING, consequenceClass=ConsequenceClass.CLASS_ONE, useCustomSafetyFactor=False, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.x = x
self.y = y
self.h = h
self.w = w
self.controlSignalInputSlots = list()
self.filterInputSlots = list()
self.filterOutputSlots = list()
self.breakingStrength = breakingStrength
self.customSafetyFactor = customSafetyFactor
self.analysis = analysis
self.mooringType = mooringType
self.consequenceClass = consequenceClass
self.useCustomSafetyFactor = useCustomSafetyFactor
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return ISO19901_7FilterBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def x(self) -> int:
""""""
return self.__x
@x.setter
def x(self, value: int):
"""Set x"""
self.__x = int(value)
@property
def y(self) -> int:
""""""
return self.__y
@y.setter
def y(self, value: int):
"""Set y"""
self.__y = int(value)
@property
def h(self) -> int:
""""""
return self.__h
@h.setter
def h(self, value: int):
"""Set h"""
self.__h = int(value)
@property
def w(self) -> int:
""""""
return self.__w
@w.setter
def w(self, value: int):
"""Set w"""
self.__w = int(value)
@property
def controlSignalInputSlots(self) -> List[ControlSignalInputSlot]:
""""""
return self.__controlSignalInputSlots
@controlSignalInputSlots.setter
def controlSignalInputSlots(self, value: List[ControlSignalInputSlot]):
"""Set controlSignalInputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__controlSignalInputSlots = value
@property
def filterInputSlots(self) -> List[InputSlot]:
""""""
return self.__filterInputSlots
@filterInputSlots.setter
def filterInputSlots(self, value: List[InputSlot]):
"""Set filterInputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__filterInputSlots = value
@property
def filterOutputSlots(self) -> List[OutputSlot]:
""""""
return self.__filterOutputSlots
@filterOutputSlots.setter
def filterOutputSlots(self, value: List[OutputSlot]):
"""Set filterOutputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__filterOutputSlots = value
@property
def breakingStrength(self) -> float:
"""Breaking strength"""
return self.__breakingStrength
@breakingStrength.setter
def breakingStrength(self, value: float):
"""Set breakingStrength"""
self.__breakingStrength = float(value)
@property
def customSafetyFactor(self) -> float:
"""Safety factor"""
return self.__customSafetyFactor
@customSafetyFactor.setter
def customSafetyFactor(self, value: float):
"""Set customSafetyFactor"""
self.__customSafetyFactor = float(value)
@property
def analysis(self) -> ISO19901_7_analysis:
""""""
return self.__analysis
@analysis.setter
def analysis(self, value: ISO19901_7_analysis):
"""Set analysis"""
self.__analysis = value
@property
def mooringType(self) -> MooringType:
""""""
return self.__mooringType
@mooringType.setter
def mooringType(self, value: MooringType):
"""Set mooringType"""
self.__mooringType = value
@property
def consequenceClass(self) -> ConsequenceClass:
""""""
return self.__consequenceClass
@consequenceClass.setter
def consequenceClass(self, value: ConsequenceClass):
"""Set consequenceClass"""
self.__consequenceClass = value
@property
def useCustomSafetyFactor(self) -> bool:
""""""
return self.__useCustomSafetyFactor
@useCustomSafetyFactor.setter
def useCustomSafetyFactor(self, value: bool):
"""Set useCustomSafetyFactor"""
self.__useCustomSafetyFactor = bool(value)
| 1,044 | 0 | 27 |
ed0efe028d7407c561a7706d3ca7cfd640997727 | 1,609 | py | Python | wstools/dl_book.py | inductiveload/wstools | b354a642b10a8d1bfa2a7683d2270c42512cb25d | [
"MIT"
] | null | null | null | wstools/dl_book.py | inductiveload/wstools | b354a642b10a8d1bfa2a7683d2270c42512cb25d | [
"MIT"
] | null | null | null | wstools/dl_book.py | inductiveload/wstools | b354a642b10a8d1bfa2a7683d2270c42512cb25d | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import argparse
import logging
import dotenv
import dl_hathi
import dl_ia
if __name__ == "__main__":
main()
| 27.271186 | 92 | 0.630205 | #! /usr/bin/env python3
import argparse
import logging
import dotenv
import dl_hathi
import dl_ia
class DlDef():
    """One download request: source key, item identifier and target filename."""

    def __init__(self, src, id, filename):
        """Record the request, normalising the source key to lower case."""
        self.filename = filename
        self.id = id
        self.src = src.lower()
        # Optional behaviour flags, off by default; callers toggle them directly.
        self.skip_existing = False
        self.use_proxy = False
def do_download(dl, dl_dir):
    """Dispatch *dl* (a DlDef) to the downloader matching its source key.

    Returns True for a HathiTrust ('ht') download, or whatever
    dl_ia.download returns for an Internet Archive ('ia') one.
    Raises NotImplementedError for any other source key.
    """
    source = dl.src
    if source == 'ht':
        dl_hathi.download(dl.id, dl_dir, skip_existing=dl.skip_existing,
                          proxy=dl.use_proxy)
        return True
    if source == 'ia':
        return dl_ia.download(dl.id, dl_dir, skip_existing=dl.skip_existing, skip_djvu=True)
    raise NotImplementedError
def main():
    """CLI entry point: parse options, load .env config, configure logging."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='show debugging information')
    parser.add_argument('-i', '--id',
                        help='The source identifier')
    parser.add_argument('-s', '--src',
                        help='The source key (e.g. ia for Internet Archive)')
    parser.add_argument('-o', '--outdir',
                        help='The output dir')
    args = parser.parse_args()
    dotenv.load_dotenv()
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    # Quieten chatty HTTP/auth libraries.
    for noisy in ("requests", "urllib3", "oauthlib", "requests_oauthlib"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()
| 1,378 | -7 | 95 |
e67268989913f9d423b937c3ad58d1726ae4048f | 5,280 | py | Python | venv/Lib/site-packages/PySide2/examples/tutorial/t8.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 41 | 2021-06-19T13:57:18.000Z | 2021-12-02T17:08:53.000Z | venv/Lib/site-packages/PySide2/examples/tutorial/t8.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | venvWIN/Lib/site-packages/PySide2/examples/tutorial/t8.py | NeroNekro/PortableController | a8bbfc1b6c8cb2c919e48eb0104e42f436059b18 | [
"BSD-3-Clause"
] | 4 | 2021-07-02T03:09:51.000Z | 2021-11-25T13:00:10.000Z |
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
# PySide2 tutorial 8
import sys
from PySide2 import QtCore, QtGui, QtWidgets
# Create the Qt application, show the main widget, and enter the event loop;
# the loop's exit status becomes the process exit code.
app = QtWidgets.QApplication(sys.argv)
widget = MyWidget()
widget.setGeometry(100, 100, 500, 355)
widget.show()
sys.exit(app.exec_())
| 34.509804 | 94 | 0.644129 |
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
# PySide2 tutorial 8
import sys
from PySide2 import QtCore, QtGui, QtWidgets
class LCDRange(QtWidgets.QWidget):
    """A two-digit LCD display paired with a horizontal slider (0..99)."""
    # Re-emitted whenever the internal slider's value changes.
    valueChanged = QtCore.Signal(int)
    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        lcd = QtWidgets.QLCDNumber(2)
        self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.slider.setRange(0, 99)
        self.slider.setValue(0)
        # Old-style string-based connections: the slider drives the LCD
        # display and forwards its valueChanged signal through this widget.
        self.connect(self.slider, QtCore.SIGNAL("valueChanged(int)"),
                lcd, QtCore.SLOT("display(int)"))
        self.connect(self.slider, QtCore.SIGNAL("valueChanged(int)"),
                self, QtCore.SIGNAL("valueChanged(int)"))
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(lcd)
        layout.addWidget(self.slider)
        self.setLayout(layout)
        # Keyboard focus given to this widget is delegated to the slider.
        self.setFocusProxy(self.slider)
    def value(self):
        """Return the current slider value."""
        return self.slider.value()
    @QtCore.Slot(int)
    def setValue(self, value):
        """Set the slider (and therefore the LCD) to *value*."""
        self.slider.setValue(value)
    def setRange(self, minValue, maxValue):
        """Restrict the slider to [minValue, maxValue]; must lie within 0..99."""
        if minValue < 0 or maxValue > 99 or minValue > maxValue:
            # Invalid ranges are reported and ignored rather than raised.
            QtCore.qWarning("LCDRange.setRange(%d, %d)\n"
                    "\tRange must be 0..99\n"
                    "\tand minValue must not be greater than maxValue" % (minValue, maxValue))
            return
        self.slider.setRange(minValue, maxValue)
class CannonField(QtWidgets.QWidget):
    """Widget that stores the cannon's angle and paints it as plain text."""
    # Emitted when the stored angle actually changes.
    angleChanged = QtCore.Signal(int)
    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.currentAngle = 45
        self.setPalette(QtGui.QPalette(QtGui.QColor(250, 250, 200)))
        self.setAutoFillBackground(True)
    def angle(self):
        """Return the current cannon angle in degrees."""
        return self.currentAngle
    @QtCore.Slot(int)
    def setAngle(self, angle):
        """Clamp *angle* to [5, 70]; on change, repaint and emit angleChanged."""
        if angle < 5:
            angle = 5
        if angle > 70:
            angle = 70
        if self.currentAngle == angle:
            return
        self.currentAngle = angle
        self.update()
        self.emit(QtCore.SIGNAL("angleChanged(int)"), self.currentAngle)
    def paintEvent(self, event):
        """Draw the current angle as text at a fixed position."""
        painter = QtGui.QPainter(self)
        painter.drawText(200, 200, "Angle = %d" % self.currentAngle)
class MyWidget(QtWidgets.QWidget):
    """Top-level widget wiring a Quit button, an LCDRange and a CannonField."""
    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        quit = QtWidgets.QPushButton("Quit")
        quit.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
        # NOTE(review): relies on the implicit global `qApp` injected by the
        # PySide2 runtime — confirm it exists in this execution context.
        self.connect(quit, QtCore.SIGNAL("clicked()"),
                qApp, QtCore.SLOT("quit()"))
        angle = LCDRange()
        angle.setRange(5, 70)
        cannonField = CannonField()
        # Two-way binding: slider value <-> cannon field angle.
        self.connect(angle, QtCore.SIGNAL("valueChanged(int)"),
                cannonField.setAngle)
        self.connect(cannonField, QtCore.SIGNAL("angleChanged(int)"),
                angle.setValue)
        gridLayout = QtWidgets.QGridLayout()
        gridLayout.addWidget(quit, 0, 0)
        gridLayout.addWidget(angle, 1, 0)
        gridLayout.addWidget(cannonField, 1, 1, 2, 1)
        gridLayout.setColumnStretch(1, 10)
        self.setLayout(gridLayout)
        angle.setValue(60)
        angle.setFocus()
# Create the Qt application, show the main widget, and enter the event loop;
# the loop's exit status becomes the process exit code.
app = QtWidgets.QApplication(sys.argv)
widget = MyWidget()
widget.setGeometry(100, 100, 500, 355)
widget.show()
sys.exit(app.exec_())
| 2,620 | 376 | 95 |
46e2b726a4e22ae3ad1a647b3a4d43ba1c89a9a6 | 215 | py | Python | tsp_solvers/methods/__init__.py | ggsdc/tsp-examples | 85976dc325bacec79f0f3503870fedfcd98167a6 | [
"MIT"
] | 2 | 2020-06-29T18:52:50.000Z | 2020-09-28T23:00:53.000Z | tsp_solvers/methods/__init__.py | ggsdc/tsp-solvers | 85976dc325bacec79f0f3503870fedfcd98167a6 | [
"MIT"
] | 1 | 2022-03-12T00:59:20.000Z | 2022-03-12T00:59:20.000Z | tsp_solvers/methods/__init__.py | ggsdc/tsp-examples | 85976dc325bacec79f0f3503870fedfcd98167a6 | [
"MIT"
] | 1 | 2021-03-16T02:40:15.000Z | 2021-03-16T02:40:15.000Z | from .aco import AntColonyOptimization
from .ga import GeneticAlgorithm
from .lp import LinearIntegerProgram
from .pso import ParticleSwarmOptimization
from .sa import SimulatedAnnealing
from .two_opt import TwoOpt
| 30.714286 | 42 | 0.860465 | from .aco import AntColonyOptimization
from .ga import GeneticAlgorithm
from .lp import LinearIntegerProgram
from .pso import ParticleSwarmOptimization
from .sa import SimulatedAnnealing
from .two_opt import TwoOpt
| 0 | 0 | 0 |
c8a53aefbc50e3861b60a90b536faa2da9e29b0c | 10,609 | py | Python | src/model/engine.py | yohann84L/faster_rcnn_test_case | c960790629462a94c1934c8efc59e494d392410c | [
"MIT"
] | null | null | null | src/model/engine.py | yohann84L/faster_rcnn_test_case | c960790629462a94c1934c8efc59e494d392410c | [
"MIT"
] | null | null | null | src/model/engine.py | yohann84L/faster_rcnn_test_case | c960790629462a94c1934c8efc59e494d392410c | [
"MIT"
] | null | null | null | import math
import sys
import time
import torch
from ..utils import utils
from ..utils.metric_logger import MetricLogger, SmoothedValue
@torch.no_grad()
import numpy as np
def get_model_scores(pred_boxes):
    """Invert predictions into a map from model score to image ids.

    Args:
        pred_boxes (dict): maps image id -> dict with at least a 'scores' list.

    Returns:
        dict: maps each score value to the list of image ids (usually
        filenames) whose predictions contain that score.
    """
    model_score = {}
    for img_id, val in pred_boxes.items():
        for score in val['scores']:
            # setdefault replaces the explicit key-existence branch.
            model_score.setdefault(score, []).append(img_id)
    return model_score
def calc_precision_recall(image_results):
    """Compute precision and recall pooled over a set of images.

    Args:
        image_results (dict): maps image id -> dict of integer counts under
            the keys 'TP', 'FP' and 'FN'.  (The previous docstring advertised
            'true_pos'/'false_pos'/'false_neg', which did not match the code.)

    Returns:
        tuple: (precision, recall) as floats; 0.0 where the denominator is 0.
    """
    tp = sum(res['TP'] for res in image_results.values())
    fp = sum(res['FP'] for res in image_results.values())
    fn = sum(res['FN'] for res in image_results.values())
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    return precision, recall
def get_single_image_results(gt_boxes, pred_boxes, iou_thr):
    """Count true/false positives and false negatives for one image.

    Args:
        gt_boxes (list): ground-truth boxes as [xmin, ymin, xmax, ymax].
        pred_boxes (list): predicted boxes in the same format.
        iou_thr (float): IoU threshold above which a prediction may match.

    Returns:
        dict: {'true_positive': int, 'false_positive': int,
               'false_negative': int}.
    """
    # Degenerate cases: with no predictions every gt box is a miss, and with
    # no gt boxes every prediction is a false alarm.  (The previous version
    # returned all zeros here, under-reporting FP/FN.)
    if not pred_boxes or not gt_boxes:
        return {'true_positive': 0,
                'false_positive': len(pred_boxes),
                'false_negative': len(gt_boxes)}
    gt_idx_thr = []
    pred_idx_thr = []
    ious = []
    for ipb, pred_box in enumerate(pred_boxes):
        for igb, gt_box in enumerate(gt_boxes):
            iou = calc_iou(gt_box, pred_box)
            if iou > iou_thr:
                gt_idx_thr.append(igb)
                pred_idx_thr.append(ipb)
                ious.append(iou)
    # Greedily match pairs by *descending* IoU.  The previous code used
    # np.argsort(ious)[::1] — a no-op slice that matched lowest IoU first.
    iou_sort = np.argsort(ious)[::-1]
    gt_match_idx = []
    pred_match_idx = []
    for idx in iou_sort:
        gt_idx = gt_idx_thr[idx]
        pr_idx = pred_idx_thr[idx]
        # Each gt box and each prediction may be matched at most once.
        if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
            gt_match_idx.append(gt_idx)
            pred_match_idx.append(pr_idx)
    tp = len(gt_match_idx)
    fp = len(pred_boxes) - len(pred_match_idx)
    fn = len(gt_boxes) - len(gt_match_idx)
    return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}
| 36.965157 | 117 | 0.625695 | import math
import sys
import time
import torch
from ..utils import utils
from ..utils.metric_logger import MetricLogger, SmoothedValue
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, writer):
    """Train *model* for one epoch over *data_loader*.

    The detection model's loss dict is summed and back-propagated; losses are
    additionally reduced across processes for logging only.  The process is
    aborted if the reduced loss becomes non-finite.
    """
    model.train()
    metric_logger = MetricLogger(delimiter=" ", writer=writer)
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    # NOTE(review): assigned but never populated or returned — dead variable.
    epoch_results = {}
    lr_scheduler = None
    if epoch == 0:
        # Linear warm-up over (up to) the first 1000 iterations of epoch 0.
        warmup_factor = 1. / 1000
        warmup_iters = min(1000, len(data_loader) - 1)
        lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
    for images, targets in metric_logger.log_every(data_loader, print_freq, epoch, header):
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # In train mode the model returns a dict of named loss tensors.
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training:\n{}".format(loss_value, loss_dict))
            sys.exit(1)
        # Standard step: clear grads, back-propagate the un-reduced loss, step.
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        if lr_scheduler is not None:
            lr_scheduler.step()
        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
@torch.no_grad()
def evaluate(model, data_loader, device, writer, epoch, threshold=0.5):
    """Evaluate image-level presence of class 1 and log accuracy.

    For each image the target is 1 if any ground-truth box has label 1; the
    prediction is 1 if any class-1 detection survives the score *threshold*.
    Accuracy over the loader is printed and written to TensorBoard.
    """
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = MetricLogger(delimiter=" ", writer=writer)
    header = 'Test:'
    total, correct = 0, 0
    for image, targets in metric_logger.log_every(data_loader, 50, epoch=epoch, header=header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        targets_labels = torch.as_tensor([int(1 in target["labels"]) for target in targets], dtype=torch.int8)
        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        # Exactly one 0/1 prediction per image.  The previous code appended
        # only the 1s, so whenever an image had no confident class-1 detection
        # the prediction tensor was shorter than targets_labels and the
        # element-wise comparison below was invalid.  (The unused `res` dict
        # mapping image_id -> output was also dropped.)
        outputs_filtred = []
        for output in outputs:
            output["labels"] = output["labels"][output["scores"] >= threshold]
            outputs_filtred.append(1 if 1 in output["labels"] else 0)
        outputs_filtred = torch.as_tensor(outputs_filtred, dtype=torch.int8)
        model_time = time.time() - model_time
        total += len(image)
        correct += (targets_labels == outputs_filtred).sum().item()
        metric_logger.update(model_time=model_time)
    print("Test accuracy :", correct / total)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    torch.set_num_threads(n_threads)
    writer.add_scalar("Accuracy/eval", correct / total, epoch)
import numpy as np
def get_model_scores(pred_boxes):
    """Invert predictions into a map from model score to image ids.

    Args:
        pred_boxes (dict): maps image id -> dict with at least a 'scores' list.

    Returns:
        dict: maps each score value to the list of image ids (usually
        filenames) whose predictions contain that score.
    """
    model_score = {}
    for img_id, val in pred_boxes.items():
        for score in val['scores']:
            # setdefault replaces the explicit key-existence branch.
            model_score.setdefault(score, []).append(img_id)
    return model_score
def iou_pytorch(outputs: torch.Tensor, labels: torch.Tensor):
    """Per-sample smoothed IoU of two binary masks, quantised to tenths.

    *outputs* is expected as BATCH x 1 x H x W (the channel axis is dropped
    by squeeze); *labels* as BATCH x H x W.  The IoU is passed through a
    steep ramp centred at 0.5 and ceiled, so values at or below 0.5 map to 0
    and higher values step up in increments of 0.1 to a maximum of 1.0.
    """
    eps = 1e-6  # avoids 0/0 when both masks are empty
    preds = outputs.squeeze(1)  # BATCH x 1 x H x W -> BATCH x H x W
    overlap = (preds & labels).float().sum((1, 2))
    combined = (preds | labels).float().sum((1, 2))
    iou = (overlap + eps) / (combined + eps)
    return torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10
def calc_precision_recall(image_results):
    """Compute precision and recall pooled over a set of images.

    Args:
        image_results (dict): maps image id -> dict of integer counts under
            the keys 'TP', 'FP' and 'FN'.  (The previous docstring advertised
            'true_pos'/'false_pos'/'false_neg', which did not match the code.)

    Returns:
        tuple: (precision, recall) as floats; 0.0 where the denominator is 0.
    """
    tp = sum(res['TP'] for res in image_results.values())
    fp = sum(res['FP'] for res in image_results.values())
    fn = sum(res['FN'] for res in image_results.values())
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    return precision, recall
def get_single_image_results(gt_boxes, pred_boxes, iou_thr):
    """Count true/false positives and false negatives for one image.

    Args:
        gt_boxes (list): ground-truth boxes as [xmin, ymin, xmax, ymax].
        pred_boxes (list): predicted boxes in the same format.
        iou_thr (float): IoU threshold above which a prediction may match.

    Returns:
        dict: {'true_positive': int, 'false_positive': int,
               'false_negative': int}.
    """
    # Degenerate cases: with no predictions every gt box is a miss, and with
    # no gt boxes every prediction is a false alarm.  (The previous version
    # returned all zeros here, under-reporting FP/FN.)
    if not pred_boxes or not gt_boxes:
        return {'true_positive': 0,
                'false_positive': len(pred_boxes),
                'false_negative': len(gt_boxes)}
    gt_idx_thr = []
    pred_idx_thr = []
    ious = []
    for ipb, pred_box in enumerate(pred_boxes):
        for igb, gt_box in enumerate(gt_boxes):
            iou = calc_iou(gt_box, pred_box)
            if iou > iou_thr:
                gt_idx_thr.append(igb)
                pred_idx_thr.append(ipb)
                ious.append(iou)
    # Greedily match pairs by *descending* IoU.  The previous code used
    # np.argsort(ious)[::1] — a no-op slice that matched lowest IoU first.
    iou_sort = np.argsort(ious)[::-1]
    gt_match_idx = []
    pred_match_idx = []
    for idx in iou_sort:
        gt_idx = gt_idx_thr[idx]
        pr_idx = pred_idx_thr[idx]
        # Each gt box and each prediction may be matched at most once.
        if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
            gt_match_idx.append(gt_idx)
            pred_match_idx.append(pr_idx)
    tp = len(gt_match_idx)
    fp = len(pred_boxes) - len(pred_match_idx)
    fn = len(gt_boxes) - len(gt_match_idx)
    return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}
def get_avg_precision_at_iou(gt_boxes, pred_bb, iou_thr=0.5):
    """Compute 11-point interpolated average precision at one IoU threshold.

    Fixes in this revision: `deepcopy` was never imported (NameError at the
    first call); the `iou_thr` parameter was ignored (0.5 was hard-coded in
    the per-image call); per-image results use 'true_positive'-style keys
    while calc_precision_recall expects 'TP'/'FP'/'FN', so the keys are now
    translated; debug prints and a stray no-op expression were removed.

    Args:
        gt_boxes (dict): image id -> list of ground-truth boxes.
        pred_bb (dict): image id -> {'boxes': [...], 'scores': [...]};
            mutated in place (boxes/scores re-sorted ascending by score).
        iou_thr (float): IoU threshold for the per-image matcher.

    Returns:
        dict: 'avg_prec' plus 'precisions', 'recalls' and 'model_thrs' curves.
    """
    from copy import deepcopy  # was missing at module level

    model_scores = get_model_scores(pred_bb)
    sorted_model_scores = sorted(model_scores.keys())
    # Sort each image's predictions ascending by score so low-scoring boxes
    # can be pruned from the front as the score threshold rises.
    for img_id in pred_bb.keys():
        arg_sort = np.argsort(pred_bb[img_id]['scores'])
        pred_bb[img_id]['scores'] = np.array(pred_bb[img_id]['scores'])[arg_sort].tolist()
        pred_bb[img_id]['boxes'] = np.array(pred_bb[img_id]['boxes'])[arg_sort].tolist()
    pred_boxes_pruned = deepcopy(pred_bb)
    precisions = []
    recalls = []
    model_thrs = []
    img_results = {}
    # Sweep model-score thresholds; at each step re-evaluate only the images
    # whose box sets changed, then pool precision/recall over all images.
    for ithr, model_score_thr in enumerate(sorted_model_scores[:-1]):
        # On the first iteration every image must be evaluated once.
        img_ids = gt_boxes.keys() if ithr == 0 else model_scores[model_score_thr]
        for img_id in img_ids:
            gt_boxes_img = gt_boxes[img_id]
            box_scores = pred_boxes_pruned[img_id]['scores']
            start_idx = 0
            for score in box_scores:
                if score <= model_score_thr:
                    start_idx += 1
                else:
                    break
            # Drop boxes scoring at or below the current threshold.
            pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]
            pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]
            res = get_single_image_results(
                gt_boxes_img, pred_boxes_pruned[img_id]['boxes'], iou_thr=iou_thr)
            # Translate to the key names calc_precision_recall expects.
            img_results[img_id] = {'TP': res['true_positive'],
                                   'FP': res['false_positive'],
                                   'FN': res['false_negative']}
        prec, rec = calc_precision_recall(img_results)
        precisions.append(prec)
        recalls.append(rec)
        model_thrs.append(model_score_thr)
    precisions = np.array(precisions)
    recalls = np.array(recalls)
    # 11-point interpolation: best precision among points with recall above
    # each of the levels 0.0, 0.1, ..., 1.0 (0.0 when none qualify).
    prec_at_rec = []
    for recall_level in np.linspace(0.0, 1.0, 11):
        try:
            args = np.argwhere(recalls > recall_level).flatten()
            prec = max(precisions[args])
        except ValueError:
            prec = 0.0
        prec_at_rec.append(prec)
    avg_prec = np.mean(prec_at_rec)
    return {
        'avg_prec': avg_prec,
        'precisions': precisions,
        'recalls': recalls,
        'model_thrs': model_thrs}
| 6,815 | 0 | 91 |
ab7e2592e2099f09a239c3fd0328d7e0d181bf79 | 856 | py | Python | context.py | robertdale/amqp-demo | 89e8cd9597e2101ed43ce3ff071b1adcb511c54f | [
"Apache-2.0"
] | null | null | null | context.py | robertdale/amqp-demo | 89e8cd9597e2101ed43ce3ff071b1adcb511c54f | [
"Apache-2.0"
] | null | null | null | context.py | robertdale/amqp-demo | 89e8cd9597e2101ed43ce3ff071b1adcb511c54f | [
"Apache-2.0"
] | 1 | 2018-10-22T09:11:34.000Z | 2018-10-22T09:11:34.000Z | from springpython.context import *
from springpython.config import *
from pika_client import *
from amqplib_client import *
from ticker_system import *
from buy_low_sell_high import *
| 30.571429 | 67 | 0.714953 | from springpython.context import *
from springpython.config import *
from pika_client import *
from amqplib_client import *
from ticker_system import *
from buy_low_sell_high import *
class AppContext(PythonConfig):
    """Spring-Python application context wiring the AMQP demo objects.

    NOTE: this is Python 2 code (print statement) using spring-python's
    @Object bean definitions; each bean is a prototype created lazily.
    """
    def __init__(self):
        PythonConfig.__init__(self)
        # Exchange name shared by publisher, listener and ticker beans.
        self.exchange_name = "my_exchange"
    @Object(scope.PROTOTYPE, lazy_init=True)
    def rabbitmq_publisher(self):
        """A fresh publisher bound to the shared exchange (amqplib-backed)."""
        #return PikaPublisher(exchange_name=self.exchange_name)
        return PyAmqpLibPublisher(exchange_name=self.exchange_name)
    @Object(scope.PROTOTYPE, lazy_init=True)
    def rabbitmq_listener(self):
        """A Buyer wired to its own publisher; trend=25 — see buy_low_sell_high."""
        buyer = Buyer(self.rabbitmq_publisher(), "", trend=25)
        print "Buyer = %s" % id(buyer)
        return buyer
    @Object(scope.PROTOTYPE, lazy_init=True)
    def ticker(self):
        """A Ticker wired to its own publisher — see ticker_system."""
        return Ticker(self.rabbitmq_publisher(), "")
8c182e3c729a12301bd584c5f59f07b9a49cfb11 | 2,068 | py | Python | examples/lcd-demo/lib/widgets/button.py | fragmuffin/howto-micropython | 3ad04e4c9d1a784daba8d636a2f39cc3d31f67ab | [
"MIT"
] | 1 | 2018-03-08T20:15:24.000Z | 2018-03-08T20:15:24.000Z | examples/lcd-demo/lib/widgets/button.py | fragmuffin/howto-micropython | 3ad04e4c9d1a784daba8d636a2f39cc3d31f67ab | [
"MIT"
] | null | null | null | examples/lcd-demo/lib/widgets/button.py | fragmuffin/howto-micropython | 3ad04e4c9d1a784daba8d636a2f39cc3d31f67ab | [
"MIT"
] | 2 | 2017-12-12T11:09:39.000Z | 2021-06-18T11:17:50.000Z | from colors import *
from utils import font_height
| 28.328767 | 71 | 0.542553 | from colors import *
from utils import font_height
class Button:
    """A tappable rectangle drawn on the LCD with a centre-justified label."""

    # Drawing defaults; instances or subclasses may override.
    color_line = WHITE
    color_fill = GREY_20
    text_color = WHITE
    font_index = 1
    font_scale = 0

    def __init__(self, lcd, x1, y1, x2, y2, label="Button"):
        """Remember geometry and label, then draw immediately on *lcd*."""
        self.lcd = lcd
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.label = label
        self.render()

    @property
    def xywh(self):
        """Geometry as an (x, y, width, height) tuple."""
        return (self.x1, self.y1, self.x2 - self.x1, self.y2 - self.y1)

    def render(self):
        """Draw the filled, outlined box and the centred label text."""
        # Box: interior fill first, then the outline.
        self.lcd.set_pen(self.color_line, self.color_fill)
        geometry = self.xywh
        self.lcd.rect_interior(*geometry)
        self.lcd.rect(*geometry)
        # Label: transparent background, centred both ways inside the box.
        self.lcd.set_font(
            self.font_index,
            scale=self.font_scale,
            trans=1,
        )
        self.lcd.set_text_color(self.text_color, BLACK)
        glyph_h = font_height(self.font_index, self.font_scale)
        # NOTE(review): width estimate assumes square glyphs (width == height).
        label_width = glyph_h * len(self.label)
        mid_x = (self.x1 + self.x2) / 2
        mid_y = (self.y1 + self.y2) / 2
        self.lcd.set_pos(
            int(mid_x - (label_width / 2)),
            int(mid_y - (glyph_h / 2))
        )
        self.lcd.write(self.label)

    def is_pressed(self, touching=None, x=0, y=0):
        """
        Return whether the button is pressed.

        >>> btn.is_pressed()  # queries the lcd itself

        or, sharing one touch sample across several buttons:

        >>> touch = btn.lcd.get_touch()
        >>> btn1.is_pressed(*touch)
        >>> btn2.is_pressed(*touch)

        :param touching: True if lcd is being touched (None = ask the lcd)
        :param x: x coordinate
        :param y: y coordinate
        :return: True if the touch lands inside this button, False otherwise
        """
        if touching is None:
            (touching, x, y) = self.lcd.get_touch()
        return bool(touching
                    and self.x1 < x < self.x2
                    and self.y1 < y < self.y2)
36b2d251b7c9806e85eed9b3df8ea233472d5695 | 3,489 | py | Python | SNACC_Text/OCR & BoW.py | emariwileslee/SNACC | 02bd1826eb77b6a397446d7db503d449dc2102a7 | [
"MIT"
] | null | null | null | SNACC_Text/OCR & BoW.py | emariwileslee/SNACC | 02bd1826eb77b6a397446d7db503d449dc2102a7 | [
"MIT"
] | null | null | null | SNACC_Text/OCR & BoW.py | emariwileslee/SNACC | 02bd1826eb77b6a397446d7db503d449dc2102a7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[51]:
# Generic Libraries
from PIL import Image
from keras.preprocessing.text import Tokenizer
import os
import pandas as pd
import numpy as np
import re,string,unicodedata
import cv2
import requests
import csv
import pickle
#Tesseract Library
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
#Warnings
import warnings
warnings.filterwarnings("ignore")
#Garbage Collection
import gc
#Gensim Library for Text Processing
import gensim.parsing.preprocessing as gsp
from gensim.parsing.preprocessing import remove_stopwords
from gensim import utils
#TextBlob Library (Sentiment Analysis)
from textblob import TextBlob, Word
#Plotting Libraries
import matplotlib.pyplot as plt
import seaborn as sns
# In[52]:
#Define Directory Path
#sample_images = r'C:\Users\calli\Downloads\train_images'
test_images = r'C:\Users\calli\Documents\MATLAB\archive\bow_sample'
# In[53]:
#Custom Function to Traverse the folder
# In[54]:
#Traversing the folders
#traverse(sample_images)
traverse(test_images)
# In[55]:
ex_txt = [] #list to store the extracted text
txt4bow = [] #list to use for bag of words
#Function to Extract Text
def TxtExtract(directory):
    """OCR every file under *directory* and collect the extracted text.

    Appends [filename, filepath, text] rows to the module-level ``ex_txt``
    list and the raw text to ``txt4bow``; images with whitespace-only OCR
    output are reported and skipped.
    """
    for subdir, dirs, files in os.walk(directory):
        for file in files:
            filepath = subdir + os.sep + file
            img_cv = cv2.imread(filepath)
            img_rgb = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)
            text = pytesseract.image_to_string(img_rgb)
            # Collapse runs of blank lines.  The previous pattern r'\n{2, 10}'
            # matched the literal text "{2, 10}" because regex quantifiers
            # must not contain spaces.
            x = re.sub(r'\n{2,10}', '\n', text)
            if text.isspace():
                print(file)
                print("image does not have text")
            else:
                print(file)
                ex_txt.append([file, filepath, x.rstrip("\n")])
                txt4bow.append(text)
                print(x.rstrip("\n"))
    # NOTE(review): 'subdir' is unbound here if the walk yielded nothing.
    fol_nm = os.path.split(os.path.dirname(subdir))[-1]
    print(f"Text Extracted from the files in '{fol_nm}' folder & saved to list..")
# In[56]:
#Extracting Text from JPG files in Sample Image Folder
#TxtExtract(sample_images)
#Extracting Text from JPG files in Dataset Folder
TxtExtract(test_images)
# In[57]:
with open('OCR.csv', 'w', newline='', encoding='utf-8') as f:
header = ['FileName', 'Filepath', 'Text']
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(ex_txt)
# In[62]:
#BOW
# Build a bag-of-words representation from the OCR output in txt4bow.
filtered_txt = []
for n in range(len(txt4bow)):
    #remove stopwords
    filtered_sentence = remove_stopwords(txt4bow[n])
    filtered_txt.extend([filtered_sentence])
# using tokenizer
# Fit a Keras tokenizer over all documents to build the vocabulary.
model = Tokenizer()
model.fit_on_texts(filtered_txt)
keys = model.word_index.keys()
#print keys
print(f'keys: {list(keys)}\n')
#create bag of words representation
# One row per document, one column per vocabulary word, values = counts.
rep = model.texts_to_matrix(filtered_txt, mode='count')
print(rep)
# In[63]:
with open('BOW.csv', 'w', newline='', encoding='utf-8') as f:
header = [keys]
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(rep)
# In[85]:
#Free up memory
gc.collect()
# In[ ]:
| 19.49162 | 87 | 0.659215 | #!/usr/bin/env python
# coding: utf-8
# In[51]:
# Generic Libraries
from PIL import Image
from keras.preprocessing.text import Tokenizer
import os
import pandas as pd
import numpy as np
import re,string,unicodedata
import cv2
import requests
import csv
import pickle
#Tesseract Library
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
#Warnings
import warnings
warnings.filterwarnings("ignore")
#Garbage Collection
import gc
#Gensim Library for Text Processing
import gensim.parsing.preprocessing as gsp
from gensim.parsing.preprocessing import remove_stopwords
from gensim import utils
#TextBlob Library (Sentiment Analysis)
from textblob import TextBlob, Word
#Plotting Libraries
import matplotlib.pyplot as plt
import seaborn as sns
# In[52]:
#Define Directory Path
#sample_images = r'C:\Users\calli\Downloads\train_images'
test_images = r'C:\Users\calli\Documents\MATLAB\archive\bow_sample'
# In[53]:
#Custom Function to Traverse the folder
def traverse(directory):
    """Print how many entries sit directly inside *directory* (top level only)."""
    root, _subdirs, entries = next(os.walk(directory))
    parent_name = os.path.split(os.path.dirname(root))[-1]
    print(f'Number of files found in "{parent_name}" : ', len(entries))
# In[54]:
#Traversing the folders
#traverse(sample_images)
traverse(test_images)
# In[55]:
ex_txt = [] #list to store the extracted text
txt4bow = [] #list to use for bag of words
#Function to Extract Text
def TxtExtract(directory):
    """OCR every file under *directory* and collect the extracted text.

    Appends [filename, filepath, text] rows to the module-level ``ex_txt``
    list and the raw text to ``txt4bow``; images with whitespace-only OCR
    output are reported and skipped.
    """
    for subdir, dirs, files in os.walk(directory):
        for file in files:
            filepath = subdir + os.sep + file
            img_cv = cv2.imread(filepath)
            img_rgb = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)
            text = pytesseract.image_to_string(img_rgb)
            # Collapse runs of blank lines.  The previous pattern r'\n{2, 10}'
            # matched the literal text "{2, 10}" because regex quantifiers
            # must not contain spaces.
            x = re.sub(r'\n{2,10}', '\n', text)
            if text.isspace():
                print(file)
                print("image does not have text")
            else:
                print(file)
                ex_txt.append([file, filepath, x.rstrip("\n")])
                txt4bow.append(text)
                print(x.rstrip("\n"))
    # NOTE(review): 'subdir' is unbound here if the walk yielded nothing.
    fol_nm = os.path.split(os.path.dirname(subdir))[-1]
    print(f"Text Extracted from the files in '{fol_nm}' folder & saved to list..")
# In[56]:
#Extracting Text from JPG files in Sample Image Folder
#TxtExtract(sample_images)
#Extracting Text from JPG files in Dataset Folder
TxtExtract(test_images)
# In[57]:
with open('OCR.csv', 'w', newline='', encoding='utf-8') as f:
header = ['FileName', 'Filepath', 'Text']
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(ex_txt)
# In[62]:
#BOW
# Build a bag-of-words representation from the OCR output in txt4bow.
filtered_txt = []
for n in range(len(txt4bow)):
    #remove stopwords
    filtered_sentence = remove_stopwords(txt4bow[n])
    filtered_txt.extend([filtered_sentence])
# using tokenizer
# Fit a Keras tokenizer over all documents to build the vocabulary.
model = Tokenizer()
model.fit_on_texts(filtered_txt)
keys = model.word_index.keys()
#print keys
print(f'keys: {list(keys)}\n')
#create bag of words representation
# One row per document, one column per vocabulary word, values = counts.
rep = model.texts_to_matrix(filtered_txt, mode='count')
print(rep)
# In[63]:
with open('BOW.csv', 'w', newline='', encoding='utf-8') as f:
header = [keys]
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(rep)
# In[85]:
#Free up memory
gc.collect()
# In[ ]:
| 170 | 0 | 22 |
8755111f7c57716c76a8394711e2608cb621490b | 4,225 | py | Python | tests/util.py | henrikhorluck/black | 5379d4f3f460ec9b7063dd1cc10f437b0edf9ae3 | [
"MIT"
] | 3 | 2021-05-26T15:54:51.000Z | 2021-05-28T16:44:06.000Z | tests/util.py | marnixah/black-but-usable | 83b83d3066d1d857983bfa1a666a409e7255d79d | [
"MIT"
] | 16 | 2021-12-22T19:53:57.000Z | 2022-03-28T20:13:17.000Z | tests/util.py | marnixah/black-but-usable | 83b83d3066d1d857983bfa1a666a409e7255d79d | [
"MIT"
] | 1 | 2021-06-02T17:46:23.000Z | 2021-06-02T17:46:23.000Z | import os
import sys
import unittest
from contextlib import contextmanager
from functools import partial
from pathlib import Path
from typing import Any, Iterator, List, Optional, Tuple
import black
from black.debug import DebugVisitor
from black.mode import TargetVersion
from black.output import diff, err, out
THIS_DIR = Path(__file__).parent
DATA_DIR = THIS_DIR / "data"
PROJECT_ROOT = THIS_DIR.parent
EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)"
DETERMINISTIC_HEADER = "[Deterministic header]"
PY36_VERSIONS = {
TargetVersion.PY36,
TargetVersion.PY37,
TargetVersion.PY38,
TargetVersion.PY39,
}
DEFAULT_MODE = black.Mode()
ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True)
fs = partial(black.format_str, mode=DEFAULT_MODE)
def assert_format(
    source: str,
    expected: str,
    mode: black.Mode = DEFAULT_MODE,
    *,
    fast: bool = False,
    minimum_version: Optional[Tuple[int, int]] = None,
) -> None:
    """Convenience function to check that Black formats as expected.

    You can pass @minimum_version if you're passing code with newer syntax to guard
    safety guards so they don't just crash with a SyntaxError. Please note this is
    separate from TargetVerson Mode configuration.
    """
    actual = black.format_str(source, mode=mode)
    _assert_format_equal(expected, actual)
    # It's not useful to run safety checks if we're expecting no changes anyway. The
    # assertion right above will raise if reality does actually make changes. This just
    # avoids wasted CPU cycles.
    if not fast and source != expected:
        # Unfortunately the AST equivalence check relies on the built-in ast module
        # being able to parse the code being formatted. This doesn't always work out
        # when checking modern code on older versions.
        if minimum_version is None or sys.version_info >= minimum_version:
            black.assert_equivalent(source, actual)
        # Idempotence (formatting twice yields the same output) is checked
        # whenever anything changed, regardless of minimum_version.
        black.assert_stable(source, actual, mode=mode)
def read_data(name: str, data: bool = True) -> Tuple[str, str]:
    """read_data('test_name') -> 'input', 'output'"""
    file_name = name if name.endswith((".py", ".pyi", ".out", ".diff")) else f"{name}.py"
    base = DATA_DIR if data else PROJECT_ROOT
    return read_data_from_file(base / file_name)
@contextmanager
def change_directory(path: Path) -> Iterator[None]:
    """Temporarily make *path* the working directory, restoring the old one on exit."""
    original_cwd = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        # Always return to where we started, even if the body raised.
        os.chdir(original_cwd)
| 32.751938 | 87 | 0.664142 | import os
import sys
import unittest
from contextlib import contextmanager
from functools import partial
from pathlib import Path
from typing import Any, Iterator, List, Optional, Tuple
import black
from black.debug import DebugVisitor
from black.mode import TargetVersion
from black.output import diff, err, out
THIS_DIR = Path(__file__).parent
DATA_DIR = THIS_DIR / "data"
PROJECT_ROOT = THIS_DIR.parent
EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)"
DETERMINISTIC_HEADER = "[Deterministic header]"
PY36_VERSIONS = {
TargetVersion.PY36,
TargetVersion.PY37,
TargetVersion.PY38,
TargetVersion.PY39,
}
DEFAULT_MODE = black.Mode()
ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True)
fs = partial(black.format_str, mode=DEFAULT_MODE)
def _assert_format_equal(expected: str, actual: str) -> None:
    # Debugging aid: when the formatted output differs, dump both lib2to3 parse
    # trees (unless SKIP_AST_PRINT is set) and a unified diff before asserting.
    if actual != expected and not os.environ.get("SKIP_AST_PRINT"):
        bdv: DebugVisitor[Any]
        out("Expected tree:", fg="green")
        try:
            exp_node = black.lib2to3_parse(expected)
            bdv = DebugVisitor()
            list(bdv.visit(exp_node))
        except Exception as ve:
            # Tree printing is best-effort; parse failures are reported, not raised.
            err(str(ve))
        out("Actual tree:", fg="red")
        try:
            exp_node = black.lib2to3_parse(actual)
            bdv = DebugVisitor()
            list(bdv.visit(exp_node))
        except Exception as ve:
            err(str(ve))
    if actual != expected:
        out(diff(expected, actual, "expected", "actual"))
    assert actual == expected
def assert_format(
    source: str,
    expected: str,
    mode: black.Mode = DEFAULT_MODE,
    *,
    fast: bool = False,
    minimum_version: Optional[Tuple[int, int]] = None,
) -> None:
    """Check that Black formats *source* into exactly *expected*.

    Pass @minimum_version when the source uses syntax newer than some of the
    interpreters this runs on, so the AST-based safety check is only attempted
    where the built-in parser can handle it. This is independent of the
    TargetVersion Mode configuration.
    """
    actual = black.format_str(source, mode=mode)
    _assert_format_equal(expected, actual)
    if fast or source == expected:
        # No change expected (or the caller opted out), so the costly safety
        # checks would be wasted CPU cycles.
        return
    if minimum_version is None or sys.version_info >= minimum_version:
        # The equivalence check needs the stdlib ast parser to understand the code.
        black.assert_equivalent(source, actual)
    black.assert_stable(source, actual, mode=mode)
def dump_to_stderr(*output: str) -> str:
    """Join the given chunks with newlines, framed by a leading and trailing newline."""
    body = "\n".join(output)
    return f"\n{body}\n"
class BlackBaseTestCase(unittest.TestCase):
    """Base class for unittest-style tests, exposing the shared format assertion."""

    def assertFormatEqual(self, expected: str, actual: str) -> None:
        # Delegate to the module-level helper so unittest and plain-function
        # tests share the same tree-dumping failure output.
        _assert_format_equal(expected, actual)
def read_data(name: str, data: bool = True) -> Tuple[str, str]:
    """read_data('test_name') -> 'input', 'output'"""
    # Bare names default to .py; files live under DATA_DIR unless data=False,
    # in which case the path is taken relative to the project root.
    if not name.endswith((".py", ".pyi", ".out", ".diff")):
        name += ".py"
    base_dir = DATA_DIR if data else PROJECT_ROOT
    return read_data_from_file(base_dir / name)
def read_data_from_file(file_name: Path) -> Tuple[str, str]:
    """Split a test-data file into (input, output) halves at the '# output' marker."""
    source_lines: List[str] = []
    expected_lines: List[str] = []
    target = source_lines
    with open(file_name, "r", encoding="utf8") as handle:
        for raw_line in handle.readlines():
            cleaned = raw_line.replace(EMPTY_LINE, "")
            if cleaned.rstrip() == "# output":
                # Everything after the marker is the expected output.
                target = expected_lines
                continue
            target.append(cleaned)
    if source_lines and not expected_lines:
        # If there's no output marker, treat the entire file as already pre-formatted.
        expected_lines = source_lines[:]
    return "".join(source_lines).strip() + "\n", "".join(expected_lines).strip() + "\n"
@contextmanager
def change_directory(path: Path) -> Iterator[None]:
    """Context manager to temporarily chdir to a different directory."""
    previous_dir = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        # Restore the original working directory even if the body raised.
        os.chdir(previous_dir)
4b0ed1b10ed8b98c9effeed92290b9f07d90a9e0 | 1,966 | py | Python | tests/conftest.py | kateya/clade | f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3 | [
"Apache-2.0"
] | 11 | 2018-10-15T08:46:00.000Z | 2022-02-14T14:03:15.000Z | tests/conftest.py | kateya/clade | f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3 | [
"Apache-2.0"
] | 136 | 2018-08-07T11:11:29.000Z | 2022-03-31T19:02:21.000Z | tests/conftest.py | kateya/clade | f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3 | [
"Apache-2.0"
] | 6 | 2018-11-09T12:52:39.000Z | 2022-02-19T20:34:25.000Z | # Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import shutil
import tempfile
from clade import Clade
from clade.intercept import intercept
from tests.test_intercept import test_project_make, test_project
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
| 31.206349 | 89 | 0.735504 | # Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import shutil
import tempfile
from clade import Clade
from clade.intercept import intercept
from tests.test_intercept import test_project_make, test_project
@pytest.fixture(scope="session")
def cmds_file():
    """Session fixture: intercept a build of the test project, yield the commands file path."""
    # Disable multiprocessing
    os.environ["CLADE_DEBUG"] = "1"
    with tempfile.NamedTemporaryFile() as fh:
        intercept(command=test_project_make, output=fh.name, use_wrappers=True)
        yield fh.name
@pytest.fixture(scope="session")
def envs_file():
    """Session fixture: intercept with environment capture, yield the envs.txt path."""
    # Disable multiprocessing
    os.environ["CLADE_DEBUG"] = "1"
    c = Clade(work_dir=test_project + '/clade')
    c.intercept(command=test_project_make, use_wrappers=True, intercept_envs=True)
    yield os.path.join(c.work_dir, "envs.txt")
@pytest.fixture(scope="session")
def clade_api(tmpdir_factory):
    """Session fixture: a Clade instance with the test project intercepted and parsed."""
    tmpdir = tmpdir_factory.mktemp("Clade")
    c = Clade(tmpdir)
    c.intercept(command=test_project_make, use_wrappers=True, intercept_envs=True)
    # Run the extensions the test-suite relies on up front.
    c.parse_list(["CrossRef", "Variables", "Macros", "Typedefs", "CDB"])
    yield c
def pytest_collection_modifyitems(config, items):
    """Skip every test marked 'cif' when the cif executable is not on PATH."""
    cif_missing = not shutil.which("cif")
    skip_marker = pytest.mark.skipif(cif_missing, reason="cif is not installed")
    for test_item in items:
        if 'cif' in test_item.keywords:
            test_item.add_marker(skip_marker)
| 910 | 0 | 89 |
2cb891783c09b8adbfa6b5c9d597b2c2d0c3be49 | 7,602 | py | Python | tf/008_vgg19.py | deep-learning/facenet | e74cf7c2a29477ed76cd34e243f993090c6f6987 | [
"MIT"
] | null | null | null | tf/008_vgg19.py | deep-learning/facenet | e74cf7c2a29477ed76cd34e243f993090c6f6987 | [
"MIT"
] | null | null | null | tf/008_vgg19.py | deep-learning/facenet | e74cf7c2a29477ed76cd34e243f993090c6f6987 | [
"MIT"
] | 1 | 2021-09-28T09:20:31.000Z | 2021-09-28T09:20:31.000Z | import numpy as np
import tensorflow as tf
_VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg19:
"""
The network configuration:
- RGB 224x224x3
- conv3-64
- conv3-64
- maxpool
- (112x112x128)
- conv3-128
- conv3-128
- maxpool
- (56x56x256)
- conv3-256
- conv3-256
- conv3-256
- conv3-256 (vs. vgg16)
- maxpool
- (28x28x512)
- conv3-512
- conv3-512
- conv3-512
- conv3-512 (vs. vgg16)
- maxpool
- (14x14x512)
- conv3-512
- conv3-512
- conv3-512
- conv3-512 (vs.vgg16)
- maxpool
- (7x7x512x4096)
- fc-4096
- (4096x4096)
- fc-4096
- (4096x1000)
- fc-1000
- softmax
"""
WIDTH = 224
HEIGHT = 224
CHANNELS = 3
LABELS = 1000
model = {}
model_save_path = None
model_save_iter_freq = 0
learning_rate = 0.05
_inputRGB = None
_inputBGR = None
_inputNormalizedBGR = None
_conv1_1 = None
_conv1_2 = None
_pool = None
_conv2_1 = None
_conv2_2 = None
_pool2 = None
_conv3_1 = None
_conv3_2 = None
_conv3_3 = None
_conv3_4 = None
_pool3 = None
_conv4_1 = None
_conv4_2 = None
_conv4_3 = None
_conv4_4 = None
_pool4 = None
_conv5_1 = None
_conv5_2 = None
_conv5_3 = None
_conv5_4 = None
_pool5 = None
_fc6 = None
_relu6 = None
_fc7 = None
_relu7 = None
_fc8 = None
_preds = None
# in [? 1000] shape
_loss = None
_optimizer = None
_train_labels = None
@property
def inputRGB(self):
"""of shape [?, 224, 224, 3] in RGB order"""
return self._inputRGB
@property
@property
@property
| 27.846154 | 100 | 0.489608 | import numpy as np
import tensorflow as tf
def conv_layer(x, kHeight, kWidth, strideX, strideY, feature_num, name, padding='SAME'):
    """2-D convolution + bias + ReLU, with variables created under scope *name*."""
    # assuming NHWC data format
    channel_num = int(x.get_shape()[-1])
    with tf.variable_scope(name) as scope:
        w = tf.get_variable('w', shape=[kHeight, kWidth, channel_num, feature_num])
        b = tf.get_variable('b', shape=[feature_num])
        feature_map = tf.nn.conv2d(x,
                                   w,
                                   strides=[1, strideX, strideY, 1],
                                   padding=padding)
        out = tf.nn.bias_add(feature_map, b)
        # The reshape keeps the conv output's shape; it is used here to attach
        # scope.name to the tensor before applying the ReLU.
        return tf.nn.relu(tf.reshape(out, feature_map.get_shape().as_list(), name=scope.name))
def max_pooling_layer(x, kHeight, kWidth, stride, name, padding):
    """Max-pool *x* with a kHeight x kWidth window; the same stride on H and W."""
    return tf.nn.max_pool(x,
                          ksize=[1, kHeight, kWidth, 1],  # [batch, height, width, channels]
                          strides=[1, stride, stride, 1],  # no pooling on batch & channel dimension
                          padding=padding,
                          name=name)
def dropout(x, keep_prop, name=None):
    """Thin wrapper over tf.nn.dropout; *keep_prop* is the keep probability."""
    return tf.nn.dropout(x, keep_prob=keep_prop, name=name)
def fc_layer(x, inputD, outputD, name, relu=True):
    """Fully-connected layer (x @ w + b), optionally ReLU-activated, under scope *name*."""
    with tf.variable_scope(name) as scope:
        w = tf.get_variable('w', shape=[inputD, outputD], dtype=tf.float32)
        b = tf.get_variable('b', [outputD], dtype=tf.float32)
        out = tf.nn.xw_plus_b(x, w, b, name=scope.name)
        if relu:
            return tf.nn.relu(out)
        else:
            return out
_VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg19:
    """
    The network configuration:
        - RGB 224x224x3
        - conv3-64
        - conv3-64
        - maxpool
        - (112x112x128)
        - conv3-128
        - conv3-128
        - maxpool
        - (56x56x256)
        - conv3-256
        - conv3-256
        - conv3-256
        - conv3-256 (vs. vgg16)
        - maxpool
        - (28x28x512)
        - conv3-512
        - conv3-512
        - conv3-512
        - conv3-512 (vs. vgg16)
        - maxpool
        - (14x14x512)
        - conv3-512
        - conv3-512
        - conv3-512
        - conv3-512 (vs.vgg16)
        - maxpool
        - (7x7x512x4096)
        - fc-4096
        - (4096x4096)
        - fc-4096
        - (4096x1000)
        - fc-1000
        - softmax
    """
    # Input geometry and classifier output size.
    WIDTH = 224
    HEIGHT = 224
    CHANNELS = 3
    LABELS = 1000
    # Weight store mapping layer name -> [weights, biases]; see __init__empty_model.
    model = {}
    model_save_path = None
    model_save_iter_freq = 0
    learning_rate = 0.05
    # Graph tensors, filled in by __init__ / the (incomplete) graph wiring below.
    _inputRGB = None
    _inputBGR = None
    _inputNormalizedBGR = None
    _conv1_1 = None
    _conv1_2 = None
    _pool = None
    _conv2_1 = None
    _conv2_2 = None
    _pool2 = None
    _conv3_1 = None
    _conv3_2 = None
    _conv3_3 = None
    _conv3_4 = None
    _pool3 = None
    _conv4_1 = None
    _conv4_2 = None
    _conv4_3 = None
    _conv4_4 = None
    _pool4 = None
    _conv5_1 = None
    _conv5_2 = None
    _conv5_3 = None
    _conv5_4 = None
    _pool5 = None
    _fc6 = None
    _relu6 = None
    _fc7 = None
    _relu7 = None
    _fc8 = None
    _preds = None
    # in [? 1000] shape
    _loss = None
    _optimizer = None
    _train_labels = None

    def __init__(self,
                 model=None,
                 model_save_path=None,
                 model_save_iter_freq=0):
        # NOTE(review): __init__empty_model assigns self.model but returns None,
        # so when no model is passed self.model ends up None here -- confirm intent.
        self.model = self.__init__empty_model() if not model else model
        self.model_save_path = model_save_path
        self.model_save_iter_freq = model_save_iter_freq
        # define train labels
        self.__train_labels = tf.placeholder(tf.float32,
                                             shape=[None, Vgg19.LABELS])
        self._inputRGB = tf.placeholder(tf.float32,
                                        shape=[None, Vgg19.HEIGHT, Vgg19.WIDTH, Vgg19.CHANNELS])
        # Reorder RGB input to BGR, and subtract the per-channel means in
        # _VGG_MEAN (listed in B, G, R order) for the normalized variant.
        r, g, b = tf.split(self._inputRGB, 3, 3)
        self._inputBGR = tf.concat([b, g, r], 3)
        self._inputNormalizedBGR = tf.concat([
            b - _VGG_MEAN[0],
            g - _VGG_MEAN[1],
            r - _VGG_MEAN[2]
        ], 3)
        # setup vgg-net graph
        # NOTE(review): this statement looks truncated -- `self.conv` is not
        # defined anywhere in this file, so the graph wiring is incomplete.
        self._conv1_1 = self.conv

    @property
    def inputRGB(self):
        """of shape [?, 224, 224, 3] in RGB order"""
        return self._inputRGB

    @property
    def inputBGR(self):
        # Same tensor as inputRGB but with channels reordered to BGR.
        return self._inputBGR

    @property
    def preds(self):
        # Predictions tensor; never assigned in this file (stays None).
        return self._preds

    @property
    def train_labels(self):
        # NOTE(review): returns the class attribute _train_labels (always None);
        # __init__ stores the placeholder in the name-mangled __train_labels.
        return self._train_labels

    def _avg_pool(self, value, name):
        """2x2 average pooling with stride 2."""
        return tf.nn.avg_pool(value,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name=name)

    def _max_pool(self, value, name):
        """2x2 max pooling with stride 2."""
        return tf.nn.max_pool(value,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name=name)

    def _conv_layer(self, value, name):
        # NOTE(review): body appears truncated (dim is computed-to-be-used but
        # nothing follows); also shadowed by the later _conv_layer definition.
        with tf.variable_scope(name):
            shape = value.get_shape().as_list()
            dim = 1

    def _get_conv_filter(self, name):
        # Wrap the stored kernel for layer *name* in a tf.Variable.
        return tf.Variable(self.model[name][0], name='filter')

    def _get_biases(self, name):
        # Wrap the stored biases for layer *name* in a tf.Variable.
        return tf.Variable(self.model[name][1], name='biases')

    def _get_fc_weights(self, name):
        # Wrap the stored fully-connected weights for layer *name* in a tf.Variable.
        return tf.Variable(self.model[name][0], name='weights')

    def __init__empty_model(self):
        # Populate self.model with uninitialized arrays shaped like VGG-19's
        # parameters. NOTE(review): implicitly returns None -- see __init__.
        self.model = {
            # [wights, biases]
            'conv1_1': [np.ndarray([3, 3, 3, 64]),
                        np.ndarray([64])],
            'conv1_2': [np.ndarray([3, 3, 64, 64]),
                        np.ndarray([64])],
            'conv2_1': [np.ndarray([3, 3, 64, 128]),
                        np.ndarray([128])],
            'conv2_2': [np.ndarray([3, 3, 128, 128]),
                        np.ndarray([128])],
            'conv3_1': [np.ndarray([3, 3, 128, 256]),
                        np.ndarray([256])],
            'conv3_2': [np.ndarray([3, 3, 256, 256]),
                        np.ndarray([256])],
            'conv3_3': [np.ndarray([3, 3, 256, 256]),
                        np.ndarray([256])],
            'conv3_4': [np.ndarray([3, 3, 256, 256]),
                        np.ndarray([256])],
            'conv4_1': [np.ndarray([3, 3, 256, 512]),
                        np.ndarray([512])],
            'conv4_2': [np.ndarray([3, 3, 512, 512]),
                        np.ndarray([512])],
            'conv4_3': [np.ndarray([3, 3, 512, 512]),
                        np.ndarray([512])],
            'conv4_4': [np.ndarray([3, 3, 512, 512]),
                        np.ndarray([512])],
            'conv5_1': [np.ndarray([3, 3, 512, 512]),
                        np.ndarray([512])],
            'conv5_2': [np.ndarray([3, 3, 512, 512]),
                        np.ndarray([512])],
            'conv5_3': [np.ndarray([3, 3, 512, 512]),
                        np.ndarray([512])],
            'conv5_4': [np.ndarray([3, 3, 512, 512]),
                        np.ndarray([512])],
            'fc6': [np.ndarray([7 * 7 * 512 * 4096, 4096]),
                    np.ndarray([4096])],
            'fc7': [np.ndarray([4096, 4096]),
                    np.ndarray([4096])],
            'fc8': [np.ndarray([4096, 1000]),
                    np.ndarray([1000])]
        }

    # NOTE(review): duplicate definition -- replaces the identical _max_pool above.
    def _max_pool(self, value, name):
        return tf.nn.max_pool(value,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name=name)

    # NOTE(review): duplicate definition -- this stub overrides the earlier
    # (truncated) _conv_layer, so _conv_layer is effectively a no-op.
    def _conv_layer(self, value, name):
        pass
| 5,425 | 0 | 440 |
ed710979bbf692e432aa0514fab5daaf13dc5dca | 1,943 | py | Python | dd_auth/urls.py | datadealer/dd_auth | 5a183d64035059e88dd83c1ef5dfaf083faa362c | [
"Artistic-2.0"
] | null | null | null | dd_auth/urls.py | datadealer/dd_auth | 5a183d64035059e88dd83c1ef5dfaf083faa362c | [
"Artistic-2.0"
] | null | null | null | dd_auth/urls.py | datadealer/dd_auth | 5a183d64035059e88dd83c1ef5dfaf083faa362c | [
"Artistic-2.0"
] | 1 | 2021-06-06T22:29:12.000Z | 2021-06-06T22:29:12.000Z | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import TemplateView
from jsonrpc import jsonrpc_site
import dd_auth.views
# Uncomment the next line for overwriting templates
#from django.views.generic.simple import direct_to_template
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing table. String view references ("dd_auth.views.sign_in") are the
# pre-Django-1.10 style that this project's Django version still accepts.
urlpatterns = patterns('',
    #Admin Urls
    #url(r'^dd_auth_admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^dd_auth_admin/dd_invitation/token/make$', 'dd_invitation.admin_views.make_tokens'),
    url(r'^dd_auth_admin/', include(admin.site.urls)),
    url(r'^accounts/i18n/', include('django.conf.urls.i18n'), name="set_language"),
    url(r'^accounts/lang/$', TemplateView.as_view(template_name='account/setlang.html')),
    url(r'^accounts/remote/access_denied/$', TemplateView.as_view(template_name='account/access_denied.html')),
    # JSON-RPC URLs
    # url(r'^authapi/browse/', 'jsonrpc.views.browse', name="jsonrpc_browser"),
    url(r'^authapi/$', jsonrpc_site.dispatch, name="jsonrpc_mountpoint"),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^accounts/remote/sign_in/$', 'dd_auth.views.sign_in', {'template_name': 'account/sign_in.html'}, name='remote_sign_in'),
    url(r'^accounts/remote/sign_up/$', 'dd_auth.views.sign_up', {'template_name': 'account/sign_up.html'}, name='remote_sign_up'),
    url(r'^accounts/remote/reset/$', 'dd_auth.views.reset_password', {'template_name': 'account/reset.html'}, name='remote_reset_password'),
    url(r'^accounts/remote/sign_out/$', 'dd_auth.views.sign_out', name='remote_sign_out'),
    #url(r'^accounts/remote/set_email/$', 'dd_auth.views.set_email', name='remote_set_email')
)
# Let Django itself serve static files only when explicitly enabled (dev setups).
if getattr(settings, 'DD_SERVE_STATIC', False):
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    urlpatterns += staticfiles_urlpatterns()
| 48.575 | 140 | 0.741637 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import TemplateView
from jsonrpc import jsonrpc_site
import dd_auth.views
# Uncomment the next line for overwriting templates
#from django.views.generic.simple import direct_to_template
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
#Admin Urls
#url(r'^dd_auth_admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^dd_auth_admin/dd_invitation/token/make$', 'dd_invitation.admin_views.make_tokens'),
url(r'^dd_auth_admin/', include(admin.site.urls)),
url(r'^accounts/i18n/', include('django.conf.urls.i18n'), name="set_language"),
url(r'^accounts/lang/$', TemplateView.as_view(template_name='account/setlang.html')),
url(r'^accounts/remote/access_denied/$', TemplateView.as_view(template_name='account/access_denied.html')),
# JSON-RPC URLs
# url(r'^authapi/browse/', 'jsonrpc.views.browse', name="jsonrpc_browser"),
url(r'^authapi/$', jsonrpc_site.dispatch, name="jsonrpc_mountpoint"),
url(r'^accounts/', include('allauth.urls')),
url(r'^accounts/remote/sign_in/$', 'dd_auth.views.sign_in', {'template_name': 'account/sign_in.html'}, name='remote_sign_in'),
url(r'^accounts/remote/sign_up/$', 'dd_auth.views.sign_up', {'template_name': 'account/sign_up.html'}, name='remote_sign_up'),
url(r'^accounts/remote/reset/$', 'dd_auth.views.reset_password', {'template_name': 'account/reset.html'}, name='remote_reset_password'),
url(r'^accounts/remote/sign_out/$', 'dd_auth.views.sign_out', name='remote_sign_out'),
#url(r'^accounts/remote/set_email/$', 'dd_auth.views.set_email', name='remote_set_email')
)
if getattr(settings, 'DD_SERVE_STATIC', False):
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
| 0 | 0 | 0 |
26074e8637e2ca21f37e6ea6af1267eb2e7449da | 997 | py | Python | GestiRED/migrations/0005_auto_20181004_1959.py | osabogal10/GestiREDBackend | 99aa3b01bd67910cc0f96751c88d0f4e83763392 | [
"MIT"
] | null | null | null | GestiRED/migrations/0005_auto_20181004_1959.py | osabogal10/GestiREDBackend | 99aa3b01bd67910cc0f96751c88d0f4e83763392 | [
"MIT"
] | null | null | null | GestiRED/migrations/0005_auto_20181004_1959.py | osabogal10/GestiREDBackend | 99aa3b01bd67910cc0f96751c88d0f4e83763392 | [
"MIT"
] | 1 | 2018-11-19T00:08:05.000Z | 2018-11-19T00:08:05.000Z | # Generated by Django 2.1.1 on 2018-10-05 00:59
from django.db import migrations, models
import django.utils.timezone
| 28.485714 | 74 | 0.602808 | # Generated by Django 2.1.1 on 2018-10-05 00:59
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('GestiRED', '0004_auto_20181004_1956'),
]
operations = [
migrations.AlterField(
model_name='fase',
name='fechaInicial',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='project',
name='fechaRegistro',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='resource',
name='fechaRegistro',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='user',
name='fechaRegistro',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| 0 | 854 | 23 |
90f46465268e644fa6964243fd0a1d033b4b5511 | 8,757 | py | Python | worker/resources/Steam.py | fga-eps-mds/2018.2-GamesBI_Importadores | 72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae | [
"MIT"
] | 1 | 2018-10-25T20:39:16.000Z | 2018-10-25T20:39:16.000Z | worker/resources/Steam.py | fga-eps-mds/2018.2-GamesBI_Importadores | 72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae | [
"MIT"
] | null | null | null | worker/resources/Steam.py | fga-eps-mds/2018.2-GamesBI_Importadores | 72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae | [
"MIT"
] | 2 | 2018-11-10T16:08:46.000Z | 2018-11-26T14:06:12.000Z | import os
import requests
import colorific
from PIL import Image
from . import Validador
STEAM_OWNERS_MIN = int(os.environ['STEAM_OWNERS_MIN'])
STEAM_GAMES_LIMIT = int(os.environ['STEAM_GAMES_LIMIT'])
| 35.310484 | 79 | 0.534087 | import os
import requests
import colorific
from PIL import Image
from . import Validador
STEAM_OWNERS_MIN = int(os.environ['STEAM_OWNERS_MIN'])
STEAM_GAMES_LIMIT = int(os.environ['STEAM_GAMES_LIMIT'])
class Steam(object):
    """Importer that pulls game data from the SteamSpy catalogue and the Steam store API."""

    def get_steam_data(self):
        """Fetch the full SteamSpy catalogue; return the filtered/enriched game list."""
        url = 'http://steamspy.com/api.php?request=all'
        header = {'Accept': 'application/json'}
        request = requests.get(url, headers=header)
        status = request.status_code
        if status == 200:
            data = request.json()
            return self.filter_steam_games(data)
        else:
            # Network/API failure: degrade to an empty result.
            return []

    def filter_steam_games(self, games_data):
        """Keep popular games (owners above threshold), up to STEAM_GAMES_LIMIT,
        and enrich each unknown game with Steam store details."""
        count = 0
        filtered_data = []
        for game in games_data.values():
            if self.valid_game(game):
                if count >= STEAM_GAMES_LIMIT:
                    break
                keys = ['appid', 'name', 'positive', 'negative', 'owners',
                        'average_forever', 'average_2weeks', 'price']
                filtered_data.append(
                    {key: game[key] if key in game else None for key in keys})
                validador = Validador.Validador()
                # Only hit the store API for games we have not imported yet.
                if not validador.game_exists(filtered_data[count]['name']):
                    additional_information = self.get_infos_game_steam(
                        filtered_data[count]['appid']
                    )
                else:
                    additional_information = self.get_empty_dict_data()
                filtered_data[count].update(additional_information)
                count += 1
        return filtered_data

    def get_infos_game_steam(self, game_id):
        """Fetch store-page details (images, genres, languages, ...) for one appid."""
        url = 'https://store.steampowered.com/api/appdetails?appids={}'.format(
            game_id)
        header = {'Accept': 'application/json'}
        request = requests.get(url, headers=header)
        status = request.status_code
        if status == 200:
            data = request.json()
            return self.filter_infos_game_steam(data)
        else:
            return self.get_empty_dict_data()

    def filter_infos_game_steam(self, game_data):
        """Extract the fields we keep from the store's appdetails payload."""
        for game in game_data.values():
            if 'data' in game:
                data = game["data"]
                keys = ['header_image', 'release_date']
                dict_simple_fields = {
                    key: data[key] if key in data else [] for key in keys
                }
                dict_simple_fields['release_date'] = self.get_release_date(
                    dict_simple_fields['release_date']
                )
                keys_array = ['supported_languages', 'genres', 'screenshots']
                dict_array_fields = {
                    key: data[key] if key in data else [] for key in keys_array
                }
                # NOTE(review): when 'supported_languages' is absent the default
                # is a list, but get_languages expects a string -- confirm the
                # API always provides it.
                dict_array_fields['supported_languages'] = self.get_languages(
                    dict_array_fields['supported_languages']
                )
                dict_array_fields['genres'] = self.get_genres(
                    dict_array_fields['genres']
                )
                dict_array_fields['screenshots'] = self.get_screenshots(
                    dict_array_fields['screenshots']
                )
                pallete_game = self.get_pallete_game(
                    dict_array_fields['screenshots']
                )
                keys_pallets = ['r', 'g', 'b']
                dict_pallet_fields = {
                    key: pallete_game[key] if key in pallete_game else
                    None for key in keys_pallets
                }
                dict_result = {}
                dict_result.update(dict_simple_fields)
                dict_result.update(dict_array_fields)
                dict_result.update(dict_pallet_fields)
                return dict_result
        return self.get_empty_dict_data()

    def get_empty_dict_data(self):
        """Placeholder details used when no store data is available/needed."""
        return {
            'r': None,
            'g': None,
            'b': None,
            'header_image': None,
            'supported_languages': [],
            'genres': [],
            'screenshots': [],
            'release_date': None
        }

    def get_release_date(self, dict_date):
        """Return the 'date' string from the store's release_date dict, or None."""
        if 'date' in dict_date:
            return dict_date['date']
        else:
            return None

    def get_languages(self, str_languages):
        """Parse the comma-separated language string, stripping <strong> markup."""
        languages = []
        array_languages = str_languages.split(', ')
        for language in array_languages:
            strong = True if '<strong>' in language else False
            if strong:
                # Keep only the text before the markup tag.
                correct_format_language = language.split('<')[0]
                languages.append(correct_format_language)
            else:
                languages.append(language)
        return languages

    def get_genres(self, genres):
        """Flatten the store's genre dicts into a list of description strings."""
        array_genres = []
        for genre in genres:
            if 'description' in genre:
                array_genres.append(genre['description'])
        return array_genres

    def get_screenshots(self, screenshots):
        """For each screenshot, keep its thumbnail URL and extracted color palette."""
        list_screenshots = []
        for screenshot in screenshots:
            if 'path_thumbnail' in screenshot:
                url = screenshot['path_thumbnail']
                pallete = self.get_palette(url)
                dictionary_screenshot = {
                    'url': url,
                    'palette': pallete
                }
            else:
                dictionary_screenshot = None
            list_screenshots.append(dictionary_screenshot)
        return list_screenshots

    def get_pallete_game(self, screenshots):
        """Average the palettes of all screenshots into one game-level palette."""
        palletes = []
        for screenshot in screenshots:
            palletes.append(screenshot['palette'])
        return self.get_average_pallets(palletes)

    def valid_game(self, game):
        """A game qualifies when its average owner count exceeds STEAM_OWNERS_MIN."""
        if 'owners' in game:
            owners_str = game['owners']
            owners = self.read_owners(owners_str)
            if owners > STEAM_OWNERS_MIN:
                return True
            else:
                return False
        else:
            return False

    def read_owners(self, str_owners):
        """Parse a SteamSpy owners range ("1,000 .. 2,000") into its midpoint."""
        vector_numbers = self.valid_owners(str_owners)
        average = self.calculates_avarege(vector_numbers)
        return average

    def valid_owners(self, str_owners):
        """Split the ' .. '-separated range and strip thousands separators."""
        low_average = str_owners.split(" .. ")[0]
        high_average = str_owners.split(" .. ")[1]
        low_average_valid = ""
        for number in low_average:
            if number != ",":
                low_average_valid += number
        high_average_valid = ""
        for number in high_average:
            if number != ",":
                high_average_valid += number
        low_average_int = int(low_average_valid)
        high_average_int = int(high_average_valid)
        return [low_average_int, high_average_int]

    def calculates_avarege(self, numbers):
        """Arithmetic mean of a non-empty list of numbers."""
        sum = 0
        for number in numbers:
            sum = sum + number
        return sum / len(numbers)

    def get_palette(self, img_url):
        """Download an image and extract its dominant colors with colorific."""
        request = requests.get(img_url, stream=True)
        status = request.status_code
        if status == 200:
            img = Image.open(request.raw)
            palette = colorific.extract_colors(img)
            array_colors = []
            for color in palette.colors:
                hex_value = colorific.rgb_to_hex(color.value)
                dictionary_colors = {
                    'r': color.value[0],
                    'g': color.value[1],
                    'b': color.value[2],
                    'hex': hex_value
                }
                array_colors.append(dictionary_colors)
            # Include the detected background color, when any.
            if palette.bgcolor is not None:
                hex_value = colorific.rgb_to_hex(palette.bgcolor.value)
                dictionary_colors = {
                    'r': palette.bgcolor.value[0],
                    'g': palette.bgcolor.value[1],
                    'b': palette.bgcolor.value[2],
                    'hex': hex_value
                }
                array_colors.append(dictionary_colors)
        else:
            array_colors = []
        return array_colors

    def get_average_pallets(self, array_photos):
        """Average the r/g/b components over every palette entry of every photo.

        Returns {'r': int, 'g': int, 'b': int}, or [] when there are no palettes
        (kept for backward compatibility with existing callers).
        """
        rgb_average = {
            'r': 0,
            'g': 0,
            'b': 0
        }
        qtd_pallets = 0
        for photo in array_photos:
            for palette in photo:
                rgb_average['r'] += palette['r']
                rgb_average['g'] += palette['g']
                rgb_average['b'] += palette['b']
                qtd_pallets += 1
        if qtd_pallets > 0:
            # BUG FIX: the original divided rgb_average['r'] (already replaced
            # by its own average) for all three channels, so 'g' and 'b' were
            # wrong. Each channel must be averaged from its own sum.
            rgb_average['r'] = int(rgb_average['r'] / qtd_pallets)
            rgb_average['g'] = int(rgb_average['g'] / qtd_pallets)
            rgb_average['b'] = int(rgb_average['b'] / qtd_pallets)
            return rgb_average
        else:
            return []
| 8,100 | -1 | 455 |
3c330a8d9cb14a93ab27dbe10b1276b1bf8fd1b4 | 1,319 | py | Python | Main.py | GabrielSturtevant/GameOfLife | 5982248a097c59e61ffbc3745cb47038813cdaba | [
"MIT"
] | null | null | null | Main.py | GabrielSturtevant/GameOfLife | 5982248a097c59e61ffbc3745cb47038813cdaba | [
"MIT"
] | null | null | null | Main.py | GabrielSturtevant/GameOfLife | 5982248a097c59e61ffbc3745cb47038813cdaba | [
"MIT"
] | null | null | null | #!/usr/bin/python
import Board
import os
import time
import sys
from Loader import glider_gun
dimension = 15
if len(sys.argv) > 1:
dimension = int(sys.argv[1])
foo = Board.Board(dimension)
# for key, cell in foo.board.iteritems():
# cell.state = False
# for i in range(len(glider_gun)):
# for j in range(len(glider_gun[i])):
# key = str(i)+','+str(j)
# foo.board[key].state = glider_gun[i][j] == 1
first = []
second = []
turns = 0
firstup = True
while True:
turns += 1
os.system('cls' if os.name == 'nt' else 'clear')
foo.print_printable()
if turns == 1:
first = foo.printable[:]
foo.print_board()
if turns == 2:
second = first[:]
first = foo.printable[:]
foo.print_board()
if turns > 2:
second = first[:]
first = foo.printable[:]
foo.print_board()
if check(foo.printable, second):
os.system('cls' if os.name == 'nt' else 'clear')
print "\nIterations: " + str(turns)
exit()
print "\nIterations: " + str(turns)
foo.take_turn()
time.sleep(.125)
print "\n\n"
| 21.983333 | 60 | 0.54511 | #!/usr/bin/python
import Board
import os
import time
import sys
from Loader import glider_gun
def check(a, b):
    """Return True when the two sequences have equal length and equal
    elements at every position, otherwise False."""
    if len(a) != len(b):
        return False
    return all(a[i] == b[i] for i in range(len(a)))
dimension = 15
if len(sys.argv) > 1:
dimension = int(sys.argv[1])
foo = Board.Board(dimension)
# for key, cell in foo.board.iteritems():
# cell.state = False
# for i in range(len(glider_gun)):
# for j in range(len(glider_gun[i])):
# key = str(i)+','+str(j)
# foo.board[key].state = glider_gun[i][j] == 1
first = []
second = []
turns = 0
firstup = True
while True:
turns += 1
os.system('cls' if os.name == 'nt' else 'clear')
foo.print_printable()
if turns == 1:
first = foo.printable[:]
foo.print_board()
if turns == 2:
second = first[:]
first = foo.printable[:]
foo.print_board()
if turns > 2:
second = first[:]
first = foo.printable[:]
foo.print_board()
if check(foo.printable, second):
os.system('cls' if os.name == 'nt' else 'clear')
print "\nIterations: " + str(turns)
exit()
print "\nIterations: " + str(turns)
foo.take_turn()
time.sleep(.125)
print "\n\n"
| 157 | 0 | 23 |
18510b5614dc987c996b9e593e6d0dcf8a925415 | 342 | py | Python | src/dummy.py | dhruvramani/OffensEval | bb587430546ec494f9ee47cce7e897c2359e0d19 | [
"MIT"
] | 1 | 2020-02-06T07:52:05.000Z | 2020-02-06T07:52:05.000Z | src/dummy.py | dhruvramani/OffensEval | bb587430546ec494f9ee47cce7e897c2359e0d19 | [
"MIT"
] | null | null | null | src/dummy.py | dhruvramani/OffensEval | bb587430546ec494f9ee47cce7e897c2359e0d19 | [
"MIT"
] | 1 | 2020-02-06T07:52:08.000Z | 2020-02-06T07:52:08.000Z | import linecache
path = '/home/nevronas/Projects/Personal-Projects/Dhruv/OffensEval/dataset/train-v1/offenseval-training-v1.tsv'
maxi = 0
with open(path, "r") as f:
for line in f:
contents = line.split("\t")
l = len(contents[1].split(" ")[1:])
print(l)
if(l > maxi):
maxi = l
print("\n",maxi)
| 26.307692 | 111 | 0.593567 | import linecache
path = '/home/nevronas/Projects/Personal-Projects/Dhruv/OffensEval/dataset/train-v1/offenseval-training-v1.tsv'
maxi = 0
with open(path, "r") as f:
for line in f:
contents = line.split("\t")
l = len(contents[1].split(" ")[1:])
print(l)
if(l > maxi):
maxi = l
print("\n",maxi)
| 0 | 0 | 0 |
bdc949d94253ea7d78b86ba3e79803f744687443 | 310 | py | Python | error.py | healeycodes/hoot-language | e9cf0566dfb1d5ded44750a5d5e5e5e8b7d38791 | [
"MIT"
] | 4 | 2021-03-14T23:42:00.000Z | 2021-03-25T08:08:00.000Z | error.py | healeycodes/hoot-language | e9cf0566dfb1d5ded44750a5d5e5e5e8b7d38791 | [
"MIT"
] | null | null | null | error.py | healeycodes/hoot-language | e9cf0566dfb1d5ded44750a5d5e5e5e8b7d38791 | [
"MIT"
] | null | null | null | from tokens import Token
| 18.235294 | 51 | 0.680645 | from tokens import Token
class ParseError(Exception):
    """Exception carrying a human-readable message about a parse failure."""

    def __init__(self, message: str):
        super().__init__(message)
        self.message = message
class RuntimeError(Exception):
    """Error carrying the offending token and a message.

    NOTE(review): this deliberately(?) shadows Python's builtin
    ``RuntimeError`` within this module — confirm that importers expect
    the module-qualified name.
    """

    def __init__(self, token: Token, message: str):
        # Token at which the error occurred; kept for callers that report
        # error locations. Message is the human-readable description.
        self.token = token
        self.message = message
class BreakJump(RuntimeError):
    """Marker subclass of the module's RuntimeError — presumably raised as a
    control-flow signal (e.g. to unwind a loop on ``break``); confirm against
    the interpreter's loop handling."""
    pass
| 127 | 34 | 121 |
0b774c9e5b78ce84a694fd086d1c8bbeb264b317 | 1,684 | py | Python | script/api_tester.py | duguyue100/retina-simulation | c07d978400a926f9791151a23acc11b47de7e9c5 | [
"MIT"
] | 5 | 2018-11-07T11:00:46.000Z | 2020-06-17T13:26:59.000Z | script/api_tester.py | Bia10/retina-simulation | c07d978400a926f9791151a23acc11b47de7e9c5 | [
"MIT"
] | 1 | 2017-02-15T18:06:45.000Z | 2017-02-15T19:10:34.000Z | script/api_tester.py | Bia10/retina-simulation | c07d978400a926f9791151a23acc11b47de7e9c5 | [
"MIT"
] | 4 | 2018-03-07T00:22:31.000Z | 2020-06-17T13:27:04.000Z | """This script disovers and tests OpenCV bioinspired module's API.
Author: Yuhuang Hu
Email : yuhuang.hu@uzh.ch
"""
import cv2
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
from simretina import dataset, gui, retina
option = "test-movie-py"
if option == "test-builtin-image":
# testing for builtin dataset
frame, size = dataset.get_lenna()
print frame.shape
print size
if option == "test-builtin-video":
# testing for builtin video
frames, size_v = dataset.get_taichi()
print len(frames)
print size_v
if option == "test-bgr2rgb-sequence":
frames, size = dataset.get_horse_riding()
new_frames = gui.trans_bgr2rgb_seq(frames)
for frame in new_frames:
cv2.imshow("test", frame)
cv2.waitKey(delay=0)
print len(new_frames)
if option == "test-ratio-keep-resize":
frame, size = dataset.get_lenna()
frame = gui.resize(frame, (400, 300), ratio_keep=True)
print frame.shape
if option == "test-dict-compare":
para_dict_old = {}
para_dict_old["a"] = 1
para_dict_old["b"] = 2
para_dict_new = {}
para_dict_new["a"] = 1
para_dict_new["b"] = 2
print retina.compare_para_dict(para_dict_old, para_dict_new)
if option == "test-setup-function":
eye = retina.init_retina((300, 200))
print type(eye.setupOPLandIPLParvoChannel)
print type(eye.setupIPLMagnoChannel)
print eye.getInputSize()
if option == "test-movie-py":
video = FFMPEG_VideoReader("./simretina/retina-data/HorseRiding.avi")
frame = video.read_frame()
for i in range(video.nframes):
frame = video.read_frame()
cv2.imshow("test", frame)
cv2.waitKey(0)
| 24.405797 | 73 | 0.679929 | """This script disovers and tests OpenCV bioinspired module's API.
Author: Yuhuang Hu
Email : yuhuang.hu@uzh.ch
"""
import cv2
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
from simretina import dataset, gui, retina
option = "test-movie-py"
if option == "test-builtin-image":
# testing for builtin dataset
frame, size = dataset.get_lenna()
print frame.shape
print size
if option == "test-builtin-video":
# testing for builtin video
frames, size_v = dataset.get_taichi()
print len(frames)
print size_v
if option == "test-bgr2rgb-sequence":
frames, size = dataset.get_horse_riding()
new_frames = gui.trans_bgr2rgb_seq(frames)
for frame in new_frames:
cv2.imshow("test", frame)
cv2.waitKey(delay=0)
print len(new_frames)
if option == "test-ratio-keep-resize":
frame, size = dataset.get_lenna()
frame = gui.resize(frame, (400, 300), ratio_keep=True)
print frame.shape
if option == "test-dict-compare":
para_dict_old = {}
para_dict_old["a"] = 1
para_dict_old["b"] = 2
para_dict_new = {}
para_dict_new["a"] = 1
para_dict_new["b"] = 2
print retina.compare_para_dict(para_dict_old, para_dict_new)
if option == "test-setup-function":
eye = retina.init_retina((300, 200))
print type(eye.setupOPLandIPLParvoChannel)
print type(eye.setupIPLMagnoChannel)
print eye.getInputSize()
if option == "test-movie-py":
video = FFMPEG_VideoReader("./simretina/retina-data/HorseRiding.avi")
frame = video.read_frame()
for i in range(video.nframes):
frame = video.read_frame()
cv2.imshow("test", frame)
cv2.waitKey(0)
| 0 | 0 | 0 |
205f601218ac9a0759a22545df2e85b1a1aed191 | 2,443 | py | Python | nl/__init__.py | philomelus/nl3 | 0b70b018a94d964bd224015ceb23dcc370462de4 | [
"MIT"
] | null | null | null | nl/__init__.py | philomelus/nl3 | 0b70b018a94d964bd224015ceb23dcc370462de4 | [
"MIT"
] | null | null | null | nl/__init__.py | philomelus/nl3 | 0b70b018a94d964bd224015ceb23dcc370462de4 | [
"MIT"
] | null | null | null |
import logging
import os
from logging.handlers import SMTPHandler, RotatingFileHandler
from flask import Flask, g
from flask_security import Security, SQLAlchemyUserDatastore
from flask_sqlalchemy import SQLAlchemy
from turbo_flask import Turbo
from config import Config
__all__ = [
'app',
'db',
'turbo',
]
app = Flask(__name__)
app.config.from_object(Config)
# Setup logging and error reporting
if not app.debug and not app.testing:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'],
subject='Newsledger Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/newsledger.log', maxBytes=10240,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Newsledger startup.')
# Initialize add ons
db = SQLAlchemy(app)
turbo = Turbo(app)
# Set up flask-security
from nl.models.auth import Role, User
app.user_datastore = SQLAlchemyUserDatastore(db, User, Role)
app.security = Security(app, app.user_datastore)
# Register main glue blueprint
from nl import main
app.register_blueprint(main.bp)
# Register administration blueprint
from nl import admin
app.register_blueprint(admin.bp)
# Register customers blueprint
from nl import customers
app.register_blueprint(customers.bp)
# Register routes blueprint
from nl import routes
app.register_blueprint(routes.bp)
# Register stores and racks blueprint
from nl import stores
app.register_blueprint(stores.bp)
from nl import models
from nl.models import auth
from nl.models import config
from nl.models import customers
from nl.models import routes
| 27.449438 | 79 | 0.710192 |
import logging
import os
from logging.handlers import SMTPHandler, RotatingFileHandler
from flask import Flask, g
from flask_security import Security, SQLAlchemyUserDatastore
from flask_sqlalchemy import SQLAlchemy
from turbo_flask import Turbo
from config import Config
__all__ = [
'app',
'db',
'turbo',
]
app = Flask(__name__)
app.config.from_object(Config)
# Setup logging and error reporting
if not app.debug and not app.testing:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'],
subject='Newsledger Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/newsledger.log', maxBytes=10240,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Newsledger startup.')
# Initialize add ons
db = SQLAlchemy(app)
turbo = Turbo(app)
# Set up flask-security
from nl.models.auth import Role, User
app.user_datastore = SQLAlchemyUserDatastore(db, User, Role)
app.security = Security(app, app.user_datastore)
# Register main glue blueprint
from nl import main
app.register_blueprint(main.bp)
# Register administration blueprint
from nl import admin
app.register_blueprint(admin.bp)
# Register customers blueprint
from nl import customers
app.register_blueprint(customers.bp)
# Register routes blueprint
from nl import routes
app.register_blueprint(routes.bp)
# Register stores and racks blueprint
from nl import stores
app.register_blueprint(stores.bp)
from nl import models
from nl.models import auth
from nl.models import config
from nl.models import customers
from nl.models import routes
| 0 | 0 | 0 |
e206df885a3418a36db1953f68b747a3c3a2f557 | 3,397 | py | Python | quicktext/scripts/quicktext.py | aliscott/quicktext | e6bc51ba29da41c37f00fefbe19b9b31036c9000 | [
"BSD-2-Clause"
] | 1 | 2019-05-02T06:21:43.000Z | 2019-05-02T06:21:43.000Z | quicktext/scripts/quicktext.py | aliscott/quicktext | e6bc51ba29da41c37f00fefbe19b9b31036c9000 | [
"BSD-2-Clause"
] | null | null | null | quicktext/scripts/quicktext.py | aliscott/quicktext | e6bc51ba29da41c37f00fefbe19b9b31036c9000 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
A GTK quick message popup program.
Allows users to quickly share messages with others on their own screen. Useful
for when you need to be silent but want to communicate with the person sitting
next to you, e.g. for games of hangman during lectures.
TODO: allow multi-lines
"""
__author__ = 'Ali Scott'
import pygtk
pygtk.require('2.0')
import gtk
import pango
X_PADDING = 40
Y_PADDING = 20
X_MARGIN = 60
FONT_FACE = 'lucida sans unicode'
FONT_SIZE = '62'
BG_COLOR = '#000'
TEXT_COLOR = '#fff'
OPACITY = 0.8
class QuickText:
"""
Draws the window and handles the key events.
"""
def __init__(self):
"""
Sets up the window and the text area.
"""
self.window = self._setup_window()
self.textarea = self._setup_textarea()
self.window.connect('destroy', gtk.main_quit)
self.window.connect('key_press_event', self._on_key_press)
self.textarea.connect('changed', self._text_changed)
font_desc = pango.FontDescription(FONT_FACE + ' ' + FONT_SIZE)
self.textarea.modify_font(font_desc)
# layout used for finding pixel size of font
self.layout = pango.Layout(gtk.Widget \
.create_pango_context(self.window))
self.layout.set_font_description(font_desc)
(w, h) = self.layout.get_pixel_size()
# set starting height of the text area to be the height of the font
self.textarea.set_size_request(w, h)
# add padding to the size of the window
self.window.resize(w + X_PADDING, h + Y_PADDING)
self.window.add(self.textarea)
self.textarea.show()
self.window.show()
def _setup_window(self):
"""
Styles the window.
"""
w = gtk.Window(gtk.WINDOW_TOPLEVEL)
w.set_position(gtk.WIN_POS_CENTER_ALWAYS)
w.set_decorated(False)
w.set_has_frame(False)
w.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(BG_COLOR))
w.set_opacity(OPACITY)
return w
def _setup_textarea(self):
"""
Styles the text area.
"""
t = gtk.Entry()
t.set_alignment(0.5)
t.set_has_frame(False)
t.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(BG_COLOR))
t.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(BG_COLOR))
t.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse(TEXT_COLOR))
return t
def _on_key_press(self, widget, event):
"""
Handles key press events.
Quits when the enter key is pressed.
"""
if gtk.gdk.keyval_name(event.keyval) == 'Return':
gtk.Widget.destroy(self.window)
def _text_changed(self, widget):
"""
Handles resizing of window when text is written.
"""
# get size of text
self.layout.set_text(self.textarea.get_text())
(w, h) = self.layout.get_pixel_size()
# resize window and text area to fit text and screen size
max_width = gtk.gdk.screen_width() - X_MARGIN
self.textarea.set_size_request(min(w, max_width - X_PADDING), h)
self.window.resize(min(w + X_PADDING, max_width), h + Y_PADDING)
def main(self):
"""
Runs the program.
"""
gtk.main()
if __name__ == '__main__':
main()
| 28.308333 | 78 | 0.624374 | #!/usr/bin/env python
"""
A GTK quick message popup program.
Allows users to quickly share messages with others on their own screen. Useful
for when you need to be silent but want to communicate with the person sitting
next to you, e.g. for games of hangman during lectures.
TODO: allow multi-lines
"""
__author__ = 'Ali Scott'
import pygtk
pygtk.require('2.0')
import gtk
import pango
X_PADDING = 40
Y_PADDING = 20
X_MARGIN = 60
FONT_FACE = 'lucida sans unicode'
FONT_SIZE = '62'
BG_COLOR = '#000'
TEXT_COLOR = '#fff'
OPACITY = 0.8
class QuickText:
"""
Draws the window and handles the key events.
"""
def __init__(self):
"""
Sets up the window and the text area.
"""
self.window = self._setup_window()
self.textarea = self._setup_textarea()
self.window.connect('destroy', gtk.main_quit)
self.window.connect('key_press_event', self._on_key_press)
self.textarea.connect('changed', self._text_changed)
font_desc = pango.FontDescription(FONT_FACE + ' ' + FONT_SIZE)
self.textarea.modify_font(font_desc)
# layout used for finding pixel size of font
self.layout = pango.Layout(gtk.Widget \
.create_pango_context(self.window))
self.layout.set_font_description(font_desc)
(w, h) = self.layout.get_pixel_size()
# set starting height of the text area to be the height of the font
self.textarea.set_size_request(w, h)
# add padding to the size of the window
self.window.resize(w + X_PADDING, h + Y_PADDING)
self.window.add(self.textarea)
self.textarea.show()
self.window.show()
def _setup_window(self):
"""
Styles the window.
"""
w = gtk.Window(gtk.WINDOW_TOPLEVEL)
w.set_position(gtk.WIN_POS_CENTER_ALWAYS)
w.set_decorated(False)
w.set_has_frame(False)
w.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(BG_COLOR))
w.set_opacity(OPACITY)
return w
def _setup_textarea(self):
"""
Styles the text area.
"""
t = gtk.Entry()
t.set_alignment(0.5)
t.set_has_frame(False)
t.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(BG_COLOR))
t.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(BG_COLOR))
t.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse(TEXT_COLOR))
return t
def _on_key_press(self, widget, event):
"""
Handles key press events.
Quits when the enter key is pressed.
"""
if gtk.gdk.keyval_name(event.keyval) == 'Return':
gtk.Widget.destroy(self.window)
def _text_changed(self, widget):
"""
Handles resizing of window when text is written.
"""
# get size of text
self.layout.set_text(self.textarea.get_text())
(w, h) = self.layout.get_pixel_size()
# resize window and text area to fit text and screen size
max_width = gtk.gdk.screen_width() - X_MARGIN
self.textarea.set_size_request(min(w, max_width - X_PADDING), h)
self.window.resize(min(w + X_PADDING, max_width), h + Y_PADDING)
def main(self):
"""
Runs the program.
"""
gtk.main()
def main():
    """Create the popup window and enter the GTK main loop."""
    app = QuickText()
    app.main()
if __name__ == '__main__':
main()
| 23 | 0 | 23 |
5e82b0e549d258f31943ed40500785dd7a49dd3f | 2,674 | py | Python | setup.py | showmen15/testEEE | 44619da6d0972903fc93691e30688b3c0fd4e6e7 | [
"MIT"
] | null | null | null | setup.py | showmen15/testEEE | 44619da6d0972903fc93691e30688b3c0fd4e6e7 | [
"MIT"
] | null | null | null | setup.py | showmen15/testEEE | 44619da6d0972903fc93691e30688b3c0fd4e6e7 | [
"MIT"
] | null | null | null | # coding=utf-8
# !/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
sys.stderr.write('using distutils\n')
from distutils.core import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='amber-python-drivers',
packages=[
'amberdriver',
'amberdriver.common',
'amberdriver.dummy',
'amberdriver.hokuyo',
'amberdriver.drive_to_point',
'amberdriver.collision_avoidance',
'amberdriver.null',
'amberdriver.tools',
'amberdriver.tests'
],
package_dir={
'amberdriver': 'src/amberdriver',
'amberdriver.common': 'src/amberdriver/common',
'amberdriver.dummy': 'src/amberdriver/dummy',
'amberdriver.hokuyo': 'src/amberdriver/hokuyo',
'amberdriver.drive_to_point': 'src/amberdriver/drive_to_point',
'amberdriver.collision_avoidance': 'src/amberdriver/collision_avoidance',
'amberdriver.null': 'src/amberdriver/null',
'amberdriver.tools': 'src/amberdriver/tools',
'amberdriver.tests': 'src/amberdriver/tests'
},
package_data={'': [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/collision_avoidance/collision_avoidance.ini',
'src/amberdriver/tools/main.ini'
]},
data_files=[
('', [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/collision_avoidance/collision_avoidance.ini',
'src/amberdriver/tools/main.ini'
]),
],
test_suite="amberdriver.tests",
include_package_data=True,
install_requires=required,
version='1.17',
description='Amber drivers in python',
author=u'Paweł Suder',
author_email='pawel@suder.info',
url='http://project-capo.github.io/',
download_url='http://github.com/project-capo/amber-python-drivers/',
keywords=[
'amber',
'dummy',
'hokuyo',
'drive to point',
'collision avoidance',
'panda'
],
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
],
long_description='''\
'''
)
| 31.833333 | 81 | 0.624907 | # coding=utf-8
# !/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
sys.stderr.write('using distutils\n')
from distutils.core import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='amber-python-drivers',
packages=[
'amberdriver',
'amberdriver.common',
'amberdriver.dummy',
'amberdriver.hokuyo',
'amberdriver.drive_to_point',
'amberdriver.collision_avoidance',
'amberdriver.null',
'amberdriver.tools',
'amberdriver.tests'
],
package_dir={
'amberdriver': 'src/amberdriver',
'amberdriver.common': 'src/amberdriver/common',
'amberdriver.dummy': 'src/amberdriver/dummy',
'amberdriver.hokuyo': 'src/amberdriver/hokuyo',
'amberdriver.drive_to_point': 'src/amberdriver/drive_to_point',
'amberdriver.collision_avoidance': 'src/amberdriver/collision_avoidance',
'amberdriver.null': 'src/amberdriver/null',
'amberdriver.tools': 'src/amberdriver/tools',
'amberdriver.tests': 'src/amberdriver/tests'
},
package_data={'': [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/collision_avoidance/collision_avoidance.ini',
'src/amberdriver/tools/main.ini'
]},
data_files=[
('', [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/collision_avoidance/collision_avoidance.ini',
'src/amberdriver/tools/main.ini'
]),
],
test_suite="amberdriver.tests",
include_package_data=True,
install_requires=required,
version='1.17',
description='Amber drivers in python',
author=u'Paweł Suder',
author_email='pawel@suder.info',
url='http://project-capo.github.io/',
download_url='http://github.com/project-capo/amber-python-drivers/',
keywords=[
'amber',
'dummy',
'hokuyo',
'drive to point',
'collision avoidance',
'panda'
],
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
],
long_description='''\
'''
)
| 0 | 0 | 0 |
bb332799c072a460ef4c506646761156e31cf633 | 1,231 | py | Python | superpixel.py | Jichao-Wang/MDOAU2-net | e46ddeb94d936144c936de6d123654991f86009f | [
"MIT"
] | null | null | null | superpixel.py | Jichao-Wang/MDOAU2-net | e46ddeb94d936144c936de6d123654991f86009f | [
"MIT"
] | null | null | null | superpixel.py | Jichao-Wang/MDOAU2-net | e46ddeb94d936144c936de6d123654991f86009f | [
"MIT"
] | null | null | null | # USAGE
# python superpixel.py --image cactus.jpg
import torch
import matplotlib.pyplot as plt
from skimage import io
from skimage.segmentation import quickshift, mark_boundaries # 导入mark_boundaries 以绘制实际的超像素分割
# 导入必要的包
from skimage.segmentation import slic # 导入包以使用SLIC superpixel segmentation
from skimage.util import img_as_float
my_input = torch.rand([128, 128]).numpy()
print('my_input.shape', my_input.shape)
image_path = './sample_1_image.png'
image = img_as_float(io.imread(image_path))
plt.imshow(image)
# 遍历超像素段的数量 研究3种尺寸不断增加的段,100、200、300
for numSegments in (350, 400):
# 执行SLTC 超像素分割,该功能仅获取原始图像并覆盖我们的超像素段。
# 仅有一个必需参数:
# image:待执行SLTC超像素分割的图像
# n_segments: 定义我们要生成多少个超像素段的参数,默认100
# sigma:在分割之前应用的平滑高斯核
segments = superpixel_segmentation(image, numSegments)
# 绘制SLTC 的分割结果
fig = plt.figure("Superpixels -- %d segments" % (numSegments))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(image, segments))
plt.axis("off")
# 展示图像
plt.show()
| 30.02439 | 93 | 0.745735 | # USAGE
# python superpixel.py --image cactus.jpg
import torch
import matplotlib.pyplot as plt
from skimage import io
from skimage.segmentation import quickshift, mark_boundaries # 导入mark_boundaries 以绘制实际的超像素分割
# 导入必要的包
from skimage.segmentation import slic # 导入包以使用SLIC superpixel segmentation
from skimage.util import img_as_float
def superpixel_segmentation(image, numSegments=250):
    """Segment `image` into superpixels with SLIC and return the label map.

    numSegments is the approximate number of superpixels requested.
    (An earlier quickshift-based variant was tried: quickshift(image, ratio=0.8).)
    """
    label_map = slic(image, n_segments=numSegments)
    # Debug trace retained from the original implementation.
    print(type(label_map), label_map.shape, label_map)
    return label_map
my_input = torch.rand([128, 128]).numpy()
print('my_input.shape', my_input.shape)
image_path = './sample_1_image.png'
image = img_as_float(io.imread(image_path))
plt.imshow(image)
# 遍历超像素段的数量 研究3种尺寸不断增加的段,100、200、300
for numSegments in (350, 400):
# 执行SLTC 超像素分割,该功能仅获取原始图像并覆盖我们的超像素段。
# 仅有一个必需参数:
# image:待执行SLTC超像素分割的图像
# n_segments: 定义我们要生成多少个超像素段的参数,默认100
# sigma:在分割之前应用的平滑高斯核
segments = superpixel_segmentation(image, numSegments)
# 绘制SLTC 的分割结果
fig = plt.figure("Superpixels -- %d segments" % (numSegments))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(image, segments))
plt.axis("off")
# 展示图像
plt.show()
| 200 | 0 | 23 |
177225ec3c70f4e6abe9fb5cea7ea276150fec10 | 179 | py | Python | ask_the_duck/session.py | OpenJarbas/ask-the-duck | 9ccc40752956e22f25a0b4cc4c90dabd6be76af8 | [
"Apache-2.0"
] | null | null | null | ask_the_duck/session.py | OpenJarbas/ask-the-duck | 9ccc40752956e22f25a0b4cc4c90dabd6be76af8 | [
"Apache-2.0"
] | 1 | 2021-08-09T13:43:54.000Z | 2021-08-09T13:44:18.000Z | ask_the_duck/session.py | OpenJarbas/ask-the-duck | 9ccc40752956e22f25a0b4cc4c90dabd6be76af8 | [
"Apache-2.0"
] | 1 | 2021-04-30T03:14:59.000Z | 2021-04-30T03:14:59.000Z | import requests_cache
USER_AGENT = "ask_the_duck v0.0.1"
SESSION = requests_cache.CachedSession(expire_after=5 * 60, backend="memory")
SESSION.headers = {"User-Agent": USER_AGENT} | 44.75 | 77 | 0.787709 | import requests_cache
USER_AGENT = "ask_the_duck v0.0.1"
SESSION = requests_cache.CachedSession(expire_after=5 * 60, backend="memory")
SESSION.headers = {"User-Agent": USER_AGENT} | 0 | 0 | 0 |
cec1f245eeedcfbca738d83c7fc61a222d4e78e7 | 20,895 | py | Python | tseries_m5/run_eval.py | arita37/data | 0d1a38fd9b564cfb9c34ad521e7df2b3b6e2316b | [
"MIT"
] | null | null | null | tseries_m5/run_eval.py | arita37/data | 0d1a38fd9b564cfb9c34ad521e7df2b3b6e2316b | [
"MIT"
] | null | null | null | tseries_m5/run_eval.py | arita37/data | 0d1a38fd9b564cfb9c34ad521e7df2b3b6e2316b | [
"MIT"
] | 1 | 2022-02-14T18:18:38.000Z | 2022-02-14T18:18:38.000Z | from sklearn import preprocessing, metrics
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, KFold, RepeatedKFold, GroupKFold, GridSearchCV, train_test_split, TimeSeriesSplit
from datetime import datetime
import copy
import os
import fire
import glob
import pdb
##### import all Feature engineering functions
from util_feat_m5 import *
if __name__ == "__main__":
import fire
fire.Fire()
"""
import util_feat_m5
# df_meta= col_name, col_type, file_path
def featurestore_generate_feature(dir_in, dir_out, my_fun_features) :
# from util_feat_m5 import lag_featrues
# featurestore_generate_feature(dir_in, dir_out, lag_featrues)
train_df = pd.read_csv( dir_in + "/sales_train_val.csv.zip")
calendar_df = pd.read_csv(dir_in + "/calendar.csv")
price_df = pd.read_csv(dir_in + "/sell_prices.csv")
dfnew = my_fun_features(train_df, calendar_df, price_df) :
dfnew.to_parquet( dir_out +"/mfeaturesXXXX.parquet")
def featurestore_filter_features(mode="random") :
cols_cat0 = [ "feat1", "fewat2" ]
if mode == "random" :
### Random selection
cols_cat = cols_cat0[ np.random.c = hoice( 5, len(cols_cat) ) ]
cols_num = cols_num0[ np.random.c = hoice( 5, len(cols_num) ) ]
return cols_cat, col_num
if mode == "all" :
pass
if mode == "smartway" :
pass
def train(model, pars={} ) :
data_pars = {}
model_pars = {}
for ii in range(n_experiments) :
cols_cat, cols_num = featurestore_filter_features()
df = featurestore_get_feature_fromcolname(path, cols_cat + cols_num, "train")
dftest = featurestore_get_feature_fromcolname(path, cols_cat + cols_num, 'test')
X_train = X_transform( df, cols_num, cols_cat, pars) # select sri
y_train = y_transform(df, coly)
X_test = X_transform( dftest, cols_num, cols_cat, pars) # select variables
y_test = y_transform(dftest, coly)
lgbm = lgb.LGBMRegressor()
lgbm.fit( X_train, y_train)
# prediction + metrics
y_test_pred = lgbm.predict(X_test)
metric_val = metrics_calc(y_test, y_test_pred)
### Store in metrics :
# run_id, feat_name, feat_name_long, feat_type, model_params, metric_name, metric_val
# 3,roling_demand,Mean of the variable estimates,lag_features,params = {"objective" : "poisson","metric" :"rmse","force_row_wise" : True,"learning_rate" : 0.075,
"sub_row" : 0.75,"bagging_freq" : 1,"lambda_l2" : 0.1,"metric": ["rmse"],'verbosity': 1,'num_iterations' : 250,
},rmse,1.16548
df_metrics['run_id'] = time()
df_metrics['cols'].append( ",".join( cols_num + cols_cat ))
df_metrics['metrics_val'].append(metric_val)
def test_old():
from util_feat_m5 import lag_featrues
train_df = pd.read_csv("sales_train_val.csv")
calendar_df = pd.read_csv("calendar.csv")
price_df = pd.read_csv("sell_prices.csv")
sample = pd.read_csv("sample_submi.csv")
calendar_df["date_dt"] = pd.to_datetime(calendar_df["date"])
train = train_df.copy()
price = price_df.copy()
calendar = calendar_df.copy()
Train_data = train.iloc[:,:-56]
Val_data = train.iloc[:,:-28]
X_train = lag_featrues(Train_data).iloc[:,5:] # select variables
y_train = train.iloc[:,-56]
X_test = lag_featrues(Val_data).iloc[:,5:]
y_test = train.iloc[:,-28]
# Create instance
lgbm = lgb.LGBMRegressor()
# Training and score
learning_rate = [0.15, 0.2, 0.25]
max_depth = [15, 20, 25]
param_grid = {'learning_rate': learning_rate, 'max_depth': max_depth}
# Fitting
cv_lgbm = GridSearchCV(lgbm, param_grid, cv=10, n_jobs =1)
cv_lgbm.fit(X_train, y_train)
print("Best params:{}".format(cv_lgbm.best_params_))
# best params
best_lg = cv_lgbm.best_estimator_
# prediction
y_train_pred_lg = best_lg.predict(X_train)
y_test_pred_lg = best_lg.predict(X_test)
print("MSE train:{}".format(mean_squared_error(y_train, y_train_pred_lg)))
print("MSE test;{}".format(mean_squared_error(y_test, y_test_pred_lg)))
print("R2 score train:{}".format(r2_score(y_train, y_train_pred_lg)))
print("R2 score test:{}".format(r2_score(y_test, y_test_pred_lg)))
#Predict using only variables with an importance of 1 or higher.
importance = best_lg.feature_importances_
indices = np.argsort(importance)[::-1]
# print importance
importance_df = pd.DataFrame({})
columns = []
importance_ = []
for f in range(X_train.shape[1]):
print("%2d) %-*s %.2f" %(f+1, 30, X_train.columns[indices[f]], importance[indices[f]]))
col = X_train.columns[indices[f]]
imp = importance[indices[f]]
columns.append(col)
importance_.append(imp)
importance_df["col_name"] = columns
importance_df["importance"] = importance_
importance = best_lg.feature_importances_
indices = np.argsort(importance)[::-1]
# importance columns (>0)
imp_col = importance_df[importance_df["importance"]>0]["col_name"].values
# Train test split, select by imp_col
X_train = lag_featrues(Train_data).iloc[:,5:][imp_col] # select variables
y_train = train.iloc[:,-56]
X_test = lag_featrues(Val_data).iloc[:,5:][imp_col]
y_test = train.iloc[:,-28]
# Create instance
lgbm = lgb.LGBMRegressor()
# Training and score
learning_rate = [0.15, 0.2, 0.25]
max_depth = [15, 20, 25]
param_grid = {'learning_rate': learning_rate, 'max_depth': max_depth}
# Fitting
cv_lgbm = GridSearchCV(lgbm, param_grid, cv=10, n_jobs =1)
cv_lgbm.fit(X_train, y_train)
print("Best params:{}".format(cv_lgbm.best_params_))
# best params
best_lg = cv_lgbm.best_estimator_
# prediction
y_train_pred_lg = best_lg.predict(X_train)
y_test_pred_lg = best_lg.predict(X_test)
print("MSE train:{}".format(mean_squared_error(y_train, y_train_pred_lg)))
print("MSE test;{}".format(mean_squared_error(y_test, y_test_pred_lg)))
print("R2 score train:{}".format(r2_score(y_train, y_train_pred_lg)))
print("R2 score test:{}".format(r2_score(y_test, y_test_pred_lg)))
run_id=list(range(300))
df_metrics=pd.DataFrame(run_id,columns=['run_id'])
df_metrics['feat_name'] = pd.Series(X_train.columns[0:300], index=dataframe.index)
df_metrics['feat_type']=df_metrics.feat_name
df_metrics.replace({'feat_type': r'^lag_.*'}, {'feat_type': 'lag'}, regex=True,inplace=True)
df_metrics.replace({'feat_type': r'^rolling.*'}, {'feat_type': 'rolling'}, regex=True,inplace=True)
df_metrics['parameter'] = pd.Series(best_lg, index=dataframe.index)
df_metrics['metric_name'] ="MSE"
df_metrics['metric_val'] = pd.Series(pred_mse[:300], index=dataframe.index)
df_metrics.to_csv("train.csv")
"""
| 39.952199 | 718 | 0.661689 | from sklearn import preprocessing, metrics
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, KFold, RepeatedKFold, GroupKFold, GridSearchCV, train_test_split, TimeSeriesSplit
from datetime import datetime
import copy
import os
import fire
import glob
import pdb
##### import all Feature engineering functions
from util_feat_m5 import *
def features_to_category(df, nan_cols, cat_cols):
    """Fill missing values and label-encode categorical columns of *df*.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame to transform. It is modified and also returned.
    nan_cols : list of str
        Columns whose missing values are replaced with the string 'unknown'.
    cat_cols : list of str
        Columns to label-encode. Values are stringified first so mixed
        types and the 'unknown' placeholder encode cleanly.

    Returns
    -------
    pandas.DataFrame
        The same frame, transformed.
    """
    for feature in nan_cols:
        # Assign the result instead of chained `.fillna(..., inplace=True)`:
        # the chained form operates on a possible copy and stops writing back
        # under pandas copy-on-write (deprecated since pandas 2.x).
        df[feature] = df[feature].fillna('unknown')
    for feature in cat_cols:
        encoder = preprocessing.LabelEncoder()
        df[feature] = encoder.fit_transform(df[feature].astype(str))
    return df
def update_meta_csv(featnames, filename, cat_cols):
    """Register feature columns in the meta_features.csv catalogue.

    For each name in *featnames*: append a new row (featname, filename,
    feattype) if the feature is not yet catalogued, otherwise update the
    file it now lives in.

    Parameters
    ----------
    featnames : iterable of str
        Feature column names to register.
    filename : str
        Name of the parquet file that stores these features.
    cat_cols : list of str
        Names to tag as "categorical"; every other name is "numeric".
    """
    meta_csv = pd.DataFrame(columns=['featname', 'filename', 'feattype'])
    if os.path.exists('meta_features.csv'):
        meta_csv = pd.read_csv('meta_features.csv')
    append_data_dict = {'featname': [], 'filename': [], 'feattype': []}
    # The set of catalogued names does not change inside the loop (only
    # filenames are updated), so compute membership once.
    known = set(meta_csv['featname'].unique())
    for feat in featnames:
        if feat not in known:
            append_data_dict['filename'].append(filename)
            append_data_dict['featname'].append(feat)
            feat_type = "numeric" if feat not in cat_cols else "categorical"
            append_data_dict['feattype'].append(feat_type)
        else:
            # Already catalogued: just point the feature at its new file.
            meta_csv.loc[meta_csv['featname'] == feat, 'filename'] = filename
    append_df = pd.DataFrame.from_dict(append_data_dict)
    # DataFrame.append was removed in pandas 2.0 -- use pd.concat instead.
    meta_csv = pd.concat([meta_csv, append_df], ignore_index=True)
    meta_csv.to_csv('meta_features.csv', index=False)
def get_cat_num_features_from_meta_csv(id_cols, dep_col):
    """Read the feature catalogue and split names into categorical/numeric.

    Identifier columns and the dependent column are excluded from both lists.

    Parameters
    ----------
    id_cols : list of str
        Identifier/join columns to exclude.
    dep_col : str
        Target column to exclude.

    Returns
    -------
    (cat_feats, num_feats) : tuple of list of str
    """
    excluded = id_cols + [dep_col]
    catalogue = pd.read_csv('meta_features.csv')

    def _names_of_type(kind):
        # All catalogued feature names of the given type, minus excluded ones.
        selected = catalogue[catalogue["feattype"] == kind]['featname']
        return [name for name in selected.tolist() if name not in excluded]

    return _names_of_type("categorical"), _names_of_type("numeric")
def get_file_feat_from_meta_csv(selected_cols, id_cols):
    """Map each feature parquet file to the columns to read from it.

    Every file always carries the id columns (used later as join keys);
    each selected feature is added only to the file that stores it,
    according to the meta_features.csv catalogue.

    Parameters
    ----------
    selected_cols : list of str
        Feature names that should be loaded.
    id_cols : list of str
        Identifier/join columns present in every feature file.

    Returns
    -------
    dict
        filename -> de-duplicated list of columns to read from that file.
    """
    meta_csv = pd.read_csv('meta_features.csv')
    # Bug fix: give every file its OWN copy of id_cols. The original bound
    # the same list object to every key, so appending a feature for one file
    # appended it to all files (and mutated the caller's list), which made
    # downstream pd.read_parquet request columns a file does not contain.
    file_feat_mapping = {k: list(id_cols) for k in meta_csv['filename'].unique().tolist()}
    for selected_col in selected_cols:
        selected_col_meta_df = meta_csv[meta_csv["featname"] == selected_col]
        file_feat_mapping[selected_col_meta_df['filename'].tolist()[0]].append(selected_col)
    return {k: list(set(v)) for k, v in file_feat_mapping.items()}
def features_generate_file(dir_in, dir_out, my_fun_features, features_group_name, input_raw_path = None, auxiliary_csv_path = None, drop_cols = None, index_cols = None, merge_cols_mapping = None, cat_cols = None, id_cols=None, dep_col = None, max_rows = 5, step_wise_saving = False) :
    """Run one feature-engineering function over the merged raw data and save it.

    Loads ``raw_merged.df.parquet`` from *dir_in*, applies *my_fun_features*
    (presumably one of the generators star-imported from util_feat_m5 --
    confirm against that module), writes the resulting frame to
    ``<dir_out>/<features_group_name>.parquet`` unless the generator already
    saved step-wise itself, and registers the new columns in meta_features.csv.
    """
    # from util_feat_m5 import lag_featrues
    # featurestore_generate_feature(dir_in, dir_out, lag_featrues)
    merged_df = pd.read_parquet(dir_in + "/raw_merged.df.parquet")
    # The generator returns the feature frame plus the categorical columns it
    # produced; the returned cat_cols replaces the one passed in.
    dfnew, cat_cols= my_fun_features(merged_df, input_raw_path, dir_out, features_group_name, auxiliary_csv_path, drop_cols, index_cols, merge_cols_mapping, cat_cols, id_cols, dep_col, max_rows)
    if not step_wise_saving:
        dfnew.to_parquet(f'{dir_out}/{features_group_name}.parquet')
    # num_cols = list(set(dfnew._get_numeric_data().columns))
    update_meta_csv(dfnew.columns, f'{features_group_name}.parquet', cat_cols)
def feature_merge_df(df_list, cols_join):
    """Left-join a list of feature frames into a single frame.

    The first frame is the base; each subsequent frame is joined onto it
    using whichever of *cols_join* it actually contains.

    Parameters
    ----------
    df_list : list of pandas.DataFrame
        Feature frames to combine.
    cols_join : list of str
        Candidate join-key columns.

    Returns
    -------
    pandas.DataFrame or None
        The merged frame (None when *df_list* is empty).
    """
    print(cols_join)
    merged = None
    for frame in df_list:
        print(frame.columns)
        join_keys = [key for key in cols_join if key in frame.columns]
        if merged is None:
            merged = frame
        else:
            merged = merged.join(frame.set_index(join_keys), on=join_keys, how="left")
    return merged
def raw_merged_df(input_path="data/", out_path='out/', index_cols = None, dep_col = None, raw_merge_cols = None, merge_cols_mapping = None, nan_cols = None, cat_cols = None, max_rows=10):
    """Build the raw merged training frame and save it as parquet.

    Melts the wide sales table into long format (one row per id/day),
    left-joins calendar and price information, label-encodes the
    categorical columns, and writes ``<out_path>/raw_merged.df.parquet``.

    Parameters
    ----------
    max_rows : int
        Number of sales rows to keep; -1 means keep all rows.
    merge_cols_mapping : dict
        {'left': ..., 'right': ...} column names for the calendar join.

    NOTE(review): df_sales_train is read but never used below -- likely
    leftover; verify before removing.
    """
    df_sales_train = pd.read_csv(input_path + "/sales_train_gen.csv")
    df_calendar = pd.read_csv(input_path + "/calendar_gen.csv")
    df_sales_val = pd.read_csv(input_path + "/sales_train_gen.csv")
    df_sell_price = pd.read_csv(input_path + "/sell_prices_gen.csv")
    # df_submi = pd.read_csv("data/sample_submi.csv")
    # Wide -> long: one row per (id columns, day) with the demand value.
    if not max_rows == -1:
        df_sales_val_melt = pd.melt(df_sales_val[0:max_rows], id_vars = index_cols, var_name = 'day', value_name = dep_col)
    else:
        df_sales_val_melt = pd.melt(df_sales_val, id_vars = index_cols, var_name = 'day', value_name = dep_col)
    # val_rows = [row for row in df_submi['id'] if 'val' in row]
    # eval_rows = [row for row in df_submi['id'] if 'eval' in row]
    # df_submi_val = df_submi[df_submi['id'].isin(val_rows)][0:max_rows]
    # df_submi_eval = df_submi[df_submi['id'].isin(eval_rows)][0:max_rows]
    # df_submi_val = df_submi_val.merge(df_product, how = 'left', on = 'id')
    # df_submi_eval = df_submi_eval.merge(df_product, how = 'left', on = 'id')
    # df_submi_val = pd.melt(df_submi_val, id_vars = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'], var_name = 'day', value_name = 'demand')
    # df_submi_eval = pd.melt(df_submi_eval, id_vars = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'], var_name = 'day', value_name = 'demand')
    # df_sales_val_melt['part'] = 'train'
    # df_submi_val['part'] = 'test1'
    # df_submi_eval['part'] = 'test2'
    # merged_df = pd.concat([df_sales_val_melt, df_submi_val, df_submi_eval], axis = 0)
    merged_df = df_sales_val_melt
    # Drop calendar columns that the time-feature generators recompute later.
    df_calendar.drop(['weekday', 'wday', 'month', 'year'], inplace = True, axis = 1)
    merged_df = pd.merge(merged_df, df_calendar, how = 'left', left_on = [merge_cols_mapping["left"]], right_on = [merge_cols_mapping["right"]])
    merged_df = merged_df.merge(df_sell_price, on = raw_merge_cols, how = 'left')
    merged_df = features_to_category(merged_df, nan_cols = nan_cols, cat_cols = cat_cols)
    # merged_df = add_time_features(merged_df)
    os.makedirs(out_path, exist_ok=True)
    fname = out_path + "/raw_merged.df.parquet"
    merged_df.to_parquet(fname)
    # return merged_df
def features_get_cols(mode = "random", id_cols = None, dep_col = None):
    """Select feature column names from the meta_features.csv catalogue.

    Parameters
    ----------
    mode : str
        "random"   - sample up to 3 categorical and 5 numeric features,
        "all"      - every catalogued feature,
        "smartway" - placeholder, currently identical to "all".
    id_cols : list of str
        Join-key columns to exclude from the catalogue.
    dep_col : str
        Target column to exclude from the catalogue.

    Returns
    -------
    (cols_cat, cols_num) : tuple of list of str
    """
    # categorical_cols = ['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id', 'event_name_1', 'event_type_1', 'event_name_2', 'event_type_2' ]
    # numerical_cols = ['snap_TX', 'sell_price', 'week', 'snap_CA', 'month', 'snap_WI', 'dayofweek', 'year']
    categorical_cols, numerical_cols = get_cat_num_features_from_meta_csv(id_cols = id_cols, dep_col = dep_col)
    cols_cat = []
    cols_num = []
    if mode == "random":
        # Sample without replacement; cap the sample size so a small
        # catalogue does not make np.random.choice raise ValueError
        # ("Cannot take a larger sample than population").
        n_cat = min(3, len(categorical_cols))
        n_num = min(5, len(numerical_cols))
        cols_cat = [categorical_cols[i] for i in np.random.choice(len(categorical_cols), n_cat, replace = False)]
        cols_num = [numerical_cols[i] for i in np.random.choice(len(numerical_cols), n_num, replace = False)]
    if mode == "all":
        cols_cat = categorical_cols
        cols_num = numerical_cols
    if mode == "smartway":
        cols_cat = categorical_cols
        cols_num = numerical_cols
        # TODO: implement a genuinely smarter (e.g. importance-based) selection.
    return cols_cat, cols_num
def get_file_names_to_load_from(file_name, path):
    """List files under *path* whose names start with the stem of *file_name*.

    ``"lag.parquet"`` matches ``lag.parquet`` as well as step-wise pieces
    such as ``lag_0.parquet``, ``lag_1.parquet``, ...

    Returns
    -------
    list of str
        Matching paths, in glob order.
    """
    stem = file_name.split(".")[0]
    return glob.glob(f'{path}/{stem}' + "*")
def load_data(path, selected_cols, id_cols):
    """Load the selected feature columns from the feature store and merge them.

    Uses the meta catalogue to find which parquet file stores each column,
    reads every matching file (including step-wise pieces), left-joins the
    frames on *id_cols*, sorts by 'date', and drops the non-date id columns
    so the result is ready for model input.
    """
    print("**********")
    selected_cols = id_cols + selected_cols
    print(id_cols)
    # Pass a copy of id_cols: the mapping function appends to its argument.
    file_col_mapping = get_file_feat_from_meta_csv(selected_cols = selected_cols, id_cols = id_cols[:])
    # merged_df = pd.DataFrame()
    # for file_name,file_cols in file_col_mapping.items():
    #     file_df = pd.read_parquet(path + file_name, columns = file_cols)
    #     merged_df = pd.concat([merged_df, file_df], axis = 0)
    # for file_name,file_cols in file_col_mapping.items():
    #     print(file_name)
    #     print(file_cols)
    #     pd.read_parquet(f'{path}/{file_name}', columns = file_cols)
    print(id_cols)
    feature_dfs = []
    for file_name,file_cols in file_col_mapping.items():
        print(file_name)
        print(file_cols)
        # Concatenate all step-wise pieces of one feature group vertically.
        # NOTE(review): the first concat receives (None, dfi) -- pandas has
        # historically skipped None entries but newer versions warn/deviate;
        # verify against the installed pandas.
        file_name_feature_df = None
        for x in get_file_names_to_load_from(file_name, path):
            dfi = pd.read_parquet(f'{x}', columns = file_cols)
            file_name_feature_df = pd.concat((file_name_feature_df, dfi))
        feature_dfs.append(file_name_feature_df)
    print(id_cols)
    merged_df = feature_merge_df(feature_dfs, id_cols)
    merged_df = merged_df.sort_values('date')
    # Keep 'date' for time-ordering; drop the remaining join keys.
    non_date_col = [x for x in id_cols if not x == "date"]
    merged_df.drop(non_date_col, inplace = True, axis = 1)
    return merged_df
def X_transform(df, selected_cols):
    """Project *df* onto the model's input columns, in the given order."""
    return df[selected_cols]
def Y_transform(df, selected_col):
    """Extract the target column from *df* as a Series."""
    return df[selected_col]
def run_eval(input_path, max_rows = None, n_experiments = 3, id_cols = None, dep_col = None):
    """Run repeated LightGBM experiments over random feature subsets.

    For each of *n_experiments* iterations: pick a random feature subset,
    load it from the feature store, train a LightGBM regressor with early
    stopping, and record the validation RMSE. All runs are appended to
    df_metrics.csv.

    NOTE(review): max_rows is accepted but never used in this function.
    NOTE(review): `early_stopping_rounds` / `verbose_eval` keyword args to
    lgb.train were removed in lightgbm >= 4 (use callbacks) -- confirm the
    pinned lightgbm version.
    """
    model_params = {'num_leaves': 555,
                    'min_child_weight' : 0.034,
                    'feature_fraction' : 0.379,
                    'bagging_fraction' : 0.418,
                    'min_data_in_leaf' : 106,
                    'objective' : 'regression',
                    'max_depth' : -1,
                    'learning_rate' : 0.005,
                    "boosting_type" : "gbdt",
                    "bagging_seed" : 11,
                    "metric" : 'rmse',
                    "verbosity" : -1,
                    'reg_alpha' : 0.3899,
                    'reg_lambda' : 0.648,
                    'random_state' : 222,
                    }
    print("hello eval")
    dict_metrics = {'run_id' : [], 'cols' : [], 'metric_name': [], 'model_params': [], 'metrics_val' : []}
    for ii in range(n_experiments):
        # Random feature subset per experiment (mode defaults to "random").
        cols_cat, cols_num = features_get_cols(id_cols = id_cols, dep_col = dep_col)
        df = load_data(input_path, cols_cat + cols_num + [dep_col], id_cols)
        # df_output = featurestore_get_feature_fromcolname('data/output', cols_cat + cols_num, 'test1')
        X = X_transform(df, cols_cat + cols_num)
        y = Y_transform(df, 'demand')
        # X_output = X_transform(df_output, cols_cat + cols_num)
        # Y_test = Y_transform(df_test, 'demand')
        # preparing split
        X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.33, random_state=42)
        # Y_test = np.zeros(X_test.shape[0])
        dtrain = lgb.Dataset(X_train, label=Y_train)
        dtest = lgb.Dataset(X_test, label=Y_test)
        clf = lgb.train(model_params, dtrain, 2500,
                        valid_sets = [dtrain, dtest],
                        early_stopping_rounds = 50, verbose_eval=100)
        Y_test_pred = clf.predict(X_test,num_iteration=clf.best_iteration)
        val_score = np.sqrt(metrics.mean_squared_error(Y_test_pred, Y_test))
        #print(f'val rmse score is {val_score}')
        # Y_test += clf.predict(X_test, num_iteration=clf.best_iteration)/n_fold
        dict_metrics['run_id'].append(datetime.now())
        dict_metrics['cols'].append(";".join(X_train.columns.tolist()))
        dict_metrics['model_params'].append(model_params)
        dict_metrics['metric_name'].append('rmse')
        dict_metrics['metrics_val'].append(val_score)
    df_metrics = pd.DataFrame.from_dict(dict_metrics)
    print(" DF metrics ")
    print(df_metrics)
    df_metrics.to_csv("df_metrics.csv")
def generate_feature_all( input_path = "data/output", out_path="." , input_raw_path = ".", auxiliary_csv_path = None, drop_cols = None, index_cols = None, merge_cols_mapping = None, cat_cols = None, id_cols = None, dep_col = None, max_rows = 10):
    """Run every feature-generation step and save each group to parquet.

    Each call delegates to features_generate_file with one generator
    (presumably star-imported from util_feat_m5 -- confirm against that
    module): basic time features, rolling stats, lags, tsfresh features,
    and the identity (pass-through) features.

    NOTE(review): out_path is accepted but never used; output goes to
    *input_path*.
    """
    features_generate_file(".", input_path, features_time_basic, "basic_time", id_cols = id_cols)
    features_generate_file(".", input_path, features_rolling, "rolling", dep_col = dep_col, id_cols = id_cols)
    features_generate_file(".", input_path, features_lag, "lag", dep_col = dep_col, id_cols = id_cols)
    # Bug fix: max_rows was previously passed positionally, landing in the
    # cat_cols parameter slot of features_generate_file; pass it by keyword.
    # tsfresh saves its pieces step-wise, so skip the single-parquet save.
    features_generate_file(".", input_path, features_tsfresh, "tsfresh", input_raw_path, auxiliary_csv_path, drop_cols, index_cols, merge_cols_mapping, max_rows = max_rows, step_wise_saving = True, id_cols = id_cols)
    features_generate_file(".", input_path, identity_features, "identity", cat_cols = cat_cols, drop_cols = ['d', 'id', 'day', 'wm_yr_wk'])
def main( input_path = "data/output", out_path="",
          do_generate_raw=True, do_generate_feature=True,
          max_rows = 10):
    """Pipeline entry point: build raw data, generate features, then evaluate.

    Parameters
    ----------
    do_generate_raw : bool
        Rebuild raw_merged.df.parquet (only needed once per dataset).
    do_generate_feature : bool
        Regenerate all feature-group parquet files.
    max_rows : int
        Row cap forwarded to the raw/feature steps (-1 = all rows).

    NOTE(review): several inner calls hard-code "data/output" instead of
    using *input_path*/*out_path* -- verify before changing the defaults.
    """
    # create_and_save_features(100, ["set1", "set2"])
    #train(100)
    # To be run once
    if do_generate_raw :
        raw_merged_df(input_path= input_path, out_path=out_path, max_rows = max_rows, index_cols =
                      [ 'id', 'cat_id_col', 'dept_id_col', 'store_id_col', 'item_id_col', 'state_id_col'], dep_col = "demand",
                      raw_merge_cols = ['store_id_col', 'item_id_col', 'wm_yr_wk'], merge_cols_mapping = {"left" : "day", "right" : "d"},
                      nan_cols = ['event_name_1_col', 'event_type_1_col', 'event_name_2_col', 'event_type_2_col'],
                      cat_cols = ['dept_id_col', 'cat_id_col', 'store_id_col', 'state_id_col', 'event_name_1_col', 'event_type_1_col', 'event_name_2_col', 'event_type_2_col'])
    if do_generate_feature :
        generate_feature_all(input_path="data/output", out_path="", input_raw_path = input_path + "/sales_train_gen.csv", auxiliary_csv_path = input_path + "/calendar_gen.csv", drop_cols = [ 'id', 'cat_id_col', 'dept_id_col', 'store_id_col', 'variable', 'day', 'demand', 'state_id_col', 'weekday', 'wday', 'month', 'year'], index_cols = [ 'id', 'cat_id_col', 'dept_id_col', 'store_id_col', 'item_id_col', 'state_id_col'], merge_cols_mapping = {"left" : "day", "right" : "d"}, cat_cols = ['item_id_col', 'dept_id_col', 'cat_id_col', 'store_id_col', 'state_id_col', 'event_name_1_col', 'event_type_1_col', 'event_name_2_col', 'event_type_2_col'], id_cols = ["date", "item_id_col"], dep_col = "demand", max_rows = max_rows)
    run_eval(input_path="data/output", id_cols = ["date", "item_id_col"], dep_col = "demand")
if __name__ == "__main__":
import fire
fire.Fire()
"""
import util_feat_m5
# df_meta= col_name, col_type, file_path
def featurestore_generate_feature(dir_in, dir_out, my_fun_features) :
# from util_feat_m5 import lag_featrues
# featurestore_generate_feature(dir_in, dir_out, lag_featrues)
train_df = pd.read_csv( dir_in + "/sales_train_val.csv.zip")
calendar_df = pd.read_csv(dir_in + "/calendar.csv")
price_df = pd.read_csv(dir_in + "/sell_prices.csv")
dfnew = my_fun_features(train_df, calendar_df, price_df) :
dfnew.to_parquet( dir_out +"/mfeaturesXXXX.parquet")
def featurestore_filter_features(mode="random") :
cols_cat0 = [ "feat1", "fewat2" ]
if mode == "random" :
### Random selection
cols_cat = cols_cat0[ np.random.c = hoice( 5, len(cols_cat) ) ]
cols_num = cols_num0[ np.random.c = hoice( 5, len(cols_num) ) ]
return cols_cat, col_num
if mode == "all" :
pass
if mode == "smartway" :
pass
def train(model, pars={} ) :
data_pars = {}
model_pars = {}
for ii in range(n_experiments) :
cols_cat, cols_num = featurestore_filter_features()
df = featurestore_get_feature_fromcolname(path, cols_cat + cols_num, "train")
dftest = featurestore_get_feature_fromcolname(path, cols_cat + cols_num, 'test')
X_train = X_transform( df, cols_num, cols_cat, pars) # select sri
y_train = y_transform(df, coly)
X_test = X_transform( dftest, cols_num, cols_cat, pars) # select variables
y_test = y_transform(dftest, coly)
lgbm = lgb.LGBMRegressor()
lgbm.fit( X_train, y_train)
# prediction + metrics
y_test_pred = lgbm.predict(X_test)
metric_val = metrics_calc(y_test, y_test_pred)
### Store in metrics :
# run_id, feat_name, feat_name_long, feat_type, model_params, metric_name, metric_val
# 3,roling_demand,Mean of the variable estimates,lag_features,params = {"objective" : "poisson","metric" :"rmse","force_row_wise" : True,"learning_rate" : 0.075,
"sub_row" : 0.75,"bagging_freq" : 1,"lambda_l2" : 0.1,"metric": ["rmse"],'verbosity': 1,'num_iterations' : 250,
},rmse,1.16548
df_metrics['run_id'] = time()
df_metrics['cols'].append( ",".join( cols_num + cols_cat ))
df_metrics['metrics_val'].append(metric_val)
def test_old():
from util_feat_m5 import lag_featrues
train_df = pd.read_csv("sales_train_val.csv")
calendar_df = pd.read_csv("calendar.csv")
price_df = pd.read_csv("sell_prices.csv")
sample = pd.read_csv("sample_submi.csv")
calendar_df["date_dt"] = pd.to_datetime(calendar_df["date"])
train = train_df.copy()
price = price_df.copy()
calendar = calendar_df.copy()
Train_data = train.iloc[:,:-56]
Val_data = train.iloc[:,:-28]
X_train = lag_featrues(Train_data).iloc[:,5:] # select variables
y_train = train.iloc[:,-56]
X_test = lag_featrues(Val_data).iloc[:,5:]
y_test = train.iloc[:,-28]
# Create instance
lgbm = lgb.LGBMRegressor()
# Training and score
learning_rate = [0.15, 0.2, 0.25]
max_depth = [15, 20, 25]
param_grid = {'learning_rate': learning_rate, 'max_depth': max_depth}
# Fitting
cv_lgbm = GridSearchCV(lgbm, param_grid, cv=10, n_jobs =1)
cv_lgbm.fit(X_train, y_train)
print("Best params:{}".format(cv_lgbm.best_params_))
# best params
best_lg = cv_lgbm.best_estimator_
# prediction
y_train_pred_lg = best_lg.predict(X_train)
y_test_pred_lg = best_lg.predict(X_test)
print("MSE train:{}".format(mean_squared_error(y_train, y_train_pred_lg)))
print("MSE test;{}".format(mean_squared_error(y_test, y_test_pred_lg)))
print("R2 score train:{}".format(r2_score(y_train, y_train_pred_lg)))
print("R2 score test:{}".format(r2_score(y_test, y_test_pred_lg)))
#Predict using only variables with an importance of 1 or higher.
importance = best_lg.feature_importances_
indices = np.argsort(importance)[::-1]
# print importance
importance_df = pd.DataFrame({})
columns = []
importance_ = []
for f in range(X_train.shape[1]):
print("%2d) %-*s %.2f" %(f+1, 30, X_train.columns[indices[f]], importance[indices[f]]))
col = X_train.columns[indices[f]]
imp = importance[indices[f]]
columns.append(col)
importance_.append(imp)
importance_df["col_name"] = columns
importance_df["importance"] = importance_
importance = best_lg.feature_importances_
indices = np.argsort(importance)[::-1]
# importance columns (>0)
imp_col = importance_df[importance_df["importance"]>0]["col_name"].values
# Train test split, select by imp_col
X_train = lag_featrues(Train_data).iloc[:,5:][imp_col] # select variables
y_train = train.iloc[:,-56]
X_test = lag_featrues(Val_data).iloc[:,5:][imp_col]
y_test = train.iloc[:,-28]
# Create instance
lgbm = lgb.LGBMRegressor()
# Training and score
learning_rate = [0.15, 0.2, 0.25]
max_depth = [15, 20, 25]
param_grid = {'learning_rate': learning_rate, 'max_depth': max_depth}
# Fitting
cv_lgbm = GridSearchCV(lgbm, param_grid, cv=10, n_jobs =1)
cv_lgbm.fit(X_train, y_train)
print("Best params:{}".format(cv_lgbm.best_params_))
# best params
best_lg = cv_lgbm.best_estimator_
# prediction
y_train_pred_lg = best_lg.predict(X_train)
y_test_pred_lg = best_lg.predict(X_test)
print("MSE train:{}".format(mean_squared_error(y_train, y_train_pred_lg)))
print("MSE test;{}".format(mean_squared_error(y_test, y_test_pred_lg)))
print("R2 score train:{}".format(r2_score(y_train, y_train_pred_lg)))
print("R2 score test:{}".format(r2_score(y_test, y_test_pred_lg)))
run_id=list(range(300))
df_metrics=pd.DataFrame(run_id,columns=['run_id'])
df_metrics['feat_name'] = pd.Series(X_train.columns[0:300], index=dataframe.index)
df_metrics['feat_type']=df_metrics.feat_name
df_metrics.replace({'feat_type': r'^lag_.*'}, {'feat_type': 'lag'}, regex=True,inplace=True)
df_metrics.replace({'feat_type': r'^rolling.*'}, {'feat_type': 'rolling'}, regex=True,inplace=True)
df_metrics['parameter'] = pd.Series(best_lg, index=dataframe.index)
df_metrics['metric_name'] ="MSE"
df_metrics['metric_val'] = pd.Series(pred_mse[:300], index=dataframe.index)
df_metrics.to_csv("train.csv")
"""
| 13,610 | 0 | 345 |
b2076d58fa1b23dc908d072aad7a4364d1c6f1a0 | 42,813 | py | Python | python/mxnet/ndarray.py | Abusnina/mxnet | 7f8d94a24bf64fe0f24712a7952a09725c2df9bd | [
"Apache-2.0"
] | 6 | 2017-06-09T02:32:10.000Z | 2020-03-18T03:17:00.000Z | python/mxnet/ndarray.py | Abusnina/mxnet | 7f8d94a24bf64fe0f24712a7952a09725c2df9bd | [
"Apache-2.0"
] | null | null | null | python/mxnet/ndarray.py | Abusnina/mxnet | 7f8d94a24bf64fe0f24712a7952a09725c2df9bd | [
"Apache-2.0"
] | 6 | 2017-06-27T06:52:40.000Z | 2019-11-04T14:34:25.000Z | # coding: utf-8
# pylint: disable= too-many-lines, redefined-builtin, protected-access
"""NDArray API of mxnet."""
from __future__ import absolute_import
from __future__ import division
import ctypes
import warnings
import sys
import functools
import operator
import numpy as np
from .base import _LIB, string_types, numeric_types
from .base import c_array, mx_float, py_str, c_str, mx_real_t
from .base import mx_uint, NDArrayHandle, FunctionHandle
from .base import ctypes2buffer
from .base import check_call, ctypes2docstring
from .context import Context
from . import _ndarray_internal as _internal
# pylint: disable= no-member
_DTYPE_NP_TO_MX = {
np.float32 : 0,
np.float64 : 1,
np.float16 : 2,
np.uint8 : 3,
np.int32 : 4
}
_DTYPE_MX_TO_NP = {
0 : np.float32,
1 : np.float64,
2 : np.float16,
3 : np.uint8,
4 : np.int32
}
# pylint: enable= no-member
def _new_empty_handle():
    """Return a new empty handle.

    An empty handle holds no shape or data; it serves as a placeholder
    to receive the result of an operator call.

    Returns
    -------
    NDArrayHandle
        A new empty ndarray handle.
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
    return hdl
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
    """Return a new handle with specified shape and context.

    Parameters
    ----------
    shape : tuple of int
        Shape of the array to allocate.
    ctx : Context
        Device context to allocate on.
    delay_alloc : bool
        If True, the backend may defer the actual memory allocation.
    dtype : numpy dtype, optional
        Element type; must be one of the keys of _DTYPE_NP_TO_MX.

    Returns
    -------
    NDArrayHandle
        A new ndarray handle (contents uninitialized).
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayCreateEx(
        c_array(mx_uint, shape),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
        ctypes.byref(hdl)))
    return hdl
def waitall():
    """Block until all pending asynchronous MXNet operations finish.

    Mainly useful for benchmarking, to ensure lazily scheduled work has
    actually completed before timing stops.
    """
    check_call(_LIB.MXNDArrayWaitAll())
class NDArray(object):
"""NDArray object in mxnet.
NDArray is basic ndarray/Tensor like data structure in mxnet.
"""
# pylint: disable= no-member
def __init__(self, handle, writable=True):
"""initialize a new NDArray
Parameters
----------
handle : NDArrayHandle
NDArray handle of C API
"""
assert isinstance(handle, NDArrayHandle)
self.handle = handle
self.writable = writable
def __setitem__(self, in_slice, value):
"""Set ndarray value.
`value` can be a scalar, an `NDArray` or numpy array of compatible shape.
The following modes are supported:
- `array[:] = value`: set all the contents
- `array[i] = value`: set the i-th slice. If the array is of dimension
`(d1, d2, d3)`, it sets value of a slice of shape `(1, d2, d3)`.
- `array[i:j] = value`: similarly, if the array is of dimension
`(d1, d2, d3)`, it sets value of a slice of shape `(j-i, d2, d3)`.
Fully-dimensional indexing is also supported. For example, if array is
of shape `(d1, d2, d3)`, one can do
- `array[:, :, :] = value`: achieving the same effect of `array[:] = value`
- `array[:, i, j:k] = value`: each index could be a python slice or an int.
"""
# pylint: disable=too-many-branches
if not self.writable:
raise ValueError('trying to assign to a readonly NDArray')
if isinstance(in_slice, int):
sliced_arr = self._at(in_slice)
sliced_arr[:] = value
return
if isinstance(in_slice, slice):
if in_slice.step is not None:
raise ValueError('NDArray only support continuous slicing on axis 0')
if in_slice.start is not None or in_slice.stop is not None:
sliced_arr = self._slice(in_slice.start, in_slice.stop)
sliced_arr[:] = value
return
if isinstance(value, NDArray):
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
_internal._set_value(float(value), out=self)
elif isinstance(value, (np.ndarray, np.generic)):
self._sync_copyfrom(value)
else:
raise TypeError('type %s not supported' % str(type(value)))
if isinstance(in_slice, tuple):
# multi-dimension indexing
my_shape = self.shape
assert len(in_slice) == len(my_shape)
for slice_i in in_slice:
assert isinstance(slice_i, (slice, int))
begin = [0 for _ in my_shape]
end = [x for x in my_shape]
for i, slice_i in enumerate(in_slice):
if isinstance(slice_i, int):
assert slice_i < my_shape[i]
begin[i] = slice_i
end[i] = slice_i + 1
if isinstance(slice_i, slice):
# only support continuous slicing
assert slice_i.step is None
begin[i] = slice_i.start or 0
end[i] = slice_i.stop or my_shape[i]
assert begin[i] < end[i]
assert end[i] <= my_shape[i]
begin = tuple(begin)
end = tuple(end)
if isinstance(value, NDArray):
value = value.as_in_context(self.context)
_internal._crop_assign(self, value, out=self,
begin=begin, end=end)
elif isinstance(value, numeric_types):
_internal._crop_assign_scalar(self, out=self,
begin=begin, end=end,
scalar=value)
elif isinstance(value, (np.ndarray, np.generic)):
value = array(value, ctx=self.context)
_internal._crop_assign(self, value, out=self,
begin=begin, end=end)
else:
raise TypeError('type %s not supported' % str(type(value)))
# pylint: enable=too-many-branches
    def __getitem__(self, in_slice):
        """Index into the array.

        Supports integer indexing (a view of the i-th sub-array) and
        continuous slicing along axis 0 (the slice step must be None).
        The full slice ``[:]`` returns self.
        """
        if isinstance(in_slice, int):
            return self._at(in_slice)
        if not isinstance(in_slice, slice) or in_slice.step is not None:
            raise ValueError('NDArray only support continuous slicing on axis 0')
        if in_slice.start is not None or in_slice.stop is not None:
            return self._slice(in_slice.start, in_slice.stop)
        else:
            return self
def _sync_copyfrom(self, source_array):
"""Peform an synchronize copy from the array.
Parameters
----------
source_array : array_like
The data source we should like to copy from.
"""
if not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=self.dtype)
except:
raise TypeError('array must be an array_like data,' +
'type %s is not supported' % str(type(array)))
source_array = np.ascontiguousarray(source_array, dtype=self.dtype)
if source_array.shape != self.shape:
raise ValueError('Shape inconsistant: expected %s vs got %s'%(
str(self.shape), str(source_array.shape)))
check_call(_LIB.MXNDArraySyncCopyFromCPU(
self.handle,
source_array.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(source_array.size)))
    def _slice(self, start, stop):
        """Return a sliced NDArray that shares memory with current one.

        Parameters
        ----------
        start : int
            Starting index of slice.
        stop : int
            Finishing index of slice.
        """
        handle = NDArrayHandle()
        # NOTE(review): truthiness tests treat 0 like None, so an explicit
        # stop of 0 falls back to shape[0]; presumably callers never pass 0.
        start = mx_uint(start) if start else mx_uint(0)
        stop = mx_uint(stop) if stop else mx_uint(self.shape[0])
        check_call(_LIB.MXNDArraySlice(
            self.handle, start, stop, ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)
def _at(self, idx):
"""Return a sub NDArray that shares memory with current one.
Parameters
----------
idx : int
index of sub array.
"""
handle = NDArrayHandle()
idx = mx_uint(idx)
check_call(_LIB.MXNDArrayAt(
self.handle, idx, ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)
    def reshape(self, new_shape):
        """Return a reshaped NDArray that shares memory with current one.

        Parameters
        ----------
        new_shape : iterable of int
            New shape of the NDArray; presumably the element count must be
            unchanged (validated by the C API) -- TODO confirm.
        """
        handle = NDArrayHandle()
        check_call(_LIB.MXNDArrayReshape(self.handle,
                                         len(new_shape),
                                         c_array(ctypes.c_int, new_shape),
                                         ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)
    # pylint: disable= undefined-variable
    def broadcast_to(self, shape):
        """Broadcast the current NDArray to the given shape.

        Semantics match numpy broadcasting: the requested shape must have at
        least as many dimensions, and every existing dimension must either
        match the target or be 1.

        Parameters
        ----------
        shape : iterable of int
            The shape to broadcast to.
        """
        cur_shape = self.shape
        err_str = 'operands could not be broadcast together with remapped shapes' \
                  '[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
        if len(shape) < len(cur_shape):
            raise ValueError(err_str)
        # Left-pad the current shape with 1s so both shapes have equal rank.
        cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
        cur_shape_arr = np.array(cur_shape)
        # Axes where the shapes differ must currently be 1 to be broadcastable.
        broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
        if (cur_shape_arr[broadcasting_axes] != 1).any():
            raise ValueError(err_str)
        if cur_shape != self.shape:
            # Rank changed: reshape first so the backend op sees matching ndim.
            return broadcast_to(self.reshape(cur_shape), shape=shape)
        else:
            return broadcast_to(self, shape=tuple(shape))
    # pylint: enable= undefined-variable
def wait_to_read(self):
"""Block until all pending writes operations on current NDArray are finished.
This function will return when all the pending writes to the current
NDArray finishes. There can still be pending read going on when the
function returns.
"""
check_call(_LIB.MXNDArrayWaitToRead(self.handle))
@property
def shape(self):
"""Get shape of current NDArray.
Returns
-------
a tuple representing shape of current ndarray
"""
ndim = mx_uint()
pdata = ctypes.POINTER(mx_uint)()
check_call(_LIB.MXNDArrayGetShape(
self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
return tuple(pdata[:ndim.value])
@property
def size(self):
"""Get size of current NDArray.
Returns
-------
an int representing size of current ndarray
"""
return np.prod(self.shape)
@property
def context(self):
"""Get context of current NDArray.
Returns
-------
context : mxnet.Context
The context of current NDArray.
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
@property
def dtype(self):
"""Get data type of current NDArray.
Returns
-------
an numpy.dtype object representing type of current ndarray
"""
mx_dtype = ctypes.c_int()
check_call(_LIB.MXNDArrayGetDType(
self.handle, ctypes.byref(mx_dtype)))
return _DTYPE_MX_TO_NP[mx_dtype.value]
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Get transpose of current NDArray"""
if len(self.shape) != 2:
raise ValueError('Only 2D matrix is allowed to be transposed')
return transpose(self)
# pylint: enable= invalid-name, undefined-variable
def asnumpy(self):
"""Return a copied numpy array of current array.
Returns
-------
array : numpy.ndarray
A copy of array content.
"""
data = np.empty(self.shape, dtype=self.dtype)
check_call(_LIB.MXNDArraySyncCopyToCPU(
self.handle,
data.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(data.size)))
return data
    def asscalar(self):
        """Return a CPU scalar(float) of current ndarray.

        The ndarray must have shape (1,).

        Returns
        -------
        scalar : np.float
            The scalar representation of the ndarray.
        """
        if self.shape != (1,):
            raise ValueError("The current array is not a scalar")
        # Copy the single element to host memory and unwrap it.
        return self.asnumpy()[0]
def astype(self, dtype):
"""Return a copied numpy array of current array with specified type.
Parameters
----------
dtype : numpy.dtype or string
Desired type of result array.
Returns
-------
array : numpy.ndarray
A copy of array content.
"""
res = empty(self.shape, ctx=self.context, dtype=dtype)
self.copyto(res)
return res
def copyto(self, other):
"""Copy the content of current array to other.
When other is NDArray, the content is copied over.
When other is a Context, a new NDArray in the context
will be created as target
Parameters
----------
other : NDArray or Context
Target NDArray or context we want to copy data to.
Returns
-------
dst : NDArray
The copy target NDArray
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('copy an array to itself, is it intended?',
RuntimeWarning)
return
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto do not support type ' + str(type(other)))
def copy(self):
"""Make a copy of the current ndarray on the same context
Return
------
cpy : NDArray
The copy
"""
return self.copyto(self.context)
# pylint: enable= no-member
def as_in_context(self, context):
"""Return an `NDArray` that lives in the target context. If the array
is already in that context, `self` is returned. Otherwise, a copy is
made.
Parameters
----------
context : Context
The target context we want the return value to live in.
Returns
-------
A copy or `self` as an `NDArray` that lives in the target context.
"""
if self.context == context:
return self
return self.copyto(context)
def onehot_encode(indices, out):
    """One hot encoding indices into matrix out.

    Parameters
    ----------
    indices: NDArray
        An NDArray containing indices of the categorical features.
    out: NDArray
        The result holder of the encoding; written in place.

    Returns
    -------
    out: Array
        Same as out.
    """
    # pylint: disable= no-member, protected-access
    return _internal._onehot_encode(indices, out, out=out)
    # pylint: enable= no-member, protected-access
def empty(shape, ctx=None, dtype=mx_real_t):
    """Create an empty uninitialized new NDArray, with specified shape.

    Parameters
    ----------
    shape : tuple or int
        Shape of the NDArray; a bare int is treated as a 1-D shape.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy dtype, optional
        Element type of the array (default float32).

    Returns
    -------
    out: Array
        The created NDArray; its contents are uninitialized.
    """
    if isinstance(shape, int):
        shape = (shape, )
    if ctx is None:
        ctx = Context.default_ctx
    return NDArray(handle=_new_alloc_handle(shape, ctx, False, dtype))
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
    """ Helper function for element-wise operation
    The function will perform numpy-like broadcasting if needed and call different functions

    Parameters
    ----------
    lhs : NDArray or numeric value
        left hand side operand
    rhs : NDArray or numeric value
        right hand side operand
    fn_array : function
        function to be called if both lhs and rhs are of NDArray type
    fn_scalar : function
        function to be called if both lhs and rhs are numeric values
    lfn_scalar : function
        function to be called if lhs is NDArray while rhs is numeric value
    rfn_scalar : function
        function to be called if lhs is numeric value while rhs is NDArray;
        if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar

    Returns
    -------
    out: NDArray
        result array
    """
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            # both sides are plain python numbers
            return fn_scalar(lhs, rhs)
        else:
            if rfn_scalar is None:
                # commutative function: reuse the scalar-on-the-right variant
                return lfn_scalar(rhs, float(lhs))
            else:
                return rfn_scalar(rhs, float(lhs))
    elif isinstance(rhs, numeric_types):
        return lfn_scalar(lhs, float(rhs))
    elif isinstance(rhs, NDArray):
        # check whether broadcasting is needed; the initializer 1 keeps the
        # product well-defined even for an empty shape tuple (reduce on an
        # empty sequence without an initializer raises TypeError)
        lsize = functools.reduce(operator.mul, lhs.shape, 1)
        rsize = functools.reduce(operator.mul, rhs.shape, 1)
        if lsize < rsize:
            lhs = lhs.broadcast_to(rhs.shape)
        elif lsize > rsize:
            rhs = rhs.broadcast_to(lhs.shape)
        return fn_array(lhs, rhs)
    else:
        raise TypeError('type %s not supported' % str(type(rhs)))
#pylint: enable= too-many-arguments, no-member, protected-access
def add(lhs, rhs):
    """Element-wise addition of the two operands.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs,
                         _internal._plus, operator.add,
                         _internal._plus_scalar, None)
# pylint: enable= no-member, protected-access
def subtract(lhs, rhs):
    """Element-wise subtraction of the two operands.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs,
                         _internal._minus, operator.sub,
                         _internal._minus_scalar, _internal._rminus_scalar)
# pylint: enable= no-member, protected-access
def multiply(lhs, rhs):
    """Element-wise multiplication of the two operands.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs,
                         _internal._mul, operator.mul,
                         _internal._mul_scalar, None)
# pylint: enable= no-member, protected-access
def divide(lhs, rhs):
    """Element-wise division of the two operands.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs,
                         _internal._div, operator.truediv,
                         _internal._div_scalar, _internal._rdiv_scalar)
# pylint: enable= no-member, protected-access
def power(lhs, rhs):
    """Element-wise power (lhs raised to rhs).

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs,
                         _internal._power, operator.pow,
                         _internal._power_scalar, _internal._rpower_scalar)
# pylint: enable= no-member, protected-access
def maximum(lhs, rhs):
    """Element-wise maximum of the two operands.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs,
                         _internal._maximum,
                         lambda a, b: a if a > b else b,
                         _internal._maximum_scalar, None)
# pylint: enable= no-member, protected-access
def minimum(lhs, rhs):
    """Element-wise minimum of the two operands.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs,
                         _internal._minimum,
                         lambda a, b: a if a < b else b,
                         _internal._minimum_scalar, None)
# pylint: enable= no-member, protected-access
def true_divide(lhs, rhs):
    """Element-wise true division, mirroring numpy's `true_divide`.

    It adjusts the output type to present the best answer, regardless of
    input types.
    """
    return divide(lhs, rhs)
def negative(arr):
    """Return the element-wise negation of `arr` (computed as arr * -1)."""
    return multiply(arr, -1.0)
def zeros(shape, ctx=None, dtype=mx_real_t):
    """Create a new NDArray of the given shape, filled with 0.

    Parameters
    ----------
    shape : tuple
        shape of the NDArray.
    ctx : Context, optional.
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype, optional
        Element type of the array (defaults to mx_real_t).

    Returns
    -------
    out: Array
        The created NDArray.
    """
    out = empty(shape, ctx, dtype)
    out[:] = 0.0
    return out
def ones(shape, ctx=None, dtype=mx_real_t):
    """Create a new NDArray of the given shape, filled with 1.

    Parameters
    ----------
    shape : tuple
        shape of the NDArray.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype, optional
        Element type of the array (defaults to mx_real_t).

    Returns
    -------
    out: Array
        The created NDArray.
    """
    out = empty(shape, ctx, dtype)
    out[:] = 1.0
    return out
def full(shape, val, ctx=None, dtype=mx_real_t):
    """Create a new NDArray filled with given value, with specified shape.

    Parameters
    ----------
    shape : tuple
        shape of the NDArray.
    val : float
        value to be filled with.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype, optional
        Element type of the array. Defaults to mx_real_t, which matches the
        default used before this parameter existed, so the change is
        backward compatible and makes `full` consistent with
        `zeros`/`ones`/`empty`.

    Returns
    -------
    out: Array
        The created NDArray.
    """
    arr = empty(shape, ctx, dtype)
    arr[:] = val
    return arr
def array(source_array, ctx=None, dtype=mx_real_t):
    """Create a new NDArray that copies content from source_array.

    Parameters
    ----------
    source_array : array_like
        Source data to create NDArray from.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype, optional
        Element type of the created NDArray.

    Returns
    -------
    out: Array
        The created NDArray.
    """
    if not isinstance(source_array, np.ndarray):
        try:
            source_array = np.array(source_array, dtype=dtype)
        except Exception as err:
            # narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; chain the cause for debugging
            raise TypeError('source_array must be array like object') from err
    arr = empty(source_array.shape, ctx, dtype)
    arr[:] = source_array
    return arr
def concatenate(arrays, axis=0, always_copy=True):
    """Concatenate a list of NDArrays along the first dimension.
    Parameters
    ----------
    arrays : list of NDArray
        Arrays to be concatenate. They must have identical shape except
        the first dimension. They also must have the same data type.
    axis : int
        The axis along which to concatenate.
    always_copy : bool
        Default `True`. When not `True`, if the arrays only contain one
        `NDArray`, that element will be returned directly, avoid copying.
    Returns
    -------
    An `NDArray` that lives on the same context as `arrays[0].context`.
    """
    assert isinstance(arrays, list)
    assert len(arrays) > 0
    assert isinstance(arrays[0], NDArray)
    if not always_copy and len(arrays) == 1:
        # caller allowed us to skip the copy for the single-array case
        return arrays[0]
    # accumulate the total extent along `axis`, checking that every other
    # dimension and the dtype agree across all inputs
    shape_axis = arrays[0].shape[axis]
    shape_rest1 = arrays[0].shape[0:axis]
    shape_rest2 = arrays[0].shape[axis+1:]
    dtype = arrays[0].dtype
    for arr in arrays[1:]:
        shape_axis += arr.shape[axis]
        assert shape_rest1 == arr.shape[0:axis]
        assert shape_rest2 == arr.shape[axis+1:]
        assert dtype == arr.dtype
    ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
    ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)
    # copy each input into its slot of the output; `idx` tracks the running
    # offset along the concatenation axis
    idx = 0
    begin = [0 for _ in ret_shape]
    end = list(ret_shape)
    for arr in arrays:
        if axis == 0:
            # axis 0 can use plain slice assignment
            ret[idx:idx+arr.shape[0]] = arr
        else:
            begin[axis] = idx
            end[axis] = idx+arr.shape[axis]
            # pylint: disable=no-member,protected-access
            _internal._crop_assign(ret, arr, out=ret,
                                   begin=tuple(begin),
                                   end=tuple(end))
            # pylint: enable=no-member,protected-access
        idx += arr.shape[axis]
    return ret
def load(fname):
    """Load ndarray from binary file.
    You can also use pickle to do the job if you only work on python.
    The advantage of load/save is the file is language agnostic.
    This means the file saved using save can be loaded by other language binding of mxnet.
    You also get the benefit being able to directly load/save from cloud storage(S3, HDFS)
    Parameters
    ----------
    fname : str
        The name of the file.Can be S3 or HDFS address (remember built with S3 support).
        Example of fname:
        - `s3://my-bucket/path/my-s3-ndarray`
        - `hdfs://my-bucket/path/my-hdfs-ndarray`
        - `/path-to/my-local-ndarray`
    Returns
    -------
    out : list of NDArray or dict of str to NDArray
        List of NDArray or dict of str->NDArray, depending on what was saved.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname need to be string')
    # out-parameters filled in by the C API: array count, name count, and
    # parallel arrays of handles and (optional) names
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(fname),
                                  ctypes.byref(out_size),
                                  ctypes.byref(handles),
                                  ctypes.byref(out_name_size),
                                  ctypes.byref(names)))
    if out_name_size.value == 0:
        # the file stored an anonymous list of arrays
        return [NDArray(NDArrayHandle(handles[i])) for i in range(out_size.value)]
    else:
        # the file stored a name->array mapping; names/handles are parallel
        assert out_name_size.value == out_size.value
        return dict(
            (py_str(names[i]), NDArray(NDArrayHandle(handles[i]))) for i in range(out_size.value))
def save(fname, data):
    """Save list of NDArray or dict of str->NDArray to binary file.
    You can also use pickle to do the job if you only work on python.
    The advantage of load/save is the file is language agnostic.
    This means the file saved using save can be loaded by other language binding of mxnet.
    You also get the benefit being able to directly load/save from cloud storage(S3, HDFS)
    Parameters
    ----------
    fname : str
        The name of the file.Can be S3 or HDFS address (remember built with S3 support).
        Example of fname:
        - `s3://my-bucket/path/my-s3-ndarray`
        - `hdfs://my-bucket/path/my-hdfs-ndarray`
        - `/path-to/my-local-ndarray`
    data : list of NDArray or dict of str to NDArray
        The data to be saved.
    """
    # marshal `data` into parallel C arrays: handles plus (for dicts) keys
    handles = []
    if isinstance(data, dict):
        keys = []
        for key, val in data.items():
            if not isinstance(key, string_types):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            if not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            keys.append(c_str(key))
            handles.append(val.handle)
        keys = c_array(ctypes.c_char_p, keys)
    else:
        for val in data:
            if not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            handles.append(val.handle)
        # NULL keys signal to the C API that an anonymous list is saved
        keys = None
    check_call(_LIB.MXNDArraySave(c_str(fname),
                                  mx_uint(len(handles)),
                                  c_array(NDArrayHandle, handles),
                                  keys))
def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
    """Decode an image from string. Requires OpenCV to work.

    Parameters
    ----------
    str_img : str
        binary image data
    clip_rect : iterable of 4 int
        clip decoded image to rectangle (x0, y0, x1, y1)
    out : NDArray
        output buffer. can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w)
    index : int
        output decoded image to i-th slice of 4 dimensional buffer
    channels : int
        number of channels to output. Decode to grey scale when channels = 1.
    mean : NDArray
        subtract mean from decode image before outputing.
    """
    # pylint: disable= no-member, protected-access, too-many-arguments
    if mean is None:
        # an empty handle tells the backend that no mean should be subtracted
        mean = NDArray(_new_empty_handle())
    # forward `out` only when the caller supplied a buffer; otherwise let the
    # backend allocate the result (removes the duplicated call of the
    # previous if/else version)
    extra_kwargs = {} if out is None else {'out': out}
    return _internal._imdecode(mean, index,
                               clip_rect[0],
                               clip_rect[1],
                               clip_rect[2],
                               clip_rect[3],
                               channels,
                               len(str_img),
                               str_img=str_img,
                               **extra_kwargs)
# pylint: disable=too-many-locals, invalid-name
def _make_ndarray_function(handle):
    """Create a NDArray function from the FunctionHandle.

    Queries the C API for the function's arity and flags, then returns a
    python wrapper (binary, unary, or generic depending on the signature)
    whose __name__/__doc__ are filled from the C-side metadata.
    """
    # bit flags reported by MXFuncDescribe in `type_mask`
    NDARRAY_ARG_BEFORE_SCALAR = 1
    ACCEPT_EMPTY_MUTATE_TARGET = 1 << 2
    # Get the property of NDArray
    n_used_vars = mx_uint()
    n_scalars = mx_uint()
    n_mutate_vars = mx_uint()
    type_mask = ctypes.c_int()
    check_call(_LIB.MXFuncDescribe(
        handle,
        ctypes.byref(n_used_vars),
        ctypes.byref(n_scalars),
        ctypes.byref(n_mutate_vars),
        ctypes.byref(type_mask)))
    n_mutate_vars = n_mutate_vars.value
    n_used_vars = n_used_vars.value
    n_scalars = n_scalars.value
    type_mask = type_mask.value
    accept_empty_mutate = (type_mask & ACCEPT_EMPTY_MUTATE_TARGET) != 0
    # infer type of the function: whether NDArray args come before scalars
    # in the positional argument list
    if (type_mask & NDARRAY_ARG_BEFORE_SCALAR) != 0:
        use_vars_range = range(0, n_used_vars)
        scalar_range = range(n_used_vars, n_used_vars + n_scalars)
    else:
        scalar_range = range(0, n_scalars)
        use_vars_range = range(n_scalars, n_used_vars + n_scalars)
    # Get the information from the function
    name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    ret_type = ctypes.c_char_p()
    check_call(_LIB.MXFuncGetInfo(
        handle, ctypes.byref(name), ctypes.byref(desc),
        ctypes.byref(num_args),
        ctypes.byref(arg_names),
        ctypes.byref(arg_types),
        ctypes.byref(arg_descs),
        ctypes.byref(ret_type)))
    func_name = py_str(name.value)
    param_str = ctypes2docstring(num_args, arg_names, arg_types, arg_descs)
    doc_str = ('%s\n\n' +
               '%s\n' +
               'out : NDArray, optional\n' +
               '    The output NDArray to hold the result.\n\n'+
               'Returns\n' +
               '-------\n' +
               'out : NDArray\n'+
               '    The output of binary function.')
    doc_str = doc_str % (py_str(desc.value), param_str)
    # Definition of internal functions.
    def binary_ndarray_function(lhs, rhs, out=None, **kwargs):
        """Internal binary function
        """
        if out:
            if not isinstance(out, NDArray):
                raise TypeError('out must be NDArray')
            if not out.writable:
                raise TypeError('out must be writable')
        else:
            if not accept_empty_mutate:
                raise TypeError('argument out is required to call %s' % func_name)
            out = NDArray(_new_empty_handle())
        check_call(_LIB.MXFuncInvokeEx( \
            handle, \
            c_array(NDArrayHandle, (lhs.handle, rhs.handle)), \
            c_array(mx_float, ()), \
            c_array(NDArrayHandle, (out.handle,)), \
            ctypes.c_int(len(kwargs)), \
            c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()]), \
            c_array(ctypes.c_char_p, [c_str(str(i)) for i in kwargs.values()])))
        return out
    def unary_ndarray_function(src, out=None, *args, **kwargs):
        """internal NDArray function"""
        if out:
            if not isinstance(out, NDArray):
                raise TypeError('out must be NDArray')
            if not out.writable:
                raise TypeError('out must be writable')
        else:
            if not accept_empty_mutate:
                raise TypeError('argument out is required to call %s' % func_name)
            out = NDArray(_new_empty_handle())
        check_call(_LIB.MXFuncInvokeEx( \
            handle, \
            c_array(NDArrayHandle, (src.handle,)), \
            c_array(mx_float, [args[i] for i in scalar_range]), \
            c_array(NDArrayHandle, (out.handle,)), \
            ctypes.c_int(len(kwargs)), \
            c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()]), \
            c_array(ctypes.c_char_p, [c_str(str(i)) for i in kwargs.values()])))
        return out
    def generic_ndarray_function(*args, **kwargs):
        """Invoke this function by passing in parameters
        Parameters
        ----------
        *args
            Positional arguments of input scalars and NDArray
        out : NDArray or tuple of NDArray, optional
            Output NDArray, used to hold the output result.
        Returns
        -------
        out : NDArray
            The result NDArray(tuple) of result of computation.
        """
        if 'out' in kwargs:
            mutate_vars = kwargs['out']
            if isinstance(mutate_vars, NDArray):
                mutate_vars = (mutate_vars,)
            if len(mutate_vars) != n_mutate_vars:
                # fixed: the message was previously passed as a tuple
                # (TypeError('...', a, b)) instead of being %-formatted
                raise TypeError('expect %d out in %s' % (n_mutate_vars, func_name))
            del kwargs['out']
        else:
            if accept_empty_mutate:
                mutate_vars = tuple(
                    NDArray(_new_empty_handle()) for i in range(n_mutate_vars))
            else:
                raise TypeError('argument out is required to call %s' % func_name)
        check_call(_LIB.MXFuncInvokeEx( \
            handle, \
            c_array(NDArrayHandle, [args[i].handle for i in use_vars_range]), \
            c_array(mx_float, [args[i] for i in scalar_range]), \
            c_array(NDArrayHandle, [v.handle for v in mutate_vars]), \
            ctypes.c_int(len(kwargs)), \
            c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()]), \
            c_array(ctypes.c_char_p, [c_str(str(i)) for i in kwargs.values()])))
        if n_mutate_vars == 1:
            return mutate_vars[0]
        else:
            return mutate_vars
    # End of function declaration
    if n_mutate_vars == 1 and n_used_vars == 2 and n_scalars == 0:
        ret_function = binary_ndarray_function
    elif n_mutate_vars == 1 and n_used_vars == 1 and n_scalars == 0:
        ret_function = unary_ndarray_function
    else:
        ret_function = generic_ndarray_function
    ret_function.__name__ = func_name
    ret_function.__doc__ = doc_str
    return ret_function
# pylint: enable=too-many-locals, invalid-name
def _init_ndarray_module():
    """List and add all the ndarray functions to current module."""
    func_handles = ctypes.POINTER(FunctionHandle)()
    num_funcs = ctypes.c_uint()
    check_call(_LIB.MXListFunctions(ctypes.byref(num_funcs),
                                    ctypes.byref(func_handles)))
    this_module = sys.modules[__name__]
    internal_module = sys.modules["mxnet._ndarray_internal"]
    for idx in range(num_funcs.value):
        func = _make_ndarray_function(FunctionHandle(func_handles[idx]))
        fname = func.__name__
        if fname.startswith('_'):
            # leading underscore -> register in the internal namespace
            setattr(internal_module, fname, func)
        elif getattr(this_module, fname, None) is None:
            setattr(this_module, fname, func)
        else:
            # a hand-written wrapper already exists; keep the generated
            # version reachable under an '_internal' suffix
            setattr(this_module, fname + '_internal', func)
# Initialize the NDArray module
_init_ndarray_module()
| 32.731651 | 99 | 0.585687 | # coding: utf-8
# pylint: disable= too-many-lines, redefined-builtin, protected-access
"""NDArray API of mxnet."""
from __future__ import absolute_import
from __future__ import division
import ctypes
import warnings
import sys
import functools
import operator
import numpy as np
from .base import _LIB, string_types, numeric_types
from .base import c_array, mx_float, py_str, c_str, mx_real_t
from .base import mx_uint, NDArrayHandle, FunctionHandle
from .base import ctypes2buffer
from .base import check_call, ctypes2docstring
from .context import Context
from . import _ndarray_internal as _internal
# pylint: disable= no-member
# Mapping from numpy scalar types to the integer dtype codes used by the
# MXNet C API.
_DTYPE_NP_TO_MX = {
    np.float32 : 0,
    np.float64 : 1,
    np.float16 : 2,
    np.uint8 : 3,
    np.int32 : 4
}
# Inverse mapping: C-API dtype code -> numpy scalar type.
_DTYPE_MX_TO_NP = {
    0 : np.float32,
    1 : np.float64,
    2 : np.float16,
    3 : np.uint8,
    4 : np.int32
}
# pylint: enable= no-member
def _new_empty_handle():
    """Return a fresh handle with no array allocated behind it.

    Empty handle can be used to hold result

    Returns
    -------
    a new empty ndarray handle
    """
    handle = NDArrayHandle()
    check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(handle)))
    return handle
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
    """Return a new handle for an array of the given shape on `ctx`.

    Empty handle is only used to hold results

    Returns
    -------
    a new empty ndarray handle
    """
    handle = NDArrayHandle()
    check_call(_LIB.MXNDArrayCreateEx(
        c_array(mx_uint, shape),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
        ctypes.byref(handle)))
    return handle
def waitall():
    """Block until every pending asynchronous operation in MXNet finishes.

    This function is used for benchmark only
    """
    check_call(_LIB.MXNDArrayWaitAll())
class NDArray(object):
    """NDArray object in mxnet.
    NDArray is basic ndarray/Tensor like data structure in mxnet.
    """
    # pylint: disable= no-member
    def __init__(self, handle, writable=True):
        """initialize a new NDArray
        Parameters
        ----------
        handle : NDArrayHandle
            NDArray handle of C API
        writable : bool
            whether in-place mutation of this array is allowed
        """
        assert isinstance(handle, NDArrayHandle)
        self.handle = handle
        self.writable = writable
    def __repr__(self):
        """Return a short summary such as ``<NDArray 2x3 @cpu(0)>``."""
        shape_info = 'x'.join(['%d' % x for x in self.shape])
        return '<%s %s @%s>' % (self.__class__.__name__,
                                shape_info, self.context)
    def __del__(self):
        """Free the underlying C handle when the python object dies."""
        check_call(_LIB.MXNDArrayFree(self.handle))
    def __add__(self, other):
        """Elementwise add: ``self + other``."""
        return add(self, other)
    def __iadd__(self, other):
        """In-place add: ``self += other`` (requires a writable array)."""
        if not self.writable:
            raise ValueError('trying to add to a readonly NDArray')
        if isinstance(other, NDArray):
            return _internal._plus(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._plus_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __radd__(self, other):
        """Reflected add: ``other + self`` (addition is commutative)."""
        return self.__add__(other)
    def __sub__(self, other):
        """Elementwise subtract: ``self - other``."""
        return subtract(self, other)
    def __isub__(self, other):
        """In-place subtract: ``self -= other`` (requires writable)."""
        if not self.writable:
            raise ValueError('trying to subtract from a readonly NDArray')
        if isinstance(other, NDArray):
            return _internal._minus(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._minus_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __rsub__(self, other):
        """Reflected subtract: ``other - self``."""
        return subtract(other, self)
    def __mul__(self, other):
        """Elementwise multiply: ``self * other``."""
        return multiply(self, other)
    def __neg__(self):
        """Unary negation: ``-self``."""
        return _internal._mul_scalar(self, -1.0)
    def __imul__(self, other):
        """In-place multiply: ``self *= other`` (requires writable)."""
        if not self.writable:
            raise ValueError('trying to multiply to a readonly NDArray')
        if isinstance(other, NDArray):
            return _internal._mul(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._mul_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __rmul__(self, other):
        """Reflected multiply: ``other * self`` (commutative)."""
        return self.__mul__(other)
    def __div__(self, other):
        """Elementwise divide (python 2): ``self / other``."""
        return divide(self, other)
    def __rdiv__(self, other):
        """Reflected divide (python 2): ``other / self``."""
        return divide(other, self)
    def __idiv__(self, other):
        """In-place divide: ``self /= other`` (requires writable)."""
        if not self.writable:
            raise ValueError('trying to divide from a readonly NDArray')
        if isinstance(other, NDArray):
            return _internal._div(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._div_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __truediv__(self, other):
        """Elementwise divide (python 3): ``self / other``."""
        return divide(self, other)
    def __rtruediv__(self, other):
        """Reflected divide (python 3): ``other / self``."""
        return divide(other, self)
    def __itruediv__(self, other):
        """In-place true-divide; delegates to :meth:`__idiv__`."""
        return self.__idiv__(other)
    def __pow__(self, other):
        """Elementwise power: ``self ** other``."""
        return power(self, other)
    def __rpow__(self, other):
        """Reflected power: ``other ** self``."""
        return power(other, self)
    def __getstate__(self):
        """Support pickling by serializing the C handle to raw bytes."""
        this = self.__dict__.copy()
        handle = this['handle']
        if handle is not None:
            length = ctypes.c_size_t()
            cptr = ctypes.POINTER(ctypes.c_char)()
            check_call(_LIB.MXNDArraySaveRawBytes(self.handle,
                                                  ctypes.byref(length),
                                                  ctypes.byref(cptr)))
            this['handle'] = ctypes2buffer(cptr, length.value)
        return this
    def __setstate__(self, state):
        """Restore a pickled NDArray from its raw-byte form."""
        handle = state['handle']
        if handle is not None:
            buf = handle
            handle = NDArrayHandle()
            ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
            length = ctypes.c_size_t(len(buf))
            check_call(_LIB.MXNDArrayLoadFromRawBytes(ptr, length, ctypes.byref(handle)))
            state['handle'] = handle
        self.__dict__.update(state)
    def __setitem__(self, in_slice, value):
        """Set ndarray value.
        `value` can be a scalar, an `NDArray` or numpy array of compatible shape.
        The following modes are supported:
        - `array[:] = value`: set all the contents
        - `array[i] = value`: set the i-th slice. If the array is of dimension
          `(d1, d2, d3)`, it sets value of a slice of shape `(1, d2, d3)`.
        - `array[i:j] = value`: similarly, if the array is of dimension
          `(d1, d2, d3)`, it sets value of a slice of shape `(j-i, d2, d3)`.
        Fully-dimensional indexing is also supported. For example, if array is
        of shape `(d1, d2, d3)`, one can do
        - `array[:, :, :] = value`: achieving the same effect of `array[:] = value`
        - `array[:, i, j:k] = value`: each index could be a python slice or an int.
        """
        # pylint: disable=too-many-branches
        if not self.writable:
            raise ValueError('trying to assign to a readonly NDArray')
        if isinstance(in_slice, int):
            # integer index: assign into the i-th sub-array view
            sliced_arr = self._at(in_slice)
            sliced_arr[:] = value
            return
        if isinstance(in_slice, slice):
            if in_slice.step is not None:
                raise ValueError('NDArray only support continuous slicing on axis 0')
            if in_slice.start is not None or in_slice.stop is not None:
                sliced_arr = self._slice(in_slice.start, in_slice.stop)
                sliced_arr[:] = value
                return
            # full slice `[:]`: dispatch on the type of `value`
            if isinstance(value, NDArray):
                if value.handle is not self.handle:
                    value.copyto(self)
            elif isinstance(value, numeric_types):
                _internal._set_value(float(value), out=self)
            elif isinstance(value, (np.ndarray, np.generic)):
                self._sync_copyfrom(value)
            else:
                raise TypeError('type %s not supported' % str(type(value)))
        if isinstance(in_slice, tuple):
            # multi-dimension indexing
            my_shape = self.shape
            assert len(in_slice) == len(my_shape)
            for slice_i in in_slice:
                assert isinstance(slice_i, (slice, int))
            # compute the crop window [begin, end) along every axis
            begin = [0 for _ in my_shape]
            end = [x for x in my_shape]
            for i, slice_i in enumerate(in_slice):
                if isinstance(slice_i, int):
                    assert slice_i < my_shape[i]
                    begin[i] = slice_i
                    end[i] = slice_i + 1
                if isinstance(slice_i, slice):
                    # only support continuous slicing
                    assert slice_i.step is None
                    begin[i] = slice_i.start or 0
                    end[i] = slice_i.stop or my_shape[i]
                    assert begin[i] < end[i]
                    assert end[i] <= my_shape[i]
            begin = tuple(begin)
            end = tuple(end)
            if isinstance(value, NDArray):
                value = value.as_in_context(self.context)
                _internal._crop_assign(self, value, out=self,
                                       begin=begin, end=end)
            elif isinstance(value, numeric_types):
                _internal._crop_assign_scalar(self, out=self,
                                              begin=begin, end=end,
                                              scalar=value)
            elif isinstance(value, (np.ndarray, np.generic)):
                value = array(value, ctx=self.context)
                _internal._crop_assign(self, value, out=self,
                                       begin=begin, end=end)
            else:
                raise TypeError('type %s not supported' % str(type(value)))
        # pylint: enable=too-many-branches
    def __getitem__(self, in_slice):
        """Get ndarray: integer index returns a sub-array view, a
        step-less slice returns a shared-memory slice on axis 0."""
        if isinstance(in_slice, int):
            return self._at(in_slice)
        if not isinstance(in_slice, slice) or in_slice.step is not None:
            raise ValueError('NDArray only support continuous slicing on axis 0')
        if in_slice.start is not None or in_slice.stop is not None:
            return self._slice(in_slice.start, in_slice.stop)
        else:
            return self
    def _sync_copyfrom(self, source_array):
        """Perform a synchronous copy from `source_array` into this array.
        Parameters
        ----------
        source_array : array_like
            The data source we should like to copy from.
        """
        if not isinstance(source_array, np.ndarray):
            try:
                source_array = np.array(source_array, dtype=self.dtype)
            # NOTE(review): bare except also swallows KeyboardInterrupt, and
            # the message prints type(array) (the module-level function), not
            # type(source_array) — both look like latent bugs; confirm intent.
            except:
                raise TypeError('array must be an array_like data,' +
                                'type %s is not supported' % str(type(array)))
        source_array = np.ascontiguousarray(source_array, dtype=self.dtype)
        if source_array.shape != self.shape:
            raise ValueError('Shape inconsistant: expected %s vs got %s'%(
                str(self.shape), str(source_array.shape)))
        check_call(_LIB.MXNDArraySyncCopyFromCPU(
            self.handle,
            source_array.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_size_t(source_array.size)))
    def _slice(self, start, stop):
        """Return a sliced NDArray that shares memory with current one.
        Parameters
        ----------
        start : int
            Starting index of slice.
        stop : int
            Finishing index of slice.
        """
        handle = NDArrayHandle()
        # None (or 0) endpoints default to the full extent of axis 0
        start = mx_uint(start) if start else mx_uint(0)
        stop = mx_uint(stop) if stop else mx_uint(self.shape[0])
        check_call(_LIB.MXNDArraySlice(
            self.handle, start, stop, ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)
    def _at(self, idx):
        """Return a sub NDArray that shares memory with current one.
        Parameters
        ----------
        idx : int
            index of sub array.
        """
        handle = NDArrayHandle()
        idx = mx_uint(idx)
        check_call(_LIB.MXNDArrayAt(
            self.handle, idx, ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)
    def reshape(self, new_shape):
        """Return a reshaped NDArray that shares memory with current one.
        Parameters
        ----------
        new_shape : iterable of int
            new shape of NDArray
        """
        handle = NDArrayHandle()
        check_call(_LIB.MXNDArrayReshape(self.handle,
                                         len(new_shape),
                                         c_array(ctypes.c_int, new_shape),
                                         ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)
    # pylint: disable= undefined-variable
    def broadcast_to(self, shape):
        """ Broadcasting the current NDArray into the given shape. The semantics is
        the same with `numpy`'s broadcasting
        Parameters
        ---------
        shape : the shape to broadcast
            the broadcast shape
        """
        cur_shape = self.shape
        err_str = 'operands could not be broadcast together with remapped shapes' \
                  '[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
        if len(shape) < len(cur_shape):
            raise ValueError(err_str)
        # left-pad the current shape with 1s to the requested rank
        cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
        cur_shape_arr = np.array(cur_shape)
        broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
        # every axis that differs must currently have extent 1
        if (cur_shape_arr[broadcasting_axes] != 1).any():
            raise ValueError(err_str)
        if cur_shape != self.shape:
            return broadcast_to(self.reshape(cur_shape), shape=shape)
        else:
            return broadcast_to(self, shape=tuple(shape))
    # pylint: enable= undefined-variable
    def wait_to_read(self):
        """Block until all pending writes operations on current NDArray are finished.
        This function will return when all the pending writes to the current
        NDArray finishes. There can still be pending read going on when the
        function returns.
        """
        check_call(_LIB.MXNDArrayWaitToRead(self.handle))
    @property
    def shape(self):
        """Get shape of current NDArray.
        Returns
        -------
        a tuple representing shape of current ndarray
        """
        ndim = mx_uint()
        pdata = ctypes.POINTER(mx_uint)()
        check_call(_LIB.MXNDArrayGetShape(
            self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
        return tuple(pdata[:ndim.value])
    @property
    def size(self):
        """Get size of current NDArray.
        Returns
        -------
        an int representing size of current ndarray
        """
        return np.prod(self.shape)
    @property
    def context(self):
        """Get context of current NDArray.
        Returns
        -------
        context : mxnet.Context
            The context of current NDArray.
        """
        dev_typeid = ctypes.c_int()
        dev_id = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetContext(
            self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
        return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
    @property
    def dtype(self):
        """Get data type of current NDArray.
        Returns
        -------
        an numpy.dtype object representing type of current ndarray
        """
        mx_dtype = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetDType(
            self.handle, ctypes.byref(mx_dtype)))
        return _DTYPE_MX_TO_NP[mx_dtype.value]
    @property
    # pylint: disable= invalid-name, undefined-variable
    def T(self):
        """Get transpose of current NDArray"""
        if len(self.shape) != 2:
            raise ValueError('Only 2D matrix is allowed to be transposed')
        return transpose(self)
    # pylint: enable= invalid-name, undefined-variable
    def asnumpy(self):
        """Return a copied numpy array of current array.
        Returns
        -------
        array : numpy.ndarray
            A copy of array content.
        """
        data = np.empty(self.shape, dtype=self.dtype)
        check_call(_LIB.MXNDArraySyncCopyToCPU(
            self.handle,
            data.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_size_t(data.size)))
        return data
    def asscalar(self):
        """Return a CPU scalar(float) of current ndarray.
        This ndarray must have shape (1,)
        Returns
        -------
        scalar : np.float
            The scalar representation of the ndarray.
        """
        if self.shape != (1,):
            raise ValueError("The current array is not a scalar")
        return self.asnumpy()[0]
    def astype(self, dtype):
        """Return a copied numpy array of current array with specified type.
        Parameters
        ----------
        dtype : numpy.dtype or string
            Desired type of result array.
        Returns
        -------
        array : numpy.ndarray
            A copy of array content.
        """
        res = empty(self.shape, ctx=self.context, dtype=dtype)
        self.copyto(res)
        return res
    def copyto(self, other):
        """Copy the content of current array to other.
        When other is NDArray, the content is copied over.
        When other is a Context, a new NDArray in the context
        will be created as target
        Parameters
        ----------
        other : NDArray or Context
            Target NDArray or context we want to copy data to.
        Returns
        -------
        dst : NDArray
            The copy target NDArray
        """
        if isinstance(other, NDArray):
            if other.handle is self.handle:
                # self-copy is a no-op; warn because it is usually a mistake
                warnings.warn('copy an array to itself, is it intended?',
                              RuntimeWarning)
                return
            return _internal._copyto(self, out=other)
        elif isinstance(other, Context):
            hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
            return _internal._copyto(self, out=hret)
        else:
            raise TypeError('copyto do not support type ' + str(type(other)))
    def copy(self):
        """Make a copy of the current ndarray on the same context
        Return
        ------
        cpy : NDArray
            The copy
        """
        return self.copyto(self.context)
    # pylint: enable= no-member
    def as_in_context(self, context):
        """Return an `NDArray` that lives in the target context. If the array
        is already in that context, `self` is returned. Otherwise, a copy is
        made.
        Parameters
        ----------
        context : Context
            The target context we want the return value to live in.
        Returns
        -------
        A copy or `self` as an `NDArray` that lives in the target context.
        """
        if self.context == context:
            return self
        return self.copyto(context)
def onehot_encode(indices, out):
    """One hot encoding indices into matrix out.

    Parameters
    ----------
    indices: NDArray
        An NDArray containing indices of the categorical features.
    out: NDArray
        The result holder of the encoding; written in place.

    Returns
    -------
    out: Array
        Same as out.
    """
    # pylint: disable= no-member, protected-access
    return _internal._onehot_encode(indices, out, out=out)
    # pylint: enable= no-member, protected-access
def empty(shape, ctx=None, dtype=mx_real_t):
    """Create an empty uninitialized new NDArray, with specified shape.

    Parameters
    ----------
    shape : tuple or int
        shape of the NDArray. A bare int is treated as a 1-D shape.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype or string, optional
        Element type of the created NDArray; defaults to ``mx_real_t``.

    Returns
    -------
    out: Array
        The created NDArray (contents are uninitialized).
    """
    if isinstance(shape, int):
        shape = (shape, )
    if ctx is None:
        ctx = Context.default_ctx
    # False => the new handle is not delayed/view, i.e. a real allocation.
    return NDArray(handle=_new_alloc_handle(shape, ctx, False, dtype))
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
    """Dispatch an element-wise operation to the right implementation.

    Performs numpy-like broadcasting when both operands are NDArrays of
    different sizes, and otherwise routes to the scalar variants.

    Parameters
    ----------
    lhs : NDArray or numeric value
        left hand side operand
    rhs : NDArray or numeric value
        right hand side operand
    fn_array : function
        called when both lhs and rhs are NDArrays
    fn_scalar : function
        called when both lhs and rhs are numeric values
    lfn_scalar : function
        called when lhs is an NDArray and rhs is a numeric value
    rfn_scalar : function
        called when lhs is numeric and rhs is an NDArray; if None the
        operation is assumed commutative and lfn_scalar is reused

    Returns
    -------
    out: NDArray
        result array
    """
    lhs_is_scalar = isinstance(lhs, numeric_types)
    rhs_is_scalar = isinstance(rhs, numeric_types)
    if lhs_is_scalar and rhs_is_scalar:
        return fn_scalar(lhs, rhs)
    if lhs_is_scalar:
        # scalar (op) array: reuse the left-scalar kernel when commutative.
        scalar_fn = lfn_scalar if rfn_scalar is None else rfn_scalar
        return scalar_fn(rhs, float(lhs))
    if rhs_is_scalar:
        return lfn_scalar(lhs, float(rhs))
    if isinstance(rhs, NDArray):
        # Broadcast the smaller operand up to the larger one's shape.
        left_elems = functools.reduce(operator.mul, lhs.shape)
        right_elems = functools.reduce(operator.mul, rhs.shape)
        if left_elems < right_elems:
            lhs = lhs.broadcast_to(rhs.shape)
        elif left_elems > right_elems:
            rhs = rhs.broadcast_to(lhs.shape)
        return fn_array(lhs, rhs)
    raise TypeError('type %s not supported' % str(type(rhs)))
#pylint: enable= too-many-arguments, no-member, protected-access
def add(lhs, rhs):
    """Element-wise addition with scalar/array dispatch and broadcasting.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    # Addition is commutative, so no right-scalar kernel is needed.
    return _ufunc_helper(lhs, rhs, _internal._plus, operator.add,
                         _internal._plus_scalar, None)
    # pylint: enable= no-member, protected-access
def subtract(lhs, rhs):
    """Element-wise subtraction with scalar/array dispatch and broadcasting.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    # Subtraction is not commutative: a reversed-scalar kernel is supplied.
    return _ufunc_helper(lhs, rhs, _internal._minus, operator.sub,
                         _internal._minus_scalar, _internal._rminus_scalar)
    # pylint: enable= no-member, protected-access
def multiply(lhs, rhs):
    """Element-wise multiplication with scalar/array dispatch and broadcasting.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    # Multiplication is commutative, so no right-scalar kernel is needed.
    return _ufunc_helper(lhs, rhs, _internal._mul, operator.mul,
                         _internal._mul_scalar, None)
    # pylint: enable= no-member, protected-access
def divide(lhs, rhs):
    """Element-wise (true) division with scalar/array dispatch and broadcasting.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    # Division is not commutative: a reversed-scalar kernel is supplied.
    return _ufunc_helper(lhs, rhs, _internal._div, operator.truediv,
                         _internal._div_scalar, _internal._rdiv_scalar)
    # pylint: enable= no-member, protected-access
def power(lhs, rhs):
    """Element-wise exponentiation with scalar/array dispatch and broadcasting.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    # Power is not commutative: a reversed-scalar kernel is supplied.
    return _ufunc_helper(lhs, rhs, _internal._power, operator.pow,
                         _internal._power_scalar, _internal._rpower_scalar)
    # pylint: enable= no-member, protected-access
def maximum(lhs, rhs):
    """Element-wise maximum with scalar/array dispatch and broadcasting.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    # The lambda (not built-in max) preserves the original tie/NaN behavior.
    return _ufunc_helper(lhs, rhs, _internal._maximum,
                         lambda a, b: a if a > b else b,
                         _internal._maximum_scalar, None)
    # pylint: enable= no-member, protected-access
def minimum(lhs, rhs):
    """Element-wise minimum with scalar/array dispatch and broadcasting.

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand
    rhs : Array of float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    # The lambda (not built-in min) preserves the original tie/NaN behavior.
    return _ufunc_helper(lhs, rhs, _internal._minimum,
                         lambda a, b: a if a < b else b,
                         _internal._minimum_scalar, None)
    # pylint: enable= no-member, protected-access
def true_divide(lhs, rhs):
    """ Same as numpy's true_divide. It adjusts the output type to present the best answer,
    regardless of input types. Thin alias for :func:`divide`.
    """
    return divide(lhs, rhs)
def negative(arr):
    """ Return the negation of array values (implemented as ``arr * -1.0``). """
    return multiply(arr, -1.0)
def zeros(shape, ctx=None, dtype=mx_real_t):
    """Create an NDArray of the given shape with every element set to 0.

    Parameters
    ----------
    shape : tuple
        shape of the NDArray.
    ctx : Context, optional.
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype or string, optional
        Element type; defaults to ``mx_real_t``.

    Returns
    -------
    out: Array
        The created NDArray.
    """
    out = empty(shape, ctx, dtype)
    out[:] = 0.0
    return out
def ones(shape, ctx=None, dtype=mx_real_t):
    """Create an NDArray of the given shape with every element set to 1.

    Parameters
    ----------
    shape : tuple
        shape of the NDArray.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype or string, optional
        Element type; defaults to ``mx_real_t``.

    Returns
    -------
    out: Array
        The created NDArray.
    """
    out = empty(shape, ctx, dtype)
    out[:] = 1.0
    return out
def full(shape, val, ctx=None, dtype=mx_real_t):
    """Create a new NDArray filled with given value, with specified shape.

    Parameters
    ----------
    shape : tuple
        shape of the NDArray.
    val : float
        value to be filled with.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype or string, optional
        Element type of the created NDArray; defaults to ``mx_real_t``.
        Added for consistency with ``zeros``/``ones``/``empty``; callers
        that omit it get the previous behavior unchanged.

    Returns
    -------
    out: Array
        The created NDArray.
    """
    arr = empty(shape, ctx, dtype)
    arr[:] = val
    return arr
def array(source_array, ctx=None, dtype=mx_real_t):
    """Create a new NDArray that copies content from source_array.

    Parameters
    ----------
    source_array : array_like
        Source data to create NDArray from.
    ctx : Context, optional
        The context of the NDArray, default to current default context.
    dtype : numpy.dtype or string, optional
        Element type of the created NDArray; defaults to ``mx_real_t``.

    Returns
    -------
    out: Array
        The created NDArray.

    Raises
    ------
    TypeError
        If ``source_array`` cannot be converted to a numpy array.
    """
    if not isinstance(source_array, np.ndarray):
        try:
            source_array = np.array(source_array, dtype=dtype)
        except Exception:
            # Previously a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion failures
            # should be turned into a TypeError.
            raise TypeError('source_array must be array like object')
    arr = empty(source_array.shape, ctx, dtype)
    arr[:] = source_array
    return arr
def concatenate(arrays, axis=0, always_copy=True):
    """Concatenate a list of NDArrays along the first dimension.

    Parameters
    ----------
    arrays : list of NDArray
        Arrays to be concatenate. They must have identical shape except
        the first dimension. They also must have the same data type.
    axis : int
        The axis along which to concatenate.
    always_copy : bool
        Default `True`. When not `True`, if the arrays only contain one
        `NDArray`, that element will be returned directly, avoid copying.

    Returns
    -------
    An `NDArray` that lives on the same context as `arrays[0].context`.
    """
    assert isinstance(arrays, list)
    assert len(arrays) > 0
    assert isinstance(arrays[0], NDArray)

    if not always_copy and len(arrays) == 1:
        return arrays[0]

    # Validate that all inputs agree everywhere except along `axis`,
    # accumulating the total extent of the concatenation axis.
    shape_axis = arrays[0].shape[axis]
    shape_rest1 = arrays[0].shape[0:axis]
    shape_rest2 = arrays[0].shape[axis+1:]
    dtype = arrays[0].dtype
    for arr in arrays[1:]:
        shape_axis += arr.shape[axis]
        assert shape_rest1 == arr.shape[0:axis]
        assert shape_rest2 == arr.shape[axis+1:]
        assert dtype == arr.dtype
    ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
    ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)

    # Copy each source block into its slot of the output. axis == 0 can use
    # plain slice assignment; other axes need the crop_assign kernel.
    idx = 0
    begin = [0 for _ in ret_shape]
    end = list(ret_shape)
    for arr in arrays:
        if axis == 0:
            ret[idx:idx+arr.shape[0]] = arr
        else:
            begin[axis] = idx
            end[axis] = idx+arr.shape[axis]
            # pylint: disable=no-member,protected-access
            _internal._crop_assign(ret, arr, out=ret,
                                   begin=tuple(begin),
                                   end=tuple(end))
            # pylint: enable=no-member,protected-access
        idx += arr.shape[axis]
    return ret
def load(fname):
    """Load ndarray from binary file.

    You can also use pickle to do the job if you only work on python.
    The advantage of load/save is the file is language agnostic.
    This means the file saved using save can be loaded by other language binding of mxnet.
    You also get the benefit being able to directly load/save from cloud storage(S3, HDFS)

    Parameters
    ----------
    fname : str
        The name of the file.Can be S3 or HDFS address (remember built with S3 support).
        Example of fname:
        - `s3://my-bucket/path/my-s3-ndarray`
        - `hdfs://my-bucket/path/my-hdfs-ndarray`
        - `/path-to/my-local-ndarray`

    Returns
    -------
    out : list of NDArray or dict of str to NDArray
        List of NDArray or dict of str->NDArray, depending on what was saved.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname need to be string')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(fname),
                                  ctypes.byref(out_size),
                                  ctypes.byref(handles),
                                  ctypes.byref(out_name_size),
                                  ctypes.byref(names)))
    # A zero name count means the file was saved as a plain list; otherwise
    # every array has a name and the result is returned as a dict.
    if out_name_size.value == 0:
        return [NDArray(NDArrayHandle(handles[i])) for i in range(out_size.value)]
    else:
        assert out_name_size.value == out_size.value
        return dict(
            (py_str(names[i]), NDArray(NDArrayHandle(handles[i]))) for i in range(out_size.value))
def save(fname, data):
    """Save list of NDArray or dict of str->NDArray to binary file.

    You can also use pickle to do the job if you only work on python.
    The advantage of load/save is the file is language agnostic.
    This means the file saved using save can be loaded by other language binding of mxnet.
    You also get the benefit being able to directly load/save from cloud storage(S3, HDFS)

    Parameters
    ----------
    fname : str
        The name of the file.Can be S3 or HDFS address (remember built with S3 support).
        Example of fname:
        - `s3://my-bucket/path/my-s3-ndarray`
        - `hdfs://my-bucket/path/my-hdfs-ndarray`
        - `/path-to/my-local-ndarray`
    data : list of NDArray or dict of str to NDArray
        The data to be saved.

    Raises
    ------
    TypeError
        If data is not a dict of str->NDArray or a list of NDArray.
    """
    handles = []
    if isinstance(data, dict):
        # Named save: collect parallel key and handle arrays for the C API.
        keys = []
        for key, val in data.items():
            if not isinstance(key, string_types):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            if not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            keys.append(c_str(key))
            handles.append(val.handle)
        keys = c_array(ctypes.c_char_p, keys)
    else:
        # Unnamed save: keys stays None so load() returns a plain list.
        for val in data:
            if not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            handles.append(val.handle)
        keys = None
    check_call(_LIB.MXNDArraySave(c_str(fname),
                                  mx_uint(len(handles)),
                                  c_array(NDArrayHandle, handles),
                                  keys))
def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
    """Decode an image from string. Requires OpenCV to work.

    Parameters
    ----------
    str_img : str
        binary image data
    clip_rect : iterable of 4 int
        clip decoded image to rectangle (x0, y0, x1, y1)
    out : NDArray
        output buffer. can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w)
    index : int
        output decoded image to i-th slice of 4 dimensional buffer
    channels : int
        number of channels to output. Decode to grey scale when channels = 1.
    mean : NDArray
        subtract mean from decode image before outputing.
    """
    # pylint: disable= no-member, protected-access, too-many-arguments
    if mean is None:
        # An empty-handle NDArray signals "no mean subtraction" to the backend.
        mean = NDArray(_new_empty_handle())
    # The two calls differ only in whether an explicit output buffer is
    # forwarded; the backend allocates one when `out` is omitted.
    if out is None:
        return _internal._imdecode(mean, index,
                                   clip_rect[0],
                                   clip_rect[1],
                                   clip_rect[2],
                                   clip_rect[3],
                                   channels,
                                   len(str_img),
                                   str_img=str_img)
    else:
        return _internal._imdecode(mean, index,
                                   clip_rect[0],
                                   clip_rect[1],
                                   clip_rect[2],
                                   clip_rect[3],
                                   channels,
                                   len(str_img),
                                   str_img=str_img,
                                   out=out)
# pylint: disable=too-many-locals, invalid-name
def _make_ndarray_function(handle):
    """Create a NDArray function from the FunctionHandle.

    Queries the C backend for the function's signature (number of input
    arrays, scalars, and mutated outputs) and returns a Python callable —
    binary, unary, or generic — that forwards to MXFuncInvokeEx.
    """
    # Bit flags reported by MXFuncDescribe's type mask.
    NDARRAY_ARG_BEFORE_SCALAR = 1
    ACCEPT_EMPTY_MUTATE_TARGET = 1 << 2
    # Get the property of NDArray
    n_used_vars = mx_uint()
    n_scalars = mx_uint()
    n_mutate_vars = mx_uint()
    type_mask = ctypes.c_int()
    check_call(_LIB.MXFuncDescribe(
        handle,
        ctypes.byref(n_used_vars),
        ctypes.byref(n_scalars),
        ctypes.byref(n_mutate_vars),
        ctypes.byref(type_mask)))
    n_mutate_vars = n_mutate_vars.value
    n_used_vars = n_used_vars.value
    n_scalars = n_scalars.value
    type_mask = type_mask.value
    accept_empty_mutate = (type_mask & ACCEPT_EMPTY_MUTATE_TARGET) != 0
    # infer type of the function: positional layout of array vs scalar args.
    if (type_mask & NDARRAY_ARG_BEFORE_SCALAR) != 0:
        use_vars_range = range(0, n_used_vars)
        scalar_range = range(n_used_vars, n_used_vars + n_scalars)
    else:
        scalar_range = range(0, n_scalars)
        use_vars_range = range(n_scalars, n_used_vars + n_scalars)
    # Get the information from the function (name, description, arg docs).
    name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    ret_type = ctypes.c_char_p()
    check_call(_LIB.MXFuncGetInfo(
        handle, ctypes.byref(name), ctypes.byref(desc),
        ctypes.byref(num_args),
        ctypes.byref(arg_names),
        ctypes.byref(arg_types),
        ctypes.byref(arg_descs),
        ctypes.byref(ret_type)))
    func_name = py_str(name.value)
    param_str = ctypes2docstring(num_args, arg_names, arg_types, arg_descs)
    doc_str = ('%s\n\n' +
               '%s\n' +
               'out : NDArray, optional\n' +
               '    The output NDArray to hold the result.\n\n'+
               'Returns\n' +
               '-------\n' +
               'out : NDArray\n'+
               '    The output of binary function.')
    doc_str = doc_str % (py_str(desc.value), param_str)
    # Definition of internal functions (closures over handle and the
    # signature metadata computed above).
    def binary_ndarray_function(lhs, rhs, out=None, **kwargs):
        """Internal binary function
        """
        if out:
            if not isinstance(out, NDArray):
                raise TypeError('out must be NDArray')
            if not out.writable:
                raise TypeError('out must be writable')
        else:
            if not accept_empty_mutate:
                raise TypeError('argument out is required to call %s' % func_name)
            out = NDArray(_new_empty_handle())
        check_call(_LIB.MXFuncInvokeEx( \
            handle, \
            c_array(NDArrayHandle, (lhs.handle, rhs.handle)), \
            c_array(mx_float, ()), \
            c_array(NDArrayHandle, (out.handle,)), \
            ctypes.c_int(len(kwargs)), \
            c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()]), \
            c_array(ctypes.c_char_p, [c_str(str(i)) for i in kwargs.values()])))
        return out
    def unary_ndarray_function(src, out=None, *args, **kwargs):
        """internal NDArray function"""
        if out:
            if not isinstance(out, NDArray):
                raise TypeError('out must be NDArray')
            if not out.writable:
                raise TypeError('out must be writable')
        else:
            if not accept_empty_mutate:
                raise TypeError('argument out is required to call %s' % func_name)
            out = NDArray(_new_empty_handle())
        check_call(_LIB.MXFuncInvokeEx( \
            handle, \
            c_array(NDArrayHandle, (src.handle,)), \
            c_array(mx_float, [args[i] for i in scalar_range]), \
            c_array(NDArrayHandle, (out.handle,)), \
            ctypes.c_int(len(kwargs)), \
            c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()]), \
            c_array(ctypes.c_char_p, [c_str(str(i)) for i in kwargs.values()])))
        return out
    def generic_ndarray_function(*args, **kwargs):
        """Invoke this function by passing in parameters

        Parameters
        ----------
        *args
            Positional arguments of input scalars and NDArray
        out : NDArray or tuple of NDArray, optional
            Output NDArray, used to hold the output result.

        Returns
        -------
        out : NDArray
            The result NDArray(tuple) of result of computation.
        """
        if 'out' in kwargs:
            mutate_vars = kwargs['out']
            if isinstance(mutate_vars, NDArray):
                mutate_vars = (mutate_vars,)
            if len(mutate_vars) != n_mutate_vars:
                raise TypeError('expect %d out in %s', n_mutate_vars, func_name)
            del kwargs['out']
        else:
            if accept_empty_mutate:
                mutate_vars = tuple(
                    NDArray(_new_empty_handle()) for i in range(n_mutate_vars))
            else:
                raise TypeError('argument out is required to call %s' % func_name)
        check_call(_LIB.MXFuncInvokeEx( \
            handle, \
            c_array(NDArrayHandle, [args[i].handle for i in use_vars_range]), \
            c_array(mx_float, [args[i] for i in scalar_range]), \
            c_array(NDArrayHandle, [v.handle for v in mutate_vars]), \
            ctypes.c_int(len(kwargs)), \
            c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()]), \
            c_array(ctypes.c_char_p, [c_str(str(i)) for i in kwargs.values()])))
        if n_mutate_vars == 1:
            return mutate_vars[0]
        else:
            return mutate_vars
    # End of function declaration — pick the specialized wrapper when the
    # signature matches the common binary/unary shapes, else the generic one.
    if n_mutate_vars == 1 and n_used_vars == 2 and n_scalars == 0:
        ret_function = binary_ndarray_function
    elif n_mutate_vars == 1 and n_used_vars == 1 and n_scalars == 0:
        ret_function = unary_ndarray_function
    else:
        ret_function = generic_ndarray_function
    ret_function.__name__ = func_name
    ret_function.__doc__ = doc_str
    return ret_function
# pylint: enable=too-many-locals, invalid-name
def _init_ndarray_module():
    """List and add all the ndarray functions to current module.

    Enumerates the functions exposed by the C backend, wraps each via
    _make_ndarray_function, and registers it either on this module or on
    mxnet._ndarray_internal (for underscore-prefixed names).
    """
    plist = ctypes.POINTER(FunctionHandle)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListFunctions(ctypes.byref(size),
                                    ctypes.byref(plist)))
    module_obj = sys.modules[__name__]
    module_internal = sys.modules["mxnet._ndarray_internal"]
    for i in range(size.value):
        hdl = FunctionHandle(plist[i])
        function = _make_ndarray_function(hdl)
        # if function name starts with underscore, register as internal namespace
        if function.__name__.startswith('_'):
            setattr(module_internal, function.__name__, function)
        else:
            fname = function.__name__
            fn_obj = getattr(module_obj, fname, None)
            if fn_obj is None:
                setattr(module_obj, fname, function)
            else:
                # Name collision with a hand-written function above: keep the
                # Python version and expose the C one under an aliased name.
                setattr(module_obj, fname + '_internal', function)

# Initialize the NDArray module
_init_ndarray_module()
| 3,320 | 0 | 594 |
676b46cf41475ec292d77840a6f1202b682cf9e1 | 1,518 | py | Python | project/i10n.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | [
"MIT"
] | 1 | 2021-06-01T14:49:18.000Z | 2021-06-01T14:49:18.000Z | project/i10n.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | [
"MIT"
] | 286 | 2020-12-04T14:13:00.000Z | 2022-03-09T19:05:16.000Z | project/i10n.py | DanielGrams/gsevpt | a92f71694388e227e65ed1b24446246ee688d00e | [
"MIT"
] | null | null | null | from flask import request
from flask_babelex import gettext
from project import app, babel
@babel.localeselector
| 28.111111 | 71 | 0.71278 | from flask import request
from flask_babelex import gettext
from project import app, babel
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(app.config["LANGUAGES"])
def print_dynamic_texts():
gettext("Event_Art")
gettext("Event_Book")
gettext("Event_Movie")
gettext("Event_Family")
gettext("Event_Festival")
gettext("Event_Religious")
gettext("Event_Shopping")
gettext("Event_Comedy")
gettext("Event_Music")
gettext("Event_Dance")
gettext("Event_Nightlife")
gettext("Event_Theater")
gettext("Event_Dining")
gettext("Event_Conference")
gettext("Event_Meetup")
gettext("Event_Fitness")
gettext("Event_Sports")
gettext("Event_Other")
gettext("Event_Exhibition")
gettext("Event_Culture")
gettext("Event_Tour")
gettext("Event_OpenAir")
gettext("Event_Stage")
gettext("Event_Lecture")
gettext("Typical Age range")
gettext("Administrator")
gettext("Event expert")
gettext("EventReviewStatus.inbox")
gettext("EventReviewStatus.verified")
gettext("EventReviewStatus.rejected")
gettext("Scope_openid")
gettext("Scope_profile")
gettext("Scope_user:read")
gettext("Scope_user:write")
gettext("Scope_organizer:write")
gettext("Scope_place:write")
gettext("Scope_event:write")
gettext("Scope_eventlist:write")
gettext("Scope_organization:read")
gettext("Scope_organization:write")
gettext("There must be no self-reference.")
| 1,356 | 0 | 45 |
38aa595c4de8dd7bd0c61e74d07b78c05d1ee8b7 | 1,021 | py | Python | backend/accounts/migrations/0003_auto_20200505_1756.py | aibek79/Django-React-knboard | 074f4b1388a440290f9ae4a88c71fef749775932 | [
"MIT"
] | 665 | 2020-05-22T16:13:59.000Z | 2022-03-30T01:22:51.000Z | backend/accounts/migrations/0003_auto_20200505_1756.py | aibek79/Django-React-knboard | 074f4b1388a440290f9ae4a88c71fef749775932 | [
"MIT"
] | 70 | 2020-05-02T12:09:43.000Z | 2022-02-27T12:54:48.000Z | backend/accounts/migrations/0003_auto_20200505_1756.py | aibek79/Django-React-knboard | 074f4b1388a440290f9ae4a88c71fef749775932 | [
"MIT"
] | 161 | 2020-05-25T21:10:09.000Z | 2022-03-08T15:39:24.000Z | # Generated by Django 3.0.4 on 2020-05-05 17:56
import django.contrib.auth.validators
import django.core.validators
from django.db import migrations, models
| 31.90625 | 99 | 0.573947 | # Generated by Django 3.0.4 on 2020-05-05 17:56
import django.contrib.auth.validators
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (accounts 0003).

    Orders users newest-first and tightens ``username``: unique, max 150
    characters, and at least 3 characters via MinLengthValidator.
    Auto-generated migrations should not be edited by hand.
    """
    dependencies = [
        ("accounts", "0002_auto_20200327_1817"),
    ]
    operations = [
        migrations.AlterModelOptions(name="user", options={"ordering": ["-id"]},),
        migrations.AlterField(
            model_name="user",
            name="username",
            field=models.CharField(
                error_messages={"unique": "A user with that username already exists."},
                help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
                max_length=150,
                unique=True,
                validators=[
                    django.contrib.auth.validators.UnicodeUsernameValidator(),
                    django.core.validators.MinLengthValidator(3),
                ],
                verbose_name="username",
            ),
        ),
    ]
92551d5b16646be075992ceebc374a6bc69bbdb0 | 509 | py | Python | scripts/examples.py | lopiola/integracja_wypadki | 270c8784041c9b857c32f06099434d3ecb57319f | [
"MIT"
] | null | null | null | scripts/examples.py | lopiola/integracja_wypadki | 270c8784041c9b857c32f06099434d3ecb57319f | [
"MIT"
] | null | null | null | scripts/examples.py | lopiola/integracja_wypadki | 270c8784041c9b857c32f06099434d3ecb57319f | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples of how to use scripts in this directory
"""
from db_api import accident
accident1_id = 11
accident1 = accident.new(
id=accident1_id,
country='USA',
timestamp='TIMESTAMP \'2014-05-16 15:36:38\'',
day_of_week=7,
latitude=23.3453451,
longitude=56.23424234,
persons_count=3,
fatalities_count=2,
vehicles_count=1,
speed_limit=-1
)
accident.insert(accident1)
accident.delete(accident1_id)
accident.insert(accident1)
| 18.851852 | 50 | 0.695481 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples of how to use scripts in this directory
"""
from db_api import accident
accident1_id = 11
accident1 = accident.new(
id=accident1_id,
country='USA',
timestamp='TIMESTAMP \'2014-05-16 15:36:38\'',
day_of_week=7,
latitude=23.3453451,
longitude=56.23424234,
persons_count=3,
fatalities_count=2,
vehicles_count=1,
speed_limit=-1
)
accident.insert(accident1)
accident.delete(accident1_id)
accident.insert(accident1)
| 0 | 0 | 0 |
b3b3eadd9ab82edb42863b02d00e40384426ade5 | 121 | py | Python | pfrock/core/plugin.py | knightliao/pfrock | 33587f11caeeccc11d0b8219b4e02df153905486 | [
"Apache-2.0"
] | 62 | 2016-02-24T10:47:17.000Z | 2019-04-27T01:36:56.000Z | pfrock/core/plugin.py | knightliao/pfrock | 33587f11caeeccc11d0b8219b4e02df153905486 | [
"Apache-2.0"
] | 1 | 2019-04-19T12:13:21.000Z | 2021-08-10T09:16:09.000Z | pfrock/core/plugin.py | knightliao/pfrock | 33587f11caeeccc11d0b8219b4e02df153905486 | [
"Apache-2.0"
] | 24 | 2016-03-01T14:59:29.000Z | 2019-09-02T08:12:00.000Z | #!/usr/bin/env python
# coding=utf8
PLUGIN_CLASS_GET_HANDLER = 'get_handler'
PLUGIN_CLASS_KEY_REGISTER = '__register__'
| 20.166667 | 42 | 0.801653 | #!/usr/bin/env python
# coding=utf8
PLUGIN_CLASS_GET_HANDLER = 'get_handler'
PLUGIN_CLASS_KEY_REGISTER = '__register__'
| 0 | 0 | 0 |
ec0187127470df40ae0df189c13e415c81863265 | 5,818 | py | Python | config.py | diccooo/Deep_Enhanced_Repr_for_IDRR | f0c3e09034e18cf24df59aa5bd6952ca9d6acade | [
"MIT"
] | 28 | 2018-07-14T05:12:21.000Z | 2021-12-09T09:22:11.000Z | config.py | cxncu001/Deep_Enhanced_Repr_for_IDRR | f0c3e09034e18cf24df59aa5bd6952ca9d6acade | [
"MIT"
] | 3 | 2019-06-26T04:36:55.000Z | 2020-12-17T12:25:20.000Z | config.py | cxncu001/Deep_Enhanced_Repr_for_IDRR | f0c3e09034e18cf24df59aa5bd6952ca9d6acade | [
"MIT"
] | 7 | 2018-11-20T09:52:20.000Z | 2020-04-23T15:03:10.000Z | import torch
from datetime import datetime
| 37.294872 | 96 | 0.523376 | import torch
from datetime import datetime
class Config(object):
def __init__(self, classnum=11, splitting=2):
self.i2sense = [
'Temporal.Asynchronous', 'Temporal.Synchrony', 'Contingency.Cause',
'Contingency.Pragmatic cause', 'Comparison.Contrast', 'Comparison.Concession',
'Expansion.Conjunction', 'Expansion.Instantiation', 'Expansion.Restatement',
'Expansion.Alternative','Expansion.List'
]
self.sense2i = {
'Temporal.Asynchronous':0, 'Temporal.Synchrony':1, 'Contingency.Cause':2,
'Contingency.Pragmatic cause':3, 'Comparison.Contrast':4, 'Comparison.Concession':5,
'Expansion.Conjunction':6, 'Expansion.Instantiation':7, 'Expansion.Restatement':8,
'Expansion.Alternative':9,'Expansion.List':10
}
self.i2senseclass = ['Temporal', 'Contingency', 'Comparison', 'Expansion']
self.senseclass2i = {'Temporal':0, 'Contingency':1, 'Comparison':2, 'Expansion':3}
self.four_or_eleven = classnum # 11, 4, 2
self.corpus_splitting = splitting # 1 for Lin, 2 for Ji, 3 for 4-way and binary
if self.four_or_eleven == 4 or self.four_or_eleven == 2:
self.corpus_splitting = 3
self.binclass = 0 # 0, 1, 2, 3 self.senseclass2i
self.wordvec_path = '~/Projects/GoogleNews-vectors-negative300.bin.gz'
self.wordvec_dim = 300
self.max_sent_len = 100
################################################################################
# attention
self.attn_topk = 2
self.attn_dropout = 0
###############################################################################
# char/sub
self.need_char = False
self.need_sub = True
if self.need_sub:
self.need_char = False
self.char_num = 262
self.char_padding_idx = 261
if self.need_sub:
if self.corpus_splitting == 1:
self.char_num = 982
elif self.corpus_splitting == 2:
self.char_num = 982
elif self.corpus_splitting == 3:
self.char_num = 982
self.char_padding_idx = 0
self.char_embed_dim = 50
self.char_enc_dim = 50
self.char_filter_num = 2
self.char_filter_dim = [2, 3]
self.char_dropout = 0
self.char_hid_dim = self.char_enc_dim * self.char_filter_num
###############################################################################
# elmo
self.need_elmo = True
self.elmo_options = '~/Projects/ELMo/elmo_2x4096_512_2048cnn_2xhighway_options.json'
self.elmo_weights = '~/Projects/ELMo/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5'
self.elmo_dropout = 0
self.elmo_labmda = 0.001
self.elmo_dim = 300
################################################################################
# CNNLayer RNNLayer
self.use_rnn = False
if self.corpus_splitting == 1:
self.embed_dropout = 0.4
elif self.corpus_splitting == 2:
self.embed_dropout = 0.4
elif self.corpus_splitting == 3:
self.embed_dropout = 0.4
self.cnn_dim = self.wordvec_dim
if self.need_char or self.need_sub:
self.cnn_dim += self.char_hid_dim
if self.need_elmo:
self.cnn_dim += self.elmo_dim
if self.corpus_splitting == 1:
self.cnn_layer_num = 5
elif self.corpus_splitting == 2:
self.cnn_layer_num = 4
elif self.corpus_splitting == 3:
self.cnn_layer_num = 5
if self.corpus_splitting == 1:
self.cnn_kernal_size = [5, 5, 5, 5, 5]
elif self.corpus_splitting == 2:
self.cnn_kernal_size = [5, 5, 5, 5]
elif self.corpus_splitting == 3:
self.cnn_kernal_size = [3, 3, 3, 3, 3]
if self.corpus_splitting == 1:
self.cnn_dropout = 0.4
elif self.corpus_splitting == 2:
self.cnn_dropout = 0.4
elif self.corpus_splitting == 3:
self.cnn_dropout = 0.4
self.attned_dim = self.cnn_dim * self.attn_topk
self.pair_rep_dim = self.attned_dim * 2 * self.cnn_layer_num
################################################################################
# Classifier
self.clf_class_num = self.four_or_eleven
if self.corpus_splitting == 1:
self.clf_fc_num = 0
self.clf_fc_dim = 2048
elif self.corpus_splitting == 2:
self.clf_fc_num = 0
elif self.corpus_splitting == 3:
self.clf_fc_num = 0
if self.corpus_splitting == 1:
self.clf_dropout = 0.3
elif self.corpus_splitting == 2:
self.clf_dropout = 0.3
elif self.corpus_splitting == 3:
self.clf_dropout = 0.3
if self.corpus_splitting == 1:
self.conn_num = 94
elif self.corpus_splitting == 2:
self.conn_num = 92
elif self.corpus_splitting == 3:
self.conn_num = 93
################################################################################
self.seed = 666
self.batch_size = 128
self.shuffle = True
if self.corpus_splitting == 1:
self.lr = 0.001
elif self.corpus_splitting == 2:
self.lr = 0.001
elif self.corpus_splitting == 3:
self.lr = 0.001
self.l2_penalty = 0
self.grad_clip = 1
self.epochs = 10000
self.is_mttrain = True
self.lambda1 = 1
self.logdir = './res/' + datetime.now().strftime('%B%d-%H:%M:%S')
| 5,726 | 0 | 49 |
fbbe2ed61105235aaea446afe9f546e6c24bc273 | 1,222 | py | Python | rabbitmq_project/fanout/producter.py | HEUDavid/mq | ef30c182428ab159dd1c922f844874086c3971f1 | [
"Apache-2.0"
] | null | null | null | rabbitmq_project/fanout/producter.py | HEUDavid/mq | ef30c182428ab159dd1c922f844874086c3971f1 | [
"Apache-2.0"
] | null | null | null | rabbitmq_project/fanout/producter.py | HEUDavid/mq | ef30c182428ab159dd1c922f844874086c3971f1 | [
"Apache-2.0"
] | null | null | null | """
fanout模式下,传递到 exchange 的消息将会转发到所有与其绑定的 queue 上。
不需要指定 routing_key ,即使指定了也是无效。
需要提前将 exchange 和 queue 绑定,一个 exchange 可以绑定多个 queue,一个queue可以绑定多个exchange。
需要先启动 订阅者,此模式下的队列是 consumer 随机生成的,发布者 仅仅发布消息到 exchange ,由 exchange 转发消息至 queue。
"""
import json
import pika
if __name__ == '__main__':
credentials = pika.PlainCredentials('test', '123456') # mq用户名和密码
# 虚拟队列需要指定参数 virtual_host,如果是默认的可以不填。
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='api.mdavid.cn', port=5672, virtual_host='xiang_test', credentials=credentials))
channel = connection.channel()
# 声明exchange,由exchange指定消息在哪个队列传递,如不存在,则创建。
# durable = True 代表exchange持久化存储,False 非持久化存储
channel.exchange_declare(exchange='python-test', durable=True, exchange_type='fanout')
for i in range(10):
message = json.dumps({'OrderId': "1000%s" % i})
# 向队列插入数值 routing_key是队列名。
# delivery_mode = 2 声明消息在队列中持久化,
# delivery_mode = 1 消息非持久化。routing_key 不需要配置
channel.basic_publish(exchange='python-test', routing_key='queue_1', body=message,
properties=pika.BasicProperties(delivery_mode=2))
print(message)
connection.close()
| 35.941176 | 93 | 0.710311 | """
RabbitMQ fanout-exchange producer example.

In fanout mode a message delivered to the exchange is forwarded to every
queue bound to it.  No routing_key is needed; even if one is given it is
ignored.  The exchange and queues must be bound in advance: one exchange
can be bound to several queues and one queue to several exchanges.
Start the consumers first -- in this mode the queues are generated by the
consumers; the publisher only publishes to the exchange, which forwards
the messages on to the queues.
"""
import json
import pika
if __name__ == '__main__':
    credentials = pika.PlainCredentials('test', '123456')  # MQ username and password
    # A virtual queue requires the virtual_host parameter; it can be omitted
    # for the default virtual host.
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host='api.mdavid.cn', port=5672, virtual_host='xiang_test', credentials=credentials))
    channel = connection.channel()
    # Declare the exchange (created if it does not exist); the exchange decides
    # which queues the message is delivered to.
    # durable=True persists the exchange, False keeps it transient.
    channel.exchange_declare(exchange='python-test', durable=True, exchange_type='fanout')
    for i in range(10):
        message = json.dumps({'OrderId': "1000%s" % i})
        # Publish to the exchange. routing_key names a queue but is ignored by
        # a fanout exchange, so it does not need to be configured.
        # delivery_mode=2 makes the message persistent in the queue,
        # delivery_mode=1 makes it transient.
        channel.basic_publish(exchange='python-test', routing_key='queue_1', body=message,
                              properties=pika.BasicProperties(delivery_mode=2))
        print(message)
    connection.close()
| 0 | 0 | 0 |
bbcd839297834273548af91a040fdf660a2eaa4b | 833 | py | Python | tests/conftest.py | award7/dicomsort | b83a8d9468a6599cfc36f497dcdb38ea62c2c783 | [
"MIT"
] | 15 | 2015-02-26T17:27:48.000Z | 2019-10-22T12:28:24.000Z | tests/conftest.py | award7/dicomsort | b83a8d9468a6599cfc36f497dcdb38ea62c2c783 | [
"MIT"
] | 61 | 2020-02-07T21:56:23.000Z | 2022-03-31T22:12:08.000Z | tests/conftest.py | suever/dicomsort | d2a09887ebe7e3f2bcdc07eb1375d995ba365205 | [
"MIT"
] | 7 | 2015-09-07T04:47:29.000Z | 2019-03-18T09:29:48.000Z | import pytest
from pydicom.dataset import FileDataset, FileMetaDataset
@pytest.fixture(scope='function')
| 26.870968 | 79 | 0.661465 | import pytest
from pydicom.dataset import FileDataset, FileMetaDataset
@pytest.fixture(scope='function')
def dicom_generator(tmpdir):
def _dicom(filename='image.dcm', **values):
filename = str(tmpdir.join(filename))
file_meta = FileMetaDataset()
file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
file_meta.MediaStorageSOPInstanceUID = '1.2.3'
file_meta.ImplementationClassUID = '1.2.3.4'
ds = FileDataset(filename, {}, file_meta=file_meta, preamble=b'\0'*128)
ds.is_little_endian = True
ds.is_implicit_VR = False
ds.PatientName = 'Jonathan^Suever'
ds.SeriesDescription = 'Dicom Sort Test Series'
ds.SeriesNumber = 1
ds.update(values)
ds.save_as(filename)
return filename, ds
return _dicom
| 703 | 0 | 22 |
22d0c8713446fdeb2a1f8467628cf4c7a9f58f29 | 885 | py | Python | sstcam_sandbox/d191009_happy_birthday/click_camera.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | null | null | null | sstcam_sandbox/d191009_happy_birthday/click_camera.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | null | null | null | sstcam_sandbox/d191009_happy_birthday/click_camera.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | 1 | 2021-03-30T09:46:56.000Z | 2021-03-30T09:46:56.000Z | from CHECLabPy.plotting.camera import CameraImage
import numpy as np
from matplotlib import pyplot as plt
# NOTE(review): CameraImageClick is not defined above this point in this file
# slice -- confirm the class is defined/imported before this line runs.
ci = CameraImageClick.from_camera_version("1.1.0")
plt.show()
np.save("click_camera.npy", ci.image)
| 31.607143 | 65 | 0.672316 | from CHECLabPy.plotting.camera import CameraImage
import numpy as np
from matplotlib import pyplot as plt
class CameraImageClick(CameraImage):
    """Camera image display whose pixels can be toggled by mouse click.

    Each click flips the corresponding entry of the boolean ``image`` mask,
    which is re-assigned through the ``image`` property so the display
    refreshes.
    """

    def __init__(self, xpix, ypix, size, **kwargs):
        super().__init__(xpix, ypix, size, **kwargs)
        self.click_radius = size
        # Use the builtin ``bool`` dtype: the ``np.bool`` alias was deprecated
        # in NumPy 1.20 and removed in 1.24, where it raises AttributeError.
        self.image = np.zeros(xpix.size, dtype=bool)
        self.pixels.set_picker(True)  # enable click
        self.pixels.set_pickradius(self.click_radius)
        self.pixels.set_snap(True)  # snap cursor to pixel center
        self.fig.canvas.mpl_connect('pick_event', self._on_pick)

    def _on_pick(self, event):
        """Toggle the clicked pixel in the boolean image mask."""
        pix_id = event.ind[-1]
        print(f"Clicked pixel: {pix_id}")
        # Copy-then-assign so the ``image`` property setter sees a new array.
        image = self.image.copy()
        image[pix_id] = ~image[pix_id]
        self.image = image
# Build the clickable display (from_camera_version is presumably an inherited
# CameraImage constructor -- confirm), let the user toggle pixels, then
# persist the resulting boolean mask.
ci = CameraImageClick.from_camera_version("1.1.0")
plt.show()
np.save("click_camera.npy", ci.image)
| 585 | 15 | 76 |
835cb4eaac8855bac1ca3fab089efac0d34dbe49 | 21,410 | py | Python | experiments/multinest_gaussian.py | keflavich/nestfit | 2a4c6951a501f3e4647ceb5dd276da907ab765ba | [
"MIT"
] | 11 | 2019-08-22T17:19:10.000Z | 2021-12-10T06:43:32.000Z | experiments/multinest_gaussian.py | keflavich/nestfit | 2a4c6951a501f3e4647ceb5dd276da907ab765ba | [
"MIT"
] | 3 | 2019-09-30T22:28:56.000Z | 2021-02-15T21:40:33.000Z | experiments/multinest_gaussian.py | keflavich/nestfit | 2a4c6951a501f3e4647ceb5dd276da907ab765ba | [
"MIT"
] | 2 | 2020-06-30T07:18:18.000Z | 2020-07-13T16:27:33.000Z | #!/usr/bin/env python3
"""
Gaussian mixture fitting with Nested Sampling. This module was tested in the
main `nestfit` repo on bare arrays and Gaussian components -- without a
spectral axis, units, or other necessary complications.
The `.wrapped` references a Cython implementation of the Gaussian model class.
"""
import ctypes
import operator
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
from scipy import (special, stats)
from matplotlib import ticker
from matplotlib import pyplot as plt
import corner
import pymultinest
from .wrapped import CGaussianModel
plt.rc('font', size=10, family='serif')
plt.rc('text', usetex=True)
plt.rc('xtick', direction='out', top=True)
plt.rc('ytick', direction='out', right=True)
ROOT_DIR = Path('/lustre/aoc/users/bsvoboda/temp/nestfit')
DATA_DIR = ROOT_DIR / Path('data')
PLOT_DIR = ROOT_DIR / Path('plots')
| 38.998179 | 185 | 0.585567 | #!/usr/bin/env python3
"""
Gaussian mixture fitting with Nested Sampling. This module was tested in the
main `nestfit` repo on bare arrays and Gaussian components -- without a
spectral axis, units, or other necessary complications.
The `.wrapped` references a Cython implementation of the Gaussian model class.
"""
import ctypes
import operator
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
from scipy import (special, stats)
from matplotlib import ticker
from matplotlib import pyplot as plt
import corner
import pymultinest
from .wrapped import CGaussianModel
# Global matplotlib styling applied to every figure produced by this module.
plt.rc('font', size=10, family='serif')
plt.rc('text', usetex=True)
plt.rc('xtick', direction='out', top=True)
plt.rc('ytick', direction='out', right=True)
# Directory layout for input data and generated plots.
ROOT_DIR = Path('/lustre/aoc/users/bsvoboda/temp/nestfit')
DATA_DIR = ROOT_DIR / Path('data')
PLOT_DIR = ROOT_DIR / Path('plots')
class SyntheticSpectrum:
    """A noisy mixture-of-Gaussians spectrum.

    Each component has the form ``f(x) = A * exp(-(x - c)^2 / (2 * s^2))``
    for amplitude ``A``, centroid ``c``, and standard deviation ``s``.
    """

    def __init__(self, xaxis, amp, cen, std, noise=0.03, set_seed=False):
        """
        Parameters
        ----------
        xaxis : np.ndarray
        amp : np.ndarray
            Array of Gaussian amplitudes
        cen : np.ndarray
            Array of Gaussian centroid positions
        std : np.ndarray
            Array of Gaussian standard deviations
        noise : float, default=0.03
            Noise standard deviation
        set_seed : bool, default=False
            If `True` will use a default seed of 5 for the np.random module.
        """
        # seed(None) re-seeds from OS entropy, matching np.random.seed().
        np.random.seed(5 if set_seed else None)
        self.xaxis = xaxis.reshape(-1, 1)
        self.size = self.xaxis.shape[0]
        self.ncomp = len(amp)
        self.amp = amp
        self.cen = cen
        self.std = std
        self.noise = noise
        # Flattened "true" parameter vector: [amps, centroids, widths].
        self.truths = np.concatenate([amp, cen, std])
        # (ncomp, nchan) component profiles and their noiseless sum.
        self.components = self.profile().T
        self.sum_spec = self.components.sum(axis=0)
        self.noise_spec = np.random.normal(scale=self.noise, size=self.size)
        self.sampled_spec = self.sum_spec + self.noise_spec

    def profile(self):
        """Evaluate every Gaussian component over the spectral axis."""
        offset = self.xaxis - self.cen
        return self.amp * np.exp(-offset**2 / (2 * self.std**2))

    def resample_spectrum(self, noise=None):
        """Draw a fresh noise realisation, optionally with a new noise level."""
        if noise is not None:
            self.noise = noise
        self.noise_spec = np.random.normal(scale=self.noise, size=self.size)
        self.sampled_spec = self.sum_spec + self.noise_spec
def test_spectrum():
    """Build the canonical three-component demo spectrum with a fixed seed."""
    amps = np.array([0.3, 0.5, 0.4])
    cens = np.array([-1, 0, 3])
    stds = np.array([1.5, 1.0, 0.5])
    return SyntheticSpectrum(np.linspace(-6, 6, 100), amp=amps, cen=cens,
                             std=stds, noise=0.03, set_seed=True)
class GaussianModel:
    """Gaussian-mixture likelihood model for PyMultiNest.

    Holds the data, noise level, and component count, and exposes the
    ``loglikelihood`` / ``prior_transform`` callbacks in the calling
    convention PyMultiNest uses (raw ctypes double arrays).
    """

    model_name = 'gaussian'

    def __init__(self, xaxis, ydata, noise, ncomp):
        # Column vector so broadcasting against per-component parameter rows
        # yields a (nchan, ncomp) model matrix.
        self.xaxis = xaxis.reshape(-1, 1)
        self.size = xaxis.shape[0]
        self.ydata = ydata
        self.noise = noise
        self.ncomp = ncomp
        # Three free parameters per component: amplitude, centroid, width.
        self.n_params = 3 * ncomp
        # Gaussian-likelihood normalisation constant, precomputed once.
        self.lnpin = -self.size / 2 * np.log(2 * np.pi * noise**2)
        # Evidence of the "no signal" (pure noise) model, used as a baseline.
        self.null_lnZ = self.lnpin - np.sum(ydata**2) / (2 * self.noise**2)
        #self.array_type = np.ctypeslib.ndpointer(
        #    ctypes.c_double, 1, (self.n_params,), 'C_CONTIGUOUS')

    @property
    def par_labels(self):
        # Flat labels matching the parameter layout: a1..aN, c1..cN, s1..sN.
        comps = range(1, self.ncomp+1)
        return [
            f'{label}{n}'
            for label in ('a', 'c', 's')
            for n in comps
        ]

    def loglikelihood(self, theta, ndim, nparams):
        """Gaussian ln-likelihood of the data given parameter vector *theta*.

        *theta* arrives as a raw ctypes double pointer from MultiNest; it is
        viewed (not copied) as a NumPy array of length ``n_params`` laid out
        as [amplitudes, centroids, widths].
        """
        n = self.ncomp
        #atheta = ctypes.cast(theta, self.array_type).contents
        atheta = np.ctypeslib.as_array(theta, shape=(self.n_params,))
        amp = atheta[0 : n]
        cen = atheta[ n:2*n]
        std = atheta[2*n:3*n]
        # Sum the per-component Gaussians into a single model spectrum.
        ymodel = np.sum(
            amp * np.exp(-(self.xaxis - cen)**2 / (2 * std**2)),
            axis=1,
        )
        difsqsum = np.sum((self.ydata - ymodel)**2)
        lnL = self.lnpin - difsqsum / (2 * self.noise**2)
        return lnL

    def prior_transform(self, utheta, ndim, nparams):
        """Map unit-cube samples onto the physical parameter ranges in place.

        MultiNest passes *utheta* as a mutable ctypes array; entries are
        overwritten element by element.
        """
        n = self.ncomp
        # amplitude -- uniform [0.06, 1.00]
        for i in range(0, n):
            utheta[i] = 0.94 * utheta[i] + 0.06
        # centroid velocity -- uniform [-5.00, 5.00]
        # but enforce ordering from left-to-right for the peaks to sort
        # and limit multi-modality in posteriors
        vmin, vmax = -5.0, 5.0
        for i in range(n, 2*n):
            v = (vmax - vmin) * utheta[i] + vmin
            utheta[i] = vmin = v
        # standard deviation -- uniform [0.30, 3.00]
        for i in range(2*n, 3*n):
            utheta[i] = 2.7 * utheta[i] + 0.30
        return utheta  # XXX -- return value presumably unused by MultiNest
                       # (the array is mutated in place); confirm
def run_nested(spec, model, basename='run/test_run'):
    """Run MultiNest on *model*, print the evidence, and return the Analyzer."""
    sampler_opts = dict(
        outputfiles_basename=basename,
        resume=False,
        verbose=True,
        evidence_tolerance=0.3,
        n_live_points=400,
        sampling_efficiency=0.3,
        n_iter_before_update=2000,
    )
    pymultinest.run(
        model.loglikelihood,
        model.prior_transform,
        model.n_params,
        **sampler_opts,
    )
    analyzer = pymultinest.Analyzer(
        outputfiles_basename=basename,
        n_params=model.n_params,
    )
    # Report the global evidence converted from natural log to log10.
    lnZ = analyzer.get_stats()['global evidence']
    print(':: Evidence Z:', lnZ / np.log(10))
    return analyzer
def test_nested(ncomp=3):
    """Fit the canonical demo spectrum with the pure-Python Gaussian model."""
    spec = test_spectrum()
    model = GaussianModel(spec.xaxis, spec.sampled_spec, spec.noise, ncomp)
    analyzer = run_nested(spec, model)
    return spec, model, analyzer
def test_nested_cython(ncomp=3):
    """Fit the canonical demo spectrum with the Cython Gaussian model."""
    spec = test_spectrum()
    # The Cython model expects a flat (1D) spectral axis.
    model = CGaussianModel(spec.xaxis.flatten(), spec.sampled_spec,
                           spec.noise, ncomp)
    analyzer = run_nested(spec, model)
    return spec, model, analyzer
def marginals_to_pandas(a_stats):
    """Convert PyMultiNest marginal statistics into a tidy DataFrame.

    Renames the quantile and credible-interval keys, keeps a fixed column
    set, and expands each two-sided ``ci_*`` interval into separate
    ``_lo``/``_hi`` columns.
    """
    rename_map = {
        'median': 'q50',
        'q01%': 'q01',
        'q10%': 'q10',
        'q25%': 'q25',
        'q75%': 'q75',
        'q90%': 'q90',
        'q99%': 'q99',
        '1sigma': 'ci_1sigma',
        '2sigma': 'ci_2sigma',
        '3sigma': 'ci_3sigma',
        '5sigma': 'ci_5sigma',
    }
    keep = [
        'q01', 'q10', 'q25', 'q50', 'q75', 'q90', 'q99',
        'sigma', 'ci_1sigma', 'ci_2sigma', 'ci_3sigma', 'ci_5sigma',
    ]
    df = pd.DataFrame(a_stats['marginals']).rename(columns=rename_map)[keep]
    for col in ('ci_1sigma', 'ci_2sigma', 'ci_3sigma', 'ci_5sigma'):
        # Each interval is a (lo, hi) pair; split it into two scalar columns.
        df[col + '_lo'] = df[col].str[0]
        df[col + '_hi'] = df[col].str[1]
        df = df.drop(columns=col)
    return df
def save_run(model, analyzer, group_name, store_name='nestfit'):
    """Persist one MultiNest fit into an HDF5 group of *store_name*."""
    if not store_name.endswith('.hdf5'):
        store_name += '.hdf5'
    a_stats = analyzer.get_stats()
    bestfit = analyzer.get_best_fit()
    marg_df = marginals_to_pandas(a_stats)
    posteriors = analyzer.get_equal_weighted_posterior()
    # Scalar metadata stored as HDF5 attributes on the group.
    attr_items = (
        ('model_name', model.model_name),
        ('ncomp', model.ncomp),
        ('par_labels', model.par_labels),
        ('std_noise', model.noise),
        ('null_lnZ', model.null_lnZ),
        ('global_lnZ', a_stats['global evidence']),
        ('global_lnZ_err', a_stats['global evidence error']),
        ('max_loglike', bestfit['log_likelihood']),
    )
    with h5py.File(store_name, 'a') as hdf:
        group = hdf.create_group(group_name)
        for key, value in attr_items:
            group.attrs[key] = value
        # Array results stored as datasets.
        group.create_dataset('map_params', data=np.array(bestfit['parameters']))
        group.create_dataset('posteriors', data=posteriors)
        group.create_dataset('marginals', data=marg_df.values)
def run_trials_varying_noise(store_name='varnoise'):
    """Fit 1-4 component models to the demo spectrum over a grid of noise levels."""
    xaxis = np.linspace(-6, 6, 100)
    amp = np.array([0.3, 0.5, 0.4])
    cen = np.array([-1, 0, 3])
    std = np.array([1.5, 1.0, 0.5])
    # sample noise values log-uniformly from 1 to 100 peak-SNR
    noise_grid = 0.75 / np.logspace(0, 2, 100)
    for spec_idx, noise in enumerate(noise_grid):
        spec = SyntheticSpectrum(xaxis, amp, cen, std, noise=noise,
                                 set_seed=False)
        for ncomp in range(1, 5):
            group_name = f'spec_{spec_idx:0>4d}/ncomp_{ncomp}'
            model = GaussianModel(spec.xaxis, spec.sampled_spec, spec.noise,
                                  ncomp)
            analyzer = run_nested(spec, model)
            save_run(model, analyzer, group_name, store_name=store_name)
def plot_spec_compare(synspec, model, analyzer, outname='test'):
    """Compare posterior draws and the MAP model against the synthetic truth.

    Top panel: observed spectrum, component/total profiles from a thinned
    posterior sample, the true components, and the best-fit (MAP) spectrum.
    Bottom panel: cumulative distributions of the true noise draw and the
    best-fit residuals against the analytic noise CDF.
    """
    n = model.ncomp
    xaxis = model.xaxis
    fig = plt.figure(figsize=(4, 6))
    ax0 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    ax1 = plt.subplot2grid((3, 1), (2, 0))
    ## Comparison of the synthetic spectrum and a draw from the posteriors
    # observed data
    ax0.step(xaxis, model.ydata, color='black')
    # plot a sub-sample of spectra (every 100th equally weighted sample)
    posteriors = analyzer.get_equal_weighted_posterior()[::100,:-1]
    spectra = [
        SyntheticSpectrum(xaxis, row[:n], row[n:2*n], row[2*n:3*n])
        for row in posteriors
    ]
    for spec in spectra:
        ax0.plot(xaxis, spec.components.T, '-', color='red',
                 alpha=0.1)
        ax0.plot(xaxis, spec.sum_spec, '-', color='cyan',
                 alpha=0.1)
    # individual true components
    ax0.plot(xaxis, synspec.components.T, '-', color='magenta', linewidth=0.7)
    # best fit (MAP) spectrum
    best_pars = np.array(analyzer.get_best_fit()['parameters'])
    best_spec = SyntheticSpectrum(xaxis,
            best_pars[:n], best_pars[n:2*n], best_pars[2*n:3*n],
    )
    # BUG FIX: previously plotted `spec.sum_spec` (the last posterior draw
    # left over from the loop above) instead of the best-fit spectrum.
    ax0.plot(xaxis, best_spec.sum_spec, '-', color='dodgerblue', linewidth=0.7)
    ax0.set_xlabel(r'$v_\mathrm{lsr} \ [\mathrm{km\, s^{-1}}]$')
    ax0.set_ylabel(r'$T_\mathrm{b} \ [\mathrm{K}]$')
    ## Comparison of the "true" residuals and the residuals from the best-fit
    # use error function to represent true noise distribution and use the true
    # noise drawn in the synthetic spectrum.
    bins = np.linspace(-0.1, 0.1, 100)
    # BUG FIX: the Gaussian CDF is 0.5 * (1 + erf(x / (sqrt(2) * sigma)));
    # the sqrt(2) factor was previously missing.
    errdist = 0.5 * special.erf(bins / (np.sqrt(2) * model.noise)) + 0.5
    ax1.plot(bins, errdist, '-', color='0.5')
    ax1.hist(synspec.noise_spec, bins=bins, cumulative=True, density=True,
             histtype='step', color='black',)
    ax1.hist(model.ydata-best_spec.sum_spec, bins=bins, cumulative=True, density=True,
             histtype='step', color='red')
    ax1.set_xlabel(r'$T_\mathrm{b} \ [\mathrm{K}]$')
    ax1.set_ylabel(r'$\mathrm{Residual\ CDF}$')
    # save figure
    plt.tight_layout()
    plt.savefig(f'plots/{outname}.pdf')
    plt.close('all')
def plot_corner(synspec, model, analyzer, show_truths=False, outname='test_corner'):
    """Corner plot of the equally weighted posterior samples.

    If *show_truths* is set, the true parameter values of *synspec* are
    over-plotted on the marginal panels.
    """
    truths = synspec.truths if show_truths else None
    # Corner plots are dense; temporarily bump the font size.
    plt.rc('font', size=12, family='serif')
    posteriors = analyzer.get_equal_weighted_posterior()[:,:-1]
    fig = corner.corner(posteriors, truths=truths,
            labels=model.par_labels, label_kwargs={'fontsize': 14},
            show_titles=True, title_kwargs={'fontsize': 14})
    # save figure
    plt.savefig(f'plots/{outname}.pdf')
    plt.close('all')
    # Restore the module-wide font configuration.
    plt.rc('font', size=10, family='serif')
def read_varnoise_summary(store_file='varnoise'):
    """Collect evidence and information-criterion summaries from an HDF5 store.

    Returns a DataFrame with one row per synthetic spectrum, holding
    lnZ/BIC/AIC/AICc values for every component count, the null-model
    baselines, and the noise level.
    """
    # FIXME it is probably easier and more elegant to interop between pandas
    # and HDF5 using pytables.
    if not store_file.endswith('.hdf5'):
        store_file += '.hdf5'
    df = pd.DataFrame()
    nchan = 100
    with h5py.File(store_file, 'r') as hdf:
        for run_group in hdf.values():
            spec_name = run_group.name.lstrip('/')
            for fit_group in run_group.values():
                ncomp = fit_group.attrs['ncomp']
                kpar = 3 * ncomp
                maxL = fit_group.attrs['max_loglike']
                # Information criteria from the maximum log-likelihood.
                bic = np.log(nchan) * kpar - 2 * maxL
                aic = 2 * kpar - 2 * maxL
                aicc = aic + (2 * kpar**2 + 2 * kpar) / (nchan - kpar - 1)
                df.loc[spec_name, f'lnZ{ncomp}'] = fit_group.attrs['global_lnZ']
                df.loc[spec_name, f'lnZ{ncomp}_err'] = fit_group.attrs['global_lnZ_err']
                df.loc[spec_name, f'maxL{ncomp}'] = maxL
                df.loc[spec_name, f'BIC{ncomp}'] = bic
                df.loc[spec_name, f'AIC{ncomp}'] = aic
                df.loc[spec_name, f'AICc{ncomp}'] = aicc
                # Null (noise-only) model baselines; identical across fits of
                # the same spectrum, so overwriting per-fit is harmless.
                lnZ0 = fit_group.attrs['null_lnZ']
                df.loc[spec_name, 'lnZ0'] = lnZ0
                df.loc[spec_name, 'BIC0'] = -2 * lnZ0
                df.loc[spec_name, 'AIC0'] = -2 * lnZ0
                df.loc[spec_name, 'AICc0'] = -2 * lnZ0 + 2 / (nchan - 1)
                df.loc[spec_name, 'noise'] = fit_group.attrs['std_noise']
    return df
def plot_varnoise_evidence_noise(df):
    """Plot successive ln-evidence ratios against peak signal-to-noise.

    Expects the summary frame from ``read_varnoise_summary`` with the
    ``lnZ0``..``lnZ4`` and ``noise`` columns.
    """
    # 0.75 is presumably the peak intensity of the summed true spectrum, so
    # this is the peak SNR -- confirm against the synthetic model amplitudes.
    snr = 0.75 / df.noise
    fig, ax = plt.subplots(figsize=(4, 3))
    ax.hlines([0], snr.min(), snr.max(), color='0.5', linewidth=0.7,
              linestyle='dotted')
    label1 = r'$\mathrm{ln}(\mathcal{Z}_1/\mathcal{Z}_0)$'
    label2 = r'$\mathrm{ln}(\mathcal{Z}_2/\mathcal{Z}_1)$'
    label3 = r'$\mathrm{ln}(\mathcal{Z}_3/\mathcal{Z}_2)$'
    label4 = r'$\mathrm{ln}(\mathcal{Z}_4/\mathcal{Z}_3)$'
    line_kwargs = {'drawstyle': 'steps-mid'}
    ax.plot(snr, df.lnZ1-df.lnZ0, label=label1, **line_kwargs)
    ax.plot(snr, df.lnZ2-df.lnZ1, label=label2, **line_kwargs)
    ax.plot(snr, df.lnZ3-df.lnZ2, label=label3, **line_kwargs)
    ax.plot(snr, df.lnZ4-df.lnZ3, label=label4, **line_kwargs)
    # +/-16.1: the evidence-difference threshold used throughout this module
    # for model selection (see plot_varnoise_preferred_model).
    ax.hlines([-16.1, 16.1], snr.min(), snr.max(), color='red', linewidth=0.7,
              linestyle='dotted')
    ax.legend(loc='lower left', ncol=2, fancybox=False, fontsize='x-small')
    ax.set_xscale('log')
    ax.set_xlim(snr.min(), snr.max())
    ax.set_ylim(-50, 100)
    ax.set_xlabel(r'$\mathrm{max}(I_\nu) / \sigma_\mathrm{rms}$')
    ax.set_ylabel(r'$\mathrm{ln}(\mathcal{Z}_{i} / \mathcal{Z}_{i-1})$')
    plt.tight_layout()
    plt.savefig(PLOT_DIR/Path('evidence_by_noise.pdf'))
    plt.close('all')
def plot_varnoise_aic_bic_noise(df):
    """Plot successive BIC differences against peak signal-to-noise.

    AICc curves were explored previously and are kept commented out for
    reference.
    """
    snr = 0.75 / df.noise
    fig, ax = plt.subplots(figsize=(4, 3))
    ax.hlines([0], snr.min(), snr.max(), color='0.5', linewidth=0.7,
              linestyle='dotted')
    label1 = r'$\Delta \mathrm{BIC}(1-0)$'
    label2 = r'$\Delta \mathrm{BIC}(2-1)$'
    label3 = r'$\Delta \mathrm{BIC}(3-2)$'
    label4 = r'$\Delta \mathrm{BIC}(4-3)$'
    line_kwargs = {'drawstyle': 'steps-mid'}
    #ax.plot(snr, df.AICc1-df.AICc0, label=label1, **line_kwargs)
    #ax.plot(snr, df.AICc2-df.AICc1, label=label2, **line_kwargs)
    #ax.plot(snr, df.AICc3-df.AICc2, label=label3, **line_kwargs)
    #ax.plot(snr, df.AICc4-df.AICc3, label=label4, **line_kwargs)
    ax.plot(snr, df.BIC1-df.BIC0, label=label1, **line_kwargs)
    ax.plot(snr, df.BIC2-df.BIC1, label=label2, **line_kwargs)
    ax.plot(snr, df.BIC3-df.BIC2, label=label3, **line_kwargs)
    ax.plot(snr, df.BIC4-df.BIC3, label=label4, **line_kwargs)
    # Selection threshold: a more negative delta-BIC favours the larger model.
    ax.hlines([-16.1], snr.min(), snr.max(), color='red', linewidth=0.7,
              linestyle='dotted')
    ax.legend(loc='upper left', ncol=2, fancybox=False, fontsize='x-small')
    ax.set_xscale('log')
    ax.set_xlim(snr.min(), snr.max())
    ax.set_ylim(-100, 50)
    ax.set_xlabel(r'$\mathrm{max}(I_\nu) / \sigma_\mathrm{rms}$')
    ax.set_ylabel(r'$\Delta \mathrm{BIC}$')
    plt.tight_layout()
    plt.savefig(PLOT_DIR/Path('aic_bic_by_noise.pdf'))
    plt.close('all')
def plot_varnoise_metrics_compare(df):
    """Compare evidence differences with penalty-stripped BIC differences.

    Plots a normalised discrepancy between the BIC-based and evidence-based
    model-comparison statistics for each successive pair of component
    counts, as a function of peak SNR.
    """
    df = df.copy()
    df['dlnZ10'] = df.lnZ1 - df.lnZ0
    df['dlnZ21'] = df.lnZ2 - df.lnZ1
    df['dlnZ32'] = df.lnZ3 - df.lnZ2
    df['dlnZ43'] = df.lnZ4 - df.lnZ3
    # BIC differences with the parameter penalty (ln(100) per 3 parameters)
    # removed, leaving -2 times the change in maximum log-likelihood.
    df['dBIC10'] = df.BIC1 - df.BIC0 - np.log(100) * 3
    df['dBIC21'] = df.BIC2 - df.BIC1 - np.log(100) * 3
    df['dBIC32'] = df.BIC3 - df.BIC2 - np.log(100) * 3
    df['dBIC43'] = df.BIC4 - df.BIC3 - np.log(100) * 3
    snr = 0.75 / df.noise
    fig, ax = plt.subplots(figsize=(4, 3))
    label1 = r'$(1-0)$'
    label2 = r'$(2-1)$'
    label3 = r'$(3-2)$'
    label4 = r'$(4-3)$'
    plot_kwargs = {'marker': 'o', 'markersize': 3, 'linestyle': 'none'}
    ax.plot(snr, (df.dBIC10-df.dlnZ10)/(df.dBIC10+df.dlnZ10),
            label=label1, **plot_kwargs)
    ax.plot(snr, (df.dBIC21-df.dlnZ21)/(df.dBIC21+df.dlnZ21),
            label=label2, **plot_kwargs)
    ax.plot(snr, (df.dBIC32-df.dlnZ32)/(df.dBIC32+df.dlnZ32),
            label=label3, **plot_kwargs)
    ax.plot(snr, (df.dBIC43-df.dlnZ43)/(df.dBIC43+df.dlnZ43),
            label=label4, **plot_kwargs)
    ax.legend(loc='lower right', ncol=2, fancybox=False, fontsize='x-small')
    ax.set_xscale('log')
    ax.set_xlim(snr.min(), snr.max())
    ax.set_ylim(-5, 5)
    ax.set_xlabel(r'$\mathrm{max}(I_\nu) / \sigma_\mathrm{rms}$')
    ax.set_ylabel(r'$\left[2\Delta \mathcal{L}_\mathrm{max} - \Delta \mathrm{ln}(\mathcal{Z})\right] / \left[2\Delta \mathcal{L}_\mathrm{max} + \Delta \mathrm{ln}(\mathcal{Z})\right]$')
    plt.tight_layout()
    plt.savefig(PLOT_DIR/Path('diff_evidence_bic_by_noise.pdf'))
    plt.close('all')
def plot_varnoise_preferred_model(df):
    """Plot the preferred number of components per metric versus peak SNR.

    For each spectrum, the "best" N is the largest component count whose
    metric improves on the (N-1)-component model beyond the +/-16.1
    threshold, evaluated for the evidence, BIC, and AICc columns.
    """
    df = df.copy()
    df['snr'] = 0.75 / df.noise
    lnZ_cols = [f'lnZ{n}' for n in range(5)]
    bic_cols = [f'BIC{n}' for n in range(5)]
    aic_cols = [f'AICc{n}' for n in range(5)]
    # too complicated but, oh well, it's here and it works
    def set_nbest(cols, thresh, outcol='nbest', comp_op='<'):
        """Write the largest N passing the pairwise threshold into *outcol*."""
        op = operator.lt if comp_op == '<' else operator.gt
        for ix in df.index:
            row = df.loc[ix, cols]
            # Scan from 4 components down; stop at the first N whose metric
            # difference against N-1 passes the threshold (0 if none do).
            for ii in range(4, 0, -1):
                if op(row[cols[ii]] - row[cols[ii-1]], thresh):
                    break
            else:
                ii = 0
            df.loc[ix, outcol] = ii
    set_nbest(lnZ_cols, 16.1, outcol='lnZ_nbest', comp_op='>')
    set_nbest(bic_cols, -16.1, outcol='bic_nbest', comp_op='<')
    set_nbest(aic_cols, -16.1, outcol='aic_nbest', comp_op='<')
    fig, ax = plt.subplots(figsize=(4, 2))
    plot_kwargs = {'marker': 'o', 'markersize': 1.4, 'linestyle': 'none'}
    # Small vertical offsets keep the three metrics visually separable.
    ax.plot(df.snr, df.aic_nbest+0.2, color='dodgerblue',
            label=r'$\mathrm{AICc}$', **plot_kwargs)
    ax.plot(df.snr, df.bic_nbest+0.1, color='red',
            label=r'$\mathrm{BIC}$', **plot_kwargs)
    ax.plot(df.snr, df.lnZ_nbest, color='black',
            label=r'$\mathrm{ln}(\mathcal{Z})$', **plot_kwargs)
    ax.legend(loc='upper left', ncol=1, fancybox=False, fontsize='x-small')
    ax.hlines([0], df.snr.min(), df.snr.max(), color='0.5', linestyle='dotted')
    ax.set_xscale('log')
    ax.set_yticks(range(5))
    ax.set_xlim(df.snr.min(), df.snr.max())
    ax.set_ylim(-0.2, 4.2)
    ax.set_xlabel(r'$\mathrm{max}(I_\nu) / \sigma_\mathrm{rms}$')
    ax.set_ylabel(r'$N_\mathrm{best}$')
    plt.tight_layout()
    plt.savefig(PLOT_DIR/Path('preferred_model.pdf'))
    plt.close('all')
def plot_varnoise_spec_examples(store_name='varnoise'):
    """Plot example spectra at several noise levels with their MAP models.

    Pulls stored MAP parameters for a few spectra from the HDF5 store and
    overlays the (re-drawn) noisy data, the true components, and the
    best-fit model for each panel.
    """
    if not store_name.endswith('.hdf5'):
        store_name += '.hdf5'
    tspec = test_spectrum()
    def test_spectrum_with_noise(noise):
        """Re-draw the canonical spectrum with a new noise realisation."""
        return SyntheticSpectrum(tspec.xaxis, tspec.amp, tspec.cen, tspec.std,
                noise=noise, set_seed=False)
    def parse_spectrum(ncomp, params, noise):
        """Build a spectrum object from a flat MAP parameter vector."""
        # The zero-component case still needs a one-component container.
        ncomp = 1 if ncomp == 0 else ncomp
        return SyntheticSpectrum(tspec.xaxis, params[:ncomp],
                params[ncomp:2*ncomp], params[2*ncomp:3*ncomp], noise=noise,
                set_seed=False)
    fig, axes = plt.subplots(ncols=1, nrows=4, sharex=True, sharey=True,
            figsize=(4, 6))
    xaxis = tspec.xaxis.flatten()
    with h5py.File(store_name, 'r') as hdf:
        # Hand-picked spectra spanning the noise grid (index == noise step).
        spec_ix = (5, 15, 50, 80)
        for ix, n, ax in zip(spec_ix, range(4), axes):
            group_name = f'/spec_{ix:0>4d}/ncomp_{1 if n==0 else n}'
            noise = hdf[group_name].attrs['std_noise']
            pars = hdf[group_name+'/map_params']
            spec = parse_spectrum(n, pars, noise)
            nspec = test_spectrum_with_noise(noise)
            # Shaded +/- 1 sigma noise band.
            sigmaones = nspec.noise * np.ones(xaxis.shape)
            ax.fill_between(xaxis, -sigmaones, sigmaones, color='yellow',
                    edgecolor='none', alpha=0.5)
            label = r'$N_\mathrm{best} = ' + str(n) + '$'
            ax.annotate(label, (0.05, 0.8), xycoords='axes fraction')
            ax.plot(xaxis, nspec.sampled_spec, color='black',
                    drawstyle='steps-mid')
            ax.plot(xaxis, tspec.components.T, color='magenta', linewidth=0.75)
            ax.plot(xaxis,
                    np.zeros(xaxis.shape) if n == 0 else spec.components.T,
                    color='cyan', linewidth=1.0)
            ax.plot(xaxis,
                    np.zeros(xaxis.shape) if n == 0 else spec.sum_spec,
                    color='dodgerblue', linewidth=0.75)
    ax.set_ylim(-0.75, 2.0)
    ax.set_xlabel(r'$v_\mathrm{lsr} \ [\mathrm{km\, s^{-1}}]$')
    ax.set_ylabel(r'$T_\mathrm{b} \ [\mathrm{K}]$')
    plt.tight_layout()
    plt.savefig(PLOT_DIR/Path('MAP_best_for_noise.pdf'))
    plt.close('all')
| 18,576 | 1,538 | 391 |
e586992388f162288d29db4b88fdcffa8c58b944 | 2,667 | py | Python | examples/example_ode.py | eager-dev/eagerx_dcsc_setups | 72a14a2c640f8abb1c1bfad017caaa51fa4832ea | [
"Apache-2.0"
] | 1 | 2022-03-24T10:32:57.000Z | 2022-03-24T10:32:57.000Z | examples/example_ode.py | eager-dev/eagerx_dcsc_setups | 72a14a2c640f8abb1c1bfad017caaa51fa4832ea | [
"Apache-2.0"
] | null | null | null | examples/example_ode.py | eager-dev/eagerx_dcsc_setups | 72a14a2c640f8abb1c1bfad017caaa51fa4832ea | [
"Apache-2.0"
] | null | null | null | # ROS packages required
from eagerx import Object, Engine, Node, initialize, log, process
initialize("eagerx_core", anonymous=True, log_level=log.INFO)
# Environment
from eagerx.core.env import EagerxEnv
from eagerx.core.graph import Graph
from eagerx.wrappers import Flatten
# Implementation specific
import eagerx.nodes # Registers butterworth_filter # noqa # pylint: disable=unused-import
import eagerx_ode # Registers OdeEngine # noqa # pylint: disable=unused-import
import eagerx_dcsc_setups # Registers Pendulum # noqa # pylint: disable=unused-import
# Other
import numpy as np
import stable_baselines3 as sb
if __name__ == "__main__":
    # Define rate (depends on rate of ode)
    rate = 30.0
    # Initialize empty graph
    graph = Graph.create()
    # Create pendulum
    pendulum = Object.make(
        "Pendulum",
        "pendulum",
        render_shape=[480, 480],
        sensors=["x"],
        states=["model_state", "model_parameters"],
    )
    # Visualize EngineGraph
    pendulum.gui(engine_id="OdeEngine")
    graph.add(pendulum)
    # Create Butterworth filter
    bf = Node.make(
        "ButterworthFilter",
        name="bf",
        rate=rate,
        N=2,
        Wn=13,
        process=process.NEW_PROCESS,
    )
    graph.add(bf)
    # Connect the nodes
    graph.connect(action="action", target=bf.inputs.signal)
    graph.connect(source=bf.outputs.filtered, target=pendulum.actuators.u)
    graph.connect(source=pendulum.sensors.x, observation="observation", window=1)
    # Add rendering
    graph.add_component(pendulum.sensors.image)
    graph.render(source=pendulum.sensors.image, rate=10, display=True)
    # Visualize Graph
    graph.gui()
    # Define engines
    engine = Engine.make("OdeEngine", rate=rate, sync=True, real_time_factor=0, process=process.NEW_PROCESS)

    # BUG FIX: `step_fn` was referenced below but never defined, raising a
    # NameError at runtime.  Define the environment step function here.
    def step_fn(prev_obs, obs, action, steps):
        """Compute (obs, reward, done, info) for one environment step."""
        state = obs["observation"][0]
        u = action["action"][0]
        # Reward: penalise the pendulum angle, (damped) angular velocity,
        # and the control effort.
        cos_th, sin_th, thdot = state
        th = np.arctan2(sin_th, cos_th)
        cost = th**2 + 0.1 * (thdot / (1 + 10 * abs(th))) ** 2 + 0.01 * u**2
        # Episodes end after 500 steps (time limit, not task failure).
        done = steps > 500
        info = {"TimeLimit.truncated": done}
        return obs, -cost, done, info

    # Initialize Environment
    env = Flatten(EagerxEnv(name="ode_env", rate=rate, graph=graph, engine=engine, step_fn=step_fn))
    # Initialize learner (kudos to Antonin)
    model = sb.SAC("MlpPolicy", env, verbose=1)
    # First train in simulation for 5 minutes and save
    env.render("human")
    model.learn(total_timesteps=int(300 * rate))
| 29.307692 | 108 | 0.663292 | # ROS packages required
from eagerx import Object, Engine, Node, initialize, log, process
initialize("eagerx_core", anonymous=True, log_level=log.INFO)
# Environment
from eagerx.core.env import EagerxEnv
from eagerx.core.graph import Graph
from eagerx.wrappers import Flatten
# Implementation specific
import eagerx.nodes # Registers butterworth_filter # noqa # pylint: disable=unused-import
import eagerx_ode # Registers OdeEngine # noqa # pylint: disable=unused-import
import eagerx_dcsc_setups # Registers Pendulum # noqa # pylint: disable=unused-import
# Other
import numpy as np
import stable_baselines3 as sb
if __name__ == "__main__":
    # Define rate (depends on rate of ode)
    rate = 30.0
    # Initialize empty graph
    graph = Graph.create()
    # Create pendulum
    pendulum = Object.make(
        "Pendulum",
        "pendulum",
        render_shape=[480, 480],
        sensors=["x"],
        states=["model_state", "model_parameters"],
    )
    # Visualize EngineGraph
    pendulum.gui(engine_id="OdeEngine")
    graph.add(pendulum)
    # Create Butterworth filter (low-pass on the action signal)
    bf = Node.make(
        "ButterworthFilter",
        name="bf",
        rate=rate,
        N=2,
        Wn=13,
        process=process.NEW_PROCESS,
    )
    graph.add(bf)
    # Connect the nodes: action -> filter -> pendulum actuator; sensor -> obs
    graph.connect(action="action", target=bf.inputs.signal)
    graph.connect(source=bf.outputs.filtered, target=pendulum.actuators.u)
    graph.connect(source=pendulum.sensors.x, observation="observation", window=1)
    # Add rendering
    graph.add_component(pendulum.sensors.image)
    graph.render(source=pendulum.sensors.image, rate=10, display=True)
    # Visualize Graph
    graph.gui()
    # Define engines
    engine = Engine.make("OdeEngine", rate=rate, sync=True, real_time_factor=0, process=process.NEW_PROCESS)
    # Define step function
    def step_fn(prev_obs, obs, action, steps):
        """Compute (obs, reward, done, info) for one environment step."""
        state = obs["observation"][0]
        u = action["action"][0]
        # Calculate reward: penalise angle, (damped) angular velocity,
        # and control effort.
        cos_th, sin_th, thdot = state
        th = np.arctan2(sin_th, cos_th)
        cost = th**2 + 0.1 * (thdot / (1 + 10 * abs(th))) ** 2 + 0.01 * u**2
        # Determine done flag: episode ends after 500 steps (time limit).
        done = steps > 500
        # Set info:
        info = {"TimeLimit.truncated": done}
        return obs, -cost, done, info
    # Initialize Environment
    env = Flatten(EagerxEnv(name="ode_env", rate=rate, graph=graph, engine=engine, step_fn=step_fn))
    # Initialize learner (kudos to Antonin)
    model = sb.SAC("MlpPolicy", env, verbose=1)
    # First train in simulation for 5 minutes and save
    env.render("human")
    model.learn(total_timesteps=int(300 * rate))
| 434 | 0 | 26 |
e336c5e0b4e6db03e129a38b3263dfdd50380089 | 27 | py | Python | bgtorch/bgtorch/nn/__init__.py | noegroup/neurips2020_snf | 9b017bb3681f756d0f2ba0ee7b1a2986c1b07261 | [
"BSD-3-Clause"
] | null | null | null | bgtorch/bgtorch/nn/__init__.py | noegroup/neurips2020_snf | 9b017bb3681f756d0f2ba0ee7b1a2986c1b07261 | [
"BSD-3-Clause"
] | null | null | null | bgtorch/bgtorch/nn/__init__.py | noegroup/neurips2020_snf | 9b017bb3681f756d0f2ba0ee7b1a2986c1b07261 | [
"BSD-3-Clause"
] | null | null | null | from .dense import DenseNet | 27 | 27 | 0.851852 | from .dense import DenseNet | 0 | 0 | 0 |
48f02c18873c94098ec234cdc39ca3c8cf0a5833 | 227 | py | Python | networkx/utils/__init__.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | 1 | 2015-07-16T01:36:44.000Z | 2015-07-16T01:36:44.000Z | networkx/utils/__init__.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | null | null | null | networkx/utils/__init__.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | null | null | null | from networkx.utils.misc import *
from networkx.utils.decorators import *
from networkx.utils.random_sequence import *
from networkx.utils.union_find import *
from networkx.utils.rcm import *
from networkx.utils.heaps import *
| 32.428571 | 44 | 0.814978 | from networkx.utils.misc import *
from networkx.utils.decorators import *
from networkx.utils.random_sequence import *
from networkx.utils.union_find import *
from networkx.utils.rcm import *
from networkx.utils.heaps import *
| 0 | 0 | 0 |
fcf93e02cb9bb7b2351b68fb27d23532a49a5296 | 408 | py | Python | Computer science/Programming languages/Python/Working with data/Data types and operations/Strings/Basic string methods/preprocessing.py | chanchanchong/PYTHON-TRACK-IN-HYPERSKILL | 462fe08ff4a2b183fd45a0235ab1ec7a788bd54c | [
"MIT"
] | null | null | null | Computer science/Programming languages/Python/Working with data/Data types and operations/Strings/Basic string methods/preprocessing.py | chanchanchong/PYTHON-TRACK-IN-HYPERSKILL | 462fe08ff4a2b183fd45a0235ab1ec7a788bd54c | [
"MIT"
] | null | null | null | Computer science/Programming languages/Python/Working with data/Data types and operations/Strings/Basic string methods/preprocessing.py | chanchanchong/PYTHON-TRACK-IN-HYPERSKILL | 462fe08ff4a2b183fd45a0235ab1ec7a788bd54c | [
"MIT"
] | null | null | null | # Preprocess an input text:
# - delete punctuation symbols (commas, periods,
#   exclamation and question marks ,.!?),
# - convert all symbols to lowercase.
# Then print your text.
# Punctuation can appear anywhere in the input string, so every occurrence
# must be removed; str.translate strips them all in a single pass.
print(input().translate(str.maketrans('', '', '?!,.')).lower())
| 27.2 | 90 | 0.656863 | # Preprocess an input text:
# - delete punctuation symbols (commas, periods,
#   exclamation and question marks ,.!?),
# - convert all symbols to lowercase.
# Then print your text.
# Punctuation can appear anywhere in the input string, so every occurrence
# must be removed; str.translate strips them all in a single pass.
print(input().translate(str.maketrans('', '', '?!,.')).lower())
| 0 | 0 | 0 |
e241d8b1efbbfbe3ea79b275171492279d932370 | 3,548 | py | Python | koro/tasks.py | gabrielkhh/flaxen-spade | 9594b16f0f13d3c762d2cdac1cf2b9d11d84f0c2 | [
"Unlicense"
] | null | null | null | koro/tasks.py | gabrielkhh/flaxen-spade | 9594b16f0f13d3c762d2cdac1cf2b9d11d84f0c2 | [
"Unlicense"
] | null | null | null | koro/tasks.py | gabrielkhh/flaxen-spade | 9594b16f0f13d3c762d2cdac1cf2b9d11d84f0c2 | [
"Unlicense"
] | null | null | null | import re
from typing import Optional
from flask import redirect, render_template, request, url_for
from koro.dataset import JsonLoader
from koro.manipulation import first_true
| 32.550459 | 88 | 0.596956 | import re
from typing import Optional
from flask import redirect, render_template, request, url_for
from koro.dataset import JsonLoader
from koro.manipulation import first_true
class Task:
    """A browsable analysis task: a display name plus the results file backing it."""

    def __init__(self, name, filename):
        self.name = name
        self.filename = filename
        # The slug is derived once from the display name and used in URLs.
        self.slug = self.slugify(name)

    def slugify(self, name: str) -> str:
        """Lower-case *name*, collapse every non-alphanumeric run into a single
        dash, and trim leading/trailing dashes."""
        collapsed = re.sub(r"[^a-z0-9]+", "-", name.lower())
        return collapsed.strip("-")
class TaskBuilder:
    """Registry of the available tasks, looked up by their URL slug."""

    def __init__(self):
        # (display name, results filename) pairs for every task we expose.
        specs = [
            ("Best time to travel", "best_time_to_travel.json"),
            ("Popular MRT routes on weekends", "pop_mrt_routes_on_weekends_publicholiday.json"),
            ("Popular End Trips", "popular_end_trip.json"),
            ("Popular Stations", "popular_stations.json"),
            ("Shopping Mall traffic", "shopping-mall-passenger-volume.json"),
        ]
        self.tasks = [Task(name, filename) for name, filename in specs]

    def find_task(self, slug) -> Optional[Task]:
        """Return the task whose slug matches, via first_true's default when none does."""
        return first_true(self.tasks, lambda candidate: candidate.slug == slug)

    def get_tasks(self):
        """Return the full list of registered tasks."""
        return self.tasks
class ViewDispatcher:
    """Routes a task slug to the view method that renders that task's results."""
    def __init__(self):
        # Slug -> bound view method; dispatch() raises KeyError for unknown slugs.
        self.dispatch_table = {
            "best-time-to-travel": self.best_time_to_travel,
            "popular-mrt-routes-on-weekends": self.popular_mrt_routes_on_weekends,
            "popular-end-trips": self.popular_end_trips,
            "popular-stations": self.popular_stations,
            "shopping-mall-traffic": self.shopping_mall_traffic,
        }
    def dispatch(self, slug):
        """Invoke and return the view registered for *slug*."""
        return self.dispatch_table[slug]()
    def best_time_to_travel(self):
        """Render best-time-to-travel results, optionally narrowed by ?filter=."""
        results = JsonLoader().load_file("results/best_time_to_travel.json")
        if filter_by := request.args.get("filter"):
            # Keep only entries whose key contains the upper-cased filter text.
            results = {
                key: value for key, value in results.items() if filter_by.upper() in key
            }
        return render_template("tasks/best_time.html", results=results)
    def popular_mrt_routes_on_weekends(self):
        """Render weekend/public-holiday MRT route results, optionally filtered."""
        results = JsonLoader().load_file(
            "results/pop_mrt_routes_on_weekends_publicholiday.json"
        )
        if filter_by := request.args.get("filter"):
            results = {
                key: value for key, value in results.items() if filter_by.upper() in key
            }
        return render_template("tasks/popular_mrt_routes.html", results=results)
    def popular_end_trips(self):
        """Render popular end-trip results per month (06-08), optionally filtered."""
        results = JsonLoader().load_file("results/popular_end_trip.json")
        if filter_by := request.args.get("filter"):
            filtered = {}
            for month in ["06", "07", "08"]:
                # Bug fix: key by the month itself.  The original used the
                # literal string "month", so every iteration overwrote the
                # previous one and months 06/07 were silently dropped.
                filtered[month] = {
                    key: value
                    for key, value in results[month].items()
                    if filter_by.upper() in key
                }
            results = filtered
        return render_template("tasks/popular_end_trips.html", results=results.items())
    def popular_stations(self):
        """Render popular-station lists, optionally filtered by station name."""
        results = JsonLoader().load_file("results/popular_stations.json")
        if filter_by := request.args.get("filter"):
            filtered = {}
            for key, stations in results.items():
                # NOTE(review): filter_by is not lower-cased here, unlike the
                # .upper() pattern used above — confirm whether a mixed-case
                # filter is meant to match.
                filtered[key] = [
                    station
                    for station in stations
                    if filter_by in station["station_name"].lower()
                ]
            results = filtered
        return render_template("tasks/popular_stations.html", results=results)
    def shopping_mall_traffic(self):
        """Shopping-mall traffic has its own page; redirect there."""
        return redirect(url_for("frontend.mall_index"))
| 2,989 | -13 | 390 |
22950088bb0aff0a2160c2ee47bc681bd56a33be | 10,983 | py | Python | hyperadmin/apirequests.py | zbyte64/django-hyperadmin | 9ac2ae284b76efb3c50a1c2899f383a27154cb54 | [
"BSD-3-Clause"
] | 25 | 2015-01-26T11:37:22.000Z | 2021-04-05T17:21:05.000Z | hyperadmin/apirequests.py | zbyte64/django-hyperadmin | 9ac2ae284b76efb3c50a1c2899f383a27154cb54 | [
"BSD-3-Clause"
] | 1 | 2015-04-13T04:19:49.000Z | 2015-04-13T04:19:49.000Z | hyperadmin/apirequests.py | zbyte64/django-hyperadmin | 9ac2ae284b76efb3c50a1c2899f383a27154cb54 | [
"BSD-3-Clause"
] | 2 | 2017-05-24T13:33:17.000Z | 2019-11-14T06:24:48.000Z | import mimeparse
from django.contrib.auth.models import AnonymousUser
from hyperadmin.states import State
class APIRequest(object):
"""
An API Request
"""
@property
@property
def get_response_type(self):
"""
Returns the active response type to be used
:rtype: string
"""
val = self.META.get('HTTP_ACCEPT', self.META.get('CONTENT_TYPE', ''))
media_types = self.media_types.keys()
if not media_types:
return val
return mimeparse.best_match(media_types, val) or val
def get_request_type(self):
"""
Returns the active request type to be used
:rtype: string
"""
val = self.META.get('CONTENT_TYPE', self.META.get('HTTP_ACCEPT', ''))
media_types = self.media_types.keys()
if not media_types:
return val
return mimeparse.best_match(media_types, val) or val
def get_request_media_type(self):
"""
Returns the request media type to be used or raises an error
:raises ValueError: when the requested content type is unrecognized
:rtype: string
"""
content_type = self.get_request_type()
media_type_cls = self.media_types.get(content_type, None)
if media_type_cls is None:
raise ValueError('Unrecognized request content type "%s". Choices are: %s' % (content_type, self.media_types.keys()))
return media_type_cls(self)
def get_response_media_type(self):
"""
Returns the response media type to be used or raises an error
:raises ValueError: when the requested content type is unrecognized
:rtype: string
"""
content_type = self.get_response_type()
media_type_cls = self.media_types.get(content_type, None)
if media_type_cls is None:
raise ValueError('Unrecognized request content type "%s". Choices are: %s' % (content_type, self.media_types.keys()))
return media_type_cls(self)
def get_endpoint(self, urlname):
"""
Returns a bound endpoint matching the urlname
:param urlname: The urlname to find
:type urlname: string
:raises KeyError: when the urlname does not match any endpoints
:rtype: Endpoint
"""
if urlname not in self.endpoint_state['endpoints']:
endpoint = self.site.get_endpoint_from_urlname(urlname)
bound_endpoint = endpoint.fork(api_request=self)
if bound_endpoint != self.endpoint_state['endpoints'][urlname]:
pass
return self.endpoint_state['endpoints'][urlname]
def record_endpoint(self, endpoint):
"""
Record the endpoint in our urlname cache
:param resource: Endpoint
"""
assert endpoint.api_request == self
urlname = endpoint.get_url_name()
if urlname not in self.endpoint_state['endpoints']:
self.endpoint_state['endpoints'][urlname] = endpoint
#else:
# original = self.endpoint_state['endpoints'][urlname]
# self.site.get_logger().debug('Double registration at api request level on %s by %s, original: %s' % (urlname, endpoint, original))
def get_link_prototypes(self, endpoint):
"""
Returns the link prototypes to be used by the endpint
:param endpoint: endpoint object
:rtype: list of link prototypes
"""
urlname = endpoint.get_url_name()
if urlname not in self.endpoint_state['link_prototypes']:
link_prototypes = endpoint.create_link_prototypes()
self.endpoint_state['link_prototypes'][urlname] = link_prototypes
return self.endpoint_state['link_prototypes'][urlname]
def get_site(self):
"""
Returns the bound site
:rtype: SiteResource
"""
if 'site' not in self.endpoint_state:
bound_site = self.site.fork(api_request=self)
self.endpoint_state['site'] = bound_site
return self.endpoint_state['site']
def generate_response(self, link, state):
"""
Returns a response generated from the response media type
:param link: The active link representing the endpoint's response
:param state: The endpoint's state
:rtype: [Http]Response
"""
media_type = self.get_response_media_type()
response_type = self.get_response_type()
return media_type.serialize(content_type=response_type, link=link, state=state)
def generate_options_response(self, links, state):
"""
Returns an OPTIONS response generated from the response media type
:param links: dictionary mapping available HTTP methods to a link
:param state: The endpoint's state
:rtype: [Http]Response
"""
media_type = self.get_response_media_type()
response_type = self.get_response_type()
return media_type.options_serialize(content_type=response_type, links=links, state=state)
class InternalAPIRequest(APIRequest):
"""
An Internal API Request
"""
class HTTPAPIRequest(APIRequest):
"""
Represents an API Request spawned from a Django HTTP Request
"""
get_to_meta_map = {
'_HTTP_ACCEPT':'HTTP_ACCEPT',
'_CONTENT_TYPE':'CONTENT_TYPE',
}
@property
@property
@property
@property
class Namespace(object):
"""
Represents alternative data associated with the current api request
Namespaced data is provided by another resource through an internal api request
"""
@property
@property
| 33.793846 | 143 | 0.634162 | import mimeparse
from django.contrib.auth.models import AnonymousUser
from hyperadmin.states import State
class APIRequest(object):
    """
    An API Request

    Carries per-request session state plus a cache of endpoints and link
    prototypes bound to this request.  Subclasses supply the transport
    (Django HTTP request or internal/programmatic call).
    """
    def __init__(self, site, path, url_args, url_kwargs, global_state=None):
        """
        :param site: the site object this request is served against
        :param path: the request path
        :param url_args: positional arguments resolved from the url
        :param url_kwargs: keyword arguments resolved from the url
        :param global_state: optional mapping merged into the session state
        """
        self.site = site
        self.path = path
        self.url_args = url_args
        self.url_kwargs = url_kwargs
        #self.payload = payload
        #self.method = method
        #self.user = user
        #self.params = params
        #self.META = meta
        self.session_state = State()
        self.endpoint_state = State()
        self.endpoint_state['endpoints'] = dict()
        self.endpoint_state['link_prototypes'] = dict()
        if global_state is not None:
            self.session_state.update(global_state)
        super(APIRequest, self).__init__()
    def get_django_request(self):
        """Return the underlying Django request; subclasses must implement."""
        raise NotImplementedError
    @property
    def META(self):
        # Header-style metadata stored under 'meta' in the session state
        # (populated by subclasses).
        return self.session_state['meta']
    @property
    def media_types(self):
        # Media types registered on the bound site.
        return self.get_site().media_types
    def get_response_type(self):
        """
        Returns the active response type to be used
        :rtype: string
        """
        # Prefer the Accept header; fall back to Content-Type, then ''.
        val = self.META.get('HTTP_ACCEPT', self.META.get('CONTENT_TYPE', ''))
        media_types = self.media_types.keys()
        if not media_types:
            return val
        return mimeparse.best_match(media_types, val) or val
    def get_request_type(self):
        """
        Returns the active request type to be used
        :rtype: string
        """
        # Mirror of get_response_type with the header preference reversed.
        val = self.META.get('CONTENT_TYPE', self.META.get('HTTP_ACCEPT', ''))
        media_types = self.media_types.keys()
        if not media_types:
            return val
        return mimeparse.best_match(media_types, val) or val
    def get_request_media_type(self):
        """
        Returns the request media type to be used or raises an error
        :raises ValueError: when the requested content type is unrecognized
        :rtype: string
        """
        content_type = self.get_request_type()
        media_type_cls = self.media_types.get(content_type, None)
        if media_type_cls is None:
            raise ValueError('Unrecognized request content type "%s". Choices are: %s' % (content_type, self.media_types.keys()))
        return media_type_cls(self)
    def get_response_media_type(self):
        """
        Returns the response media type to be used or raises an error
        :raises ValueError: when the requested content type is unrecognized
        :rtype: string
        """
        content_type = self.get_response_type()
        media_type_cls = self.media_types.get(content_type, None)
        if media_type_cls is None:
            raise ValueError('Unrecognized request content type "%s". Choices are: %s' % (content_type, self.media_types.keys()))
        return media_type_cls(self)
    def get_endpoint(self, urlname):
        """
        Returns a bound endpoint matching the urlname
        :param urlname: The urlname to find
        :type urlname: string
        :raises KeyError: when the urlname does not match any endpoints
        :rtype: Endpoint
        """
        if urlname not in self.endpoint_state['endpoints']:
            endpoint = self.site.get_endpoint_from_urlname(urlname)
            bound_endpoint = endpoint.fork(api_request=self)
            # NOTE(review): bound_endpoint is never explicitly stored here, so
            # the lookups below only succeed if fork() registers the bound
            # endpoint on this api request (e.g. via record_endpoint);
            # otherwise a first-time urlname raises KeyError.  Confirm against
            # the endpoint implementation.
            if bound_endpoint != self.endpoint_state['endpoints'][urlname]:
                pass
        return self.endpoint_state['endpoints'][urlname]
    def record_endpoint(self, endpoint):
        """
        Record the endpoint in our urlname cache
        :param endpoint: Endpoint (must already be bound to this api request)
        """
        assert endpoint.api_request == self
        urlname = endpoint.get_url_name()
        # First registration wins; duplicates are silently ignored.
        if urlname not in self.endpoint_state['endpoints']:
            self.endpoint_state['endpoints'][urlname] = endpoint
        #else:
        #    original = self.endpoint_state['endpoints'][urlname]
        #    self.site.get_logger().debug('Double registration at api request level on %s by %s, original: %s' % (urlname, endpoint, original))
    def get_link_prototypes(self, endpoint):
        """
        Returns the link prototypes to be used by the endpoint (cached per urlname)
        :param endpoint: endpoint object
        :rtype: list of link prototypes
        """
        urlname = endpoint.get_url_name()
        if urlname not in self.endpoint_state['link_prototypes']:
            link_prototypes = endpoint.create_link_prototypes()
            self.endpoint_state['link_prototypes'][urlname] = link_prototypes
        return self.endpoint_state['link_prototypes'][urlname]
    def get_site(self):
        """
        Returns the bound site
        :rtype: SiteResource
        """
        # The fork is created lazily and cached for the lifetime of the request.
        if 'site' not in self.endpoint_state:
            bound_site = self.site.fork(api_request=self)
            self.endpoint_state['site'] = bound_site
        return self.endpoint_state['site']
    def generate_response(self, link, state):
        """
        Returns a response generated from the response media type
        :param link: The active link representing the endpoint's response
        :param state: The endpoint's state
        :rtype: [Http]Response
        """
        media_type = self.get_response_media_type()
        response_type = self.get_response_type()
        return media_type.serialize(content_type=response_type, link=link, state=state)
    def generate_options_response(self, links, state):
        """
        Returns an OPTIONS response generated from the response media type
        :param links: dictionary mapping available HTTP methods to a link
        :param state: The endpoint's state
        :rtype: [Http]Response
        """
        media_type = self.get_response_media_type()
        response_type = self.get_response_type()
        return media_type.options_serialize(content_type=response_type, links=links, state=state)
    def reverse(self, name, *args, **kwargs):
        """Reverse a url name through the bound site."""
        return self.get_site().reverse(name, *args, **kwargs)
class InternalAPIRequest(APIRequest):
    """
    An Internal API Request

    Programmatic (non-HTTP) request: method/params/payload/full_path are
    supplied as keyword arguments instead of being parsed from a Django
    request.  (Python 2 code: note dict.iteritems below.)
    """
    def __init__(self, site, path='/', url_args=[], url_kwargs={}, **kwargs):
        # NOTE(review): url_args=[] and url_kwargs={} are shared mutable
        # defaults; safe only while no caller mutates them in place.
        super(InternalAPIRequest, self).__init__(site, path, url_args, url_kwargs)
        kwargs.setdefault('method', 'GET')
        kwargs.setdefault('params', {})
        kwargs.setdefault('payload', {})
        kwargs.setdefault('full_path', path)
        # Attach everything else (method, params, payload, full_path, user,
        # request, ...) directly as instance attributes.
        for key, val in kwargs.iteritems():
            setattr(self, key, val)
        self.session_state.update(self.get_session_data())
    def get_full_path(self):
        """Return the full path supplied (or defaulted) at construction."""
        return self.full_path
    def get_django_request(self):
        """Return the Django request attached via kwargs.

        NOTE(review): raises AttributeError unless a 'request' kwarg was given.
        """
        return self.request
    def get_session_data(self):
        """Build the initial session state for an internal request."""
        data = {'endpoints': {},
                'resources': {},
                #'request': request,
                'meta': {
                    'CONTENT_TYPE':'application/vnd.Collection.hyperadmin+JSON',
                },
                'extra_get_params':{},}
        if hasattr(self, 'user'):
            data['auth'] = self.user
        return data
class HTTPAPIRequest(APIRequest):
    """
    Represents an API Request spawned from a Django HTTP Request
    """
    # GET parameters that may override the matching header values in META
    # (see patched_meta); the raw overrides are also kept in session state
    # under 'extra_get_params' (see get_passthrough_params).
    get_to_meta_map = {
        '_HTTP_ACCEPT':'HTTP_ACCEPT',
        '_CONTENT_TYPE':'CONTENT_TYPE',
    }
    def __init__(self, request, **kwargs):
        """
        :param request: the Django HttpRequest to wrap
        """
        self.request = request
        kwargs.setdefault('path', request.path)
        super(HTTPAPIRequest, self).__init__(**kwargs)
        self.populate_session_data_from_request(request)
    @property
    def payload(self):
        """Deserialized request body; computed lazily via the request media type."""
        if not hasattr(self, '_payload'):
            media_type = self.get_request_media_type()
            self._payload = media_type.deserialize()
        return self._payload
    @property
    def method(self):
        """The HTTP method of the wrapped request."""
        return self.request.method
    def get_django_request(self):
        """Return the wrapped Django request."""
        return self.request
    def get_full_path(self):
        """Return the request path including the query string."""
        return self.request.get_full_path()
    @property
    def user(self):
        """The authenticated user, or AnonymousUser when none is in session state."""
        return self.session_state.get('auth', AnonymousUser())
    @property
    def params(self):
        """A cached, mutable copy of the GET parameters."""
        if not hasattr(self, '_params'):
            self._params = self.request.GET.copy()
        return self._params
    def get_session_data_from_request(self, request):
        """Build the initial session state from the Django request."""
        #TODO consult site object
        data = {'endpoints': {},
                'resources': {},
                'request': request,
                'meta': self.patched_meta(request),
                'extra_get_params': self.get_passthrough_params(request), }
        if hasattr(request, 'user'):
            data['auth'] = request.user
        return data
    def populate_session_data_from_request(self, request):
        """Merge request-derived session data into this api request's state."""
        data = self.get_session_data_from_request(request)
        self.session_state.update(data)
        #TODO set response type & request type
    def patched_meta(self, request):
        """Copy request.META, letting whitelisted GET params override headers."""
        meta = dict(request.META)
        for src, dst in self.get_to_meta_map.iteritems():  # Python 2 dict API
            if src in request.GET:
                val = request.GET[src]
                meta[dst] = val
        return meta
    def get_passthrough_params(self, request):
        """Collect the whitelisted GET overrides so they can be stored as
        extra GET params in the session state."""
        pass_through_params = dict()
        for src, dst in self.get_to_meta_map.iteritems():
            if src in request.GET:
                pass_through_params[src] = request.GET[src]
        return pass_through_params
class NamespaceAPIRequest(InternalAPIRequest):
    """Internal api request layered over an existing api request: it shares
    the original's user and Django request and gets its own site fork."""
    def __init__(self, api_request, **kwargs):
        """
        :param api_request: the originating api request to wrap
        """
        self.original_api_request = api_request
        kwargs.setdefault('full_path', self.original_api_request.get_full_path())
        kwargs.setdefault('site', api_request.site)
        super(NamespaceAPIRequest, self).__init__(**kwargs)
        self.site = api_request.site.fork(api_request=self)
        # State(substates=...) presumably layers this request's state over the
        # original's — confirm against hyperadmin.states.State.
        self.session_state = State(substates=[api_request.session_state])
    @property
    def user(self):
        """Authentication is delegated to the originating api request."""
        return self.original_api_request.user
    def get_django_request(self):
        """Return the originating api request's Django request."""
        return self.original_api_request.get_django_request()
class Namespace(object):
    """
    Represents alternative data associated with the current api request
    Namespaced data is provided by another resource through an internal api request
    """
    def __init__(self, name, endpoint, state_data={}):
        # NOTE(review): state_data={} is a shared mutable default; safe only
        # while it is never mutated in place.
        self.name = name
        self.api_request = NamespaceAPIRequest(endpoint.api_request)
        self.state_data = state_data
        # Re-bind the endpoint to the namespace-scoped api request, seed its
        # state, and register it on that request's endpoint cache.
        self.endpoint = endpoint.fork(api_request=self.api_request)
        self.endpoint.state.update(state_data)
        self.api_request.endpoint_state['endpoints'][self.endpoint.get_url_name()] = self.endpoint
    def get_namespaces(self):
        """Namespaces do not nest; always returns an empty dict."""
        return dict()
    def get_prompt(self):
        """Human-readable label, delegated to the bound endpoint."""
        return self.endpoint.get_prompt()
    @property
    def link(self):
        """The endpoint's link, computed once and cached on the instance."""
        if not hasattr(self, '_link'):
            self._link = self.endpoint.get_link()
        return self._link
    @property
    def state(self):
        """The bound endpoint's state."""
        return self.endpoint.state
| 4,512 | 119 | 687 |
c58493b3e91e0fba2da94168d92512744d33d261 | 433 | py | Python | Graphs/Kruskal algorithm/Testando.py | lucasEngdComp/graphs | da71f249c3ea0496f2a6a3695c66adeb4f3db43c | [
"MIT"
] | null | null | null | Graphs/Kruskal algorithm/Testando.py | lucasEngdComp/graphs | da71f249c3ea0496f2a6a3695c66adeb4f3db43c | [
"MIT"
] | null | null | null | Graphs/Kruskal algorithm/Testando.py | lucasEngdComp/graphs | da71f249c3ea0496f2a6a3695c66adeb4f3db43c | [
"MIT"
] | null | null | null | from grafo_adj_nao_dir import *
# Build the weighted undirected graph used to exercise Kruskal's algorithm.
g8 = Grafo([], {})
# Vertices a..g (same order as before).
for vertice in "abcdefg":
    g8.adicionaVertice(vertice)
# Weighted edges as (edge label "u-v", weight) pairs, inserted in the
# original order.
for aresta, peso in [
    ("a-b", 5), ("a-e", 12), ("b-g", 4), ("b-f", 9), ("c-d", 10),
    ("c-f", 7), ("d-f", 8), ("c-e", 10), ("e-g", 2), ("f-g", 6),
]:
    g8.adicionaAresta(aresta, peso)
print(g8.Kruskal()) | 18.826087 | 45 | 0.628176 | from grafo_adj_nao_dir import *
# Build an undirected weighted graph and run Kruskal's algorithm on it.
g8 = Grafo([], {})
# Vertices a..g.
for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:
    g8.adicionaVertice(i)
# Weighted edges "u-v" with their costs.
g8.adicionaAresta("a-b", 5)
g8.adicionaAresta("a-e", 12)
g8.adicionaAresta("b-g", 4)
g8.adicionaAresta("b-f", 9)
g8.adicionaAresta("c-d", 10)
g8.adicionaAresta("c-f", 7)
g8.adicionaAresta("d-f", 8)
g8.adicionaAresta("c-e", 10)
g8.adicionaAresta("e-g", 2)
g8.adicionaAresta("f-g", 6)
print(g8.Kruskal()) | 0 | 0 | 0 |
4a6394532327d3da3227cf7b82337e7a46ae0189 | 513 | py | Python | tcpping/tcpping.py | ipv6freely/tcpping | ac514c6855d70b76a23052c8d1d08cd4413a00bc | [
"MIT"
] | 4 | 2019-02-20T21:19:28.000Z | 2021-07-06T07:54:08.000Z | tcpping/tcpping.py | ipv6freely/tcpping | ac514c6855d70b76a23052c8d1d08cd4413a00bc | [
"MIT"
] | null | null | null | tcpping/tcpping.py | ipv6freely/tcpping | ac514c6855d70b76a23052c8d1d08cd4413a00bc | [
"MIT"
] | null | null | null | #!/usr/bin/python
import socket
def tcpping(host, port, timeout = 5):
    """Return True if a TCP connection to (host, port) succeeds within
    *timeout* seconds, else False.

    A lightweight reachability check ("TCP ping"), e.g. port 22 = SSH,
    23 = Telnet.  Default timeout is 5 seconds.
    Code "borrowed" from yantisj.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(timeout)
        s.connect((host, int(port)))
        # Connected: shut the read side before reporting success.
        s.shutdown(socket.SHUT_RD)
        return True
    except:
        # NOTE(review): bare except swallows everything (including
        # KeyboardInterrupt); catching OSError/socket.error would be safer.
        # Also the socket is never close()d on either path — small leak.
        # Falls through to the `return False` on the next line.
        pass
return False | 24.428571 | 65 | 0.584795 | #!/usr/bin/python
import socket
def tcpping(host, port, timeout = 5):
    """Return True if a TCP connection to (host, port) succeeds within
    *timeout* seconds, else False.

    A lightweight reachability check ("TCP ping"), e.g. port 22 = SSH,
    23 = Telnet.  Default timeout is 5 seconds.
    Code "borrowed" from yantisj.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(timeout)
        s.connect((host, int(port)))
        # Connected: shut the read side before reporting success.
        s.shutdown(socket.SHUT_RD)
        return True
    except:
        # NOTE(review): bare except swallows everything (including
        # KeyboardInterrupt); catching OSError/socket.error would be safer.
        # Also the socket is never close()d on either path — small leak.
        # Falls through to the `return False` on the next line.
        pass
return False | 0 | 0 | 0 |
98017ec3e8b7d27797898fc5e845bdf43e2a1e99 | 2,738 | py | Python | contrib/regridpack/Test/test_regridpack_dum.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 62 | 2018-03-30T15:46:56.000Z | 2021-12-08T23:30:24.000Z | contrib/regridpack/Test/test_regridpack_dum.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 114 | 2018-03-21T01:12:43.000Z | 2021-07-05T12:29:54.000Z | contrib/regridpack/Test/test_regridpack_dum.py | CDAT/uvcdat | 5133560c0c049b5c93ee321ba0af494253b44f91 | [
"BSD-3-Clause"
] | 14 | 2018-06-06T02:42:47.000Z | 2021-11-26T03:27:00.000Z | # Adapted for numpy/ma/cdms2 by convertcdms.py
# Exercises adamsregrid on 1D-4D synthetic fields (Python 2 script): each
# section regrids a test field with the four supported interpolators and
# plots the original plus the four results.
import adamsregrid
import numpy
import EzTemplate
import vcs.test.support
bg = vcs.test.support.bg
# One page with five plot slots: slot 0 for the source field, 1-4 for the
# four interpolation schemes.
ts=[]
M = EzTemplate.Multi(1,5)
for i in range(5):
    ts.append(M.get())
## Prepare axes
# Fine source axes (lon/lat converted from degrees to radians).
lon1 = numpy.arange(0,360,.25,'f')/180.*numpy.pi #.25 deg
lat1 = numpy.arange(0,180,2,'f')/180.*numpy.pi #2 deg
lev1 = numpy.arange(0,17,1,'f')+1. # 17 levs
tim1 = numpy.arange(0,24,3,'f')+1. # 3hourly data
# Coarser target axes.
lon2 = numpy.arange(0,360,5,'f')/180.*numpy.pi #5 deg
lat2 = numpy.arange(0,180,4,'f')/180.*numpy.pi #4 deg
lev2 = numpy.arange(0,17,4,'f')+1. # less levs
tim2 = numpy.arange(0,24,6,'f')+1. # 6hourly data
# Separable synthetic fields of increasing rank, built as outer products
# of functions of the source axes.
p1 = numpy.cos(lon1)
p2 = p1[numpy.newaxis,:]*numpy.sin(lat1)[:,numpy.newaxis]
p3 = p2[numpy.newaxis,:,:]*lev1[:,numpy.newaxis,numpy.newaxis]
p4 = p3[numpy.newaxis,:,:,:]*tim1[:,numpy.newaxis,numpy.newaxis,numpy.newaxis]
print 'Testing for 1D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p1,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,0)
    po1 = R.rgrd(p1)
    M.x.plot(po1,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
print 'Testing for 2D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p2,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,1,
                           lat1,lat2,interp,0)
    po2 = R.rgrd(p2)
    M.x.plot(po2,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
print 'Testing for 3D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p3,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,2,
                           lat1,lat2,interp,1,
                           lev1,lev2,interp,0,
                           )
    po3 = R.rgrd(p3)
    M.x.plot(po3,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
print 'Testing for 4D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p4,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,3,
                           lat1,lat2,interp,2,
                           lev1,lev2,interp,1,
                           tim1,tim2,interp,0,
                           )
    po4 = R.rgrd(p4)
    M.x.plot(po4,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
# Regrid only one dimension (longitude, index 1) of a 2D field.
print 'Testing for 1D array/grid passing 2D'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p2,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,1)
    po2 = R.rgrd(p2)
    M.x.plot(po2,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
| 27.38 | 78 | 0.607743 | # Adapted for numpy/ma/cdms2 by convertcdms.py
# Exercises adamsregrid on 1D-4D synthetic fields (Python 2 script): each
# section regrids a test field with the four supported interpolators and
# plots the original plus the four results.
import adamsregrid
import numpy
import EzTemplate
import vcs.test.support
bg = vcs.test.support.bg
# One page with five plot slots: slot 0 for the source field, 1-4 for the
# four interpolation schemes.
ts=[]
M = EzTemplate.Multi(1,5)
for i in range(5):
    ts.append(M.get())
## Prepare axes
# Fine source axes (lon/lat converted from degrees to radians).
lon1 = numpy.arange(0,360,.25,'f')/180.*numpy.pi #.25 deg
lat1 = numpy.arange(0,180,2,'f')/180.*numpy.pi #2 deg
lev1 = numpy.arange(0,17,1,'f')+1. # 17 levs
tim1 = numpy.arange(0,24,3,'f')+1. # 3hourly data
# Coarser target axes.
lon2 = numpy.arange(0,360,5,'f')/180.*numpy.pi #5 deg
lat2 = numpy.arange(0,180,4,'f')/180.*numpy.pi #4 deg
lev2 = numpy.arange(0,17,4,'f')+1. # less levs
tim2 = numpy.arange(0,24,6,'f')+1. # 6hourly data
# Separable synthetic fields of increasing rank, built as outer products
# of functions of the source axes.
p1 = numpy.cos(lon1)
p2 = p1[numpy.newaxis,:]*numpy.sin(lat1)[:,numpy.newaxis]
p3 = p2[numpy.newaxis,:,:]*lev1[:,numpy.newaxis,numpy.newaxis]
p4 = p3[numpy.newaxis,:,:,:]*tim1[:,numpy.newaxis,numpy.newaxis,numpy.newaxis]
print 'Testing for 1D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p1,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,0)
    po1 = R.rgrd(p1)
    M.x.plot(po1,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
print 'Testing for 2D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p2,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,1,
                           lat1,lat2,interp,0)
    po2 = R.rgrd(p2)
    M.x.plot(po2,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
print 'Testing for 3D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p3,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,2,
                           lat1,lat2,interp,1,
                           lev1,lev2,interp,0,
                           )
    po3 = R.rgrd(p3)
    M.x.plot(po3,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
print 'Testing for 4D array/grid'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p4,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,3,
                           lat1,lat2,interp,2,
                           lev1,lev2,interp,1,
                           tim1,tim2,interp,0,
                           )
    po4 = R.rgrd(p4)
    M.x.plot(po4,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
# Regrid only one dimension (longitude, index 1) of a 2D field.
print 'Testing for 1D array/grid passing 2D'
interps = ['linear','linearLog','cubic','cubicLog']
M.x.clear()
M.x.plot(p2,ts[0],bg=bg)
for i in range(4):
    interp = interps[i]
    R = adamsregrid.Regrid(lon1,lon2,interp,1)
    po2 = R.rgrd(p2)
    M.x.plot(po2,ts[i+1],bg=bg)
vcs.test.support.check_plot(M.x)
| 0 | 0 | 0 |
7eadabc7c4de06e58609de119bf7bf09ba166e40 | 554 | py | Python | sdk/python/pulumi_azure_native/batch/v20170501/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/v20170501/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/v20170501/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_package import *
from .batch_account import *
from .get_application import *
from .get_application_package import *
from .get_batch_account import *
from .list_batch_account_keys import *
from ._inputs import *
from . import outputs
| 30.777778 | 80 | 0.761733 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_package import *
from .batch_account import *
from .get_application import *
from .get_application_package import *
from .get_batch_account import *
from .list_batch_account_keys import *
from ._inputs import *
from . import outputs
| 0 | 0 | 0 |
b4e6ea818472c7f66018dee97e5d19c3d79c1231 | 3,225 | py | Python | download_scp_stream/download_ftp.py | ahmadsalimi/UtilityScripts | ed3e8f9601b706bf2b568ac26a6c23948c995223 | [
"MIT"
] | null | null | null | download_scp_stream/download_ftp.py | ahmadsalimi/UtilityScripts | ed3e8f9601b706bf2b568ac26a6c23948c995223 | [
"MIT"
] | null | null | null | download_scp_stream/download_ftp.py | ahmadsalimi/UtilityScripts | ed3e8f9601b706bf2b568ac26a6c23948c995223 | [
"MIT"
] | null | null | null | import ftplib
from ftplib import FTP
from sys import argv
import warnings
import functions as F
from tqdm import tqdm
import os
warnings.filterwarnings('ignore')
not_to_download = [
"LIDC-IDRI-0001",
"LIDC-IDRI-0004",
"LIDC-IDRI-0007",
"LIDC-IDRI-0010",
"LIDC-IDRI-0013",
"LIDC-IDRI-0016",
"LIDC-IDRI-0002",
"LIDC-IDRI-0005",
"LIDC-IDRI-0008",
"LIDC-IDRI-0011",
"LIDC-IDRI-0014",
"LIDC-IDRI-0017",
"LIDC-IDRI-0003",
"LIDC-IDRI-0006",
"LIDC-IDRI-0009",
"LIDC-IDRI-0012",
"LIDC-IDRI-0015"
]
def _is_ftp_dir(ftp, name, guess_by_extension=True):
    """Return True if *name* is a directory on the FTP server, else False.

    With guess_by_extension enabled (the default), a "." four characters from
    the end is taken as a file extension and short-circuits to False.
    """
    # if the name has a "." in the fourth to last position, its probably a file extension
    # this is MUCH faster than trying to set every file to a working directory, and will work 99% of time.
    if guess_by_extension is True:
        if len(name) >= 4:
            if name[-4] == '.':
                return False
    original_cwd = ftp.pwd() # remember the current working directory
    try:
        ftp.cwd(name) # try to set directory to new name
        ftp.cwd(original_cwd) # set it back to what it was
        return True
    except ftplib.error_perm as e:
        # CWD refused with a permission error: treat the entry as a file.
        print(e)
        return False
    except Exception as e:
        # Any other failure (timeouts, protocol errors) also counts as "not a
        # directory".  NOTE(review): errors are printed, not logged/raised.
        print(e)
        return False
if __name__ == "__main__":
    # Entry point: validate the CLI arguments, connect to both servers, mirror.
    # Fix: compare the argument count with !=, not the identity operator
    # "is not", which only works by accident for small interned ints and is a
    # SyntaxWarning on modern Pythons.
    if len(argv) != 7:
        print("Usage: python download_ftp.py ftp_server ftp_path ssh_server ssh_username ssh_password ssh_path")
    else:
        ftp_server = argv[1]
        ftp_path = argv[2]
        ssh_server = argv[3]
        ssh_username = argv[4]
        ssh_password = argv[5]
        ssh_path = argv[6]
        # Anonymous FTP login; the SFTP credentials come from the command line.
        ftp = FTP(ftp_server)
        ftp.login("", "")
        sftp = F.sftp_connect(ssh_server, ssh_username, ssh_password)
        download_ftp_tree(ftp, ftp_path, sftp, ssh_path)
| 28.539823 | 112 | 0.637519 | import ftplib
from ftplib import FTP
from sys import argv
import warnings
import functions as F
from tqdm import tqdm
import os
warnings.filterwarnings('ignore')
not_to_download = [
"LIDC-IDRI-0001",
"LIDC-IDRI-0004",
"LIDC-IDRI-0007",
"LIDC-IDRI-0010",
"LIDC-IDRI-0013",
"LIDC-IDRI-0016",
"LIDC-IDRI-0002",
"LIDC-IDRI-0005",
"LIDC-IDRI-0008",
"LIDC-IDRI-0011",
"LIDC-IDRI-0014",
"LIDC-IDRI-0017",
"LIDC-IDRI-0003",
"LIDC-IDRI-0006",
"LIDC-IDRI-0009",
"LIDC-IDRI-0012",
"LIDC-IDRI-0015"
]
def _is_ftp_dir(ftp, name, guess_by_extension=True):
    """Return True if *name* is a directory on the FTP server, else False.

    With guess_by_extension enabled (the default), a "." four characters from
    the end is taken as a file extension and short-circuits to False.
    """
    # if the name has a "." in the fourth to last position, its probably a file extension
    # this is MUCH faster than trying to set every file to a working directory, and will work 99% of time.
    if guess_by_extension is True:
        if len(name) >= 4:
            if name[-4] == '.':
                return False
    original_cwd = ftp.pwd() # remember the current working directory
    try:
        ftp.cwd(name) # try to set directory to new name
        ftp.cwd(original_cwd) # set it back to what it was
        return True
    except ftplib.error_perm as e:
        # CWD refused with a permission error: treat the entry as a file.
        print(e)
        return False
    except Exception as e:
        # Any other failure (timeouts, protocol errors) also counts as "not a
        # directory".  NOTE(review): errors are printed, not logged/raised.
        print(e)
        return False
def generate_sftp_write(file, bar):
    """Build a chunk callback for FTP.retrbinary: each chunk is written to
    *file* and the progress *bar* is advanced by the chunk size."""
    def _write_chunk(chunk):
        file.write(chunk)
        bar.update(len(chunk))
    return _write_chunk
def _make_parent_dir(sftp, sftp_path):
dirname = os.path.dirname(sftp_path)
if not sftp.exists(dirname):
_make_parent_dir(sftp, dirname)
sftp.mkdir(dirname)
def _download_ftp_file(ftp, ftp_path, sftp, sftp_path):
    # Stream a single file from FTP straight to the SFTP server, showing a
    # byte-based progress bar; parent directories are created first.
    print("downloading " + ftp_path + " to " + sftp_path)
    _make_parent_dir(sftp, sftp_path)
    # NOTE(review): mode 'w' may be a text mode on some SFTP backends even
    # though retrbinary delivers bytes — confirm whether 'wb' is required.
    with sftp.open(sftp_path, 'w') as file:
        with tqdm(unit='B', unit_scale=True, unit_divisor=1024) as bar:
            ftp.retrbinary("RETR {0}".format(ftp_path), generate_sftp_write(file, bar))
def _mirror_ftp_dir(ftp, ftp_path, sftp, sftp_path):
    # Recursively mirror one FTP directory onto the SFTP side.  Directories
    # whose basename appears in the module-level not_to_download list are
    # skipped entirely.
    if ftp_path.split('/')[-1] in not_to_download:
        print('skipping ' + ftp_path)
        return
    for item in ftp.nlst(ftp_path):
        destination = sftp_path + '/' + item.split('/')[-1]
        # guess_by_extension is disabled (False): exact CWD probing, slower
        # but never misclassifies a dotted directory name.
        if _is_ftp_dir(ftp, item, False):
            _mirror_ftp_dir(ftp, item, sftp, destination)
        else:
            _download_ftp_file(ftp, item, sftp, destination)
def download_ftp_tree(ftp, ftp_path, sftp, sftp_path):
    """Mirror the FTP tree rooted at *ftp_path* into *sftp_path* on the SFTP
    server, after normalizing the leading/trailing slashes."""
    ftp_path = ftp_path.lstrip('/')
    sftp_path = sftp_path.rstrip('/')
    _mirror_ftp_dir(ftp, ftp_path, sftp, sftp_path)
if __name__ == "__main__":
    # Entry point: validate the CLI arguments, connect to both servers, mirror.
    # Fix: compare the argument count with !=, not the identity operator
    # "is not", which only works by accident for small interned ints and is a
    # SyntaxWarning on modern Pythons.
    if len(argv) != 7:
        print("Usage: python download_ftp.py ftp_server ftp_path ssh_server ssh_username ssh_password ssh_path")
    else:
        ftp_server = argv[1]
        ftp_path = argv[2]
        ssh_server = argv[3]
        ssh_username = argv[4]
        ssh_password = argv[5]
        ssh_path = argv[6]
        # Anonymous FTP login; the SFTP credentials come from the command line.
        ftp = FTP(ftp_server)
        ftp.login("", "")
        sftp = F.sftp_connect(ssh_server, ssh_username, ssh_password)
        download_ftp_tree(ftp, ftp_path, sftp, ssh_path)
| 1,183 | 0 | 115 |
50b767afc66ad1d4c465ef8bd4b9e5ce3c763ee8 | 686 | py | Python | exercise/views.py | Kartero/acme | d9c6b15c0fbacc47a1283544dfd9192a10cb2271 | [
"MIT"
] | null | null | null | exercise/views.py | Kartero/acme | d9c6b15c0fbacc47a1283544dfd9192a10cb2271 | [
"MIT"
] | null | null | null | exercise/views.py | Kartero/acme | d9c6b15c0fbacc47a1283544dfd9192a10cb2271 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import Action
from .models import Sport
from .forms import ActionForm
| 24.5 | 58 | 0.661808 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import Action
from .models import Sport
from .forms import ActionForm
def index(request):
    """Show the 30 most recent actions, all sports, and the add-action form."""
    context = {
        'actions': Action.objects.order_by('-date')[:30],
        'sports': Sport.objects.only('id', 'name'),
        'action_form': ActionForm(),
    }
    return render(request, 'exercise/index.html', context)
def add(request):
    """Create an Action from the submitted form, then redirect to /exercise.

    Bug fix: the original returned None (not a valid Django response) for
    GET requests and for invalid form submissions, which makes Django raise
    ValueError. Every path now ends in a redirect back to the exercise index.
    """
    if request.method == 'POST':
        form = ActionForm(request.POST)
        if form.is_valid():
            form.save()
    return HttpResponseRedirect('/exercise')
| 475 | 0 | 46 |
d78692e94b0a47ee671f0c39abaed36175cfc731 | 1,951 | py | Python | home/utils.py | ganlvtech/blueking-django-startup-project | 042aa36b0757c0d3929d88bc23534f54963d333e | [
"MIT"
] | 1 | 2018-11-22T21:13:25.000Z | 2018-11-22T21:13:25.000Z | home/utils.py | ganlvtech/blueking-django-startup-project | 042aa36b0757c0d3929d88bc23534f54963d333e | [
"MIT"
] | null | null | null | home/utils.py | ganlvtech/blueking-django-startup-project | 042aa36b0757c0d3929d88bc23534f54963d333e | [
"MIT"
] | null | null | null | import os
from django.conf import settings
from django.shortcuts import render
def render_special_markdown_template(request, template_name, relative_path):
    """
    Render *template_name* with the Markdown file converted to HTML as ``content``.

    :param request: current HTTP request
    :param template_name: template that receives the converted ``content``
    :param relative_path: markdown relative path from BASE_DIR like 'docs/demos.md'
    :return: the response produced by ``render``
    """
    path = os.path.join(settings.BASE_DIR, relative_path)
    content = markdown_from_file(path)
    return render(request, template_name, {
        'content': content
    })
def render_markdown_template(request, title, heading, relative_path, leads=None):
    """
    Render the shared 'home/markdown.html' template with a converted Markdown file.

    :param request: current HTTP request
    :param title: page title
    :param heading: page heading
    :param leads: optional iterable of lead paragraphs; defaults to empty
    :param relative_path: markdown relative path from BASE_DIR like 'docs/demos.md'
    :return: the response produced by ``render``
    """
    if leads is None:
        leads = ()
    path = os.path.join(settings.BASE_DIR, relative_path)
    content = markdown_from_file(path)
    return render(request, 'home/markdown.html', {
        'title': title,
        'heading': heading,
        'leads': leads,
        'content': content,
    })
def render_plain_text_file(request, title, heading, relative_path):
    """
    Render a plain text file (located relative to BASE_DIR) as a page.

    :param request: current HTTP request
    :param title: page title
    :param heading: page heading
    :param relative_path: file relative path from BASE_DIR like 'LICENSE'
    :return: the response produced by ``render_plain_text_content``
    """
    path = os.path.join(settings.BASE_DIR, relative_path)
    content = read_file(path)
    return render_plain_text_content(request, title, heading, content)
| 26.364865 | 83 | 0.666838 | import os
from django.conf import settings
from django.shortcuts import render
def read_file(path):
    """Return the full contents of *path* decoded as UTF-8 text."""
    from io import open
    with open(path, 'r', encoding='utf-8') as handle:
        contents = handle.read()
    return contents
def markdown_from_file(path):
    """Convert the Markdown file at *path* to an HTML string.

    Fenced code blocks and tables are enabled via markdown extensions.
    """
    import markdown
    return markdown.markdown(read_file(path), extensions=['fenced_code', 'tables'])
def render_special_markdown_template(request, template_name, relative_path):
    """
    Render *template_name*, passing the converted Markdown file as ``content``.

    :param request: current HTTP request
    :param template_name: template that receives ``content``
    :param relative_path: markdown relative path from BASE_DIR like 'docs/demos.md'
    :return: the rendered response
    """
    source = os.path.join(settings.BASE_DIR, relative_path)
    return render(request, template_name, {
        'content': markdown_from_file(source)
    })
def render_markdown_template(request, title, heading, relative_path, leads=None):
    """
    Render the shared 'home/markdown.html' template with a converted Markdown file.

    :param request: current HTTP request
    :param title: page title
    :param heading: page heading
    :param leads: optional iterable of lead paragraphs; defaults to empty
    :param relative_path: markdown relative path from BASE_DIR like 'docs/demos.md'
    :return: the rendered response
    """
    source = os.path.join(settings.BASE_DIR, relative_path)
    context = {
        'title': title,
        'heading': heading,
        'leads': () if leads is None else leads,
        'content': markdown_from_file(source),
    }
    return render(request, 'home/markdown.html', context)
def render_plain_text_content(request, title, heading, content):
    """Render plain text *content* inside the 'home/plain_text.html' template."""
    context = {
        'title': title,
        'heading': heading,
        'content': content,
    }
    return render(request, 'home/plain_text.html', context)
def render_plain_text_file(request, title, heading, relative_path):
    """
    Render a plain text file (located relative to BASE_DIR) as a page.

    :param request: current HTTP request
    :param title: page title
    :param heading: page heading
    :param relative_path: file relative path from BASE_DIR like 'LICENSE'
    :return: the rendered response
    """
    text = read_file(os.path.join(settings.BASE_DIR, relative_path))
    return render_plain_text_content(request, title, heading, text)
| 423 | 0 | 69 |
8d1bb6a6cdb4c0d1cf1b0317d4c288b977a515dd | 1,401 | py | Python | tests/resources/django_project/run_tornado.py | ritesh-loanstreet/deploy-scripts | a6d392f1f91777a01114a6247e27635af7ba338c | [
"MIT"
] | 7 | 2021-03-09T10:22:09.000Z | 2022-03-13T00:38:47.000Z | tests/resources/django_project/run_tornado.py | ritesh-loanstreet/deploy-scripts | a6d392f1f91777a01114a6247e27635af7ba338c | [
"MIT"
] | 1 | 2021-06-15T08:25:00.000Z | 2021-06-15T08:25:00.000Z | tests/resources/django_project/run_tornado.py | ritesh-loanstreet/deploy-scripts | a6d392f1f91777a01114a6247e27635af7ba338c | [
"MIT"
] | 4 | 2021-04-24T16:18:30.000Z | 2021-12-01T11:34:49.000Z | #!/usr/bin/env python
#
# Runs a Tornado web server with a django project
# Make sure to edit the DJANGO_SETTINGS_MODULE to point to your settings.py
#
# http://localhost:8080/hello-tornado
# http://localhost:8080
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import asyncio
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.wsgi
from tornado.options import options, define, parse_command_line
from tornado.platform.asyncio import AsyncIOMainLoop
from django.core.wsgi import get_wsgi_application
define('port', type=int, default=8001)
if __name__ == '__main__':
    # NOTE(review): `main` is not defined anywhere in this file as shown;
    # running this entry point would raise NameError — confirm where
    # `main` is supposed to come from.
    main()
| 25.472727 | 82 | 0.745182 | #!/usr/bin/env python
#
# Runs a Tornado web server with a django project
# Make sure to edit the DJANGO_SETTINGS_MODULE to point to your settings.py
#
# http://localhost:8080/hello-tornado
# http://localhost:8080
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import asyncio
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.wsgi
from tornado.options import options, define, parse_command_line
from tornado.platform.asyncio import AsyncIOMainLoop
from django.core.wsgi import get_wsgi_application
define('port', type=int, default=8001)
def main():
    """Serve the Django project's WSGI application through a Tornado server.

    Configures DJANGO_SETTINGS_MODULE, wraps the WSGI app in a Tornado
    fallback handler, listens on --port, and runs the IOLoop forever.
    """
    sys.path.append('django_project')  # path to your project if needed
    os.environ['DJANGO_SETTINGS_MODULE'] = 'django_project.settings'
    parse_command_line()
    wsgi_app = get_wsgi_application()
    container = tornado.wsgi.WSGIContainer(wsgi_app)
    # Bug fix: the Application was constructed twice in a row; build it once.
    tornado_app = tornado.web.Application([
        ('.*', tornado.web.FallbackHandler, dict(fallback=container)),
    ])
    server = tornado.httpserver.HTTPServer(tornado_app)
    server.listen(options.port)
    # IOLoop.start() blocks until the loop is stopped.  The
    # AsyncIOMainLoop().install() and asyncio run_forever() calls that used
    # to follow it were unreachable dead code (and install() must happen
    # before any IOLoop exists anyway), so they have been removed.
    tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
| 705 | 0 | 23 |
c713efaf80927ef782e954232582b9695a3f7b3d | 3,157 | py | Python | mac/google-cloud-sdk/lib/googlecloudsdk/command_lib/app/firewall_rules_util.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | mac/google-cloud-sdk/lib/googlecloudsdk/command_lib/app/firewall_rules_util.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 4 | 2020-07-21T12:51:46.000Z | 2022-01-22T10:29:25.000Z | mac/google-cloud-sdk/lib/googlecloudsdk/command_lib/app/firewall_rules_util.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for `gcloud app firewall-rules`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import resources
import six
# The default rule is placed at MaxInt32 - 1 and is always evaluated last
DEFAULT_RULE_PRIORITY = 2**31 - 1
LIST_FORMAT = """
table(
priority:sort=1,
action,
source_range,
description
)
"""
registry = resources.REGISTRY
def ParseFirewallRule(client, priority):
  """Creates a resource path given a firewall rule priority.

  Args:
    client: AppengineFirewallApiClient, the API client for this release track.
    priority: str, the priority of the rule.

  Returns:
    The resource for the rule.
  """
  # Normalize 'default' / numeric strings via ParsePriority before building
  # the ingressRules resource path for the client's API version.
  res = GetRegistry(client.ApiVersion()).Parse(
      six.text_type(ParsePriority(priority)),
      params={'appsId': client.project},
      collection='appengine.apps.firewall.ingressRules')
  return res
def ParsePriority(priority):
  """Converts a priority to an integer."""
  # 'default' denotes the always-evaluated-last rule at MaxInt32 - 1.
  if priority == 'default':
    priority = DEFAULT_RULE_PRIORITY
  try:
    priority_int = int(priority)
    if priority_int <= 0 or priority_int > DEFAULT_RULE_PRIORITY:
      raise exceptions.InvalidArgumentException(
          'priority', 'Priority must be between 1 and {0} inclusive.'.format(
              DEFAULT_RULE_PRIORITY))
    return priority_int
  except ValueError:
    # Non-numeric input (other than the literal 'default') is rejected.
    raise exceptions.InvalidArgumentException(
        'priority', 'Priority should be an integer value or `default`.')
def ParseAction(messages, action):
  """Converts an action string to the corresponding enum value.

  Options are: 'allow' or 'deny', otherwise None will be returned.

  Args:
    messages: apitools.base.protorpclite.messages, the proto messages class for
      this API version for firewall.
    action: str, the action as a string

  Returns:
    ActionValueValuesEnum type
  """
  if not action:
    return None
  # Enum value names are uppercase ('ALLOW' / 'DENY').
  return messages.FirewallRule.ActionValueValuesEnum(action.upper())
| 29.783019 | 79 | 0.733925 | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for `gcloud app firewall-rules`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import resources
import six
# The default rule is placed at MaxInt32 - 1 and is always evaluated last
DEFAULT_RULE_PRIORITY = 2**31 - 1
LIST_FORMAT = """
table(
priority:sort=1,
action,
source_range,
description
)
"""
registry = resources.REGISTRY
def GetRegistry(version):
  """Returns a resource registry with the given appengine API version registered.

  If the global REGISTRY does not already know the version, the module-level
  ``registry`` is replaced with a clone that has the version registered.
  """
  global registry
  try:
    resources.REGISTRY.GetCollectionInfo('appengine', version)
  except resources.InvalidCollectionException:
    registry = resources.REGISTRY.Clone()
    registry.RegisterApiByName('appengine', version)
  return registry
def ParseFirewallRule(client, priority):
  """Creates a resource path given a firewall rule priority.

  Args:
    client: AppengineFirewallApiClient, the API client for this release track.
    priority: str, the priority of the rule.

  Returns:
    The resource for the rule.
  """
  # Normalize 'default' / numeric strings via ParsePriority before building
  # the ingressRules resource path for the client's API version.
  res = GetRegistry(client.ApiVersion()).Parse(
      six.text_type(ParsePriority(priority)),
      params={'appsId': client.project},
      collection='appengine.apps.firewall.ingressRules')
  return res
def ParsePriority(priority):
  """Converts a rule priority ('default' or a numeric value) to an integer.

  Raises InvalidArgumentException for non-numeric or out-of-range input.
  """
  if priority == 'default':
    # The default rule lives at MaxInt32 - 1 and is evaluated last.
    priority = DEFAULT_RULE_PRIORITY
  try:
    value = int(priority)
    if not 0 < value <= DEFAULT_RULE_PRIORITY:
      raise exceptions.InvalidArgumentException(
          'priority', 'Priority must be between 1 and {0} inclusive.'.format(
              DEFAULT_RULE_PRIORITY))
    return value
  except ValueError:
    raise exceptions.InvalidArgumentException(
        'priority', 'Priority should be an integer value or `default`.')
def ParseAction(messages, action):
  """Converts an action string to the corresponding enum value.

  Options are: 'allow' or 'deny'; a falsy action yields None.

  Args:
    messages: apitools.base.protorpclite.messages, the proto messages class for
      this API version for firewall.
    action: str, the action as a string

  Returns:
    ActionValueValuesEnum type, or None when no action was given.
  """
  if action:
    # Enum value names are uppercase ('ALLOW' / 'DENY').
    return messages.FirewallRule.ActionValueValuesEnum(action.upper())
  return None
def RaiseMinArgument():
  # Shared helper: fail update commands that supplied none of the
  # updatable firewall-rule attributes.
  raise exceptions.MinimumArgumentException([
      '--action', '--source-range', '--description'
  ], 'Please specify at least one attribute to the firewall-rules update.')
| 428 | 0 | 46 |
2a51e61f2f46aecfcdd03a417f71785673571e38 | 1,760 | py | Python | templaterex/test/test_base.py | troxel/TemplateRex-Python | 69982c34bcdff3787ce0681e22a3c47a1f40c79d | [
"MIT"
] | null | null | null | templaterex/test/test_base.py | troxel/TemplateRex-Python | 69982c34bcdff3787ce0681e22a3c47a1f40c79d | [
"MIT"
] | 1 | 2018-12-17T01:47:00.000Z | 2018-12-17T01:47:00.000Z | templaterex/test/test_base.py | troxel/TemplateRex-Python | 69982c34bcdff3787ce0681e22a3c47a1f40c79d | [
"MIT"
] | null | null | null | import unittest
import pprint
import sys
import os
import json
import datetime
# The test object
sys.path.append('../')
from template import TemplateRex
fspec_template = 't-detail_base.html'
fspec_tsections = "./test_data/tsections_base.py"
fspec_render = "./test_data/trender_base.html"
fspec_data_flwr = "./test_data/data_flwr.json"
# set as true to make new set to test data
#tdata_make = False
#if tdata_make: print("\nWarning test data be generated!\n\n")
global display_flg, tdata_make_flg
display_flg = 0
tdata_make_flg = 0
# ----------------------
# ----------------------
if __name__ == '__main__':
    # Optional trailing CLI flag: '-d' enables printing the rendered
    # output, '-m' enables (re)generating the expected-output test data.
    if len(sys.argv) > 1:
        arg1 = sys.argv.pop()
        if arg1 == '-d':
            display_flg = 1
        if arg1 == '-m':
            tdata_make_flg = 1
    unittest.main()
| 23.466667 | 67 | 0.615909 | import unittest
import pprint
import sys
import os
import json
import datetime
# The test object
sys.path.append('../')
from template import TemplateRex
fspec_template = 't-detail_base.html'
fspec_tsections = "./test_data/tsections_base.py"
fspec_render = "./test_data/trender_base.html"
fspec_data_flwr = "./test_data/data_flwr.json"
# set as true to make new set to test data
#tdata_make = False
#if tdata_make: print("\nWarning test data be generated!\n\n")
global display_flg, tdata_make_flg
display_flg = 0
tdata_make_flg = 0
class TestCase(unittest.TestCase):
    """Round-trip test for TemplateRex base-template rendering."""
    # ----------------------
    def test_template_base_render(self):
        # Render the base template from the flower-row JSON data and
        # compare the result against the stored expected-output file.
        trex = TemplateRex(fname=fspec_template)
        fid = open(fspec_data_flwr,'r')
        row_data = json.load(fid)
        fid.close()
        inc = 1
        for row in row_data[0]:
            # Number the rows, rendering each 'row' section in turn.
            row['inc'] = inc
            trm = trex.render_sec('row', row )
            inc += 1
        rtn = trex.render_sec('tbl')
        rtn = trex.render_sec('ftr')
        rtn = trex.render_sec('content')
        date_now = datetime.datetime(2017,7,17)  # fixed literal date
        rtn = trex.render_sec('incl_note',{'date_now':date_now})
        rtn_str = trex.render()
        if display_flg:
            print("-----------\n");print(rtn_str);print("-----------\n");
        if tdata_make_flg:
            # '-m' mode: overwrite the expected-output file with this run.
            fid = open( fspec_render ,'w')
            fid.write(rtn_str)
            fid.close()
            print("Creating!!!! {0} test data".format(fspec_render))
        fid = open( fspec_render,'r')
        trender_str = fid.read()
        fid.close()
        self.assertTrue(rtn_str == trender_str)
    # ----------------------
if __name__ == '__main__':
    # Optional trailing CLI flag: '-d' enables printing the rendered
    # output, '-m' enables (re)generating the expected-output test data.
    if len(sys.argv) > 1:
        arg1 = sys.argv.pop()
        if arg1 == '-d':
            display_flg = 1
        if arg1 == '-m':
            tdata_make_flg = 1
    unittest.main()
| 892 | 13 | 47 |
384f33733073595e6a81f5fe0ec864fcd4f8abfe | 1,994 | py | Python | optuna/samplers/nsgaii/_crossovers/_base.py | captain-pool/optuna | 2ae8c17afea54362460320870304c763e91c0596 | [
"MIT"
] | 1,300 | 2018-12-03T06:11:11.000Z | 2019-11-15T01:28:25.000Z | optuna/samplers/nsgaii/_crossovers/_base.py | captain-pool/optuna | 2ae8c17afea54362460320870304c763e91c0596 | [
"MIT"
] | 274 | 2018-12-04T09:54:07.000Z | 2019-11-15T02:23:18.000Z | optuna/samplers/nsgaii/_crossovers/_base.py | captain-pool/optuna | 2ae8c17afea54362460320870304c763e91c0596 | [
"MIT"
] | 148 | 2018-12-03T10:48:50.000Z | 2019-11-11T16:37:51.000Z | import abc
import numpy as np
from optuna.study import Study
class BaseCrossover(object, metaclass=abc.ABCMeta):
    """Abstract interface for NSGA-II crossover operators.

    A crossover combines the parameters of ``n_parents`` parent individuals
    into one new parameter combination; :class:`~optuna.samplers.NSGAIISampler`
    uses it when producing offspring.

    .. note::
        Concrete implementations only receive parameters from numerical
        distributions; at the moment the crossover for categorical
        parameters (uniform crossover) is built into
        :class:`~optuna.samplers.NSGAIISampler` itself.
    """

    @property
    @abc.abstractmethod
    def n_parents(self) -> int:
        """Number of parent individuals consumed by one crossover."""
        raise NotImplementedError

    @abc.abstractmethod
    def crossover(
        self,
        parents_params: np.ndarray,
        rng: np.random.RandomState,
        study: Study,
        search_space_bounds: np.ndarray,
    ) -> np.ndarray:
        """Combine the selected parents' parameters into a new combination.

        Called from :func:`~optuna.samplers.NSGAIISampler.sample_relative`.

        Args:
            parents_params:
                Array of shape ``(num_parents, num_parameters)`` holding each
                parent's parameter vector; continuous for numerical
                parameters.
            rng:
                An instance of ``numpy.random.RandomState``.
            study:
                Target study object.
            search_space_bounds:
                Array of shape ``(len_search_space, 2)`` with numerical
                distribution bounds built from the transformed search space.

        Returns:
            A 1-dimensional ``numpy.ndarray`` containing the new parameter
            combination.
        """
        raise NotImplementedError
| 32.688525 | 96 | 0.652457 | import abc
import numpy as np
from optuna.study import Study
class BaseCrossover(object, metaclass=abc.ABCMeta):
    """Abstract interface for NSGA-II crossover operators.

    A crossover combines the parameters of ``n_parents`` parent individuals
    into one new parameter combination; :class:`~optuna.samplers.NSGAIISampler`
    uses it when producing offspring.

    .. note::
        Concrete implementations only receive parameters from numerical
        distributions; at the moment the crossover for categorical
        parameters (uniform crossover) is built into
        :class:`~optuna.samplers.NSGAIISampler` itself.
    """

    def __str__(self) -> str:
        """Report the operator by its concrete class name."""
        return self.__class__.__name__

    @property
    @abc.abstractmethod
    def n_parents(self) -> int:
        """Number of parent individuals consumed by one crossover."""
        raise NotImplementedError

    @abc.abstractmethod
    def crossover(
        self,
        parents_params: np.ndarray,
        rng: np.random.RandomState,
        study: Study,
        search_space_bounds: np.ndarray,
    ) -> np.ndarray:
        """Combine the selected parents' parameters into a new combination.

        Called from :func:`~optuna.samplers.NSGAIISampler.sample_relative`.

        Args:
            parents_params:
                Array of shape ``(num_parents, num_parameters)`` holding each
                parent's parameter vector; continuous for numerical
                parameters.
            rng:
                An instance of ``numpy.random.RandomState``.
            study:
                Target study object.
            search_space_bounds:
                Array of shape ``(len_search_space, 2)`` with numerical
                distribution bounds built from the transformed search space.

        Returns:
            A 1-dimensional ``numpy.ndarray`` containing the new parameter
            combination.
        """
        raise NotImplementedError
| 44 | 0 | 27 |
6368df4eac03e9269e18eff3db750c6fd33be03b | 4,292 | py | Python | analysis/scripts/preprocess_correlates.py | nivlab/sciops | e53bad3b177a653d43b800d86f925b0c3722a0b3 | [
"MIT"
] | null | null | null | analysis/scripts/preprocess_correlates.py | nivlab/sciops | e53bad3b177a653d43b800d86f925b0c3722a0b3 | [
"MIT"
] | null | null | null | analysis/scripts/preprocess_correlates.py | nivlab/sciops | e53bad3b177a653d43b800d86f925b0c3722a0b3 | [
"MIT"
] | null | null | null | import os
import numpy as np
from pandas import DataFrame, read_csv
from os.path import dirname, join
ROOT_DIR = dirname(dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(ROOT_DIR, 'data')
STAN_DIR = os.path.join(ROOT_DIR, 'stan_results')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load metadata and restrict participants.
metadata = read_csv(os.path.join('data','metadata.csv'))
metadata = metadata.loc[metadata['prev_complete']=="No",['platform','subject']].copy()
## Load task data and restrict participants.
data = read_csv(os.path.join('data','data.csv'))
data = data.loc[data.subject.isin(metadata.subject)]
## Initialize correlates DataFrame.
corr = metadata[['platform','subject']].copy()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.1 Accuracy.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.1 Computing accuracy.')
## Compute accuracy.
gb = data.groupby(['platform','subject']).accuracy.mean().reset_index()
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.2 Total Points.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.2 Computing points total.')
## Compute points.
gb = data.groupby(['platform','subject']).outcome.sum().reset_index()
gb = gb.rename(columns={'outcome':'points'})
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.3 Win-Stay Rates.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.3 Computing win-stay rates.')
## Determine previous win trials.
f = lambda x: np.roll(x, 1)
data['prev_win'] = data.groupby(['platform','subject']).outcome.transform(f)
data.loc[data.trial==1, 'prev_win'] = np.nan
## Determine stay trials.
f = lambda x: (x == np.roll(x,1)).astype(int)
data['stay'] = data.groupby(['platform','subject']).choice.transform(f)
data.loc[data.trial==1, 'stay'] = np.nan
## Compute win-stay rate.
gb = data.query('prev_win==1').groupby(['platform','subject']).stay.mean().reset_index()
gb = gb.rename(columns={'stay':'ws'})
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.4 Lose-Shift Rates.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.4 Computing lose-shift rates.')
## Compute lose-shift rate.
gb = data.query('prev_win==0').groupby(['platform','subject']).stay.mean().reset_index()
gb = gb.rename(columns={'stay':'ls'})
gb['ls'] = 1 - gb['ls']
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.5 Perseveration Errors.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.5 Computing perseveration errors.')
## Define trial number within each block.
data['exposure'] = data.groupby(['subject','block']).trial.transform(lambda x: np.arange(x.size)+1)
## Define perseveration errors.
data['perseveration'] = data.groupby('subject').correct.transform(lambda x: np.roll(x, 15))
data['perseveration'] = (data['perseveration'] == data['choice']).astype(int)
data.loc[data.block==1,'perseveration'] = np.nan
## Compute perseveration errors within participants.
gb = data.groupby(['platform','subject']).perseveration.mean().reset_index()
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.6 Model-based correlates.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.6 Extracting Stan parameters.')
## Load StanFit.
df = read_csv('stan_results/rstd.tsv.gz', sep='\t', compression='gzip')
## Extract parameters of interest.
beta = df.filter(regex='beta').median().values
eta_p = df.filter(regex='^eta_p').median().values
eta_n = df.filter(regex='^eta_n').median().values
kappa = (eta_p - eta_n) / (eta_p + eta_n)
## Convert to DataFrame.
params = DataFrame(np.column_stack([beta,eta_p,eta_n,kappa]),
columns=['beta','eta_p','eta_n','kappa'])
## Append metadata.
params['platform'] = corr.sort_values('subject').platform.values
params['subject'] = corr.sort_values('subject').subject.values
## Merge with correlates.
corr = corr.merge(params)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Save data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('Saving data.')
corr.to_csv(os.path.join(DATA_DIR, 'correlates.csv'), index=False) | 32.763359 | 99 | 0.581314 | import os
import numpy as np
from pandas import DataFrame, read_csv
from os.path import dirname, join
ROOT_DIR = dirname(dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(ROOT_DIR, 'data')
STAN_DIR = os.path.join(ROOT_DIR, 'stan_results')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load metadata and restrict participants.
metadata = read_csv(os.path.join('data','metadata.csv'))
metadata = metadata.loc[metadata['prev_complete']=="No",['platform','subject']].copy()
## Load task data and restrict participants.
data = read_csv(os.path.join('data','data.csv'))
data = data.loc[data.subject.isin(metadata.subject)]
## Initialize correlates DataFrame.
corr = metadata[['platform','subject']].copy()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.1 Accuracy.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.1 Computing accuracy.')
## Compute accuracy.
gb = data.groupby(['platform','subject']).accuracy.mean().reset_index()
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.2 Total Points.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.2 Computing points total.')
## Compute points.
gb = data.groupby(['platform','subject']).outcome.sum().reset_index()
gb = gb.rename(columns={'outcome':'points'})
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.3 Win-Stay Rates.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.3 Computing win-stay rates.')
## Determine previous win trials.
f = lambda x: np.roll(x, 1)
data['prev_win'] = data.groupby(['platform','subject']).outcome.transform(f)
data.loc[data.trial==1, 'prev_win'] = np.nan
## Determine stay trials.
f = lambda x: (x == np.roll(x,1)).astype(int)
data['stay'] = data.groupby(['platform','subject']).choice.transform(f)
data.loc[data.trial==1, 'stay'] = np.nan
## Compute win-stay rate.
gb = data.query('prev_win==1').groupby(['platform','subject']).stay.mean().reset_index()
gb = gb.rename(columns={'stay':'ws'})
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.4 Lose-Shift Rates.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.4 Computing lose-shift rates.')
## Compute lose-shift rate.
gb = data.query('prev_win==0').groupby(['platform','subject']).stay.mean().reset_index()
gb = gb.rename(columns={'stay':'ls'})
gb['ls'] = 1 - gb['ls']
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.5 Perseveration Errors.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.5 Computing perseveration errors.')
## Define trial number within each block.
data['exposure'] = data.groupby(['subject','block']).trial.transform(lambda x: np.arange(x.size)+1)
## Define perseveration errors.
data['perseveration'] = data.groupby('subject').correct.transform(lambda x: np.roll(x, 15))
data['perseveration'] = (data['perseveration'] == data['choice']).astype(int)
data.loc[data.block==1,'perseveration'] = np.nan
## Compute perseveration errors within participants.
gb = data.groupby(['platform','subject']).perseveration.mean().reset_index()
## Merge with correlates.
corr = corr.merge(gb)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### 1.6 Model-based correlates.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('1.6 Extracting Stan parameters.')
## Load StanFit.
df = read_csv('stan_results/rstd.tsv.gz', sep='\t', compression='gzip')
## Extract parameters of interest.
beta = df.filter(regex='beta').median().values
eta_p = df.filter(regex='^eta_p').median().values
eta_n = df.filter(regex='^eta_n').median().values
kappa = (eta_p - eta_n) / (eta_p + eta_n)
## Convert to DataFrame.
params = DataFrame(np.column_stack([beta,eta_p,eta_n,kappa]),
columns=['beta','eta_p','eta_n','kappa'])
## Append metadata.
params['platform'] = corr.sort_values('subject').platform.values
params['subject'] = corr.sort_values('subject').subject.values
## Merge with correlates.
corr = corr.merge(params)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Save data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
print('Saving data.')
corr.to_csv(os.path.join(DATA_DIR, 'correlates.csv'), index=False) | 0 | 0 | 0 |
31e0456a58cf0a2bbbbc82847cdef2f7f4d69748 | 1,725 | py | Python | alipay/aop/api/domain/MEquityDisplayInfo.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/MEquityDisplayInfo.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/MEquityDisplayInfo.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
| 24.295775 | 71 | 0.547826 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class MEquityDisplayInfo(object):
    """Display metadata (brand name, logo, name) for an equity item."""

    # Field names exposed through the property accessors below.
    _FIELDS = ('brand_name', 'logo', 'name')

    def __init__(self):
        self._brand_name = None
        self._logo = None
        self._name = None

    @property
    def brand_name(self):
        return self._brand_name

    @brand_name.setter
    def brand_name(self, value):
        self._brand_name = value

    @property
    def logo(self):
        return self._logo

    @logo.setter
    def logo(self, value):
        self._logo = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            # Nested API objects serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for empty input."""
        if not d:
            return None
        instance = MEquityDisplayInfo()
        for field in MEquityDisplayInfo._FIELDS:
            if field in d:
                setattr(instance, field, d[field])
        return instance
| 1,202 | 371 | 23 |
48e0e62b1b6cf256008752c61bde5f74a8346072 | 3,400 | py | Python | strawberry/schema/types/union.py | zefciu/strawberry | a660fccadcd2e3ff4c1be985708a37eb6164c02c | [
"MIT"
] | null | null | null | strawberry/schema/types/union.py | zefciu/strawberry | a660fccadcd2e3ff4c1be985708a37eb6164c02c | [
"MIT"
] | 1 | 2020-12-10T14:57:49.000Z | 2020-12-10T14:57:49.000Z | strawberry/schema/types/union.py | zefciu/strawberry | a660fccadcd2e3ff4c1be985708a37eb6164c02c | [
"MIT"
] | null | null | null | import typing
from graphql import GraphQLUnionType
from strawberry.exceptions import UnallowedReturnTypeForUnion, WrongReturnTypeForUnion
from strawberry.type import TypeDefinition
from strawberry.union import UnionDefinition
from strawberry.utils.typing import (
get_list_annotation,
is_generic,
is_list,
is_type_var,
)
from .types import TypeMap
| 33.333333 | 88 | 0.693824 | import typing
from graphql import GraphQLUnionType
from strawberry.exceptions import UnallowedReturnTypeForUnion, WrongReturnTypeForUnion
from strawberry.type import TypeDefinition
from strawberry.union import UnionDefinition
from strawberry.utils.typing import (
get_list_annotation,
is_generic,
is_list,
is_type_var,
)
from .types import TypeMap
def _get_type_mapping_from_actual_type(root) -> typing.Dict[typing.Any, typing.Type]:
    """Return a mapping from each TypeVar used in *root*'s annotations to the
    concrete runtime type found on the corresponding attribute of *root*."""
    # we map ~T to the actual type of root
    type_var_to_actual_type = {}
    for field_name, annotation in root.__annotations__.items():
        # when we have a list we want to get the type of the elements contained in the
        # list, to do so we currently only get the first time (if the list is not empty)
        # this might break in more complex cases, but should suffice for now.
        if is_list(annotation):
            annotation = get_list_annotation(annotation)
            if is_type_var(annotation):
                values = getattr(root, field_name)
                if values:
                    type_var_to_actual_type[annotation] = type(values[0])
        elif is_type_var(annotation):
            type_var_to_actual_type[annotation] = type(getattr(root, field_name))
        elif is_generic(annotation):
            # Nested generic fields are resolved recursively and merged in.
            type_var_to_actual_type.update(
                _get_type_mapping_from_actual_type(getattr(root, field_name))
            )
    return type_var_to_actual_type
def _find_type_for_generic_union(root: typing.Any) -> TypeDefinition:
    """Resolve the concrete TypeDefinition for *root*, an instance of a
    generic strawberry type returned from a union field.

    Raises ValueError when no registered copy of the generic matches the
    actual runtime types of *root*'s fields.
    """
    # this is an ordered tuple of the type vars for the generic class, so for
    # typing.Generic[T, V] it would return (T, V)
    type_params = root.__parameters__
    mapping = _get_type_mapping_from_actual_type(root)
    if not mapping:
        # if we weren't able to find a mapping, i.e. when returning an empty
        # list for a generic type, then we fall back to returning the first
        # copy. This is a very simplistic heuristic and is bound to break with
        # complex use cases. We can improve it later if this becomes an issue.
        return next(iter(root._copies.values()))._type_definition
    actual_types = tuple(mapping[param] for param in type_params)
    # Renamed from ``type`` to avoid shadowing the builtin.
    matched_copy = root._copies.get(actual_types)
    if matched_copy is None:
        raise ValueError(
            f"Unable to find type for {root.__class__} and {actual_types}")
    return matched_copy._type_definition
def get_union_type(
    union_definition: UnionDefinition, type_map: TypeMap,
) -> GraphQLUnionType:
    """Build the GraphQLUnionType for *union_definition*.

    The attached resolver validates at runtime that the object returned from
    a union field is a strawberry type belonging to this union, resolving
    generic types to the concrete copy matching the instance.
    """
    # Local import — presumably to break an import cycle with .object_type;
    # confirm before moving it to module level.
    from .object_type import get_object_type
    def _resolve_type(root, info, _type):
        # Only strawberry types (which carry _type_definition) are valid
        # return values for a union field.
        if not hasattr(root, "_type_definition"):
            raise WrongReturnTypeForUnion(info.field_name, str(type(root)))
        type_definition = root._type_definition
        if is_generic(type(root)):
            type_definition = _find_type_for_generic_union(root)
        returned_type = type_map[type_definition.name].implementation
        # The resolved GraphQL type must be one of the union's member types.
        if returned_type not in _type.types:
            raise UnallowedReturnTypeForUnion(
                info.field_name, str(type(root)), _type.types
            )
        return returned_type
    types = union_definition.types  # type: ignore
    return GraphQLUnionType(
        union_definition.name,
        [get_object_type(type, type_map) for type in types],
        description=union_definition.description,
        resolve_type=_resolve_type,
    )
| 2,962 | 0 | 69 |
485f19d616b485043deed58bf0b22b015741b482 | 5,419 | py | Python | pystol-operator/pystol/cleaner.py | ccamacho/pystol | 7925461c324975c8334009e3b878debcf056698b | [
"Apache-2.0"
] | null | null | null | pystol-operator/pystol/cleaner.py | ccamacho/pystol | 7925461c324975c8334009e3b878debcf056698b | [
"Apache-2.0"
] | null | null | null | pystol-operator/pystol/cleaner.py | ccamacho/pystol | 7925461c324975c8334009e3b878debcf056698b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright 2019 Pystol (pystol.org).
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import kubernetes
from kubernetes.client.rest import ApiException
from pystol import __version__
from pystol.operator import load_kubernetes_config
pystol_version = __version__
def _report_delete(label, delete_call, *args, **kwargs):
    """Run one Kubernetes delete call with the standard purge options.

    Prints a success line on completion; on ApiException (e.g. the resource
    is already gone) prints a warning instead of raising, so the purge can
    continue with the remaining resources.

    :param label: human-readable resource label used in the messages.
    :param delete_call: bound Kubernetes client delete method.
    :param args: positional arguments for the delete call (resource name).
    :param kwargs: extra keyword arguments (e.g. namespace=...).
    """
    try:
        delete_call(*args,
                    pretty='true',
                    orphan_dependents=True,
                    propagation_policy='Background',
                    body=kubernetes.client.V1DeleteOptions(),
                    **kwargs)
        print("    " + u"\U0001F9F9" + " " + label + " removed.")
    except ApiException:
        print("    " + u"\u2757" + " " + label + " removing warning.")
        print("        Can't remove it, maybe it's gone...")


def purge_pystol():
    """
    Purge Pystol from the cluster.
    This is a main component of the input for the controller
    """
    load_kubernetes_config()
    # Namespaced resources go through the core API, cluster-wide RBAC
    # objects through the RBAC API, and the CRD through the apiextensions
    # API.  Each removal is best-effort (see _report_delete), matching the
    # original six copy-pasted try/except blocks.
    v1 = kubernetes.client.CoreV1Api()
    _report_delete("Namespace", v1.delete_namespace, 'pystol')
    _report_delete("Config map", v1.delete_namespaced_config_map,
                   'pystol-config', namespace='pystol')
    _report_delete("Service account", v1.delete_namespaced_service_account,
                   'pystol', namespace='pystol')
    rbac = kubernetes.client.RbacAuthorizationV1Api()
    _report_delete("Cluster role", rbac.delete_cluster_role, 'pystol')
    _report_delete("Cluster role binding",
                   rbac.delete_cluster_role_binding, 'pystol')
    ext = kubernetes.client.ApiextensionsV1beta1Api()
    _report_delete("CRD", ext.delete_custom_resource_definition,
                   'pystolactions.pystol.org')
| 38.707143 | 79 | 0.553792 | #!/usr/bin/env python
"""
Copyright 2019 Pystol (pystol.org).
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import kubernetes
from kubernetes.client.rest import ApiException
from pystol import __version__
from pystol.operator import load_kubernetes_config
pystol_version = __version__
def purge_pystol():
    """
    Purge Pystol from the cluster.
    This is a main component of the input for the controller
    """
    load_kubernetes_config()
    core_api = kubernetes.client.CoreV1Api()
    rbac_api = kubernetes.client.RbacAuthorizationV1Api()
    ext_api = kubernetes.client.ApiextensionsV1beta1Api()
    # (label, bound delete call, resource name, extra keyword arguments)
    targets = [
        ("Namespace", core_api.delete_namespace, 'pystol', {}),
        ("Config map", core_api.delete_namespaced_config_map,
         'pystol-config', {'namespace': 'pystol'}),
        ("Service account", core_api.delete_namespaced_service_account,
         'pystol', {'namespace': 'pystol'}),
        ("Cluster role", rbac_api.delete_cluster_role, 'pystol', {}),
        ("Cluster role binding", rbac_api.delete_cluster_role_binding,
         'pystol', {}),
        ("CRD", ext_api.delete_custom_resource_definition,
         'pystolactions.pystol.org', {}),
    ]
    # Each removal is best-effort: an ApiException only produces a warning
    # so that the remaining resources are still attempted.
    for label, delete_call, resource_name, extra_kwargs in targets:
        try:
            delete_call(resource_name,
                        pretty='true',
                        orphan_dependents=True,
                        propagation_policy='Background',
                        body=kubernetes.client.V1DeleteOptions(),
                        **extra_kwargs)
            print("    " + u"\U0001F9F9" + " " + label + " removed.")
        except ApiException:
            print("    " + u"\u2757" + " " + label + " removing warning.")
            print("        Can't remove it, maybe it's gone...")
| 0 | 0 | 0 |
ea2ed8e08645dde0ae2ca5eb2c3ab6e2ff863d77 | 1,297 | py | Python | tasks/views.py | ybjeon01/django-todolist | 0ccbd2edd350e44bc485dfebef3a0d796566077a | [
"MIT"
] | null | null | null | tasks/views.py | ybjeon01/django-todolist | 0ccbd2edd350e44bc485dfebef3a0d796566077a | [
"MIT"
] | null | null | null | tasks/views.py | ybjeon01/django-todolist | 0ccbd2edd350e44bc485dfebef3a0d796566077a | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Task
from .mixins import UserTaskMixin
| 28.822222 | 89 | 0.735544 | from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Task
from .mixins import UserTaskMixin
class TaskList(LoginRequiredMixin, UserTaskMixin, ListView):
    """List tasks, exposing ``incomplete_num`` (count of tasks whose
    ``complete`` flag is False) to the template context."""
    model = Task

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        pending_tasks = self.object_list.filter(complete=False)
        context['incomplete_num'] = len(pending_tasks)
        return context
class TaskDetailView(LoginRequiredMixin, UserTaskMixin, DetailView):
    """Display a single Task (login required; UserTaskMixin presumably
    restricts access to the requesting user's tasks — see .mixins)."""
    model = Task
class TaskCreateView(LoginRequiredMixin, UserTaskMixin, CreateView):
    """Create a Task owned by the requesting user; redirect to the list."""
    model = Task
    fields = ['title', 'description']
    success_url = reverse_lazy('tasks:list')
    def form_valid(self, form):
        """Attach the logged-in user before saving the new task."""
        # The owner comes from the session, never from submitted form data.
        form.instance.user = self.request.user
        return super().form_valid(form)
class TaskUpdateView(LoginRequiredMixin, UserTaskMixin, UpdateView):
    """Edit a task's title/description/completion; redirect to the list."""
    model = Task
    fields = ['title', 'description', 'complete']
    success_url = reverse_lazy('tasks:list')
class TaskDeleteView(LoginRequiredMixin, UserTaskMixin, DeleteView):
    """Delete a task, then redirect to the task list."""
    model = Task
    success_url = reverse_lazy('tasks:list')
| 306 | 592 | 115 |
2d172cc9710b47714af331b909b9f1d7fe6672ae | 1,488 | py | Python | tests/parsers/spotlight_storedb.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 1 | 2020-12-04T10:26:34.000Z | 2020-12-04T10:26:34.000Z | tests/parsers/spotlight_storedb.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | tests/parsers/spotlight_storedb.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Apple Spotlight store database parser."""
from __future__ import unicode_literals
import unittest
from plaso.lib import definitions
from plaso.parsers import spotlight_storedb
from tests.parsers import test_lib
class SpotlightStoreDatabaseParserTest(test_lib.ParserTestCase):
  """Tests for the Apple Spotlight store database parser."""
  def testParse(self):
    """Tests the Parse function."""
    parser = spotlight_storedb.SpotlightStoreDatabaseParser()
    storage_writer = self._ParseFile(['store.db'], parser)
    # The fixture is expected to parse without warnings into a known total
    # number of events.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 1159238)
    events = list(storage_writer.GetEvents())
    # Spot-check one event (index 12) against known values from the fixture.
    expected_event_values = {
        'file_name': 'CIJCanoScan9000F.icns',
        'file_system_identifier': 41322,
        'kind': 'Apple icon image',
        'parent_file_system_identifier': 41320,
        'timestamp': '2013-06-04 20:53:10.000000',
        'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
    self.CheckEventValues(storage_writer, events[12], expected_event_values)
    # The formatted long and short messages for the same event.
    expected_message = 'CIJCanoScan9000F.icns com.apple.icns'
    expected_short_message = 'CIJCanoScan9000F.icns'
    event_data = self._GetEventDataOfEvent(storage_writer, events[12])
    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| 31 | 76 | 0.744624 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Apple Spotlight store database parser."""
from __future__ import unicode_literals
import unittest
from plaso.lib import definitions
from plaso.parsers import spotlight_storedb
from tests.parsers import test_lib
class SpotlightStoreDatabaseParserTest(test_lib.ParserTestCase):
  """Tests for the Apple Spotlight store database parser."""

  def testParse(self):
    """Tests the Parse function."""
    storedb_parser = spotlight_storedb.SpotlightStoreDatabaseParser()
    storage_writer = self._ParseFile(['store.db'], storedb_parser)
    # The fixture parses cleanly into a known number of events.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 1159238)
    events = list(storage_writer.GetEvents())
    sample_event = events[12]
    # Spot-check one event against known values from the fixture.
    expected_event_values = {
        'file_name': 'CIJCanoScan9000F.icns',
        'file_system_identifier': 41322,
        'kind': 'Apple icon image',
        'parent_file_system_identifier': 41320,
        'timestamp': '2013-06-04 20:53:10.000000',
        'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
    self.CheckEventValues(storage_writer, sample_event, expected_event_values)
    event_data = self._GetEventDataOfEvent(storage_writer, sample_event)
    self._TestGetMessageStrings(
        event_data,
        'CIJCanoScan9000F.icns com.apple.icns',
        'CIJCanoScan9000F.icns')
if __name__ == '__main__':
unittest.main()
| 0 | 0 | 0 |
89c0617aa85a2a93b6d6687a716980f3f67234fa | 1,030 | py | Python | src/data/create_dataset.py | petrkokl/chess_predict | 7f0871f2a6dc295692c774af3418dc6d1316e01e | [
"MIT"
] | 1 | 2021-09-15T19:18:03.000Z | 2021-09-15T19:18:03.000Z | src/data/create_dataset.py | petrkokl/chess_predict | 7f0871f2a6dc295692c774af3418dc6d1316e01e | [
"MIT"
] | null | null | null | src/data/create_dataset.py | petrkokl/chess_predict | 7f0871f2a6dc295692c774af3418dc6d1316e01e | [
"MIT"
] | null | null | null | from pathlib import Path
import chess.pgn
import pandas as pd
from datetime import date
# Folder layout: raw *.pgn inputs live in data/raw, the combined CSV goes to
# data/interim (paths are relative to this script's working directory).
csv_datasets_folder = Path('../../data/interim')
raw_data_folder = Path('../../data/raw')
pgn_files = sorted(raw_data_folder.glob('*.pgn'))
datasets = []
for path in pgn_files:
    print(f'parsing {path}')
    games = []
    with open(path) as pgn_file:
        # read_headers() consumes one game per call and returns None at EOF.
        # Fix: pre-bind ``headers`` so the except branch below cannot raise
        # NameError when the very first read fails with UnicodeDecodeError.
        headers = None
        while True:
            try:
                headers = chess.pgn.read_headers(pgn_file)
            except UnicodeDecodeError:
                print(
                    f'UnicodeDecodeError occurs while parsing {path} after the game {headers}. Going to the next game...')
                continue
            if headers is None:
                break
            games.append(pd.Series(headers))
    df = pd.DataFrame(games)
    datasets.append(df)
# One row per game; concatenate all files into one date-stamped CSV.
final_df = pd.concat(datasets)
today_str = date.today().strftime('%d_%m_%Y')
csv_file_name = csv_datasets_folder / f"{today_str}.csv"
final_df.to_csv(csv_file_name)
print(f'csv file {csv_file_name} was saved.')
| 27.837838 | 122 | 0.628155 | from pathlib import Path
import chess.pgn
import pandas as pd
from datetime import date
# Folder layout: raw *.pgn inputs live in data/raw, the combined CSV goes to
# data/interim (paths are relative to this script's working directory).
csv_datasets_folder = Path('../../data/interim')
raw_data_folder = Path('../../data/raw')
pgn_files = sorted(raw_data_folder.glob('*.pgn'))
datasets = []
for path in pgn_files:
    print(f'parsing {path}')
    games = []
    with open(path) as pgn_file:
        # read_headers() consumes one game per call and returns None at EOF.
        while pgn_file:
            try:
                headers = chess.pgn.read_headers(pgn_file)
            except UnicodeDecodeError:
                # NOTE(review): if the very first read raises, ``headers`` is
                # still unbound here and this print raises NameError — confirm.
                print(
                    f'UnicodeDecodeError occurs while parsing {path} after the game {headers}. Going to the next game...')
                continue
            if headers is None:
                break
            games.append(pd.Series(headers))
    df = pd.DataFrame(games)
    datasets.append(df)
# One row per game; concatenate all files into one date-stamped CSV.
final_df = pd.concat(datasets)
today_str = date.today().strftime('%d_%m_%Y')
csv_file_name = csv_datasets_folder / f"{today_str}.csv"
final_df.to_csv(csv_file_name)
print(f'csv file {csv_file_name} was saved.')
| 0 | 0 | 0 |
984fcd0b3fd1f6b6f0cb0f1b719af66ef56c5429 | 1,844 | py | Python | 5/code.py | aapalo/aoc2020 | 72f74345512c6c1ef275352b798196f1589bc7c3 | [
"MIT"
] | null | null | null | 5/code.py | aapalo/aoc2020 | 72f74345512c6c1ef275352b798196f1589bc7c3 | [
"MIT"
] | null | null | null | 5/code.py | aapalo/aoc2020 | 72f74345512c6c1ef275352b798196f1589bc7c3 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import time
''' ####### '''
# Puzzle configuration; tweak these before running.
date = 5
dev = 0 # extra prints
part = 3 # 1,2, or 3 for both
samp = 0 # 0 or 1
''' ####### '''
''' ####### '''
# Time the whole run, including file I/O.
time0 = time.time()
if samp == 1:
    filename = "/sample.txt"
else:
    filename = "/input.txt"
# The input may live either under the day's folder ("5/input.txt") or next
# to this script ("./input.txt").
try:
    with open(str(date) + filename,"r") as f:
        t = f.readlines()
except FileNotFoundError:
    with open("." + filename,"r") as f:
        t = f.readlines()
# Normalize lines: strip newlines and collapse double spaces.
t = [(x.strip().replace('  ',' ')) for x in t]
# NOTE(review): day()/day2() are not defined in this copy of the file, so
# running any part here would raise NameError — confirm against the full file.
if part == 1:
    print("Part 1: ", day(t))
elif part == 2:
    print("Part 2: ", day2(t))
elif part == 3:
    #run both
    print("Part 1: ", day(t))
    print("Part 2: ", day2(t))
tdif = time.time() - time0
print("Elapsed time: {:.4f} s".format(tdif))
| 20.488889 | 54 | 0.462039 | #!/usr/bin/python3
import time
''' ####### '''
date = 5
dev = 0 # extra prints
part = 3 # 1,2, or 3 for both
samp = 0 # 0 or 1
''' ####### '''
def splitrange(full, char):
    # F (front) and L (left) select the first half of the current range;
    # any other character (B or R) selects the second half.
    midpoint = len(full) // 2
    if char in "LF":
        return full[:midpoint]
    return full[midpoint:]
def day(te):
    """Return the highest seat ID among the boarding passes in *te*.

    A pass is 7 row characters (F=0, B=1) followed by 3 column characters
    (L=0, R=1); since ID = row * 8 + column, the whole 10-character code is
    just the ID written in binary.

    Fixes: the original accumulated IDs with ``ids.append`` on a ``set``,
    which raises AttributeError.  Codes are stripped first so trailing
    newlines cannot corrupt the decoding.
    """
    table = str.maketrans("FBLR", "0101")
    ids = {int(code.strip().translate(table), 2) for code in te}
    return max(ids)
def day2(te):
    """Return the missing seat ID whose two neighbours are both present.

    Seat IDs are decoded exactly as in :func:`day` (the 10-character code is
    the ID in binary, F/L=0, B/R=1).  Returns 0 when no such gap exists.

    Fixes: the original scanned ``range(1, max(ids) - 1)``, which skips
    candidate IDs at the top of the range and pointlessly scans IDs below
    the lowest seat seen.
    """
    table = str.maketrans("FBLR", "0101")
    ids = {int(code.strip().translate(table), 2) for code in te}
    for seat in range(min(ids) + 1, max(ids)):
        # The missing seat is the only gap with both neighbours occupied.
        if seat not in ids and seat - 1 in ids and seat + 1 in ids:
            return seat
    return 0
''' ####### '''
# Time the whole run, including file I/O.
time0 = time.time()
if samp == 1:
    filename = "/sample.txt"
else:
    filename = "/input.txt"
# The input may live either under the day's folder ("5/input.txt") or next
# to this script ("./input.txt").
try:
    with open(str(date) + filename,"r") as f:
        t = f.readlines()
except FileNotFoundError:
    with open("." + filename,"r") as f:
        t = f.readlines()
# Normalize lines: strip newlines and collapse double spaces.
t = [(x.strip().replace('  ',' ')) for x in t]
if part == 1:
    print("Part 1: ", day(t))
elif part == 2:
    print("Part 2: ", day2(t))
elif part == 3:
    #run both
    print("Part 1: ", day(t))
    print("Part 2: ", day2(t))
tdif = time.time() - time0
print("Elapsed time: {:.4f} s".format(tdif))
| 1,014 | 0 | 69 |
0d581ab331b5fc7bf7ffc15b5ed44b7c01ab8101 | 2,170 | py | Python | hera_pspec/tests/test_version.py | adeliegorce/hera_pspec | 4d2fe17e2015b02b16683b29e2fd5530066dfd7b | [
"BSD-3-Clause"
] | 10 | 2018-01-28T06:59:22.000Z | 2021-02-23T19:23:09.000Z | hera_pspec/tests/test_version.py | adeliegorce/hera_pspec | 4d2fe17e2015b02b16683b29e2fd5530066dfd7b | [
"BSD-3-Clause"
] | 314 | 2017-06-30T04:10:58.000Z | 2022-03-18T16:34:37.000Z | hera_pspec/tests/test_version.py | adeliegorce/hera_pspec | 4d2fe17e2015b02b16683b29e2fd5530066dfd7b | [
"BSD-3-Clause"
] | 3 | 2017-10-26T00:21:01.000Z | 2022-01-21T20:59:18.000Z | """Tests for version.py."""
import os
import sys
import pytest
try:
# Python 2
from cStringIO import StringIO
except:
# Python 3
from io import StringIO
import hera_pspec
from .. import version
import json
| 29.324324 | 89 | 0.607834 | """Tests for version.py."""
import os
import sys
import pytest
try:
# Python 2
from cStringIO import StringIO
except:
# Python 3
from io import StringIO
import hera_pspec
from .. import version
import json
def test_main():
    """version.main() prints the construct_version_info() fields to stdout."""
    version_info = version.construct_version_info()
    saved_stdout = sys.stdout
    try:
        # Capture stdout so the printed report can be compared verbatim.
        out = StringIO()
        sys.stdout = out
        hera_pspec.version.main()
        output = out.getvalue()
        assert (output == 'Version = {v}\ngit origin = {o}\n'
                          'git branch = {b}\ngit description = {d}\n'
                          .format(v=version_info['version'],
                                  o=version_info['git_origin'],
                                  b=version_info['git_branch'],
                                  d=version_info['git_description']))
    finally:
        sys.stdout = saved_stdout
    # Test history string function
    history = hera_pspec.version.history_string()
def test_get_gitinfo_file():
    """_get_gitinfo_file() returns the four fields stored in GIT_INFO.

    If no GIT_INFO file exists yet, a temporary one is synthesized from
    construct_version_info() and removed again after the check.
    """
    # Renamed from ``dir`` to avoid shadowing the builtin.
    pspec_dir = version.hera_pspec_dir
    git_file = os.path.join(pspec_dir, 'GIT_INFO')
    if not os.path.exists(git_file):
        # write a file to read in
        temp_git_file = os.path.join(pspec_dir, 'GIT_INFO')
        version_info = version.construct_version_info()
        data = [version_info['git_origin'], version_info['git_origin'],
                version_info['git_origin'], version_info['git_origin']]
        with open(temp_git_file, 'w') as outfile:
            json.dump(data, outfile)
        git_file = temp_git_file
    with open(git_file) as data_file:
        data = [version._unicode_to_str(x)
                for x in json.loads(data_file.read().strip())]
    # GIT_INFO stores [origin, hash, description, branch] in that order.
    git_origin, git_hash, git_description, git_branch = data
    test_file_info = {
        'git_origin': git_origin, 'git_hash': git_hash,
        'git_description': git_description, 'git_branch': git_branch
    }
    if 'temp_git_file' in locals():
        file_info = version._get_gitinfo_file(git_file=temp_git_file)
        os.remove(temp_git_file)
    else:
        file_info = version._get_gitinfo_file()
    assert file_info == test_file_info
| 1,899 | 0 | 46 |
6aabcb707e3f6937237b6f531ea09ed598489a2e | 5,784 | py | Python | murano_tempest_tests/tests/api/application_catalog/test_environments.py | zhur0ng/murano-tempest-plugin | c70cda4dc7b8208252e9741a96acba9fb6a5c6e9 | [
"Apache-2.0"
] | 6 | 2017-10-31T10:37:17.000Z | 2019-01-28T22:05:05.000Z | murano_tempest_tests/tests/api/application_catalog/test_environments.py | zhur0ng/murano-tempest-plugin | c70cda4dc7b8208252e9741a96acba9fb6a5c6e9 | [
"Apache-2.0"
] | 1 | 2018-08-20T07:39:23.000Z | 2018-08-20T07:39:23.000Z | murano_tempest_tests/tests/api/application_catalog/test_environments.py | zhur0ng/murano-tempest-plugin | c70cda4dc7b8208252e9741a96acba9fb6a5c6e9 | [
"Apache-2.0"
] | 2 | 2018-01-11T05:08:35.000Z | 2018-08-20T07:32:33.000Z | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from murano_tempest_tests.tests.api.application_catalog import base
from murano_tempest_tests import utils
| 44.837209 | 78 | 0.678596 | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from murano_tempest_tests.tests.api.application_catalog import base
from murano_tempest_tests import utils
class TestEnvironments(base.BaseApplicationCatalogTest):
    """API tests for environment CRUD and environment-model operations."""

    @classmethod
    def resource_setup(cls):
        super(TestEnvironments, cls).resource_setup()
        environment_name = utils.generate_name(cls.__name__)
        cls.environment = cls.application_catalog_client.create_environment(
            environment_name)

    @classmethod
    def resource_cleanup(cls):
        cls.application_catalog_client.delete_environment(
            cls.environment['id'])
        super(TestEnvironments, cls).resource_cleanup()

    def _list_environments(self):
        # Shared helper: fetch the current list of environments.
        return self.application_catalog_client.get_environments_list()

    def _check_create_and_delete(self, name):
        # Creating an environment grows the listing by one; deleting it
        # restores the original count.
        initial = self._list_environments()
        environment = self.application_catalog_client.create_environment(name)
        self.assertEqual(name, environment['name'])
        self.assertEqual(len(initial) + 1, len(self._list_environments()))
        self.application_catalog_client.delete_environment(environment['id'])
        self.assertEqual(len(initial), len(self._list_environments()))

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('32f26f2e-6c55-4e83-9d8c-023d86299d3e')
    def test_list_environments(self):
        self.assertIsInstance(self._list_environments(), list)

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('a4c0b2fd-2c1b-473c-80cc-d433ceec4c80')
    def test_create_and_delete_environment(self):
        self._check_create_and_delete(
            utils.generate_name('create_and_delete_env'))

    @decorators.idempotent_id('52a06d5f-69e4-4184-a127-1bb13ce6dc7c')
    def test_create_and_delete_environment_with_unicode_name(self):
        self._check_create_and_delete(u'$yaql \u2665 unicode')

    @decorators.idempotent_id('2b45d30b-3f1d-4482-805e-7cf15d19fe38')
    def test_get_environment(self):
        fetched = self.application_catalog_client.get_environment(
            self.environment['id'])
        self.assertEqual(self.environment['name'], fetched['name'])

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('950f5bc1-3e5c-48d1-8b05-dc33303ce6f3')
    def test_update_environment(self):
        updated = self.application_catalog_client.update_environment(
            self.environment['id'])
        self.assertIsNot(self.environment['name'], updated['name'])

    @decorators.idempotent_id('61001866-e885-4dda-9ac9-5b24c67a0e25')
    def test_get_environment_model(self):
        model = self.application_catalog_client.get_environment_model(
            self.environment['id'])
        self.assertIsInstance(model, dict)
        self.assertIn('defaultNetworks', model)
        self.assertEqual(self.environment['name'], model['name'])
        self.assertEqual(model['?']['type'], "io.murano.Environment")
        net_name = self.application_catalog_client.get_environment_model(
            self.environment['id'],
            path='/defaultNetworks/environment/name')
        self.assertEqual("{0}-network".format(self.environment['name']),
                         net_name)

    @decorators.idempotent_id('23416978-9701-49ff-9bb1-d312292a7f49')
    def test_update_environment_model(self):
        session = self.application_catalog_client.create_session(
            self.environment['id'])
        patch = [{
            "op": "replace",
            "path": "/defaultNetworks/flat",
            "value": True
        }]
        new_model = self.application_catalog_client.update_environment_model(
            self.environment['id'], patch, session['id'])
        self.assertTrue(new_model['defaultNetworks']['flat'])
        value_draft = self.application_catalog_client.get_environment_model(
            self.environment['id'], '/defaultNetworks/flat', session['id'])
        self.assertTrue(value_draft)
        model_current = self.application_catalog_client.get_environment_model(
            self.environment['id'])
        self.assertIsNone(model_current['defaultNetworks']['flat'])
| 4,096 | 907 | 23 |
dc5ea310a762d48e7693fa52f8708b66049c545c | 16,348 | py | Python | system_test/test.py | questdb/c-questdb-client | 0451339e2058c1fe5ff50e871c2935551f9be223 | [
"Apache-2.0"
] | 2 | 2022-03-24T13:08:06.000Z | 2022-03-24T15:47:31.000Z | system_test/test.py | questdb/c-questdb-client | 0451339e2058c1fe5ff50e871c2935551f9be223 | [
"Apache-2.0"
] | 1 | 2022-03-28T12:22:33.000Z | 2022-03-28T12:22:33.000Z | system_test/test.py | questdb/c-questdb-client | 0451339e2058c1fe5ff50e871c2935551f9be223 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
################################################################################
## ___ _ ____ ____
## / _ \ _ _ ___ ___| |_| _ \| __ )
## | | | | | | |/ _ \/ __| __| | | | _ \
## | |_| | |_| | __/\__ \ |_| |_| | |_) |
## \__\_\\__,_|\___||___/\__|____/|____/
##
## Copyright (c) 2014-2019 Appsicle
## Copyright (c) 2019-2022 QuestDB
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
################################################################################
import sys
sys.dont_write_bytecode = True
import datetime
import argparse
import unittest
import questdb_line_sender as qls
import uuid
from fixture import (
Project,
QuestDbFixture,
install_questdb,
list_questdb_releases,
retry)
import urllib.request
import urllib.parse
import json
import subprocess
from collections import namedtuple
QDB_FIXTURE: QuestDbFixture = None
if __name__ == '__main__':
main()
| 34.129436 | 80 | 0.565268 | #!/usr/bin/env python3
################################################################################
## ___ _ ____ ____
## / _ \ _ _ ___ ___| |_| _ \| __ )
## | | | | | | |/ _ \/ __| __| | | | _ \
## | |_| | |_| | __/\__ \ |_| |_| | |_) |
## \__\_\\__,_|\___||___/\__|____/|____/
##
## Copyright (c) 2014-2019 Appsicle
## Copyright (c) 2019-2022 QuestDB
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
################################################################################
import sys
sys.dont_write_bytecode = True
import datetime
import argparse
import unittest
import questdb_line_sender as qls
import uuid
from fixture import (
Project,
QuestDbFixture,
install_questdb,
list_questdb_releases,
retry)
import urllib.request
import urllib.parse
import json
import subprocess
from collections import namedtuple
QDB_FIXTURE: QuestDbFixture = None
class QueryError(Exception):
    """Raised when QuestDB's HTTP ``/exec`` endpoint returns a JSON body
    containing an ``error`` field (e.g. querying a not-yet-created table)."""
def http_sql_query(sql_query):
    """Execute *sql_query* via QuestDB's HTTP ``/exec`` endpoint and return
    the decoded JSON payload.

    Raises ``RuntimeError`` on a non-200 HTTP status, ``QueryError`` when the
    server's JSON reply carries an ``error`` field, and re-raises JSON parse
    failures with the raw response buffer included in the message.
    """
    query_string = urllib.parse.urlencode({'query': sql_query})
    url = 'http://{}:{}/exec?{}'.format(
        QDB_FIXTURE.host, QDB_FIXTURE.http_server_port, query_string)
    resp = urllib.request.urlopen(url, timeout=0.2)
    if resp.status != 200:
        raise RuntimeError(f'Error response {resp.status} from {sql_query!r}')
    buf = resp.read()
    try:
        data = json.loads(buf)
    except json.JSONDecodeError as jde:
        # Include buffer in error message for easier debugging.
        raise json.JSONDecodeError(
            f'Could not parse response: {buf!r}: {jde.msg}',
            jde.doc,
            jde.pos)
    if 'error' in data:
        raise QueryError(data['error'])
    return data
def retry_check_table(table_name, min_rows=1, timeout_sec=5):
    """Poll QuestDB until *table_name* exists and holds at least *min_rows*
    rows; return the full query response.

    The polling loop itself (and the ``TimeoutError`` on expiry) is provided
    by the ``retry`` helper from the fixture module.
    """
    def _probe():
        try:
            resp = http_sql_query(f"select * from '{table_name}'")
        except QueryError:
            # Table not created yet — keep polling.
            return None
        rows = resp.get('dataset')
        if not rows or len(rows) < min_rows:
            return False
        return resp
    return retry(_probe, timeout_sec=timeout_sec)
def ns_to_qdb_date(at_ts_ns):
# We first need to match QuestDB's internal microsecond resolution.
at_ts_us = int(at_ts_ns / 1000.0)
at_ts_sec = at_ts_us / 1000000.0
at_td = datetime.datetime.fromtimestamp(at_ts_sec)
return at_td.isoformat() + 'Z'
class TestLineSender(unittest.TestCase):
    """End-to-end tests for the ILP (InfluxDB line protocol) client.

    Each test sends rows to QuestDB over the ILP TCP endpoint, then verifies
    via the HTTP ``/exec`` endpoint that the data landed with the expected
    column types and values.  All tests rely on the module-level
    ``QDB_FIXTURE`` being populated before unittest runs (see
    ``run_with_existing`` / ``run_with_fixtures``).
    """

    def _mk_linesender(self):
        """Open an ILP connection to the fixture's QuestDB instance."""
        return qls.LineSender(
            QDB_FIXTURE.host,
            QDB_FIXTURE.line_tcp_port)

    def test_insert_three_rows(self):
        """Insert three identical rows covering every supported column type."""
        table_name = uuid.uuid4().hex
        with self._mk_linesender() as sender:
            for _ in range(3):
                (sender
                    .table(table_name)
                    .symbol('name_a', 'val_a')
                    .column('name_b', True)
                    .column('name_c', 42)
                    .column('name_d', 2.5)
                    .column('name_e', 'val_b')
                    .at_now())
            sender.flush()
        resp = retry_check_table(table_name, min_rows=3)
        exp_columns = [
            {'name': 'name_a', 'type': 'SYMBOL'},
            {'name': 'name_b', 'type': 'BOOLEAN'},
            {'name': 'name_c', 'type': 'LONG'},
            {'name': 'name_d', 'type': 'DOUBLE'},
            {'name': 'name_e', 'type': 'STRING'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [  # Comparison excludes timestamp column.
            ['val_a', True, 42, 2.5, 'val_b'],
            ['val_a', True, 42, 2.5, 'val_b'],
            ['val_a', True, 42, 2.5, 'val_b']]
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def test_repeated_symbol_and_column_names(self):
        """Duplicate names within one row: only the first value sticks."""
        if QDB_FIXTURE.version <= (6, 1, 2):
            self.skipTest('No support for duplicate column names.')
        table_name = uuid.uuid4().hex
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .symbol('a', 'A')
                .symbol('a', 'B')
                .column('b', False)
                .column('b', 'C')
                .at_now())
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': 'a', 'type': 'SYMBOL'},
            {'name': 'b', 'type': 'BOOLEAN'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [['A', False]]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def test_same_symbol_and_col_name(self):
        """Same name as both symbol and column: symbol wins."""
        if QDB_FIXTURE.version <= (6, 1, 2):
            self.skipTest('No support for duplicate column names.')
        table_name = uuid.uuid4().hex
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .symbol('a', 'A')
                .column('a', 'B')
                .at_now())
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': 'a', 'type': 'SYMBOL'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [['A']]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def test_single_symbol(self):
        """Minimal row: a single symbol column."""
        table_name = uuid.uuid4().hex
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .symbol('a', 'A')
                .at_now())
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': 'a', 'type': 'SYMBOL'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [['A']]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def test_two_columns(self):
        """String values sent via .column() map to the STRING type."""
        table_name = uuid.uuid4().hex
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .column('a', 'A')
                .column('b', 'B')
                .at_now())
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': 'a', 'type': 'STRING'},
            {'name': 'b', 'type': 'STRING'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [['A', 'B']]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def test_mismatched_types_across_rows(self):
        """A row whose type conflicts with the table schema is dropped."""
        table_name = uuid.uuid4().hex
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .symbol('a', 'A')  # SYMBOL
                .at_now())
            (sender
                .table(table_name)
                .column('a', 'B')  # STRING
                .at_now())
        # We only ever get the first row back.
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': 'a', 'type': 'SYMBOL'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [['A']]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)
        # The second one is dropped and will not appear in results.
        with self.assertRaises(TimeoutError):
            retry_check_table(table_name, min_rows=2, timeout_sec=1)

    def test_at(self):
        """A user-provided timestamp is stored as sent."""
        if QDB_FIXTURE.version <= (6, 0, 7, 1):
            self.skipTest('No support for user-provided timestamps.')
        table_name = uuid.uuid4().hex
        at_ts_ns = 1647357688714369403
        # Fix: use the fixture's host (was hard-coded 'localhost', which
        # broke `--existing` runs against a remote instance).
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .symbol('a', 'A')
                .at(at_ts_ns))
        resp = retry_check_table(table_name)
        exp_dataset = [['A', ns_to_qdb_date(at_ts_ns)]]
        self.assertEqual(resp['dataset'], exp_dataset)

    def test_bad_at(self):
        """A timestamp older than the latest row is dropped (out of order)."""
        if QDB_FIXTURE.version <= (6, 0, 7, 1):
            self.skipTest('No support for user-provided timestamps.')
        table_name = uuid.uuid4().hex
        at_ts_ns1 = 1648032959100000000
        at_ts_ns2 = 1648032958100000000  # A second before `at_ts_ns1`.
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .symbol('a', 'A')
                .at(at_ts_ns1))
            (sender
                .table(table_name)
                .symbol('a', 'B')
                .at(at_ts_ns2))
        resp = retry_check_table(table_name)
        exp_dataset = [['A', ns_to_qdb_date(at_ts_ns1)]]
        self.assertEqual(resp['dataset'], exp_dataset)
        # The second time stamp is dropped and will not appear in results.
        with self.assertRaises(TimeoutError):
            retry_check_table(table_name, min_rows=2, timeout_sec=1)

    def test_underscores(self):
        """Leading/trailing/inner underscores are legal in all identifiers."""
        table_name = f'_{uuid.uuid4().hex}_'
        with self._mk_linesender() as sender:
            (sender
                .table(table_name)
                .symbol('_a_b_c_', 'A')
                .column('_d_e_f_', True)
                .at_now())
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': '_a_b_c_', 'type': 'SYMBOL'},
            {'name': '_d_e_f_', 'type': 'BOOLEAN'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [['A', True]]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def test_funky_chars(self):
        """Multi-byte UTF-8 (emoji) survives as both name and value."""
        if QDB_FIXTURE.version <= (6, 0, 7, 1):
            self.skipTest('No unicode support.')
        table_name = uuid.uuid4().hex
        smilie = b'\xf0\x9f\x98\x81'.decode('utf-8')
        with self._mk_linesender() as sender:
            sender.table(table_name)
            sender.symbol(smilie, smilie)
            # for num in range(1, 32):
            #     char = chr(num)
            #     sender.column(char, char)
            sender.at_now()
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': smilie, 'type': 'SYMBOL'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [[smilie]]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def _test_example(self, bin_name, table_name):
        """Run a bundled example binary against the fixture, then check the
        row it is documented to insert."""
        # Call the example program.
        proj = Project()
        ext = '.exe' if sys.platform == 'win32' else ''
        bin_path = next(proj.build_dir.glob(f'**/{bin_name}{ext}'))
        args = [str(bin_path), "localhost", str(QDB_FIXTURE.line_tcp_port)]
        subprocess.check_call(args, cwd=bin_path.parent)
        # Check inserted data.
        resp = retry_check_table(table_name)
        exp_columns = [
            {'name': 'id', 'type': 'SYMBOL'},
            {'name': 'x', 'type': 'DOUBLE'},
            {'name': 'y', 'type': 'DOUBLE'},
            {'name': 'booked', 'type': 'BOOLEAN'},
            {'name': 'passengers', 'type': 'LONG'},
            {'name': 'driver', 'type': 'STRING'},
            {'name': 'timestamp', 'type': 'TIMESTAMP'}]
        self.assertEqual(resp['columns'], exp_columns)
        exp_dataset = [[
            'd6e5fe92-d19f-482a-a97a-c105f547f721',
            30.5,
            -150.25,
            True,
            3,
            'Ranjit Singh']]  # Comparison excludes timestamp column.
        scrubbed_dataset = [row[:-1] for row in resp['dataset']]
        self.assertEqual(scrubbed_dataset, exp_dataset)

    def test_c_example(self):
        """The C example binary inserts its documented row."""
        self._test_example('line_sender_c_example', 'c_cars')

    def test_cpp_example(self):
        """The C++ example binary inserts its documented row."""
        self._test_example('line_sender_cpp_example', 'cpp_cars')
def parse_args():
    """Parse command-line arguments.

    Returns the ``(known, extra)`` pair from ``parse_known_args`` so that any
    unrecognised arguments can later be forwarded to unittest's own parser.
    """
    cli = argparse.ArgumentParser('Run system tests.')
    commands = cli.add_subparsers(dest='command')

    run_cmd = commands.add_parser('run', help='Run tests')
    run_cmd.add_argument(
        '--unittest-help',
        action='store_true',
        help='Show unittest --help')
    # Exactly one way to pick the QuestDB instance(s) under test.
    pick_one = run_cmd.add_mutually_exclusive_group()
    pick_one.add_argument(
        '--last-n',
        type=int,
        help='test against last N versions')
    pick_one.add_argument(
        '--versions',
        type=str,
        nargs='+',
        help='List of versions, e.g. `6.1.2`')
    pick_one.add_argument(
        '--existing',
        type=str,
        metavar='HOST:ILP_PORT:HTTP_PORT',
        help=('Test against existing running instance. ' +
              'e.g. `localhost:9009:9000`'))

    list_cmd = commands.add_parser('list', help='List latest -n releases.')
    list_cmd.set_defaults(command='list')
    list_cmd.add_argument('-n', type=int, default=30, help='number of releases')

    return cli.parse_known_args()
def list(args):
    """Print the ``args.n`` most recent QuestDB release versions.

    NOTE(review): this function shadows the built-in ``list``; renaming it
    would also require updating the call site in ``main``.
    """
    print('List of releases:')
    for vers, _ in list_questdb_releases(args.n or 1):
        print(f'    {vers}')
def run_with_existing(args):
    """Run the suite against an already-running QuestDB instance.

    ``args.existing`` has the form ``HOST:ILP_PORT:HTTP_PORT``.  A stand-in
    object exposing the same attributes as ``QuestDbFixture`` is installed as
    the module-level fixture; the version is faked as very new so no test is
    skipped on version checks.
    """
    global QDB_FIXTURE
    Stub = namedtuple(
        'MockFixture',
        ('host', 'line_tcp_port', 'http_server_port', 'version'))
    host, ilp_port, http_port = args.existing.split(':')
    QDB_FIXTURE = Stub(host, int(ilp_port), int(http_port), (999, 999, 999))
    unittest.main()
def run_with_fixtures(args):
    """Download, start and test one or more QuestDB releases.

    The suite is executed once per selected version; a failing run aborts
    the whole process with exit code 1.  The fixture is always stopped, even
    when tests fail.
    """
    global QDB_FIXTURE
    n_releases = 1
    if getattr(args, 'last_n', None):
        n_releases = args.last_n
    elif getattr(args, 'versions', None):
        n_releases = 30  # Hack, can't test older releases.
    available = dict(list_questdb_releases(n_releases))
    requested = getattr(args, 'versions', None)
    if requested:
        # Narrow down to exactly the versions asked for on the CLI.
        available = {vers: available[vers] for vers in requested}
    for version, download_url in available.items():
        questdb_dir = install_questdb(version, download_url)
        QDB_FIXTURE = QuestDbFixture(questdb_dir)
        try:
            QDB_FIXTURE.start()
            test_prog = unittest.TestProgram(exit=False)
            if not test_prog.result.wasSuccessful():
                sys.exit(1)
        finally:
            QDB_FIXTURE.stop()
def run(args, show_help=False):
    """Dispatch the ``run`` sub-command.

    With *show_help*, re-invoke unittest with ``--help`` appended and stop.
    Otherwise test an existing server when ``--existing`` was given, else
    download and start fixture instances.
    """
    if show_help:
        sys.argv.append('--help')
        unittest.main()
        return
    target = run_with_existing if getattr(args, 'existing', None) \
        else run_with_fixtures
    target(args)
def main():
    """Entry point: parse CLI args and dispatch to ``list`` or ``run``."""
    args, extra_args = parse_args()
    if args.command == 'list':
        list(args)
        return
    # Repackage argv so unittest's own parser sees only the leftover args.
    sys.argv[:] = sys.argv[:1] + extra_args
    run(args, show_help=getattr(args, 'unittest_help', False))
# Only dispatch when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 14,187 | 35 | 630 |
7ba7f2a59fc0d606b52fb95dd2f6f28c51425612 | 533 | py | Python | deepmoji/filter_input.py | minh364/Text-to-Color | 561c44d65c88d951795fa765e2bda404937db3eb | [
"MIT"
] | 14 | 2018-07-26T15:33:51.000Z | 2019-06-13T17:06:01.000Z | deepmoji/filter_input.py | minh364/Text-to-Color | 561c44d65c88d951795fa765e2bda404937db3eb | [
"MIT"
] | 8 | 2020-01-28T23:00:33.000Z | 2022-02-10T00:17:38.000Z | deepmoji/filter_input.py | hon9g/Text-to-Color | 561c44d65c88d951795fa765e2bda404937db3eb | [
"MIT"
] | 3 | 2018-08-01T03:10:46.000Z | 2019-06-13T16:56:51.000Z | from __future__ import print_function, division
import codecs
from emoji import UNICODE_EMOJI
| 31.352941 | 71 | 0.626642 | from __future__ import print_function, division
import codecs
from emoji import UNICODE_EMOJI
def read_english(path="english_words.txt", add_emojis=True):
    """Load the English-word filter set from *path* (one word per line,
    lower-cased), optionally extended with every known emoji so that emoji
    tokens also pass the filter.
    """
    with codecs.open(path, "r", "utf-8") as handle:
        english = {
            word
            for word in (raw.strip().lower().replace('\n', '') for raw in handle)
            if word
        }
    if add_emojis:
        english.update(UNICODE_EMOJI)
    return english
6a2fe064406f98c198734977c2aa2ea0b69bd698 | 13,852 | py | Python | phase1a/case5a/case5a_build_xml.py | pep8speaks/fhr-benchmark-1 | 531d518844de20ca70bd90522c573ead7aa459ce | [
"BSD-3-Clause"
] | 1 | 2020-11-24T09:56:12.000Z | 2020-11-24T09:56:12.000Z | phase1a/case5a/case5a_build_xml.py | pep8speaks/fhr-benchmark-1 | 531d518844de20ca70bd90522c573ead7aa459ce | [
"BSD-3-Clause"
] | null | null | null | phase1a/case5a/case5a_build_xml.py | pep8speaks/fhr-benchmark-1 | 531d518844de20ca70bd90522c573ead7aa459ce | [
"BSD-3-Clause"
] | null | null | null | """
This python script builds the XML files for case5a of the FHR benchmark
materials.xml, geometry.xml, settings.xml, and tallies.xml
"""
###############################################################################
# Python Package Import
###############################################################################
import openmc
import numpy as np
from numpy import sin, cos, tan, pi
import sys
sys.path.insert(1, '../../scripts/')
from phase1a_constants import *
from tallies import *
###############################################################################
# Simulation Input File Parameters
###############################################################################
# OpenMC simulation parameters
batches = 500        # total Monte Carlo batches per run
inactive = 100       # leading batches discarded from statistics (source convergence)
particles = 2000000  # neutron histories per batch
tallies_on = True    # set False to skip generating tallies.xml
###############################################################################
# Exporting to OpenMC materials.xml file
###############################################################################
# Fuel kernel: uranium oxycarbide (U235/U238/O/C) at 1110 K.  The explicit
# volume (kernel volume x particles-per-plank x planks) is needed for
# depletion/normalization; factors presumably map to the benchmark's TRISO
# counts — confirm against the benchmark spec.
uoc_9 = openmc.Material()
uoc_9.set_density('g/cc', 11)
uoc_9.add_nuclide('U235', 2.27325e-3)
uoc_9.add_nuclide('U238', 2.269476e-2)
uoc_9.add_nuclide('O16', 3.561871e-2)
uoc_9.add_nuclide('C0', 9.79714e-3)
uoc_9.temperature = 1110
uoc_9.volume = 4 / 3 * pi * (T_r1 ** 3) * 101 * 210 * 4 * 36
# Porous carbon buffer layer of the TRISO particle.
por_c = openmc.Material()
por_c.set_density('g/cc', 1)
por_c.add_nuclide('C0', 5.013980e-2)
por_c.temperature = 948
# Silicon carbide layer of the TRISO particle.
si_c = openmc.Material()
si_c.set_density('g/cc', 3.2)
si_c.add_nuclide('Si28', 4.431240e-2)
si_c.add_nuclide('Si29', 2.25887e-3)
si_c.add_nuclide('Si30', 1.48990e-3)
si_c.add_nuclide('C0', 4.806117e-2)
si_c.temperature = 948
# Plain graphite (structural block and TRISO PyC layers).
graphite = openmc.Material()
graphite.set_density('g/cc', 1.8)
graphite.add_nuclide('C0', 9.025164e-2)
graphite.temperature = 948
# Plank graphite doped with europium/oxygen impurities.
p_graphite = openmc.Material()
p_graphite.set_density('g/cc', 1.8)
p_graphite.add_nuclide('C0', 9.025164e-2)
p_graphite.add_nuclide('Eu151', 1.533453e-6)
p_graphite.add_nuclide('Eu153', 1.674607e-6)
p_graphite.add_nuclide('O16', 4.812090e-6)
p_graphite.temperature = 948
# Spacer graphite.
s_graphite = openmc.Material()
s_graphite.set_density('g/cc', 1.8)
s_graphite.add_nuclide('C0', 9.025164e-2)
s_graphite.temperature = 948
# Fuel-plank matrix graphite (fills the TRISO lattice between particles).
lm_graphite = openmc.Material()
lm_graphite.set_density('g/cc', 1.8)
lm_graphite.add_nuclide('C0', 9.025164e-2)
lm_graphite.temperature = 948
# FLiBe molten-salt coolant (Li enriched: very little Li6).
flibe = openmc.Material()
flibe.set_density('g/cc', 1.95)
flibe.add_nuclide('Li6', 1.383014e-6)
flibe.add_nuclide('Li7', 2.37132e-2)
flibe.add_nuclide('Be9', 1.18573e-2)
flibe.add_nuclide('F19', 4.74291e-2)
flibe.temperature = 948
# Molybdenum-hafnium-carbide (control-rod material); defined here even
# though no cell in this case is filled with it.
mhc = openmc.Material()
mhc.set_density('g/cc', 10.28)
mhc.add_nuclide('Mo92', 9.328884e-3)
mhc.add_nuclide('Mo94', 5.850533e-3)
mhc.add_nuclide('Mo95', 1.010836e-2)
mhc.add_nuclide('Mo96', 1.061782e-2)
mhc.add_nuclide('Mo97', 6.102080e-3)
mhc.add_nuclide('Mo98', 1.546981e-2)
mhc.add_nuclide('Mo100', 6.205246e-3)
mhc.add_nuclide('Hf174', 6.659530e-7)
mhc.add_nuclide('Hf176', 2.189321e-5)
mhc.add_nuclide('Hf177', 7.741704e-5)
mhc.add_nuclide('Hf178', 1.135450e-4)
mhc.add_nuclide('Hf179', 5.668925e-5)
mhc.add_nuclide('Hf180', 1.460102e-4)
mhc.add_nuclide('C0', 5.154371e-4)
mhc.temperature = 948
mats = openmc.Materials(
    (uoc_9,
     por_c,
     si_c,
     graphite,
     p_graphite,
     lm_graphite,
     flibe,
     mhc,
     s_graphite))
mats.export_to_xml()
###############################################################################
# Exporting to OpenMC geometry.xml file
###############################################################################
# -- Geometry construction ---------------------------------------------------
# Builds the case-5a assembly: a periodic hexagonal graphite prism holding
# fuel planks (TRISO-particle lattices), graphite spacers and control-rod
# slot/arm channels.  `plane`, `region_maker` and the T/V lookup dictionaries
# come from scripts/phase1a_constants.py (star-imported above).  Each later
# feature is carved (&= ~region) out of every cell built before it, so the
# statement order below is load-bearing.
# top and bottom surfaces (dz)
top_surface = openmc.ZPlane(
    z0=T_pitch / 2 + (z_thickness - 1) / 2 * T_pitch, boundary_type='reflective')
bot_surface = openmc.ZPlane(
    z0=-(T_pitch / 2 + (z_thickness - 1) / 2 * T_pitch), boundary_type='reflective')
# Outermost Hexagon
H_m = 1 / tan(pi / 6)
H_1 = openmc.YPlane(0.5 * H_side / tan(pi / 6), 'periodic')
H_2 = plane(-H_m, 0.5 * H_side, 0.5 * H_side / tan(pi / 6), 'periodic')
H_3 = plane(H_m, 0.5 * H_side, -0.5 * H_side / tan(pi / 6), 'periodic')
H_4 = openmc.YPlane(-0.5 * H_side / tan(pi / 6), 'periodic')
H_5 = plane(-H_m, -0.5 * H_side, -0.5 * H_side / tan(pi / 6), 'periodic')
H_6 = plane(H_m, -0.5 * H_side, 0.5 * H_side / tan(pi / 6), 'periodic')
# Pair opposite faces so OpenMC applies periodic boundary conditions.
H_1.periodic_surface = H_4
H_2.periodic_surface = H_5
H_3.periodic_surface = H_6
H_region = -H_1 & +H_4 & -H_2 & +H_3 & +H_5 & -H_6
H_cell = openmc.Cell(fill=graphite)
H_cell.region = H_region & -top_surface & + bot_surface
# Diamond Plank Area
A1_D_cell = openmc.Cell(fill=flibe)
A1_D_cell.region = region_maker('A1', 'D') & -top_surface & + bot_surface
A2_D_cell = openmc.Cell(fill=flibe)
A2_D_cell.region = region_maker('A2', 'D') & -top_surface & + bot_surface
A3_D_cell = openmc.Cell(fill=flibe)
A3_D_cell.region = region_maker('A3', 'D') & -top_surface & + bot_surface
D_regions = A1_D_cell.region | A2_D_cell.region | A3_D_cell.region
D_universe = openmc.Universe(cells=(A1_D_cell, A2_D_cell, A3_D_cell,))
D_areas = openmc.Cell(fill=D_universe, region=D_regions)
H_cell.region &= ~D_regions
# Graphite Planks
all_P_univ = openmc.Universe()
all_P_regions = region_maker('A1', 'P')  # initialize
for area in range(3):
    area_str = 'A{}'.format(area + 1)
    P_region = region_maker(area_str, 'P')
    P_cell = openmc.Cell(fill=p_graphite, region=P_region)
    P_univ = openmc.Universe(cells=(P_cell,))
    # Replicate each area's plank 6 times along the T-translation vector.
    for trans in range(6):
        P_region_new = P_region.translate(
            (trans * T[area_str]['P']['x'], trans * T[area_str]['P']['y'], 0))
        P_cell_new = openmc.Cell(fill=P_univ, region=P_region_new)
        P_cell_new.translation = (
            trans *
            T[area_str]['P']['x'],
            trans *
            T[area_str]['P']['y'],
            0)
        all_P_univ.add_cell(P_cell_new)
        all_P_regions |= P_region_new
        D_areas.region &= ~P_region_new
        H_cell.region &= ~P_region_new
P_areas = openmc.Cell(
    fill=all_P_univ,
    region=all_P_regions & -
    top_surface & +
    bot_surface)
# Triso Particles
spheres = [openmc.Sphere(r=r)
           for r in [T_r1, T_r2, T_r3, T_r4, T_r5]]
# Five concentric TRISO layers: kernel / buffer / IPyC / SiC / OPyC, with
# matrix graphite outside the particle.
triso_cells = [openmc.Cell(fill=uoc_9, region=-spheres[0]),
               openmc.Cell(fill=por_c, region=+spheres[0] & -spheres[1]),
               openmc.Cell(fill=graphite, region=+spheres[1] & -spheres[2]),
               openmc.Cell(fill=si_c, region=+spheres[2] & -spheres[3]),
               openmc.Cell(fill=graphite, region=+spheres[3] & -spheres[4]),
               openmc.Cell(fill=lm_graphite, region=+spheres[4])]
triso_univ = openmc.Universe(cells=triso_cells)
lm_graphite_cell = openmc.Cell(fill=lm_graphite)
lm_graphite_univ = openmc.Universe(cells=(lm_graphite_cell,))
u = triso_univ
lattice = openmc.RectLattice()
lattice.lower_left = (V['A1']['F']['L']['x'], V['A1']['F']['B']
                      ['y'], -(T_pitch / 2 + (z_thickness - 1) / 2 * T_pitch))
lattice.pitch = (T_pitch, T_pitch, T_pitch)
lattice.outer = lm_graphite_univ
# One plank's worth of TRISO universes: 210 (x) x 4 (y) x z_thickness (z).
lattice_list = []
for z in range(z_thickness):
    lattice_z_list = []
    for row in range(4):
        lattice_y_list = []
        for col in range(210):
            lattice_y_list.append(u)
        lattice_z_list.append(lattice_y_list)
    lattice_list.append(lattice_z_list)
lattice.universes = lattice_list
# Fuel Plank
all_F_univ = openmc.Universe()
all_F_regions = region_maker('A1', 'F')  # initialize
for area in range(3):
    area_str = 'A{}'.format(area + 1)
    F_region = region_maker(area_str, 'F')
    F_cell = openmc.Cell(fill=lm_graphite,)
    F_cell.fill = lattice
    F_univ = openmc.Universe(cells=(F_cell,))
    # 6 planks per area, 2 fuel stripes per plank (x == 1 is the offset one).
    for t in range(6):
        for x in range(2):
            x_trans = t * T[area_str]['P']['x']
            y_trans = t * T[area_str]['P']['y']
            if x == 1:
                x_trans += T[area_str]['F']['x']
                y_trans += T[area_str]['F']['y']
            F_region_new = F_region.translate((x_trans, y_trans, 0))
            F_cell_new = openmc.Cell(fill=F_univ, region=F_region_new)
            # Areas 2 and 3 are rotated copies of area 1 (+/-120 degrees).
            if area == 1:
                F_cell_new.rotation = (0, 0, -120)
            if area == 2:
                F_cell_new.rotation = (0, 0, 120)
            F_cell_new.translation = (x_trans, y_trans, 0)
            all_F_univ.add_cell(F_cell_new)
            all_F_regions |= F_region_new
            P_areas.region &= ~F_region_new
            D_areas.region &= ~F_region_new
            H_cell.region &= ~F_region_new
F_areas = openmc.Cell(
    fill=all_F_univ,
    region=all_F_regions & -
    top_surface & +
    bot_surface)
# Spacer
all_S_univ = openmc.Universe()
S_small_spacer_surf = openmc.ZCylinder(
    r=S_small_r,
    x0=-D_to_center_width - S_A1_D_gap,
    y0=-D_to_center - P_small_gap)  # initialize
all_S_regions = -S_small_spacer_surf & + \
    plane(V['A1']['P']['T']['m'], V['A1']['P']['T']['x'], V['A1']['P']['T']['y'])
# outer loop is for 3 types of spacers, small top, big middle, small bottom
rad = [S_small_r, S_large_r, S_small_r]
start = [0, 1, 5]
end = [1, 6, 6]
# 'C'/'Cb' are keys into the V dict for the spacer centre coordinates;
# presumably 'Cb' is the bottom-row variant — confirm in phase1a_constants.
C = ['C', 'C', 'Cb']
for y in range(3):
    for area in range(3):
        area_str = 'A{}'.format(area + 1)
        S_cylinder = openmc.ZCylinder(r=rad[y],
                                      x0=V[area_str]['S'][C[y]]['x0'],
                                      y0=V[area_str]['S'][C[y]]['y0'])
        # Each spacer is the half of the cylinder on the plank side; which
        # bounding plane to clip against depends on the area's orientation.
        if area == 0:
            S_region = -S_cylinder & + \
                plane(V[area_str]['P']['T']['m'], V[area_str]['P']['T']['x'], V[area_str]['P']['T']['y'])
            if y == 2:
                S_region = -S_cylinder & - \
                    plane(V[area_str]['P']['B']['m'], V[area_str]['P']['B']['x'], V[area_str]['P']['B']['y'])
        if area == 1:
            S_region = -S_cylinder & - \
                plane(V[area_str]['P']['R']['m'], V[area_str]['P']['R']['x'], V[area_str]['P']['R']['y'])
            if y == 2:
                S_region = -S_cylinder & + \
                    plane(V[area_str]['P']['L']['m'], V[area_str]['P']['L']['x'], V[area_str]['P']['L']['y'])
        if area == 2:
            S_region = -S_cylinder & - \
                plane(V[area_str]['P']['L']['m'], V[area_str]['P']['L']['x'], V[area_str]['P']['L']['y'])
            if y == 2:
                S_region = -S_cylinder & + \
                    plane(V[area_str]['P']['R']['m'], V[area_str]['P']['R']['x'], V[area_str]['P']['R']['y'])
        S_cell = openmc.Cell(fill=s_graphite, region=S_region)
        S_univ = openmc.Universe(cells=(S_cell,))
        # Replicate over the plank rows this spacer type spans.
        for trans in range(start[y], end[y]):
            for x in range(2):
                x_trans = trans * T[area_str]['P']['x']
                y_trans = trans * T[area_str]['P']['y']
                if x == 1:
                    x_trans += T[area_str]['S']['x']
                    y_trans += T[area_str]['S']['y']
                S_region_new = S_region.translate((x_trans, y_trans, 0))
                S_cell_new = openmc.Cell(fill=S_univ, region=S_region_new)
                S_cell_new.translation = (x_trans, y_trans, 0)
                all_S_univ.add_cell(S_cell_new)
                all_S_regions |= S_region_new
                F_areas.region &= ~S_region_new
                P_areas.region &= ~S_region_new
                D_areas.region &= ~S_region_new
                H_cell.region &= ~S_region_new
S_areas = openmc.Cell(
    fill=all_S_univ,
    region=all_S_regions & -
    top_surface & +
    bot_surface)
# Control Rod Slot
A1_CS_cell = openmc.Cell(fill=flibe)
A1_CS_cell.region = region_maker('A1', 'CS') & -top_surface & + bot_surface
A2_CS_cell = openmc.Cell(fill=flibe)
A2_CS_cell.region = region_maker('A2', 'CS') & -top_surface & + bot_surface
A3_CS_cell = openmc.Cell(fill=flibe)
A3_CS_cell.region = region_maker('A3', 'CS') & -top_surface & + bot_surface
CS_regions = A1_CS_cell.region | A2_CS_cell.region | A3_CS_cell.region
CS_universe = openmc.Universe(cells=(A1_CS_cell, A2_CS_cell, A3_CS_cell,))
CS_areas = openmc.Cell(fill=CS_universe, region=CS_regions)
S_areas.region &= ~CS_regions
F_areas.region &= ~CS_regions
P_areas.region &= ~CS_regions
D_areas.region &= ~CS_regions
H_cell.region &= ~CS_regions
# Control Rod Arm
A1_CA_cell = openmc.Cell(fill=flibe)
A1_CA_cell.region = region_maker('A1', 'CA') & -top_surface & + bot_surface
A2_CA_cell = openmc.Cell(fill=flibe)
A2_CA_cell.region = region_maker('A2', 'CA') & -top_surface & + bot_surface
A3_CA_cell = openmc.Cell(fill=flibe)
A3_CA_cell.region = region_maker('A3', 'CA') & -top_surface & + bot_surface
CA_regions = A1_CA_cell.region | A2_CA_cell.region | A3_CA_cell.region
CA_universe = openmc.Universe(cells=(A1_CA_cell, A2_CA_cell, A3_CA_cell,))
CA_areas = openmc.Cell(fill=CA_universe, region=CA_regions)
CS_areas.region &= ~CA_regions
S_areas.region &= ~CA_regions
F_areas.region &= ~CA_regions
P_areas.region &= ~CA_regions
D_areas.region &= ~CA_regions
H_cell.region &= ~CA_regions
# export to xml
root = openmc.Universe(
    cells=[
        H_cell,
        D_areas,
        P_areas,
        F_areas,
        S_areas,
        CS_areas,
        CA_areas])
geom = openmc.Geometry(root)
geom.export_to_xml()
###############################################################################
# Exporting to OpenMC settings.xml file
##############################################################################
# Eigenvalue-run settings; batch counts come from the "Simulation Input File
# Parameters" section near the top of the file.
settings = openmc.Settings()
settings.batches = batches
settings.inactive = inactive
settings.particles = particles
# Windowed-multipole data with interpolation so cross sections are evaluated
# at the per-material temperatures set in the materials section.
settings.temperature = {'multipole': True, 'method': 'interpolation'}
settings.export_to_xml()
###############################################################################
# Exporting to OpenMC tallies.xml file
###############################################################################
# Generate tallies.xml from the root universe; tallies_generation comes from
# scripts/tallies.py (star-imported above).  Toggle via `tallies_on`.
if tallies_on:
    tallies_generation(root)
else:
    print('tallies off')
| 36.072917 | 109 | 0.582515 | """
This python script builds the XML files for case5a of the FHR benchmark
materials.xml, geometry.xml, settings.xml, and tallies.xml
"""
###############################################################################
# Python Package Import
###############################################################################
import openmc
import numpy as np
from numpy import sin, cos, tan, pi
import sys
sys.path.insert(1, '../../scripts/')
from phase1a_constants import *
from tallies import *
###############################################################################
# Simulation Input File Parameters
###############################################################################
# OpenMC simulation parameters
batches = 500
inactive = 100
particles = 2000000
tallies_on = True
###############################################################################
# Exporting to OpenMC materials.xml file
###############################################################################
uoc_9 = openmc.Material()
uoc_9.set_density('g/cc', 11)
uoc_9.add_nuclide('U235', 2.27325e-3)
uoc_9.add_nuclide('U238', 2.269476e-2)
uoc_9.add_nuclide('O16', 3.561871e-2)
uoc_9.add_nuclide('C0', 9.79714e-3)
uoc_9.temperature = 1110
uoc_9.volume = 4 / 3 * pi * (T_r1 ** 3) * 101 * 210 * 4 * 36
por_c = openmc.Material()
por_c.set_density('g/cc', 1)
por_c.add_nuclide('C0', 5.013980e-2)
por_c.temperature = 948
si_c = openmc.Material()
si_c.set_density('g/cc', 3.2)
si_c.add_nuclide('Si28', 4.431240e-2)
si_c.add_nuclide('Si29', 2.25887e-3)
si_c.add_nuclide('Si30', 1.48990e-3)
si_c.add_nuclide('C0', 4.806117e-2)
si_c.temperature = 948
graphite = openmc.Material()
graphite.set_density('g/cc', 1.8)
graphite.add_nuclide('C0', 9.025164e-2)
graphite.temperature = 948
p_graphite = openmc.Material()
p_graphite.set_density('g/cc', 1.8)
p_graphite.add_nuclide('C0', 9.025164e-2)
p_graphite.add_nuclide('Eu151', 1.533453e-6)
p_graphite.add_nuclide('Eu153', 1.674607e-6)
p_graphite.add_nuclide('O16', 4.812090e-6)
p_graphite.temperature = 948
s_graphite = openmc.Material()
s_graphite.set_density('g/cc', 1.8)
s_graphite.add_nuclide('C0', 9.025164e-2)
s_graphite.temperature = 948
lm_graphite = openmc.Material()
lm_graphite.set_density('g/cc', 1.8)
lm_graphite.add_nuclide('C0', 9.025164e-2)
lm_graphite.temperature = 948
flibe = openmc.Material()
flibe.set_density('g/cc', 1.95)
flibe.add_nuclide('Li6', 1.383014e-6)
flibe.add_nuclide('Li7', 2.37132e-2)
flibe.add_nuclide('Be9', 1.18573e-2)
flibe.add_nuclide('F19', 4.74291e-2)
flibe.temperature = 948
mhc = openmc.Material()
mhc.set_density('g/cc', 10.28)
mhc.add_nuclide('Mo92', 9.328884e-3)
mhc.add_nuclide('Mo94', 5.850533e-3)
mhc.add_nuclide('Mo95', 1.010836e-2)
mhc.add_nuclide('Mo96', 1.061782e-2)
mhc.add_nuclide('Mo97', 6.102080e-3)
mhc.add_nuclide('Mo98', 1.546981e-2)
mhc.add_nuclide('Mo100', 6.205246e-3)
mhc.add_nuclide('Hf174', 6.659530e-7)
mhc.add_nuclide('Hf176', 2.189321e-5)
mhc.add_nuclide('Hf177', 7.741704e-5)
mhc.add_nuclide('Hf178', 1.135450e-4)
mhc.add_nuclide('Hf179', 5.668925e-5)
mhc.add_nuclide('Hf180', 1.460102e-4)
mhc.add_nuclide('C0', 5.154371e-4)
mhc.temperature = 948
mats = openmc.Materials(
(uoc_9,
por_c,
si_c,
graphite,
p_graphite,
lm_graphite,
flibe,
mhc,
s_graphite))
mats.export_to_xml()
###############################################################################
# Exporting to OpenMC geometry.xml file
###############################################################################
# top and bottom surfaces (dz)
top_surface = openmc.ZPlane(
z0=T_pitch / 2 + (z_thickness - 1) / 2 * T_pitch, boundary_type='reflective')
bot_surface = openmc.ZPlane(
z0=-(T_pitch / 2 + (z_thickness - 1) / 2 * T_pitch), boundary_type='reflective')
# Outermost Hexagon
H_m = 1 / tan(pi / 6)
H_1 = openmc.YPlane(0.5 * H_side / tan(pi / 6), 'periodic')
H_2 = plane(-H_m, 0.5 * H_side, 0.5 * H_side / tan(pi / 6), 'periodic')
H_3 = plane(H_m, 0.5 * H_side, -0.5 * H_side / tan(pi / 6), 'periodic')
H_4 = openmc.YPlane(-0.5 * H_side / tan(pi / 6), 'periodic')
H_5 = plane(-H_m, -0.5 * H_side, -0.5 * H_side / tan(pi / 6), 'periodic')
H_6 = plane(H_m, -0.5 * H_side, 0.5 * H_side / tan(pi / 6), 'periodic')
H_1.periodic_surface = H_4
H_2.periodic_surface = H_5
H_3.periodic_surface = H_6
H_region = -H_1 & +H_4 & -H_2 & +H_3 & +H_5 & -H_6
H_cell = openmc.Cell(fill=graphite)
H_cell.region = H_region & -top_surface & + bot_surface
# Diamond Plank Area
A1_D_cell = openmc.Cell(fill=flibe)
A1_D_cell.region = region_maker('A1', 'D') & -top_surface & + bot_surface
A2_D_cell = openmc.Cell(fill=flibe)
A2_D_cell.region = region_maker('A2', 'D') & -top_surface & + bot_surface
A3_D_cell = openmc.Cell(fill=flibe)
A3_D_cell.region = region_maker('A3', 'D') & -top_surface & + bot_surface
D_regions = A1_D_cell.region | A2_D_cell.region | A3_D_cell.region
D_universe = openmc.Universe(cells=(A1_D_cell, A2_D_cell, A3_D_cell,))
D_areas = openmc.Cell(fill=D_universe, region=D_regions)
H_cell.region &= ~D_regions
# Graphite Planks
all_P_univ = openmc.Universe()
all_P_regions = region_maker('A1', 'P') # initialize
for area in range(3):
area_str = 'A{}'.format(area + 1)
P_region = region_maker(area_str, 'P')
P_cell = openmc.Cell(fill=p_graphite, region=P_region)
P_univ = openmc.Universe(cells=(P_cell,))
for trans in range(6):
P_region_new = P_region.translate(
(trans * T[area_str]['P']['x'], trans * T[area_str]['P']['y'], 0))
P_cell_new = openmc.Cell(fill=P_univ, region=P_region_new)
P_cell_new.translation = (
trans *
T[area_str]['P']['x'],
trans *
T[area_str]['P']['y'],
0)
all_P_univ.add_cell(P_cell_new)
all_P_regions |= P_region_new
D_areas.region &= ~P_region_new
H_cell.region &= ~P_region_new
P_areas = openmc.Cell(
fill=all_P_univ,
region=all_P_regions & -
top_surface & +
bot_surface)
# Triso Particles
spheres = [openmc.Sphere(r=r)
for r in [T_r1, T_r2, T_r3, T_r4, T_r5]]
triso_cells = [openmc.Cell(fill=uoc_9, region=-spheres[0]),
openmc.Cell(fill=por_c, region=+spheres[0] & -spheres[1]),
openmc.Cell(fill=graphite, region=+spheres[1] & -spheres[2]),
openmc.Cell(fill=si_c, region=+spheres[2] & -spheres[3]),
openmc.Cell(fill=graphite, region=+spheres[3] & -spheres[4]),
openmc.Cell(fill=lm_graphite, region=+spheres[4])]
triso_univ = openmc.Universe(cells=triso_cells)
lm_graphite_cell = openmc.Cell(fill=lm_graphite)
lm_graphite_univ = openmc.Universe(cells=(lm_graphite_cell,))
u = triso_univ
lattice = openmc.RectLattice()
lattice.lower_left = (V['A1']['F']['L']['x'], V['A1']['F']['B']
['y'], -(T_pitch / 2 + (z_thickness - 1) / 2 * T_pitch))
lattice.pitch = (T_pitch, T_pitch, T_pitch)
lattice.outer = lm_graphite_univ
lattice_list = []
for z in range(z_thickness):
lattice_z_list = []
for row in range(4):
lattice_y_list = []
for col in range(210):
lattice_y_list.append(u)
lattice_z_list.append(lattice_y_list)
lattice_list.append(lattice_z_list)
lattice.universes = lattice_list
# Fuel Plank
all_F_univ = openmc.Universe()
all_F_regions = region_maker('A1', 'F') # initialize
for area in range(3):
area_str = 'A{}'.format(area + 1)
F_region = region_maker(area_str, 'F')
F_cell = openmc.Cell(fill=lm_graphite,)
F_cell.fill = lattice
F_univ = openmc.Universe(cells=(F_cell,))
for t in range(6):
for x in range(2):
x_trans = t * T[area_str]['P']['x']
y_trans = t * T[area_str]['P']['y']
if x == 1:
x_trans += T[area_str]['F']['x']
y_trans += T[area_str]['F']['y']
F_region_new = F_region.translate((x_trans, y_trans, 0))
F_cell_new = openmc.Cell(fill=F_univ, region=F_region_new)
if area == 1:
F_cell_new.rotation = (0, 0, -120)
if area == 2:
F_cell_new.rotation = (0, 0, 120)
F_cell_new.translation = (x_trans, y_trans, 0)
all_F_univ.add_cell(F_cell_new)
all_F_regions |= F_region_new
P_areas.region &= ~F_region_new
D_areas.region &= ~F_region_new
H_cell.region &= ~F_region_new
F_areas = openmc.Cell(
fill=all_F_univ,
region=all_F_regions & -
top_surface & +
bot_surface)
# Spacer
all_S_univ = openmc.Universe()
S_small_spacer_surf = openmc.ZCylinder(
r=S_small_r,
x0=-D_to_center_width - S_A1_D_gap,
y0=-D_to_center - P_small_gap) # initialize
all_S_regions = -S_small_spacer_surf & + \
plane(V['A1']['P']['T']['m'], V['A1']['P']['T']['x'], V['A1']['P']['T']['y'])
# outer loop is for 3 types of spacers, small top, big middle, small bottom
rad = [S_small_r, S_large_r, S_small_r]
start = [0, 1, 5]
end = [1, 6, 6]
C = ['C', 'C', 'Cb']
for y in range(3):
for area in range(3):
area_str = 'A{}'.format(area + 1)
S_cylinder = openmc.ZCylinder(r=rad[y],
x0=V[area_str]['S'][C[y]]['x0'],
y0=V[area_str]['S'][C[y]]['y0'])
if area == 0:
S_region = -S_cylinder & + \
plane(V[area_str]['P']['T']['m'], V[area_str]['P']['T']['x'], V[area_str]['P']['T']['y'])
if y == 2:
S_region = -S_cylinder & - \
plane(V[area_str]['P']['B']['m'], V[area_str]['P']['B']['x'], V[area_str]['P']['B']['y'])
if area == 1:
S_region = -S_cylinder & - \
plane(V[area_str]['P']['R']['m'], V[area_str]['P']['R']['x'], V[area_str]['P']['R']['y'])
if y == 2:
S_region = -S_cylinder & + \
plane(V[area_str]['P']['L']['m'], V[area_str]['P']['L']['x'], V[area_str]['P']['L']['y'])
if area == 2:
S_region = -S_cylinder & - \
plane(V[area_str]['P']['L']['m'], V[area_str]['P']['L']['x'], V[area_str]['P']['L']['y'])
if y == 2:
S_region = -S_cylinder & + \
plane(V[area_str]['P']['R']['m'], V[area_str]['P']['R']['x'], V[area_str]['P']['R']['y'])
S_cell = openmc.Cell(fill=s_graphite, region=S_region)
S_univ = openmc.Universe(cells=(S_cell,))
for trans in range(start[y], end[y]):
for x in range(2):
x_trans = trans * T[area_str]['P']['x']
y_trans = trans * T[area_str]['P']['y']
if x == 1:
x_trans += T[area_str]['S']['x']
y_trans += T[area_str]['S']['y']
S_region_new = S_region.translate((x_trans, y_trans, 0))
S_cell_new = openmc.Cell(fill=S_univ, region=S_region_new)
S_cell_new.translation = (x_trans, y_trans, 0)
all_S_univ.add_cell(S_cell_new)
all_S_regions |= S_region_new
F_areas.region &= ~S_region_new
P_areas.region &= ~S_region_new
D_areas.region &= ~S_region_new
H_cell.region &= ~S_region_new
S_areas = openmc.Cell(
fill=all_S_univ,
region=all_S_regions & -
top_surface & +
bot_surface)
# Control Rod Slot
A1_CS_cell = openmc.Cell(fill=flibe)
A1_CS_cell.region = region_maker('A1', 'CS') & -top_surface & + bot_surface
A2_CS_cell = openmc.Cell(fill=flibe)
A2_CS_cell.region = region_maker('A2', 'CS') & -top_surface & + bot_surface
A3_CS_cell = openmc.Cell(fill=flibe)
A3_CS_cell.region = region_maker('A3', 'CS') & -top_surface & + bot_surface
CS_regions = A1_CS_cell.region | A2_CS_cell.region | A3_CS_cell.region
CS_universe = openmc.Universe(cells=(A1_CS_cell, A2_CS_cell, A3_CS_cell,))
CS_areas = openmc.Cell(fill=CS_universe, region=CS_regions)
S_areas.region &= ~CS_regions
F_areas.region &= ~CS_regions
P_areas.region &= ~CS_regions
D_areas.region &= ~CS_regions
H_cell.region &= ~CS_regions
# Control Rod Arm
A1_CA_cell = openmc.Cell(fill=flibe)
A1_CA_cell.region = region_maker('A1', 'CA') & -top_surface & + bot_surface
A2_CA_cell = openmc.Cell(fill=flibe)
A2_CA_cell.region = region_maker('A2', 'CA') & -top_surface & + bot_surface
A3_CA_cell = openmc.Cell(fill=flibe)
A3_CA_cell.region = region_maker('A3', 'CA') & -top_surface & + bot_surface
CA_regions = A1_CA_cell.region | A2_CA_cell.region | A3_CA_cell.region
CA_universe = openmc.Universe(cells=(A1_CA_cell, A2_CA_cell, A3_CA_cell,))
CA_areas = openmc.Cell(fill=CA_universe, region=CA_regions)
CS_areas.region &= ~CA_regions
S_areas.region &= ~CA_regions
F_areas.region &= ~CA_regions
P_areas.region &= ~CA_regions
D_areas.region &= ~CA_regions
H_cell.region &= ~CA_regions
# export to xml
root = openmc.Universe(
cells=[
H_cell,
D_areas,
P_areas,
F_areas,
S_areas,
CS_areas,
CA_areas])
geom = openmc.Geometry(root)
geom.export_to_xml()
###############################################################################
# Exporting to OpenMC settings.xml file
##############################################################################
settings = openmc.Settings()
settings.batches = batches
settings.inactive = inactive
settings.particles = particles
settings.temperature = {'multipole': True, 'method': 'interpolation'}
settings.export_to_xml()
###############################################################################
# Exporting to OpenMC tallies.xml file
###############################################################################
if tallies_on:
tallies_generation(root)
else:
print('tallies off')
| 0 | 0 | 0 |
c91bea908263571adb67a8c6dedec582e02dc67f | 119 | py | Python | src/objects/truss/__init__.py | thinkofher/lecter | cb434c1abe9423a1bb80670e8d41ecc63773b42b | [
"MIT"
] | null | null | null | src/objects/truss/__init__.py | thinkofher/lecter | cb434c1abe9423a1bb80670e8d41ecc63773b42b | [
"MIT"
] | null | null | null | src/objects/truss/__init__.py | thinkofher/lecter | cb434c1abe9423a1bb80670e8d41ecc63773b42b | [
"MIT"
] | null | null | null | from .general import TrussBar
from .construction import TrussConstruction
__all__ = ['TrussBar', 'TrussConstruction']
| 23.8 | 43 | 0.806723 | from .general import TrussBar
from .construction import TrussConstruction
__all__ = ['TrussBar', 'TrussConstruction']
| 0 | 0 | 0 |
23796b0848fe955c0ddb64905cf9a0866bd095f3 | 2,605 | py | Python | syncmanagerapi/deploy/create_files.py | Frie-man/syncmanager | f76e36f85ea68ab177a9ffd50dfff033ae0fc8f6 | [
"MIT"
] | null | null | null | syncmanagerapi/deploy/create_files.py | Frie-man/syncmanager | f76e36f85ea68ab177a9ffd50dfff033ae0fc8f6 | [
"MIT"
] | null | null | null | syncmanagerapi/deploy/create_files.py | Frie-man/syncmanager | f76e36f85ea68ab177a9ffd50dfff033ae0fc8f6 | [
"MIT"
] | null | null | null | import os
import configparser
import getpass
from jinja2 import Environment, FileSystemLoader
import socket
import sys
# Deployment helper: renders Jinja2 templates (a systemd unit file or a DB
# init SQL script) using settings read from application.prod.cfg.  Which
# artefact is produced is selected by the first command-line argument.
deploy_dir = os.path.dirname(os.path.abspath(__file__))
module_root = os.path.dirname(deploy_dir)
properties_path = module_root + "/application.prod.cfg"
config = configparser.ConfigParser()
TEMPLATE_ENVIRONMENT = Environment(
    autoescape=False,
    loader=FileSystemLoader(os.path.join(deploy_dir, 'templates')),
    trim_blocks=False)
# configparser requires a section header, so a synthetic one is prepended.
# NOTE(review): if the properties file is absent, 'default_section' never
# exists and the config['default_section'] lookups below raise KeyError --
# confirm whether running without a config file is supported.
if os.path.isfile(properties_path):
    with open(properties_path, 'r') as propertiesfile:
        config_string = '[default_section]\n' + propertiesfile.read()
        config.read_string(config_string)
# Render the systemd service unit from its .j2 template.
if sys.argv[1] == 'syncmanagerapi.service':
    systemd_service_file = sys.argv[1]
    install_dir = config['default_section'].get('INSTALL_DIR', '/opt/syncmanagerapi').strip('"\'')
    # NOTE(review): fs_root_dir is computed but not used in this branch.
    fs_root_dir = config['default_section'].get('FS_ROOT', '/var/syncmanager').strip('"\'')
    # NOTE(review): 'unix_group' reads the UNIX_USER key -- looks like a typo
    # for a UNIX_GROUP key; verify against the template and the config schema.
    context = {
        'unix_user': config['default_section'].get('UNIX_USER', 'syncman').strip('"\''),
        'unix_group': config['default_section'].get('UNIX_USER', 'syncman').strip('"\''),
        'install_dir': install_dir,
        'server_port': config['default_section'].get('SERVER_PORT', '5010'),
        'hostname': config['default_section'].get('HOSTNAME', socket.gethostname()).strip('"\'')
    }
    conf_file = TEMPLATE_ENVIRONMENT.get_template('{}.j2'.format(systemd_service_file)).render(context)
    # NOTE(review): the handle is closed manually; a with-block would be safer.
    f = open(os.path.join(deploy_dir, systemd_service_file), 'w')
    f.write(conf_file)
    f.close()
# generate database init script
if sys.argv[1] == 'init_db.sql':
    init_db_file = sys.argv[1]
    db_user_name = config['default_section'].get('DB_USER', 'syncmanager').strip('"\'')
    # password must be provided, in future this should be replaced by a retrieval from a password vault
    passw = getpass.getpass("Provide password for Mysql user {}:".format(db_user_name))
    # NOTE(review): the default schema name 'syncmanerapi' looks misspelled
    # (cf. 'syncmanagerapi' elsewhere in this file) -- confirm before relying
    # on the default.
    context = {
        'db_schema_name': config['default_section'].get('DB_SCHEMA_NAME', 'syncmanerapi').strip('"\''),
        'db_user': db_user_name,
        'db_user_password': passw
    }
    conf_file = TEMPLATE_ENVIRONMENT.get_template('{}.j2'.format(init_db_file)).render(context)
    f = open(os.path.join(deploy_dir, init_db_file), 'w')
    f.write(conf_file)
    f.close()
    # Echo the password so a calling shell script can capture it.
    print(f"DB_PASSWORD=\"{passw}\"")
| 47.363636 | 111 | 0.629942 | import os
import configparser
import getpass
from jinja2 import Environment, FileSystemLoader
import socket
import sys
# Deployment helper: renders Jinja2 templates (a systemd unit file or a DB
# init SQL script) using settings read from application.prod.cfg.  Which
# artefact is produced is selected by the first command-line argument.
deploy_dir = os.path.dirname(os.path.abspath(__file__))
module_root = os.path.dirname(deploy_dir)
properties_path = module_root + "/application.prod.cfg"
config = configparser.ConfigParser()
TEMPLATE_ENVIRONMENT = Environment(
    autoescape=False,
    loader=FileSystemLoader(os.path.join(deploy_dir, 'templates')),
    trim_blocks=False)
# configparser requires a section header, so a synthetic one is prepended.
# NOTE(review): if the properties file is absent, 'default_section' never
# exists and the config['default_section'] lookups below raise KeyError --
# confirm whether running without a config file is supported.
if os.path.isfile(properties_path):
    with open(properties_path, 'r') as propertiesfile:
        config_string = '[default_section]\n' + propertiesfile.read()
        config.read_string(config_string)
# Render the systemd service unit from its .j2 template.
if sys.argv[1] == 'syncmanagerapi.service':
    systemd_service_file = sys.argv[1]
    install_dir = config['default_section'].get('INSTALL_DIR', '/opt/syncmanagerapi').strip('"\'')
    # NOTE(review): fs_root_dir is computed but not used in this branch.
    fs_root_dir = config['default_section'].get('FS_ROOT', '/var/syncmanager').strip('"\'')
    # NOTE(review): 'unix_group' reads the UNIX_USER key -- looks like a typo
    # for a UNIX_GROUP key; verify against the template and the config schema.
    context = {
        'unix_user': config['default_section'].get('UNIX_USER', 'syncman').strip('"\''),
        'unix_group': config['default_section'].get('UNIX_USER', 'syncman').strip('"\''),
        'install_dir': install_dir,
        'server_port': config['default_section'].get('SERVER_PORT', '5010'),
        'hostname': config['default_section'].get('HOSTNAME', socket.gethostname()).strip('"\'')
    }
    conf_file = TEMPLATE_ENVIRONMENT.get_template('{}.j2'.format(systemd_service_file)).render(context)
    # NOTE(review): the handle is closed manually; a with-block would be safer.
    f = open(os.path.join(deploy_dir, systemd_service_file), 'w')
    f.write(conf_file)
    f.close()
# generate database init script
if sys.argv[1] == 'init_db.sql':
    init_db_file = sys.argv[1]
    db_user_name = config['default_section'].get('DB_USER', 'syncmanager').strip('"\'')
    # password must be provided, in future this should be replaced by a retrieval from a password vault
    passw = getpass.getpass("Provide password for Mysql user {}:".format(db_user_name))
    # NOTE(review): the default schema name 'syncmanerapi' looks misspelled
    # (cf. 'syncmanagerapi' elsewhere in this file) -- confirm before relying
    # on the default.
    context = {
        'db_schema_name': config['default_section'].get('DB_SCHEMA_NAME', 'syncmanerapi').strip('"\''),
        'db_user': db_user_name,
        'db_user_password': passw
    }
    conf_file = TEMPLATE_ENVIRONMENT.get_template('{}.j2'.format(init_db_file)).render(context)
    f = open(os.path.join(deploy_dir, init_db_file), 'w')
    f.write(conf_file)
    f.close()
    # Echo the password so a calling shell script can capture it.
    print(f"DB_PASSWORD=\"{passw}\"")
| 0 | 0 | 0 |
03b975e4d0e82a9ce99cd9cd2964182b4a8c7bfc | 128 | py | Python | src/hash_dict.py | erickcan/sound-change-applier | f6c1d75933c4f1269c7b158ace61b95488c84843 | [
"MIT"
] | 2 | 2021-04-20T14:30:59.000Z | 2021-10-03T18:58:26.000Z | src/hash_dict.py | erickcan/sound-change-applier | f6c1d75933c4f1269c7b158ace61b95488c84843 | [
"MIT"
] | null | null | null | src/hash_dict.py | erickcan/sound-change-applier | f6c1d75933c4f1269c7b158ace61b95488c84843 | [
"MIT"
] | null | null | null | __all__ = ["HashableDict"]
| 18.285714 | 48 | 0.664063 | __all__ = ["HashableDict"]
class HashableDict(dict):
    """A ``dict`` subclass that supports hashing, so instances can be
    used as set members or as keys of another dict.

    The hash is derived from the sorted ``(key, value)`` pairs, so two
    equal dicts hash identically regardless of insertion order.  An
    instance must not be mutated after it has been hashed, or the hash
    contract is broken.
    """

    def __hash__(self):
        # Sort the items so insertion order does not influence the hash.
        ordered_pairs = tuple(sorted(self.items()))
        return hash(ordered_pairs)
| 47 | 4 | 49 |
4ab1e637733e87745a814f217155d6d476bf867c | 1,437 | py | Python | src/fermulerpy/elementary/pythagorean_triangles.py | sumit-158/fermulerpy | f24dfb0c4648db7115e5793cfac4fd2bdd5694f4 | [
"MIT"
] | 7 | 2021-05-09T15:45:58.000Z | 2021-09-21T16:45:17.000Z | src/fermulerpy/elementary/pythagorean_triangles.py | sumit-158/fermulerpy | f24dfb0c4648db7115e5793cfac4fd2bdd5694f4 | [
"MIT"
] | 50 | 2021-05-09T09:24:47.000Z | 2022-01-30T09:00:19.000Z | src/fermulerpy/elementary/pythagorean_triangles.py | sumit-158/fermulerpy | f24dfb0c4648db7115e5793cfac4fd2bdd5694f4 | [
"MIT"
] | 6 | 2021-05-05T09:55:13.000Z | 2022-01-25T07:34:40.000Z | import math
import warnings
def is_pythagorean_triplet(a,b,c):
    """
    Checks if a, b and c form a pythagorean triplet i.e., a^2 + b^2 = c^2

    Parameters
    ----------
    a : int
        denotes positive integer a in a^2 + b^2 = c^2
    b : int
        denotes positive integer b in a^2 + b^2 = c^2
    c : int
        denotes positive integer c in a^2 + b^2 = c^2
    return : bool
        returns true if a, b and c form a pythagorean triplet otherwise false
    """
    if(a<1 or a!=int(a) or b<1 or b!=int(b) or c<1 or c!=int(c)):
        raise ValueError(
            "a,b and c are positive integers"
        )
    # Check the defining equation directly.  The earlier parity shortcuts
    # (exactly one even leg, odd hypotenuse) hold only for primitive
    # triplets and wrongly rejected valid scaled triplets such as
    # (6, 8, 10) -- which generate_pythagorean_triplet(3, 1) itself
    # produces.
    return (a ** 2) + (b ** 2) == (c ** 2)
def generate_pythagorean_triplet(m , n):
    """
    Generates a pythagorean triplet from two given integers using
    Euclid's formula: (2mn, m^2 - n^2, m^2 + n^2).

    Parameters
    ----------
    m : int
        denotes positive integer, must be greater than n
    n : int
        denotes positive integer
    return : 3 int
        returns three positive integers forming a pythagorean triplet
    """
    inputs_valid = m >= 1 and m == int(m) and n >= 1 and n == int(n)
    if not inputs_valid:
        raise ValueError(
            "m and n must be positive integers"
        )
    if m <= n:
        raise ValueError(
            "m must be greater than n"
        )
    m_squared = m ** 2
    n_squared = n ** 2
    return 2 * m * n, m_squared - n_squared, m_squared + n_squared
| 23.177419 | 76 | 0.511482 | import math
import warnings
def is_pythagorean_triplet(a,b,c):
    """
    Checks if a, b and c form a pythagorean triplet i.e., a^2 + b^2 = c^2

    Parameters
    ----------
    a : int
        denotes positive integer a in a^2 + b^2 = c^2
    b : int
        denotes positive integer b in a^2 + b^2 = c^2
    c : int
        denotes positive integer c in a^2 + b^2 = c^2
    return : bool
        returns true if a, b and c form a pythagorean triplet otherwise false
    """
    if(a<1 or a!=int(a) or b<1 or b!=int(b) or c<1 or c!=int(c)):
        raise ValueError(
            "a,b and c are positive integers"
        )
    # Check the defining equation directly.  The earlier parity shortcuts
    # (exactly one even leg, odd hypotenuse) hold only for primitive
    # triplets and wrongly rejected valid scaled triplets such as
    # (6, 8, 10) -- which generate_pythagorean_triplet(3, 1) itself
    # produces.
    return (a ** 2) + (b ** 2) == (c ** 2)
def generate_pythagorean_triplet(m , n):
    """
    Generates pythagorean triplets from the given two integers

    Uses Euclid's formula with m > n > 0:
    a = 2*m*n, b = m^2 - n^2, c = m^2 + n^2, so a^2 + b^2 == c^2.

    Parameters
    ----------
    m : int
        denotes positive integer
    n : int
        denotes positive integer
    return : 3 int
        returns three positive integers
    """
    for value in (m, n):
        if value < 1 or value != int(value):
            raise ValueError(
                "m and n must be positive integers"
            )
    if m <= n:
        raise ValueError(
            "m must be greater than n"
        )
    even_leg = 2 * m * n
    odd_leg = (m ** 2) - (n ** 2)
    hypotenuse = (m ** 2) + (n ** 2)
    return even_leg, odd_leg, hypotenuse
| 0 | 0 | 0 |
6bf009478bce813807143deee7b1aaba09a87d90 | 6,900 | py | Python | applications/compare_cumulative_psf.py | gammasim/gammasim-tools | 0b746254916f4c2e2a3fbd1854c565c3bc90d493 | [
"BSD-3-Clause"
] | 5 | 2020-06-02T09:46:38.000Z | 2022-03-26T16:42:26.000Z | applications/compare_cumulative_psf.py | gammasim/gammasim-tools | 0b746254916f4c2e2a3fbd1854c565c3bc90d493 | [
"BSD-3-Clause"
] | 166 | 2020-04-24T10:22:16.000Z | 2022-03-31T12:51:02.000Z | applications/compare_cumulative_psf.py | gammasim/gammasim-tools | 0b746254916f4c2e2a3fbd1854c565c3bc90d493 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
'''
Summary
-------
This application simulates the cumulative PSF and compare with data (if available).
The telescope zenith angle and the source distance can be set by command line arguments.
The measured cumulative PSF should be provided by using the command line argument data. \
A file name is expected, in which the file should contains 3 columns: radial distance in mm, \
differential value of photon intensisity and its integral value.
The MC model can be changed by providing a yaml file with the new parameter values using \
the argument pars (see example below).
Examples of the plots generated by this applications are shown below. On the left, \
the cumulative PSF and on the right, the simulated PSF image.
.. _compare_cumulative_psf_plot:
.. image:: images/compare_cumulative_psf_North-LST-1_cumulativePSF.png
:width: 49 %
.. image:: images/compare_cumulative_psf_North-LST-1_image.png
:width: 49 %
Command line arguments
----------------------
site (str, required)
North or South.
telescope (str, required)
Telescope model name (e.g. LST-1, SST-D, ...).
model_version (str, optional)
Model version (default=prod4).
src_distance (float, optional)
Source distance in km (default=10).
zenith (float, optional)
Zenith angle in deg (default=20).
data (str, optional)
Name of the data file with the measured cumulative PSF.
pars (str, optional)
Yaml file with the new model parameters to replace the default ones.
test (activation mode, optional)
If activated, application will be faster by simulating fewer photons.
verbosity (str, optional)
Log level to print (default=INFO).
Example
-------
LST-1 Prod5
Runtime < 1 min.
First, create an yml file named lst_pars.yml with the following content:
.. code-block:: yaml
mirror_reflection_random_angle: '0.0075,0.15,0.035'
mirror_align_random_horizontal: '0.0040,28.,0.0,0.0'
mirror_align_random_vertical: '0.0040,28.,0.0,0.0'
And the run:
.. code-block:: console
python applications/compare_cumulative_psf.py --site North --telescope LST-1 --model_version prod4 --pars lst_pars.yml --data PSFcurve_data_v2.txt
.. todo::
* Change default model to default (after this feature is implemented in db_handler)
'''
import logging
import matplotlib.pyplot as plt
import argparse
import yaml
from collections import OrderedDict
import numpy as np
import astropy.units as u
import simtools.io_handler as io
import simtools.util.general as gen
import simtools.config as cfg
from simtools.ray_tracing import RayTracing
from simtools.model.telescope_model import TelescopeModel
from simtools import visualize
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=(
'Calculate and plot the PSF and eff. mirror area as a function of off-axis angle '
'of the telescope requested.'
)
)
parser.add_argument(
'-s',
'--site',
help='North or South',
type=str,
required=True
)
parser.add_argument(
'-t',
'--telescope',
help='Telescope model name (e.g. MST-FlashCam-D, LST-1)',
type=str,
required=True
)
parser.add_argument(
'-m',
'--model_version',
help='Model version (default=prod4)',
type=str,
default='prod4'
)
parser.add_argument(
'--src_distance',
help='Source distance in km (default=10)',
type=float,
default=10
)
parser.add_argument(
'--zenith',
help='Zenith angle in deg (default=20)',
type=float,
default=20
)
parser.add_argument(
'--data',
help='Data file name with the measured PSF vs radius [cm]',
type=str
)
parser.add_argument(
'--pars',
help='Yaml file with the model parameters to be replaced',
type=str
)
parser.add_argument(
'--test',
help='Test option will be faster by simulating fewer photons.',
action='store_true'
)
parser.add_argument(
'-v',
'--verbosity',
dest='logLevel',
action='store',
default='info',
help='Log level to print (default is INFO)'
)
args = parser.parse_args()
label = 'compare_cumulative_psf'
logger = logging.getLogger()
logger.setLevel(gen.getLogLevelFromUser(args.logLevel))
# Output directory to save files related directly to this app
outputDir = io.getApplicationOutputDirectory(cfg.get('outputLocation'), label)
telModel = TelescopeModel(
site=args.site,
telescopeModelName=args.telescope,
modelVersion=args.model_version,
label=label
)
# New parameters
if args.pars is not None:
with open(args.pars) as file:
newPars = yaml.load(file, Loader=yaml.FullLoader)
telModel.changeParameters(**newPars)
ray = RayTracing.fromKwargs(
telescopeModel=telModel,
sourceDistance=args.src_distance * u.km,
zenithAngle=args.zenith * u.deg,
offAxisAngle=[0. * u.deg]
)
ray.simulate(test=args.test, force=False)
ray.analyze(force=False)
# Plotting cumulative PSF
im = ray.images()[0]
print('d80 in cm = {}'.format(im.getPSF()))
# Plotting cumulative PSF
dataToPlot = OrderedDict()
dataToPlot[r'sim$\_$telarray'] = im.getCumulativeData()
if args.data is not None:
dataFile = cfg.findFile(args.data)
dataToPlot['measured'] = loadData(dataFile)
plt = visualize.plot1D(dataToPlot)
plt.gca().set_ylim(0, 1.05)
plotFileName = label + '_' + telModel.name + '_cumulativePSF'
plotFile = outputDir.joinpath(plotFileName)
for f in ['pdf', 'png']:
plt.savefig(str(plotFile) + '.' + f, format=f, bbox_inches='tight')
plt.clf()
# Plotting image
dataToPlot = im.getImageData()
visualize.plotHist2D(dataToPlot, bins=80)
circle = plt.Circle((0, 0), im.getPSF(0.8) / 2, color='k', fill=False, lw=2, ls='--')
plt.gca().add_artist(circle)
plotFileName = label + '_' + telModel.name + '_image'
plotFile = outputDir.joinpath(plotFileName)
for f in ['pdf', 'png']:
plt.savefig(str(plotFile) + '.' + f, format=f, bbox_inches='tight')
plt.clf()
| 30.131004 | 154 | 0.638551 | #!/usr/bin/python3
'''
Summary
-------
This application simulates the cumulative PSF and compare with data (if available).
The telescope zenith angle and the source distance can be set by command line arguments.
The measured cumulative PSF should be provided by using the command line argument data. \
A file name is expected, in which the file should contains 3 columns: radial distance in mm, \
differential value of photon intensisity and its integral value.
The MC model can be changed by providing a yaml file with the new parameter values using \
the argument pars (see example below).
Examples of the plots generated by this applications are shown below. On the left, \
the cumulative PSF and on the right, the simulated PSF image.
.. _compare_cumulative_psf_plot:
.. image:: images/compare_cumulative_psf_North-LST-1_cumulativePSF.png
:width: 49 %
.. image:: images/compare_cumulative_psf_North-LST-1_image.png
:width: 49 %
Command line arguments
----------------------
site (str, required)
North or South.
telescope (str, required)
Telescope model name (e.g. LST-1, SST-D, ...).
model_version (str, optional)
Model version (default=prod4).
src_distance (float, optional)
Source distance in km (default=10).
zenith (float, optional)
Zenith angle in deg (default=20).
data (str, optional)
Name of the data file with the measured cumulative PSF.
pars (str, optional)
Yaml file with the new model parameters to replace the default ones.
test (activation mode, optional)
If activated, application will be faster by simulating fewer photons.
verbosity (str, optional)
Log level to print (default=INFO).
Example
-------
LST-1 Prod5
Runtime < 1 min.
First, create an yml file named lst_pars.yml with the following content:
.. code-block:: yaml
mirror_reflection_random_angle: '0.0075,0.15,0.035'
mirror_align_random_horizontal: '0.0040,28.,0.0,0.0'
mirror_align_random_vertical: '0.0040,28.,0.0,0.0'
And the run:
.. code-block:: console
python applications/compare_cumulative_psf.py --site North --telescope LST-1 --model_version prod4 --pars lst_pars.yml --data PSFcurve_data_v2.txt
.. todo::
* Change default model to default (after this feature is implemented in db_handler)
'''
import logging
import matplotlib.pyplot as plt
import argparse
import yaml
from collections import OrderedDict
import numpy as np
import astropy.units as u
import simtools.io_handler as io
import simtools.util.general as gen
import simtools.config as cfg
from simtools.ray_tracing import RayTracing
from simtools.model.telescope_model import TelescopeModel
from simtools import visualize
def loadData(datafile):
    """Read the measured cumulative PSF curve from *datafile*.

    The file is expected to hold whitespace-separated columns where
    column 0 is the radial distance in mm and column 2 the cumulative
    intensity; only those two columns are read.  The radius is converted
    from mm to cm and the intensity is normalized so that its largest
    absolute value equals 1.

    Returns a structured numpy array with the fields
    'Radius [cm]' and 'Relative intensity'.
    """
    column_spec = {
        'names': ('Radius [cm]', 'Relative intensity'),
        'formats': ('f8', 'f8')
    }
    data = np.loadtxt(datafile, dtype=column_spec, usecols=(0, 2))
    # mm -> cm
    data['Radius [cm]'] = data['Radius [cm]'] * 0.1
    # Normalize to a peak absolute value of 1.
    peak = np.max(np.abs(data['Relative intensity']))
    data['Relative intensity'] = data['Relative intensity'] / peak
    return data
if __name__ == '__main__':
    # Command-line entry point: parse the options, run the ray-tracing
    # simulation for the requested telescope and save the cumulative-PSF
    # curve and the PSF image as pdf/png plots.
    parser = argparse.ArgumentParser(
        description=(
            'Calculate and plot the PSF and eff. mirror area as a function of off-axis angle '
            'of the telescope requested.'
        )
    )
    parser.add_argument(
        '-s',
        '--site',
        help='North or South',
        type=str,
        required=True
    )
    parser.add_argument(
        '-t',
        '--telescope',
        help='Telescope model name (e.g. MST-FlashCam-D, LST-1)',
        type=str,
        required=True
    )
    parser.add_argument(
        '-m',
        '--model_version',
        help='Model version (default=prod4)',
        type=str,
        default='prod4'
    )
    parser.add_argument(
        '--src_distance',
        help='Source distance in km (default=10)',
        type=float,
        default=10
    )
    parser.add_argument(
        '--zenith',
        help='Zenith angle in deg (default=20)',
        type=float,
        default=20
    )
    parser.add_argument(
        '--data',
        help='Data file name with the measured PSF vs radius [cm]',
        type=str
    )
    parser.add_argument(
        '--pars',
        help='Yaml file with the model parameters to be replaced',
        type=str
    )
    parser.add_argument(
        '--test',
        help='Test option will be faster by simulating fewer photons.',
        action='store_true'
    )
    parser.add_argument(
        '-v',
        '--verbosity',
        dest='logLevel',
        action='store',
        default='info',
        help='Log level to print (default is INFO)'
    )
    args = parser.parse_args()
    label = 'compare_cumulative_psf'
    logger = logging.getLogger()
    logger.setLevel(gen.getLogLevelFromUser(args.logLevel))
    # Output directory to save files related directly to this app
    outputDir = io.getApplicationOutputDirectory(cfg.get('outputLocation'), label)
    # Build the telescope model for the requested site/telescope/version.
    telModel = TelescopeModel(
        site=args.site,
        telescopeModelName=args.telescope,
        modelVersion=args.model_version,
        label=label
    )
    # New parameters: optionally overwrite model parameters from a YAML file.
    if args.pars is not None:
        with open(args.pars) as file:
            newPars = yaml.load(file, Loader=yaml.FullLoader)
        telModel.changeParameters(**newPars)
    # Single on-axis ray-tracing run at the given zenith angle and
    # source distance.
    ray = RayTracing.fromKwargs(
        telescopeModel=telModel,
        sourceDistance=args.src_distance * u.km,
        zenithAngle=args.zenith * u.deg,
        offAxisAngle=[0. * u.deg]
    )
    ray.simulate(test=args.test, force=False)
    ray.analyze(force=False)
    # Report the d80 containment diameter of the simulated image.
    im = ray.images()[0]
    print('d80 in cm = {}'.format(im.getPSF()))
    # Plotting cumulative PSF
    dataToPlot = OrderedDict()
    dataToPlot[r'sim$\_$telarray'] = im.getCumulativeData()
    # Overlay the measured curve when a data file was given.
    if args.data is not None:
        dataFile = cfg.findFile(args.data)
        dataToPlot['measured'] = loadData(dataFile)
    plt = visualize.plot1D(dataToPlot)
    plt.gca().set_ylim(0, 1.05)
    plotFileName = label + '_' + telModel.name + '_cumulativePSF'
    plotFile = outputDir.joinpath(plotFileName)
    # Save each plot in both formats.
    for f in ['pdf', 'png']:
        plt.savefig(str(plotFile) + '.' + f, format=f, bbox_inches='tight')
    plt.clf()
    # Plotting image
    dataToPlot = im.getImageData()
    visualize.plotHist2D(dataToPlot, bins=80)
    # Draw the d80 circle (radius = d80 / 2) on top of the image.
    circle = plt.Circle((0, 0), im.getPSF(0.8) / 2, color='k', fill=False, lw=2, ls='--')
    plt.gca().add_artist(circle)
    plotFileName = label + '_' + telModel.name + '_image'
    plotFile = outputDir.joinpath(plotFileName)
    for f in ['pdf', 'png']:
        plt.savefig(str(plotFile) + '.' + f, format=f, bbox_inches='tight')
    plt.clf()
| 359 | 0 | 23 |
f39b89011e1d55544b55f13ce508a86b357cd1e2 | 4,305 | py | Python | autopxd/__init__.py | elijahr/python-autopxd2 | 4675488de8227390f87e864c7da84d7ffcf32db7 | [
"MIT"
] | 25 | 2018-04-02T22:28:06.000Z | 2021-10-07T20:11:42.000Z | autopxd/__init__.py | elijahr/python-autopxd2 | 4675488de8227390f87e864c7da84d7ffcf32db7 | [
"MIT"
] | 23 | 2019-01-21T17:50:48.000Z | 2021-12-14T16:24:59.000Z | autopxd/__init__.py | elijahr/python-autopxd2 | 4675488de8227390f87e864c7da84d7ffcf32db7 | [
"MIT"
] | 5 | 2019-03-03T22:48:48.000Z | 2021-09-26T19:37:22.000Z | import os
import platform
import subprocess
import sys
import click
from pycparser import (
c_parser,
)
from .declarations import (
BUILTIN_HEADERS_DIR,
DARWIN_HEADERS_DIR,
IGNORE_DECLARATIONS,
)
from .writer import (
AutoPxd,
)
__version__ = "2.0.4"
def ensure_binary(s, encoding="utf-8", errors="strict"):
    """Return *s* as ``bytes``.

    A ``str`` is encoded with the given *encoding*/*errors*; ``bytes``
    pass through untouched.  Anything else raises ``TypeError``.
    """
    if isinstance(s, bytes):
        return s
    if isinstance(s, str):
        return s.encode(encoding, errors)
    raise TypeError("not expecting type '%s'" % type(s))
def translate(code, hdrname, extra_cpp_args=None, whitelist=None, debug=False):
    """Convert the C header source *code* (from file *hdrname*) to pxd text.

    To generate pxd mappings for only certain files, populate the whitelist
    parameter with the filenames (including relative path):
    whitelist = ['/usr/include/baz.h', 'include/tux.h']
    if the input file is a file that we want in the whitelist, i.e.
    `whitelist = [hdrname]`, the following extra step is required:
    extra_cpp_args += [hdrname]
    """
    if extra_cpp_args is None:
        extra_cpp_args = []
    # let the preprocessor resolve includes that sit next to the header
    extra_incdir = os.path.dirname(hdrname)
    if extra_incdir:
        extra_cpp_args += ["-I%s" % extra_incdir]
    # walk the parsed AST and collect the pxd declarations
    p = AutoPxd(hdrname)
    p.visit(parse(code, extra_cpp_args=extra_cpp_args, whitelist=whitelist, debug=debug))
    pxd_string = ""
    # prepend a cimport line for any stdint types the header used
    if p.stdint_declarations:
        pxd_string += "from libc.stdint cimport {:s}\n\n".format(", ".join(p.stdint_declarations))
    pxd_string += str(p)
    return pxd_string
WHITELIST = []
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.command(
context_settings=CONTEXT_SETTINGS,
help="Generate a Cython pxd file from a C header file.",
)
@click.option("--version", "-v", is_flag=True, help="Print program version and exit.")
@click.option(
"--include-dir",
"-I",
multiple=True,
metavar="<dir>",
help="Allow the C preprocessor to search for files in <dir>.",
)
@click.option(
"--compiler-directive",
"-D",
multiple=True,
help="Additional directives for the C compiler.",
metavar="<directive>",
)
@click.option(
"--debug/--no-debug",
default=False,
help="Dump preprocessor output to stderr.",
)
@click.argument(
"infile",
type=click.File("r"),
default=sys.stdin,
)
@click.argument(
"outfile",
type=click.File("w"),
default=sys.stdout,
)
| 26.574074 | 98 | 0.627875 | import os
import platform
import subprocess
import sys
import click
from pycparser import (
c_parser,
)
from .declarations import (
BUILTIN_HEADERS_DIR,
DARWIN_HEADERS_DIR,
IGNORE_DECLARATIONS,
)
from .writer import (
AutoPxd,
)
__version__ = "2.0.4"
def ensure_binary(s, encoding="utf-8", errors="strict"):
    """Coerce *s* to ``bytes``, encoding ``str`` input if necessary."""
    if not isinstance(s, (str, bytes)):
        raise TypeError("not expecting type '%s'" % type(s))
    return s.encode(encoding, errors) if isinstance(s, str) else s
def preprocess(code, extra_cpp_args=None, debug=False):
    """Run the C preprocessor on *code* and return the output as text.

    On macOS, clang is invoked with the bundled Darwin headers; elsewhere
    the plain ``cpp`` binary is used.  Compiler-specific keywords
    (__attribute__, __asm, ...) are defined away so pycparser can handle
    the result.

    Parameters
    ----------
    code : str or bytes
        C source, fed to the preprocessor on stdin.
    extra_cpp_args : list of str, optional
        Additional command-line arguments for the preprocessor.
    debug : bool
        When True, also dump the preprocessed text to stderr.
    """
    if extra_cpp_args is None:
        extra_cpp_args = []
    if platform.system() == "Darwin":
        cmd = ["clang", "-E", "-I%s" % DARWIN_HEADERS_DIR]
    else:
        cmd = ["cpp"]
    cmd += (
        [
            "-nostdinc",
            # neutralize compiler extensions that pycparser cannot parse
            "-D__attribute__(x)=",
            "-D__extension__=",
            "-D__inline=",
            "-D__asm=",
            "-I%s" % BUILTIN_HEADERS_DIR,
        ]
        + extra_cpp_args
        # "-" makes the preprocessor read the source from stdin
        + ["-"]
    )
    with subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
        result = [proc.communicate(input=ensure_binary(code))[0]]
        # drain any remaining output until the process has terminated
        while proc.poll() is None:
            result.append(proc.communicate()[0])
        if proc.returncode:
            raise Exception("Invoking C preprocessor failed")
    res = b"".join(result).decode("utf-8")
    if debug:
        sys.stderr.write(res)
    # normalize Windows line endings
    return res.replace("\r\n", "\n")
def parse(code, extra_cpp_args=None, whitelist=None, debug=False):
    """Preprocess *code* and return a pycparser AST of the kept declarations.

    Declarations named in ``IGNORE_DECLARATIONS`` are dropped, as are
    declarations coming from files outside *whitelist* (when a whitelist
    is given).
    """
    if extra_cpp_args is None:
        extra_cpp_args = []
    preprocessed = preprocess(code, extra_cpp_args=extra_cpp_args, debug=debug)
    ast = c_parser.CParser().parse(preprocessed)

    def _keep(decl):
        # named declarations on the ignore list are filtered out
        if hasattr(decl, "name") and decl.name in IGNORE_DECLARATIONS:
            return False
        # an empty whitelist means "keep declarations from every file"
        return not whitelist or decl.coord.file in whitelist

    ast.ext = [decl for decl in ast.ext if _keep(decl)]
    return ast
def translate(code, hdrname, extra_cpp_args=None, whitelist=None, debug=False):
    """Convert the C header source *code* (from file *hdrname*) to pxd text.

    To generate pxd mappings for only certain files, populate *whitelist*
    with the filenames (including relative path)::

        whitelist = ['/usr/include/baz.h', 'include/tux.h']

    If the input file itself should be whitelisted, i.e.
    ``whitelist = [hdrname]``, also do ``extra_cpp_args += [hdrname]``.
    """
    if extra_cpp_args is None:
        extra_cpp_args = []
    # let the preprocessor resolve includes next to the header
    header_dir = os.path.dirname(hdrname)
    if header_dir:
        extra_cpp_args += ["-I%s" % header_dir]
    visitor = AutoPxd(hdrname)
    ast = parse(code, extra_cpp_args=extra_cpp_args, whitelist=whitelist, debug=debug)
    visitor.visit(ast)
    parts = []
    # cimport line for any stdint types the header used
    if visitor.stdint_declarations:
        parts.append(
            "from libc.stdint cimport {:s}\n\n".format(", ".join(visitor.stdint_declarations))
        )
    parts.append(str(visitor))
    return "".join(parts)
WHITELIST = []
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# Command-line interface; click wires every option below into cli().
@click.command(
    context_settings=CONTEXT_SETTINGS,
    help="Generate a Cython pxd file from a C header file.",
)
@click.option("--version", "-v", is_flag=True, help="Print program version and exit.")
@click.option(
    "--include-dir",
    "-I",
    multiple=True,
    metavar="<dir>",
    help="Allow the C preprocessor to search for files in <dir>.",
)
@click.option(
    "--compiler-directive",
    "-D",
    multiple=True,
    help="Additional directives for the C compiler.",
    metavar="<directive>",
)
@click.option(
    "--debug/--no-debug",
    default=False,
    help="Dump preprocessor output to stderr.",
)
@click.argument(
    "infile",
    type=click.File("r"),
    default=sys.stdin,
)
@click.argument(
    "outfile",
    type=click.File("w"),
    default=sys.stdout,
)
def cli(
    version,
    infile,
    outfile,
    include_dir,
    compiler_directive,
    debug,
):
    """Entry point: translate INFILE (C header) to OUTFILE (pxd).

    Defaults to stdin/stdout so the tool can be used in a pipeline.
    (click takes its help text from the ``help=`` argument above, so this
    docstring is for readers of the code.)
    """
    if version:
        print(__version__)
        return
    # forward -D and -I flags to the C preprocessor
    extra_cpp_args = ["-D%s" % directive for directive in compiler_directive]
    for directory in include_dir:
        extra_cpp_args += ["-I%s" % directory]
    outfile.write(translate(infile.read(), infile.name, extra_cpp_args, debug=debug))
| 1,818 | 0 | 68 |
b82a0e214234e21b205794994a814b108f285609 | 16,244 | py | Python | get_spectrum.py | drvdputt/dust_fuse_h2 | 3bff87d1cb475abd20f4426e18412379aa3ca991 | [
"BSD-3-Clause"
] | null | null | null | get_spectrum.py | drvdputt/dust_fuse_h2 | 3bff87d1cb475abd20f4426e18412379aa3ca991 | [
"BSD-3-Clause"
] | null | null | null | get_spectrum.py | drvdputt/dust_fuse_h2 | 3bff87d1cb475abd20f4426e18412379aa3ca991 | [
"BSD-3-Clause"
] | null | null | null | """Tools for getting spectra for lya fitting.
Includes choosing a data file for each star, reading the files, and
processing the spectral data (from either IUE, STIS, ...) into a format
that can be used directly for the fitting.
The variable target_use_which_spectrum indicates which data to use for
each star. It can be customized by editing this file. Running this
module directly will print out the default value for this dictionary.
"""
from astropy.table import Table
from astropy.io import fits
import numpy as np
from pathlib import Path
from warnings import warn
from scipy.interpolate import interp1d
import collections
# \(swp[0-9]\{5\}\)
# can be manually tweaked. If the value is a list or contains *, the
# spectra will be coadded
target_use_which_spectrum = {
"HD097471": "data/HD097471/mastDownload/IUE/swp19375/swp19375mxlo_vo.fits",
"HD037525": "data/HD037525/mastDownload/IUE/swp27579/swp27579.mxhi.gz",
"HD093827": "data/HD093827/mastDownload/IUE/swp50536/swp50536.mxhi.gz",
# "HD093827": "data/HD093827/*mxlo_vo.fits",
"HD051013": "data/HD051013/mastDownload/IUE/swp22860/swp22860.mxhi.gz",
"HD096675": "data/HD096675/mastDownload/IUE/swp41717/swp41717.mxhi.gz",
"HD023060": "data/HD023060/mastDownload/IUE/swp11151/swp11151mxlo_vo.fits",
"HD099872": "data/HD099872/mastDownload/HST/**/*_x1d.fits",
# "HD152248": "data/HD152248/mastDownload/IUE/swp54576/swp54576.mxhi.gz",
"HD152248": "data/HD152248/**/*.mxhi.gz",
"HD209339": "data/HD209339/mastDownload/HST/**/*_x1d.fits",
# "HD197770": "data/HD197770/mastDownload/HST/oedl04010/oedl04010_x1d.fits",
"HD197770": "data/HD197770/**/*.mxhi.gz",
"HD037332": "data/HD037332/mastDownload/IUE/swp32289/swp32289.mxhi.gz",
"HD093028": "data/HD093028/mastDownload/IUE/swp05521/swp05521.mxhi.gz",
# "HD062542": "data/HD062542/mastDownload/HST/obik01020/obik01020_x1d.fits", # wavelength range
# "HD062542": "data/HD062542/*.mxhi.gz", # way too noisy
"HD062542": "data/HD062542/**/*mxlo_vo.fits",
# "HD190603": "data/HD190603/*.mxhi.gz",
"HD190603": "data/HD190603/**/*mxlo_vo.fits",
# "HD046202": "data/HD046202/mastDownload/IUE/swp08845/swp08845.mxhi.gz",
# "HD046202": "data/HD046202/mastDownload/HST/ocb6e0030/ocb6e0030_x1d.fits",
# "HD046202": "data/HD046202/mastDownload/HST/ocb6e1030/ocb6e1030_x1d.fits",
"HD046202": "data/HD046202/mastDownload/HST/**/*_x1d.fits",
# "HD047129": "data/HD047129/mastDownload/IUE/swp07077/swp07077.mxhi.gz",
"HD047129": "data/HD047129/**/*.mxhi.gz",
"HD235874": "data/HD235874/mastDownload/IUE/swp34158/swp34158mxlo_vo.fits",
"HD216898": "data/HD216898/swp43934.mxhi.gz",
# "HD216898": "data/HD216898/mastDownload/IUE/swp17175/swp17175mxlo_vo.fits",
"HD326329": "data/HD326329/mastDownload/IUE/swp48698/swp48698.mxhi.gz",
"HD179406": [
"data/HD179406/mastDownload/IUE/swp08974/swp08974.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp08976/swp08976.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp13865/swp13865.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp36939/swp36939.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp36940/swp36940.mxhi.gz",
],
"BD+52d3210": "data/BD+52d3210/mastDownload/IUE/swp34153/swp34153mxlo_vo.fits",
"BD+56d524": "data/BD+56d524/mastDownload/IUE/swp20330/swp20330mxlo_vo.fits",
# data for comparison to existing HI results
"HD094493": "data/HD094493/mastDownload/HST/o54306010/o54306010_x1d.fits",
"HD045314": "data/HD045314/mastDownload/IUE/**/*mxhi.gz"
}
# namedtuple defines a simple class: a per-wavelength record of spectral
# data plus exposure time. "net" and "exptime" may be None for sources
# that do not provide them (e.g. IUE low-resolution products).
Spectrum = collections.namedtuple(
    "Spectrum", ["wavs", "flux", "errs", "net", "exptime"]
)
def processed(target, wmin=0, wmax=1400, disp=0.25):
    """Get spectrum data ready for fitting Lya for the given target.

    Tweak the variable get_spectrum.target_use_which_spectrum to choose
    the right data. Depending on whether a IUE or STIS spectrum was
    chosen, different steps will be taken. The end result is the
    spectral data in a common format, processed with different steps
    depending on the source of the data.

    Returns
    -------
    wav, flux, filename: wavelengths (angstrom), fluxes
        (erg s-1 cm-2 angstrom-1), and the file the data came from
    """
    # choose data
    filename = target_use_which_spectrum[target]
    print("Getting data from ", filename)
    spectrum, rebin = auto_wavs_flux_errs(filename)
    if rebin:
        binnedwavs, binnedflux = rebin_spectrum_around_lya(spectrum, wmin, wmax, disp)
    else:
        # no rebinning needed (mxlo data): just clip to [wmin, wmax]
        wavs, flux = spectrum.wavs, spectrum.flux
        use = np.logical_and(wmin < wavs, wavs < wmax)
        binnedwavs, binnedflux = wavs[use], flux[use]
    # remove nans (these are very annoying when they propagate, e.g.
    # max([array with nan]) = nan).
    safe = np.isfinite(binnedflux)
    safewavs = binnedwavs[safe]
    safeflux = binnedflux[safe]
    return safewavs, safeflux, filename
def auto_wavs_flux_errs(filename):
    """Load spectrum or multiple spectra based on file name.

    Returns (spectrum, rebin): the Spectrum data and whether the caller
    should rebin it (True for x1d/mxhi products).
    """
    # determine if multiple files were provided. If a glob pattern was
    # provided, this counts as multiple files too.
    if isinstance(filename, list):
        to_be_coadded = filename
    elif isinstance(filename, str):
        if "*" in filename:
            # expand the glob pattern relative to the working directory
            to_be_coadded = [str(p) for p in Path(".").glob(filename)]
        elif "x1d" in filename:
            # a single x1d file can contain multiple extensions, which
            # need to be coadded
            to_be_coadded = [filename]
        else:
            to_be_coadded = None
    else:
        warn("filename should be str or list!")
        # NOTE(review): bare `raise` outside an except block raises
        # RuntimeError("No active exception to re-raise"); a TypeError
        # would express the intent better
        raise
    if to_be_coadded is None:
        # single file: dispatch on the product type encoded in the name
        if "x1d" in filename:
            spectrum = merged_stis_data(filename)
            rebin = True
        elif "mxhi" in filename:
            spectrum = merged_iue_h_data(filename)
            rebin = True
        elif "mxlo" in filename:
            spectrum = iue_l_data(filename)
            rebin = False
        else:
            warn("File {} not supported yet, exiting".format(filename))
            # NOTE(review): exit() in a library function kills the whole
            # process; consider raising ValueError instead
            exit()
    else:
        # multiple files: coadd them with the matching coadd routine
        if "x1d" in to_be_coadded[0]:
            spectrum = coadd_hst_stis(to_be_coadded)
            rebin = True
        elif "mxhi" in to_be_coadded[0]:
            spectrum = coadd_iue_h(to_be_coadded)
            rebin = True
        elif "mxlo" in to_be_coadded[0]:
            spectrum = coadd_iue_l(to_be_coadded)
            rebin = False
    return spectrum, rebin
def merged_stis_data(filename, extension=1):
    """Get spectrum data from all STIS spectral orders.

    If only filename is given, use SCI extension.

    Returns
    -------
    wavs: numpy array, all wavelengths, sorted
    flux: all fluxes at these wavelengths
    errs: all errors at these wavelengths
    """
    with fits.open(filename) as f:
        t = f[extension].data
        exptime = get_exptime(f[extension].header)
        output_columns = ["WAVELENGTH", "FLUX", "ERROR", "NET"]
        # each column holds one array per spectral order; flatten them
        fields = [np.concatenate(t[c]) for c in output_columns]
        # clean up by dq (keep only points with data-quality flag 0)
        dq = np.concatenate(t["DQ"])
        good = dq == 0
        print(f"STIS: {good.sum()} out of {len(good)} wavelength points are good")
        fields = [c[good] for c in fields]
        # sort by wavelength
        idxs = np.argsort(fields[0])
        fields = [c[idxs] for c in fields]
        # add exptime and create Spectrum (namedtuple) object (* unpacks,
        # should be in right order)
        fields.append(exptime)
        return Spectrum(*fields)
def merged_iue_h_data(filename):
    """
    Get Spectrum info over all orders of high res IUE data.

    Fixes two defects: the nested helpers this body calls (iue_wavs,
    pixrange, all_of_column) had gone missing, so every call raised
    NameError; and the DQ-mask loop rebound its loop variable only,
    silently leaving the data unfiltered.

    Returns
    -------
    Spectrum
    """
    t = Table.read(filename)

    def iue_wavs(i):
        # wavelength grid of order i: linear, WAVELENGTH start, DELTAW step
        return t[i]["WAVELENGTH"] + t[i]["DELTAW"] * np.arange(t[i]["NPOINTS"])

    def pixrange(i):
        # the valid pixel range of order i
        return slice(t[i]["STARTPIX"], t[i]["STARTPIX"] + t[i]["NPOINTS"])

    def all_of_column(colname):
        # concatenate the valid pixels of the given column over all orders
        return np.concatenate([t[i][colname][pixrange(i)] for i in range(len(t))])

    allwavs = np.concatenate([iue_wavs(i) for i in range(len(t))])
    colnames = ["WAVELENGTH", "ABS_CAL", "NOISE", "NET"]
    column_values = [allwavs]
    for colname in colnames[1:]:
        column_values.append(all_of_column(colname))
    # clean up using DQ: keep only points with quality flag 0
    dq = all_of_column("QUALITY")
    good = dq == 0
    print(f"IUE: {good.sum()} out of {len(good)} wavelength points are good")
    column_values = [c[good] for c in column_values]
    # sort by wavelength
    idxs = np.argsort(column_values[0])
    column_values = [c[idxs] for c in column_values]
    # add exptime and create Spectrum
    exptime = get_exptime(fits.getheader(filename, ext=0))
    fields = column_values + [exptime]
    return Spectrum(*fields)
def coadd_general(spectrums):
    """General function for coadding spectra.

    Fix: restores the nested ``do_interp1d`` helper this body calls; it
    had gone missing, so every call raised NameError.

    spectrums : list of Spectrum objects

    Returns
    -------
    spectrum : Spectrum object representing the coadded data
    """
    # get all the per-wavelength data
    all_wavs = [s.wavs for s in spectrums]
    # determine new wavelength grid, using max of median of wavelength
    # increment as step size
    maxwav = np.amax(np.concatenate(all_wavs))
    minwav = np.amin(np.concatenate(all_wavs))
    disp = np.amax([np.median(np.diff(w)) for w in all_wavs])
    newwavs = np.arange(minwav, maxwav, disp)
    # instead of binning, we're just going to do nearest neighbour on a
    # slightly coarser wavelength grid. It worked for Julia, so...
    flux_sum = np.zeros(len(newwavs))
    weight_sum = np.zeros(len(newwavs))
    variance_sum = np.zeros(len(newwavs))
    net_sum = np.zeros(len(newwavs))
    total_exptime = np.zeros(len(newwavs))
    for s in spectrums:
        # nearest neighbour interpolation of all relevant quantities onto
        # the common grid; points outside this exposure's range become NaN
        def do_interp1d(quantity):
            return interp1d(
                s.wavs, quantity, kind="nearest", fill_value=np.nan, bounds_error=False,
            )(newwavs)

        fi = do_interp1d(s.flux)
        ei = do_interp1d(s.errs)
        ni = do_interp1d(s.net)
        exptime = s.exptime
        # weights scale with ni / fi = sensitivity
        good_fi_ni = (fi != 0) & np.isfinite(fi) & (ni != 0) & np.isfinite(ni)
        wi = np.where(good_fi_ni, ni / fi, 0) * exptime
        good_wi = wi > 0
        # total_counts = flux * sensitivity * exptime
        # --> flux = total_counts / (sensitivity * exptime)
        #
        # V(flux) = V(total_counts) / (sensitivity * exptime)**2
        # = total_counts / (sensitivity * exptime)**2 (poisson)
        # = flux * sensitivity * exptime / (sensitivity * exptime)**2
        # = flux / (sensitivity * exptime)
        # sens = counts per flux unit
        weight_sum[good_wi] += wi[good_wi]
        flux_sum[good_wi] += wi[good_wi] * fi[good_wi]
        variance_sum[good_wi] += np.square(ei[good_wi] * wi[good_wi])
        net_sum[good_wi] += ni[good_wi] * exptime
        total_exptime[good_wi] += exptime
    flux_result = flux_sum / weight_sum
    errs_result = np.sqrt(variance_sum) / weight_sum
    net_result = net_sum / total_exptime
    return Spectrum(newwavs, flux_result, errs_result, net_result, total_exptime)
def rebin_spectrum_around_lya(spectrum, wmin=0, wmax=1400, disp=0.25):
    """Weighted rebinning of a spectrum onto a regular grid for Lya fitting.

    Each output point is the weighted average of all data falling inside
    a bin of width *disp* (Angstrom) between *wmin* and *wmax*.  The
    weights are net / flux * exptime when those quantities are available,
    and 1 / errs**2 otherwise.  Bins without any usable data come out as
    (0, nan); such points are expected to be filtered downstream.

    Returns
    -------
    newwavs: average wavelength in each bin
    newflux: average flux in each bin
    """
    wavs = spectrum.wavs
    flux = spectrum.flux

    # clamp the binned range to the data actually present
    lo = max(wmin, np.amin(wavs))
    hi = min(wmax, np.amax(wavs))
    edges = np.arange(lo, hi, disp)

    # weight choice: sensitivity * exptime when possible, else 1/sigma^2
    if spectrum.net is not None and spectrum.exptime is not None:
        weights = spectrum.net / flux * spectrum.exptime
    else:
        weights = 1 / spectrum.errs ** 2

    # np.digitize: index b means edges[b-1] <= wav < edges[b];
    # b == 0 or b == len(edges) is out of range.
    bin_index = np.digitize(wavs, edges)
    usable = np.isfinite(flux) & (weights > 0) & np.isfinite(weights)

    nbins = len(edges) - 1
    newwavs = np.zeros(nbins)
    newflux = np.zeros(nbins)
    for b in range(nbins):
        sel = usable & (bin_index == b + 1)
        if sel.any():
            newwavs[b] = np.average(wavs[sel], weights=weights[sel])
            newflux[b] = np.average(flux[sel], weights=weights[sel])
        else:
            # empty or fully-rejected bin; caller filters out the nan later
            newwavs[b] = 0
            newflux[b] = np.nan
    return newwavs, newflux
def get_exptime(header):
    """Find the exposure time in a FITS header.

    Tries EXPTIME, LEXPTIME and SEXPTIME in that order, returning the
    first match as a float (None when no keyword is present).
    """
    candidates = ("EXPTIME", "LEXPTIME", "SEXPTIME")
    return next((float(header[k]) for k in candidates if k in header), None)
# Some code to generate the above dict from scratch. Manual tweaking can
# occur after.
if __name__ == "__main__":
    gen_dict = {}
    here = Path(".")
    for d in list(here.glob("./data/HD*")) + list(here.glob("./data/BD*")):
        # NOTE(review): these flags are never updated or read below
        has_iue_h = False
        has_iue_l = False
        has_hst_stis = False
        # has_hst_cos = False
        # lower in this list of ifs is higher priority
        target = Path(d).name
        # def set_if_exists(glob_pattern):
        # files = d.glob(glob_pattern)
        # if len(files) > 0:
        # spectrum_file = files[0]
        iue_l_files = list(d.glob("*mxlo_vo.fits"))
        if len(iue_l_files) > 0:
            spectrum_file = str(iue_l_files[0])
        iue_h_files = list(d.glob("*mxhi.gz"))
        if len(iue_h_files) > 0:
            spectrum_file = str(iue_h_files[0])
        hst_stis_files = list(d.glob("**/*x1d.fits"))
        if len(hst_stis_files) > 0:
            spectrum_file = str(hst_stis_files[0])
        # NOTE(review): if a directory matches none of the patterns,
        # spectrum_file is stale (previous iteration) or unbound (first
        # iteration) -- confirm every data dir has at least one match
        gen_dict[target] = spectrum_file
    print(gen_dict)
| 35.701099 | 99 | 0.650025 | """Tools for getting spectra for lya fitting.
Includes choosing a data file for each star, reading the files, and
processing the spectral data (from either IUE, STIS, ...) into a format
that can be used directly for the fitting.
The variable target_use_which_spectrum indicates which data to use for
each star. It can be customized by editing this file. Running this
module directly will print out the default value for this dictionary.
"""
from astropy.table import Table
from astropy.io import fits
import numpy as np
from pathlib import Path
from warnings import warn
from scipy.interpolate import interp1d
import collections
# \(swp[0-9]\{5\}\)
# can be manually tweaked. If the value is a list or contains *, the
# spectra will be coadded
target_use_which_spectrum = {
"HD097471": "data/HD097471/mastDownload/IUE/swp19375/swp19375mxlo_vo.fits",
"HD037525": "data/HD037525/mastDownload/IUE/swp27579/swp27579.mxhi.gz",
"HD093827": "data/HD093827/mastDownload/IUE/swp50536/swp50536.mxhi.gz",
# "HD093827": "data/HD093827/*mxlo_vo.fits",
"HD051013": "data/HD051013/mastDownload/IUE/swp22860/swp22860.mxhi.gz",
"HD096675": "data/HD096675/mastDownload/IUE/swp41717/swp41717.mxhi.gz",
"HD023060": "data/HD023060/mastDownload/IUE/swp11151/swp11151mxlo_vo.fits",
"HD099872": "data/HD099872/mastDownload/HST/**/*_x1d.fits",
# "HD152248": "data/HD152248/mastDownload/IUE/swp54576/swp54576.mxhi.gz",
"HD152248": "data/HD152248/**/*.mxhi.gz",
"HD209339": "data/HD209339/mastDownload/HST/**/*_x1d.fits",
# "HD197770": "data/HD197770/mastDownload/HST/oedl04010/oedl04010_x1d.fits",
"HD197770": "data/HD197770/**/*.mxhi.gz",
"HD037332": "data/HD037332/mastDownload/IUE/swp32289/swp32289.mxhi.gz",
"HD093028": "data/HD093028/mastDownload/IUE/swp05521/swp05521.mxhi.gz",
# "HD062542": "data/HD062542/mastDownload/HST/obik01020/obik01020_x1d.fits", # wavelength range
# "HD062542": "data/HD062542/*.mxhi.gz", # way too noisy
"HD062542": "data/HD062542/**/*mxlo_vo.fits",
# "HD190603": "data/HD190603/*.mxhi.gz",
"HD190603": "data/HD190603/**/*mxlo_vo.fits",
# "HD046202": "data/HD046202/mastDownload/IUE/swp08845/swp08845.mxhi.gz",
# "HD046202": "data/HD046202/mastDownload/HST/ocb6e0030/ocb6e0030_x1d.fits",
# "HD046202": "data/HD046202/mastDownload/HST/ocb6e1030/ocb6e1030_x1d.fits",
"HD046202": "data/HD046202/mastDownload/HST/**/*_x1d.fits",
# "HD047129": "data/HD047129/mastDownload/IUE/swp07077/swp07077.mxhi.gz",
"HD047129": "data/HD047129/**/*.mxhi.gz",
"HD235874": "data/HD235874/mastDownload/IUE/swp34158/swp34158mxlo_vo.fits",
"HD216898": "data/HD216898/swp43934.mxhi.gz",
# "HD216898": "data/HD216898/mastDownload/IUE/swp17175/swp17175mxlo_vo.fits",
"HD326329": "data/HD326329/mastDownload/IUE/swp48698/swp48698.mxhi.gz",
"HD179406": [
"data/HD179406/mastDownload/IUE/swp08974/swp08974.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp08976/swp08976.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp13865/swp13865.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp36939/swp36939.mxhi.gz",
"data/HD179406/mastDownload/IUE/swp36940/swp36940.mxhi.gz",
],
"BD+52d3210": "data/BD+52d3210/mastDownload/IUE/swp34153/swp34153mxlo_vo.fits",
"BD+56d524": "data/BD+56d524/mastDownload/IUE/swp20330/swp20330mxlo_vo.fits",
# data for comparison to existing HI results
"HD094493": "data/HD094493/mastDownload/HST/o54306010/o54306010_x1d.fits",
"HD045314": "data/HD045314/mastDownload/IUE/**/*mxhi.gz"
}
# namedtuple defines a simple class: a per-wavelength record of spectral
# data plus exposure time. "net" and "exptime" may be None for sources
# that do not provide them (e.g. IUE low-resolution products).
Spectrum = collections.namedtuple(
    "Spectrum", ["wavs", "flux", "errs", "net", "exptime"]
)
def processed(target, wmin=0, wmax=1400, disp=0.25):
    """Get spectrum data ready for fitting Lya for the given target.

    Tweak the variable get_spectrum.target_use_which_spectrum to choose
    the right data. Depending on whether a IUE or STIS spectrum was
    chosen, different steps will be taken. The end result is the
    spectral data in a common format, processed with different steps
    depending on the source of the data.

    Returns
    -------
    wav, flux, filename: wavelengths (angstrom), fluxes
        (erg s-1 cm-2 angstrom-1), and the file the data came from
    """
    # choose data
    filename = target_use_which_spectrum[target]
    print("Getting data from ", filename)
    spectrum, rebin = auto_wavs_flux_errs(filename)
    if rebin:
        binnedwavs, binnedflux = rebin_spectrum_around_lya(spectrum, wmin, wmax, disp)
    else:
        # no rebinning needed (mxlo data): just clip to [wmin, wmax]
        wavs, flux = spectrum.wavs, spectrum.flux
        use = np.logical_and(wmin < wavs, wavs < wmax)
        binnedwavs, binnedflux = wavs[use], flux[use]
    # remove nans (these are very annoying when they propagate, e.g.
    # max([array with nan]) = nan).
    safe = np.isfinite(binnedflux)
    safewavs = binnedwavs[safe]
    safeflux = binnedflux[safe]
    return safewavs, safeflux, filename
def auto_wavs_flux_errs(filename):
    """Load one spectrum, or coadd several, based on the file name(s).

    Parameters
    ----------
    filename : str or list of str
        A single path, a glob pattern (containing '*'), or a list of
        paths.  Glob patterns and lists are coadded.  The data type
        (STIS x1d, IUE mxhi, IUE mxlo) is inferred from the name.

    Returns
    -------
    spectrum : Spectrum
    rebin : bool
        Whether the caller should rebin (True for x1d/mxhi data).

    Raises
    ------
    TypeError
        If *filename* is neither str nor list (was: bare ``raise``
        outside an except block, i.e. an unintended RuntimeError).
    ValueError
        If the file type is not recognized (was: ``exit()``, which
        killed the whole process from library code).
    """
    # determine if multiple files were provided. If a glob pattern was
    # provided, this counts as multiple files too.
    if isinstance(filename, list):
        to_be_coadded = filename
    elif isinstance(filename, str):
        if "*" in filename:
            to_be_coadded = [str(p) for p in Path(".").glob(filename)]
        elif "x1d" in filename:
            # a single x1d file can contain multiple extensions, which
            # need to be coadded
            to_be_coadded = [filename]
        else:
            to_be_coadded = None
    else:
        raise TypeError("filename should be str or list, not %s" % type(filename))
    if to_be_coadded is None:
        # single file: dispatch on the product type encoded in the name
        if "x1d" in filename:
            spectrum = merged_stis_data(filename)
            rebin = True
        elif "mxhi" in filename:
            spectrum = merged_iue_h_data(filename)
            rebin = True
        elif "mxlo" in filename:
            spectrum = iue_l_data(filename)
            rebin = False
        else:
            raise ValueError("File {} not supported yet".format(filename))
    else:
        # multiple files: coadd them with the matching coadd routine
        if "x1d" in to_be_coadded[0]:
            spectrum = coadd_hst_stis(to_be_coadded)
            rebin = True
        elif "mxhi" in to_be_coadded[0]:
            spectrum = coadd_iue_h(to_be_coadded)
            rebin = True
        elif "mxlo" in to_be_coadded[0]:
            spectrum = coadd_iue_l(to_be_coadded)
            rebin = False
        else:
            # previously fell through to an UnboundLocalError at return
            raise ValueError("File {} not supported yet".format(to_be_coadded[0]))
    return spectrum, rebin
def merged_stis_data(filename, extension=1):
    """Get spectrum data from all STIS spectral orders.

    If only filename is given, use SCI extension.

    Returns
    -------
    wavs: numpy array, all wavelengths, sorted
    flux: all fluxes at these wavelengths
    errs: all errors at these wavelengths
    """
    with fits.open(filename) as f:
        t = f[extension].data
        exptime = get_exptime(f[extension].header)
        output_columns = ["WAVELENGTH", "FLUX", "ERROR", "NET"]
        # each column holds one array per spectral order; flatten them
        fields = [np.concatenate(t[c]) for c in output_columns]
        # clean up by dq (keep only points with data-quality flag 0)
        dq = np.concatenate(t["DQ"])
        good = dq == 0
        print(f"STIS: {good.sum()} out of {len(good)} wavelength points are good")
        fields = [c[good] for c in fields]
        # sort by wavelength
        idxs = np.argsort(fields[0])
        fields = [c[idxs] for c in fields]
        # add exptime and create Spectrum (namedtuple) object (* unpacks,
        # should be in right order)
        fields.append(exptime)
        return Spectrum(*fields)
def merged_iue_h_data(filename):
    """
    Get Spectrum info over all orders of high res IUE data.

    Fix: the DQ-mask loop used to rebind its loop variable only
    (``for array in column_values: array = array[good]``), which
    silently left the data unfiltered; the mask is now actually applied,
    matching what merged_stis_data does.

    Returns
    -------
    Spectrum
    """
    t = Table.read(filename)

    def iue_wavs(i):
        # wavelength grid of order i: linear, WAVELENGTH start, DELTAW step
        return t[i]["WAVELENGTH"] + t[i]["DELTAW"] * np.arange(t[i]["NPOINTS"])

    def pixrange(i):
        # the valid pixel range of order i
        return slice(t[i]["STARTPIX"], t[i]["STARTPIX"] + t[i]["NPOINTS"])

    def all_of_column(colname):
        # concatenate the valid pixels of the given column over all orders
        return np.concatenate([t[i][colname][pixrange(i)] for i in range(len(t))])

    allwavs = np.concatenate([iue_wavs(i) for i in range(len(t))])
    colnames = ["WAVELENGTH", "ABS_CAL", "NOISE", "NET"]
    column_values = [allwavs]
    for colname in colnames[1:]:
        column_values.append(all_of_column(colname))
    # clean up using DQ: keep only points with quality flag 0
    dq = all_of_column("QUALITY")
    good = dq == 0
    print(f"IUE: {good.sum()} out of {len(good)} wavelength points are good")
    column_values = [c[good] for c in column_values]
    # sort by wavelength
    idxs = np.argsort(column_values[0])
    column_values = [c[idxs] for c in column_values]
    # add exptime and create Spectrum
    exptime = get_exptime(fits.getheader(filename, ext=0))
    fields = column_values + [exptime]
    return Spectrum(*fields)
def iue_l_data(filename):
    """Read an IUE low-resolution (mxlo) product into a Spectrum.

    Net counts and exposure time are not available for this format, so
    those fields are left as None.
    """
    table = Table.read(filename)
    row = table[0]
    return Spectrum(row["WAVE"], row["FLUX"], row["SIGMA"], None, None)
def coadd_iue_h(filenames):
    """Coadd a set of IUE high-resolution (mxhi) exposures."""
    print(f"Coadding {len(filenames)} IUE H exposures")
    return coadd_general(list(map(merged_iue_h_data, filenames)))
def coadd_iue_l(filenames):
    """Coadd IUE low-resolution (mxlo) exposures with 1/sigma**2 weights.

    All exposures must share the same wavelength grid; a ValueError is
    raised otherwise.

    Fix: the sanity check used ``np.equal.reduce`` over the list of wav
    arrays, which for more than two exposures compares the boolean
    result of the first comparison against the next array and is wrong.
    Every grid is now compared against the first with np.array_equal.
    Also replaces a bare ``raise`` outside an except block (which only
    produced RuntimeError) with an explicit exception.

    Returns
    -------
    Spectrum
        Coadded flux and errors; net and exptime are None (not
        available for mxlo data).
    """
    print(f"Coadding {len(filenames)} IUE L exposures")
    spectrums = [iue_l_data(fn) for fn in filenames]
    ref_wavs = spectrums[0].wavs
    if not all(np.array_equal(s.wavs, ref_wavs) for s in spectrums):
        raise ValueError("Not all wavs are equal in IUE L. Implement fix pls.")
    # Assume that the wavs are always the same. If not, the above error
    # will trigger, and the rebinning strategy should be reconsidered.
    numwavs = len(ref_wavs)
    flux_sum = np.zeros(numwavs)
    weight_sum = np.zeros(numwavs)
    for s in spectrums:
        good = np.isfinite(s.flux) & (s.errs > 0)
        weight = 1 / s.errs ** 2
        flux_sum[good] += s.flux[good] * weight[good]
        weight_sum[good] += weight[good]
    # simply the 1/sigma2 weighting rule
    new_flux = flux_sum / weight_sum
    new_errs = np.sqrt(1 / weight_sum)
    return Spectrum(spectrums[0].wavs, new_flux, new_errs, None, None)
def coadd_hst_stis(filenames):
    """Coadd every SCI extension of the given STIS x1d files."""
    spectrums = []
    for fn in filenames:
        with fits.open(fn) as hdus:
            # extension 0 is the primary header; 1..N-1 hold the spectra
            for ext in range(1, len(hdus)):
                spectrums.append(merged_stis_data(fn, ext))
    print(f"Coadding {len(spectrums)} STIS exposures from {len(filenames)} files")
    return coadd_general(spectrums)
def coadd_general(spectrums):
    """General function for coadding spectra.

    spectrums : list of Spectrum objects

    Returns
    -------
    spectrum : Spectrum object representing the coadded data
    """
    # get all the per-wavelength data
    all_wavs = [s.wavs for s in spectrums]
    # determine new wavelength grid, using max of median of wavelength
    # increment as step size
    maxwav = np.amax(np.concatenate(all_wavs))
    minwav = np.amin(np.concatenate(all_wavs))
    disp = np.amax([np.median(np.diff(w)) for w in all_wavs])
    newwavs = np.arange(minwav, maxwav, disp)
    # instead of binning, we're just going to do nearest neighbour on a
    # slightly coarser wavelength grid. It worked for Julia, so...
    flux_sum = np.zeros(len(newwavs))
    weight_sum = np.zeros(len(newwavs))
    variance_sum = np.zeros(len(newwavs))
    net_sum = np.zeros(len(newwavs))
    total_exptime = np.zeros(len(newwavs))
    for s in spectrums:
        # nearest neighbour interpolation of all relevant quantities
        def do_interp1d(quantity):
            # closes over the current exposure `s`; called only within
            # this loop iteration, so late binding is safe here
            return interp1d(
                s.wavs, quantity, kind="nearest", fill_value=np.nan, bounds_error=False,
            )(newwavs)
        fi = do_interp1d(s.flux)
        ei = do_interp1d(s.errs)
        ni = do_interp1d(s.net)
        exptime = s.exptime
        # weights scale with ni / fi = sensitivity
        good_fi_ni = (fi != 0) & np.isfinite(fi) & (ni != 0) & np.isfinite(ni)
        wi = np.where(good_fi_ni, ni / fi, 0) * exptime
        good_wi = wi > 0
        # total_counts = flux * sensitivity * exptime
        # --> flux = total_counts / (sensitivity * exptime)
        #
        # V(flux) = V(total_counts) / (sensitivity * exptime)**2
        # = total_counts / (sensitivity * exptime)**2 (poisson)
        # = flux * sensitivity * exptime / (sensitivity * exptime)**2
        # = flux / (sensitivity * exptime)
        # sens = counts per flux unit
        weight_sum[good_wi] += wi[good_wi]
        flux_sum[good_wi] += wi[good_wi] * fi[good_wi]
        variance_sum[good_wi] += np.square(ei[good_wi] * wi[good_wi])
        net_sum[good_wi] += ni[good_wi] * exptime
        total_exptime[good_wi] += exptime
    # weighted averages over all exposures that contributed to each point
    flux_result = flux_sum / weight_sum
    errs_result = np.sqrt(variance_sum) / weight_sum
    net_result = net_sum / total_exptime
    return Spectrum(newwavs, flux_result, errs_result, net_result, total_exptime)
def rebin_spectrum_around_lya(spectrum, wmin=0, wmax=1400, disp=0.25):
    """Rebin spectrum to for lya fitting, and reject certain points.

    A rebinning of the spectrum to make it more useful for lya fitting.
    Every new point is the weighted average of all data within the range
    of a bin. The weights are net / flux * exptime if those are
    available. If not 1 / errs**2 is used. The bins can be specified by
    choosing a minimum, maximum wavelength and a resolution (in
    Angstrom). Additionally, only the points that satisfy some basic
    data rejection criteria are used.

    Returns
    -------
    newwavs: average wavelength in each bin
    newflux: average flux in each bin
    """
    wavs = spectrum.wavs
    flux = spectrum.flux
    # clamp the binned range to the data actually present
    wavmin = max(wmin, np.amin(wavs))
    wavmax = min(wmax, np.amax(wavs))
    wavbins = np.arange(wavmin, wavmax, disp)
    # weights: sensitivity * exptime when available, else inverse variance
    if spectrum.net is not None and spectrum.exptime is not None:
        weights = spectrum.net / flux * spectrum.exptime
    else:
        weights = 1 / spectrum.errs ** 2
    # np.digitize returns list of indices. b = 1 means that the data point
    # is between wav[0] (first) and wav[1]. b = n-1 means between wav[n-2]
    # and wav[n-1] (last). b = 0 or n mean out of range.
    bs = np.digitize(wavs, wavbins)
    newwavs = np.zeros(len(wavbins) - 1)
    newflux = np.zeros(len(wavbins) - 1)
    for i in range(0, len(wavbins) - 1):
        in_bin = bs == i + 1  # b runs from 1 to n-1
        # reject non-finite fluxes and non-positive/non-finite weights
        use = np.logical_and.reduce(
            [in_bin, np.isfinite(flux), weights > 0, np.isfinite(weights)]
        )
        # if a bin is empty or something else is wrong, the nans will be
        # filtered out later
        if not use.any():
            newwavs[i] = 0
            newflux[i] = np.nan
            continue
        newwavs[i] = np.average(wavs[use], weights=weights[use])
        newflux[i] = np.average(flux[use], weights=weights[use])
    return newwavs, newflux
def get_exptime(header):
    """Try a couple of keywords to find the exposure time in a FITS header.

    Checks EXPTIME, then LEXPTIME, then SEXPTIME; the first keyword present
    wins and its value is returned as a float.

    Returns
    -------
    float exposure time, or None when none of the known keywords is present
    (previously this fell off the end of the function and returned None
    implicitly; the contract is now explicit).
    """
    for exptime_key in ("EXPTIME", "LEXPTIME", "SEXPTIME"):
        if exptime_key in header:
            exptime = float(header[exptime_key])
            return exptime
    return None
# Some code to generate the above dict from scratch. Manual tweaking can
# occur after.
if __name__ == "__main__":
    # Build a {target-name: spectrum-file} dict for every HD*/BD* directory
    # under ./data.  The glob checks run from lowest to highest priority, so
    # a later match overwrites an earlier one.
    gen_dict = {}
    here = Path(".")
    for d in list(here.glob("./data/HD*")) + list(here.glob("./data/BD*")):
        # NOTE(review): these flags are set but never updated or read below.
        has_iue_h = False
        has_iue_l = False
        has_hst_stis = False
        # has_hst_cos = False
        # lower in this list of ifs is higher priority
        target = Path(d).name
        # def set_if_exists(glob_pattern):
        #     files = d.glob(glob_pattern)
        #     if len(files) > 0:
        #         spectrum_file = files[0]
        # IUE low resolution (lowest priority)
        iue_l_files = list(d.glob("*mxlo_vo.fits"))
        if len(iue_l_files) > 0:
            spectrum_file = str(iue_l_files[0])
        # IUE high resolution
        iue_h_files = list(d.glob("*mxhi.gz"))
        if len(iue_h_files) > 0:
            spectrum_file = str(iue_h_files[0])
        # HST STIS (highest priority)
        hst_stis_files = list(d.glob("**/*x1d.fits"))
        if len(hst_stis_files) > 0:
            spectrum_file = str(hst_stis_files[0])
        # NOTE(review): if a directory matches none of the patterns,
        # spectrum_file is stale from the previous iteration (or unbound on
        # the first) — TODO confirm every target has at least one match.
        gen_dict[target] = spectrum_file
        print(gen_dict)
| 2,048 | 0 | 203 |
374f2f18ef85e59b1260b1b13697bd0bd04a1253 | 1,543 | py | Python | python/strings.py | cbare/Etudes | 8a803621f2abd20966843ccec696aec397d3c9f9 | [
"Apache-2.0"
] | null | null | null | python/strings.py | cbare/Etudes | 8a803621f2abd20966843ccec696aec397d3c9f9 | [
"Apache-2.0"
] | null | null | null | python/strings.py | cbare/Etudes | 8a803621f2abd20966843ccec696aec397d3c9f9 | [
"Apache-2.0"
] | null | null | null | """
String algorithms
"""
# Sanity checks: a string is balanced when every ')' closes an earlier '('
# and no '(' is left open at the end.
assert balanced_parens('')
assert balanced_parens('()')
assert balanced_parens('((()))')
assert balanced_parens('((()()()))')
assert balanced_parens('((()()()))()(())(()())')
# Unbalanced cases: unclosed '(', surplus ')', or ')' before its '('.
assert not balanced_parens('(()')
assert not balanced_parens('((())))')
assert not balanced_parens('((()())')
assert not balanced_parens('())(()')
def longest_valid_parens(s: str) -> int:
    """
    return the length of the longest run of valid nested parens.

    Given a string containing just the characters '(' and ')', find the length
    of the longest well-formed substring.  Returns 0 when there is no
    well-formed substring at all (e.g. "", "(((", ")(").
    """
    # Seed with every directly adjacent "()" pair; any valid substring must
    # contain at least one such pair.
    seeds = [(i, i + 1) for i in range(len(s) - 1) if s[i:i + 2] == '()']
    # BUG FIX: the original crashed here with IndexError (seeds[0]) and
    # ValueError (max() on an empty generator) when no "()" pair exists.
    if not seeds:
        return 0
    grew = True
    merged = 0
    while grew or merged:
        grew = 0
        merged = 0
        # grow: a run (a, b) wrapped as "(...)" extends to (a-1, b+1)
        for i in range(len(seeds)):
            a, b = seeds[i]
            if a > 0 and b + 1 < len(s) and s[a - 1] == '(' and s[b + 1] == ')':
                grew += 1
                seeds[i] = (a - 1, b + 1)
        # merge: adjacent valid runs concatenate into a single longer run
        new_seeds = []
        s0 = seeds[0]
        for s1 in seeds[1:]:
            if s0[1] + 1 == s1[0]:
                merged += 1
                s0 = (s0[0], s1[1])
            else:
                new_seeds.append(s0)
                s0 = s1
        new_seeds.append(s0)
        seeds = new_seeds
    return max(b - a + 1 for a, b in seeds)
| 24.109375 | 78 | 0.483474 | """
String algorithms
"""
def balanced_parens(s: str) -> bool:
    """Return True when every ')' in s closes an earlier '(' and none stay open.

    Characters other than parentheses are ignored.
    """
    depth = 0
    for ch in s:
        if ch == '(':
            depth += 1
        elif ch == ')':
            if depth == 0:
                # A ')' with nothing open can never be balanced.
                return False
            depth -= 1
    # Balanced iff every opener was eventually closed.
    return depth == 0
# Sanity checks: a string is balanced when every ')' closes an earlier '('
# and no '(' is left open at the end.
assert balanced_parens('')
assert balanced_parens('()')
assert balanced_parens('((()))')
assert balanced_parens('((()()()))')
assert balanced_parens('((()()()))()(())(()())')
# Unbalanced cases: unclosed '(', surplus ')', or ')' before its '('.
assert not balanced_parens('(()')
assert not balanced_parens('((())))')
assert not balanced_parens('((()())')
assert not balanced_parens('())(()')
def longest_valid_parens(s: str) -> int:
    """
    return the length of the longest run of valid nested parens.

    Given a string containing just the characters '(' and ')', find the length
    of the longest well-formed substring.  Returns 0 when there is no
    well-formed substring at all (e.g. "", "(((", ")(").
    """
    # Seed with every directly adjacent "()" pair; any valid substring must
    # contain at least one such pair.
    seeds = [(i, i + 1) for i in range(len(s) - 1) if s[i:i + 2] == '()']
    # BUG FIX: the original crashed here with IndexError (seeds[0]) and
    # ValueError (max() on an empty generator) when no "()" pair exists.
    if not seeds:
        return 0
    grew = True
    merged = 0
    while grew or merged:
        grew = 0
        merged = 0
        # grow: a run (a, b) wrapped as "(...)" extends to (a-1, b+1)
        for i in range(len(seeds)):
            a, b = seeds[i]
            if a > 0 and b + 1 < len(s) and s[a - 1] == '(' and s[b + 1] == ')':
                grew += 1
                seeds[i] = (a - 1, b + 1)
        # merge: adjacent valid runs concatenate into a single longer run
        new_seeds = []
        s0 = seeds[0]
        for s1 in seeds[1:]:
            if s0[1] + 1 == s1[0]:
                merged += 1
                s0 = (s0[0], s1[1])
            else:
                new_seeds.append(s0)
                s0 = s1
        new_seeds.append(s0)
        seeds = new_seeds
    return max(b - a + 1 for a, b in seeds)
| 208 | 0 | 23 |
f38a4dd9b4566a47a3885490463bd76e704d9005 | 6,317 | py | Python | Module 3/Python_practice.py | KathiaF/Election_Analysis | 400c1e5329ebed9977445cfd0f1245182afecdd1 | [
"MIT"
] | null | null | null | Module 3/Python_practice.py | KathiaF/Election_Analysis | 400c1e5329ebed9977445cfd0f1245182afecdd1 | [
"MIT"
] | null | null | null | Module 3/Python_practice.py | KathiaF/Election_Analysis | 400c1e5329ebed9977445cfd0f1245182afecdd1 | [
"MIT"
] | null | null | null | print("Hello World")
# No-op: evaluates the type of the literal 3 and discards the result; only
# useful in an interactive session.
type(3)
#How many votes did you get?
#my_votes = int(input("How many votes did you get in the election?"))
#Total votes in the election
#total_votes = int(input("What is the total votes in the election?"))
#Calculate the percentage of votes you receive
#percentage_votes = (my_votes / total_votes)*100
#print("I received " + str(percentage_votes)+"% of the total votes")
#IF-ELSE STATEMENT
# Practice data: three county names used by the examples below.
counties = ["Arapahoe","Denver","Jefferson"]
# counties[1] is the second element, so this branch runs and prints "Denver".
if counties[1] == 'Denver':
    print(counties[1])
#IndexError: list index out of range
#if counties[3] != 'Jefferson':
#    print(counties[2])
#temperature = int(input("What is the temperature outside? "))
#if temperature > 80:
#    print("Turn on the AC.")
#else:
#    print("Open the windows.")
"""
#What is the score?
score = int(input("What is your test score? "))
# Determine the grade.
if score >= 90:
print('Your grade is an A.')
else:
if score >= 80:
print('Your grade is a B.')
else:
if score >= 70:
print('Your grade is a C.')
else:
if score >= 60:
print('Your grade is a D.')
else:
print('Your grade is an F.')
# What is the score?
score = int(input("What is your test score? "))
# Determine the grade.
if score >= 90:
print('Your grade is an A.')
elif score >= 80:
print('Your grade is a B.')
elif score >= 70:
print('Your grade is a C.')
elif score >= 60:
print('Your grade is a D.')
else:
print('Your grade is an F.')
"""
"""
if "Arapahoe" in counties:
print("True")
else:
print("False")
if "El Paso" not in counties:
print("True")
else:
print("False")
counties = ["Arapahoe","Denver","Jefferson"]
if "El Paso" in counties:
print("El Paso is in the list of counties.")
else:
print("El Paso is not the list of counties.")
x = 5
y = 5
if x == 5 and y == 5:
print("True")
else:
print("False")
if x == 3 or y == 5:
print("True")
else:
print("False")
if not(x > y):
print("True")
else:
print("False")
if "Arapahoe" in counties and "El Paso" in counties:
print("Arapahoe and El Paso are in the list of counties.")
else:
print("Arapahoe or El Paso is not in the list of counties.")
if "Arapahoe" in counties or "El Paso" in counties:
print("Arapahoe or El Paso is in the list of counties.")
else:
print("Arapahoe and El Paso are not in the list of counties.")
if "Arapahoe" in counties and "El Paso" not in counties:
print("Only Arapahoe is in the list of counties.")
else:
print("Arapahoe is in the list of counties and El Paso is not in the list of counties.")
"""
#REPETITION STATEMENT
"""
#while loop
x = 0
while x <= 5:
print(x)
x = x + 1
#infinite loop: If you forget to write code inside the loop that makes the test condition false, the while loop will continue to run
#for loop
#the county variable is declared and set equal to the first item in the list of counties, "Arapahoe."
for county in counties:
print(county)
numbers = [0, 1, 2, 3, 4]
for num in numbers:
print(num)
#range
for num in range(5):
print(num)
#Indexing can also be used to iterate through a list
for i in range(len(counties)):
print(counties[i])
#Iterate through a dictionary
counties_dict = {"Arapahoe": 422829, "Denver": 463353, "Jefferson": 432438}
#key
for county in counties_dict:
print(county)
#key
for county in counties_dict.keys():
print(county)
#values
for voters in counties_dict.values():
print(voters)
#values
for county in counties_dict:
print(counties_dict[county])
#values
for county in counties_dict:
print(counties_dict.get(county))
#Get the Key-Value Pairs of a Dictionary
#for key, value in dictionary_name.items():
# print(key, value)
for county, voters in counties_dict.items():
print(county, voters)
#When iterating over a dictionary:
#The first variable declared in the for loop is assigned to the keys.
#The second variable is assigned to the values.
for county, voters in counties_dict.items():
print(str(county) + "county has " + str(voters) + " registered")
voting_data = [{"county":"Arapahoe", "registered_voters": 422829},
{"county":"Denver", "registered_voters": 463353},
{"county":"Jefferson", "registered_voters": 432438}]
#Get Each Dictionary in a List of Dictionaries
for county_dict in voting_data:
print(county_dict)
for i in range(len(voting_data)):
print(voting_data[i]['county'])
#Get the Values from a List of Dictionaries
for county_dict in voting_data:
for value in county_dict.values():
print(value)
#How would you retrieve the number of registered voters from each dictionary?
for county_dict in voting_data:
print(county_dict['registered_voters'])
#If we only want to print the county name from each dictionary, we can use county_dict['county']
for county_dict in voting_data:
print(county_dict['county'])
"""
#Printing Formats
"""
#my_votes = int(input("How many votes did you get in the election? "))
#total_votes = int(input("What is the total votes in the election? "))
#percentage_votes = (my_votes / total_votes) * 100
#print("I received " + str(percentage_votes)+"% of the total votes.")
#print(f"I received {my_votes / total_votes * 100}% of the total votes.")
counties_dict = {"Arapahoe": 369237, "Denver":413229, "Jefferson": 390222}
for county, voters in counties_dict.items():
print(county + " county has " + str(voters) + " registered voters.")
for county, voters in counties_dict.items():
print(f"{county} county has {voters} registered voters.")
candidate_votes = int(input("How many votes did the candidate get in the election? "))
total_votes = int(input("What is the total number of votes in the election? "))
message_to_candidate = (
f"You received {candidate_votes} number of votes. "
f"The total number of votes in the election was {total_votes}. "
f"You received {candidate_votes / total_votes * 100}% of the total votes.")
print(message_to_candidate)
message_to_candidate = (
f"You received {candidate_votes:,} number of votes. "
f"The total number of votes in the election was {total_votes:,}. "
f"You received {candidate_votes / total_votes * 100:.2f}% of the total votes.")
print(message_to_candidate)
"""
| 27.111588 | 132 | 0.677062 | print("Hello World")
# No-op: evaluates the type of the literal 3 and discards the result; only
# useful in an interactive session.
type(3)
#How many votes did you get?
#my_votes = int(input("How many votes did you get in the election?"))
#Total votes in the election
#total_votes = int(input("What is the total votes in the election?"))
#Calculate the percentage of votes you receive
#percentage_votes = (my_votes / total_votes)*100
#print("I received " + str(percentage_votes)+"% of the total votes")
#IF-ELSE STATEMENT
# Practice data: three county names used by the examples below.
counties = ["Arapahoe","Denver","Jefferson"]
# counties[1] is the second element, so this branch runs and prints "Denver".
if counties[1] == 'Denver':
    print(counties[1])
#IndexError: list index out of range
#if counties[3] != 'Jefferson':
#    print(counties[2])
#temperature = int(input("What is the temperature outside? "))
#if temperature > 80:
#    print("Turn on the AC.")
#else:
#    print("Open the windows.")
"""
#What is the score?
score = int(input("What is your test score? "))
# Determine the grade.
if score >= 90:
print('Your grade is an A.')
else:
if score >= 80:
print('Your grade is a B.')
else:
if score >= 70:
print('Your grade is a C.')
else:
if score >= 60:
print('Your grade is a D.')
else:
print('Your grade is an F.')
# What is the score?
score = int(input("What is your test score? "))
# Determine the grade.
if score >= 90:
print('Your grade is an A.')
elif score >= 80:
print('Your grade is a B.')
elif score >= 70:
print('Your grade is a C.')
elif score >= 60:
print('Your grade is a D.')
else:
print('Your grade is an F.')
"""
"""
if "Arapahoe" in counties:
print("True")
else:
print("False")
if "El Paso" not in counties:
print("True")
else:
print("False")
counties = ["Arapahoe","Denver","Jefferson"]
if "El Paso" in counties:
print("El Paso is in the list of counties.")
else:
print("El Paso is not the list of counties.")
x = 5
y = 5
if x == 5 and y == 5:
print("True")
else:
print("False")
if x == 3 or y == 5:
print("True")
else:
print("False")
if not(x > y):
print("True")
else:
print("False")
if "Arapahoe" in counties and "El Paso" in counties:
print("Arapahoe and El Paso are in the list of counties.")
else:
print("Arapahoe or El Paso is not in the list of counties.")
if "Arapahoe" in counties or "El Paso" in counties:
print("Arapahoe or El Paso is in the list of counties.")
else:
print("Arapahoe and El Paso are not in the list of counties.")
if "Arapahoe" in counties and "El Paso" not in counties:
print("Only Arapahoe is in the list of counties.")
else:
print("Arapahoe is in the list of counties and El Paso is not in the list of counties.")
"""
#REPETITION STATEMENT
"""
#while loop
x = 0
while x <= 5:
print(x)
x = x + 1
#infinite loop: If you forget to write code inside the loop that makes the test condition false, the while loop will continue to run
#for loop
#the county variable is declared and set equal to the first item in the list of counties, "Arapahoe."
for county in counties:
print(county)
numbers = [0, 1, 2, 3, 4]
for num in numbers:
print(num)
#range
for num in range(5):
print(num)
#Indexing can also be used to iterate through a list
for i in range(len(counties)):
print(counties[i])
#Iterate through a dictionary
counties_dict = {"Arapahoe": 422829, "Denver": 463353, "Jefferson": 432438}
#key
for county in counties_dict:
print(county)
#key
for county in counties_dict.keys():
print(county)
#values
for voters in counties_dict.values():
print(voters)
#values
for county in counties_dict:
print(counties_dict[county])
#values
for county in counties_dict:
print(counties_dict.get(county))
#Get the Key-Value Pairs of a Dictionary
#for key, value in dictionary_name.items():
# print(key, value)
for county, voters in counties_dict.items():
print(county, voters)
#When iterating over a dictionary:
#The first variable declared in the for loop is assigned to the keys.
#The second variable is assigned to the values.
for county, voters in counties_dict.items():
print(str(county) + "county has " + str(voters) + " registered")
voting_data = [{"county":"Arapahoe", "registered_voters": 422829},
{"county":"Denver", "registered_voters": 463353},
{"county":"Jefferson", "registered_voters": 432438}]
#Get Each Dictionary in a List of Dictionaries
for county_dict in voting_data:
print(county_dict)
for i in range(len(voting_data)):
print(voting_data[i]['county'])
#Get the Values from a List of Dictionaries
for county_dict in voting_data:
for value in county_dict.values():
print(value)
#How would you retrieve the number of registered voters from each dictionary?
for county_dict in voting_data:
print(county_dict['registered_voters'])
#If we only want to print the county name from each dictionary, we can use county_dict['county']
for county_dict in voting_data:
print(county_dict['county'])
"""
#Printing Formats
"""
#my_votes = int(input("How many votes did you get in the election? "))
#total_votes = int(input("What is the total votes in the election? "))
#percentage_votes = (my_votes / total_votes) * 100
#print("I received " + str(percentage_votes)+"% of the total votes.")
#print(f"I received {my_votes / total_votes * 100}% of the total votes.")
counties_dict = {"Arapahoe": 369237, "Denver":413229, "Jefferson": 390222}
for county, voters in counties_dict.items():
print(county + " county has " + str(voters) + " registered voters.")
for county, voters in counties_dict.items():
print(f"{county} county has {voters} registered voters.")
candidate_votes = int(input("How many votes did the candidate get in the election? "))
total_votes = int(input("What is the total number of votes in the election? "))
message_to_candidate = (
f"You received {candidate_votes} number of votes. "
f"The total number of votes in the election was {total_votes}. "
f"You received {candidate_votes / total_votes * 100}% of the total votes.")
print(message_to_candidate)
message_to_candidate = (
f"You received {candidate_votes:,} number of votes. "
f"The total number of votes in the election was {total_votes:,}. "
f"You received {candidate_votes / total_votes * 100:.2f}% of the total votes.")
print(message_to_candidate)
"""
| 0 | 0 | 0 |
1d6c00811acee7425a8c4805d367029265b86709 | 9,760 | py | Python | examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/generate_data_upsample.py | herrkun/ogb | 7e4f25bbc013e76c8f04990e1d9d659a67f5f491 | [
"MIT"
] | null | null | null | examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/generate_data_upsample.py | herrkun/ogb | 7e4f25bbc013e76c8f04990e1d9d659a67f5f491 | [
"MIT"
] | null | null | null | examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/generate_data_upsample.py | herrkun/ogb | 7e4f25bbc013e76c8f04990e1d9d659a67f5f491 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import torch as th
import tqdm
import os
if __name__ == '__main__':
    # CLI entry point: rebuild the validation triples and the upsampled
    # training/validation/finetune triple files.
    import argparse  # local import: argparse is not in this module's imports

    parser = argparse.ArgumentParser(description='.')
    parser.add_argument('--include_val', help='whether include val')
    parser.add_argument('--head_upsample_epochs', help='upample rate')
    parser.add_argument('--t_threshold', help='train score threshold')
    parser.add_argument('--v_threshold', help='val score threshold')
    # BUG FIX: `args` was used below but parse_args() was never called.
    args = parser.parse_args()
    # NOTE(review): argparse yields strings; the generators compare/iterate
    # these values numerically — consider type=int / type=float here.
    generator_val_hrt()
    generator_new_train(args.include_val, args.head_upsample_epochs, args.t_threshold, args.v_threshold)
    generator_new_val(args.include_val, args.head_upsample_epochs, args.t_threshold, args.v_threshold)
    generator_for_finefune(args.include_val, args.head_upsample_epochs, args.t_threshold, args.v_threshold)
| 42.251082 | 128 | 0.684221 | import numpy as np
import pandas as pd
import torch as th
import tqdm
import os
def get_head_score_train(reload=False):
    """Count how often each entity appears as the head of a training triple.

    Returns an (87143637, 1) float array where row e holds the number of
    training triples whose head entity is e.  The result is cached at
    `saved_path`; pass reload=True to reuse an existing cache file.
    """
    saved_path = './dataset/wikikg90m_kddcup2021/processed/trian_head_score_wyk.npy'
    if reload and os.path.isfile(saved_path):
        return np.load(saved_path)
    trip_list = np.load('./dataset/wikikg90m_kddcup2021/processed/train_hrt.npy')
    # PERF: np.bincount replaces the original per-triple Python loop
    # (hundreds of millions of iterations) with one vectorized pass.
    # Assumes non-negative integer ids, as the original indexing did.
    head_score = np.bincount(trip_list[:, 0], minlength=87143637).astype(np.float64).reshape(-1, 1)
    np.save(saved_path, head_score)
    return head_score
def get_tail_score_train(reload=False):
    """Count how often each entity appears as the tail of a training triple.

    Returns an (87143637, 1) float array where row e holds the number of
    training triples whose tail entity is e.  The result is cached at
    `saved_path`; pass reload=True to reuse an existing cache file.
    """
    saved_path = './dataset/wikikg90m_kddcup2021/processed/trian_tail_score_wyk.npy'
    if reload and os.path.isfile(saved_path):
        return np.load(saved_path)
    trip_list = np.load('./dataset/wikikg90m_kddcup2021/processed/train_hrt.npy')
    # PERF: vectorized count instead of the original per-triple Python loop.
    # Assumes non-negative integer ids, as the original indexing did.
    tail_score = np.bincount(trip_list[:, 2], minlength=87143637).astype(np.float64).reshape(-1, 1)
    np.save(saved_path, tail_score)
    return tail_score
def get_head_score_val(reload=False):
    """Count how often each entity appears as the head of a validation query.

    Returns an (87143637, 1) float array of per-entity head counts over
    val_hr.npy.  The result is cached at `saved_path`; pass reload=True to
    reuse an existing cache file.
    """
    saved_path = './dataset/wikikg90m_kddcup2021/processed/val_head_score_wyk.npy'
    if reload and os.path.isfile(saved_path):
        return np.load(saved_path)
    candidate_list = np.load('./dataset/wikikg90m_kddcup2021/processed/val_hr.npy')
    # PERF: vectorized count instead of the original per-row Python loop.
    # Assumes non-negative integer ids, as the original indexing did.
    head_score = np.bincount(candidate_list[:, 0], minlength=87143637).astype(np.float64).reshape(-1, 1)
    np.save(saved_path, head_score)
    print('head count have saved ')
    return head_score
def get_tail_score_val(reload=False):
    """Count how often each entity appears as the true tail of a validation query.

    Note: rebuilds the (h, r, t) validation triples via generator_val_hrt()
    rather than loading the cached val_hrt file.  The result is cached at
    `saved_path`; pass reload=True to reuse an existing cache file.
    """
    saved_path = './dataset/wikikg90m_kddcup2021/processed/val_tail_score_wyk.npy'
    if reload and os.path.isfile(saved_path):
        return np.load(saved_path)
    candidate_list = generator_val_hrt()
    # PERF: vectorized count instead of the original per-row Python loop.
    # Assumes non-negative integer ids, as the original indexing did.
    tail_score = np.bincount(candidate_list[:, 2], minlength=87143637).astype(np.float64).reshape(-1, 1)
    np.save(saved_path, tail_score)
    return tail_score
def generator_val_hrt():
    """Assemble full (h, r, t) validation triples and cache them.

    Joins each validation (h, r) query with its true tail — the candidate
    selected by val_t_correct_index — and saves the result as
    val_hrt_wyk.npy.  Returns the assembled (n, 3) array.
    """
    val_candis = np.load('./dataset/wikikg90m_kddcup2021/processed/val_t_candidate.npy')
    val_correct_index = np.load('./dataset/wikikg90m_kddcup2021/processed/val_t_correct_index.npy')
    val_hr = np.load('./dataset/wikikg90m_kddcup2021/processed/val_hr.npy')
    print('loaded val true candi')
    # PERF: fancy indexing replaces the original per-row Python loop; picks
    # val_candis[i, val_correct_index[i]] for every row i.
    rows = np.arange(val_candis.shape[0])
    val_t = val_candis[rows, val_correct_index].reshape(-1, 1)
    val_hrt = np.concatenate((val_hr, val_t), axis=1)
    np.save('./dataset/wikikg90m_kddcup2021/processed/val_hrt_wyk.npy', val_hrt)
    return val_hrt
def generator_new_train(include_val=True, head_upsample_epochs=2, t_threshold=None, v_threshold=None):
    """Build an upsampled training triple file biased toward validation-like data.

    Pass 1 flags rare tails that appear in the top-10 reranked validation
    predictions (sentinel -1 in val_tail_score).  Pass 2 keeps training
    triples whose tail is rare or flagged.  Pass 3 repeats, for
    `head_upsample_epochs` rounds, every training triple whose head occurs in
    the validation queries.  The shuffled result is saved to
    trian_val_topk_add_h.npy.

    Note: `include_val` is currently unused; it is kept for interface
    compatibility with the CLI call site.
    """
    # BUG FIX: t_threshold/v_threshold had no defaults after defaulted
    # parameters, which is a SyntaxError.  They are required in practice.
    if t_threshold is None or v_threshold is None:
        raise ValueError("t_threshold and v_threshold are required")
    train_tail_score = get_tail_score_train()
    val_tail_score = get_tail_score_val()
    train_trip = np.load('./dataset/wikikg90m_kddcup2021/processed/train_hrt.npy')
    # NOTE(review): val_trip is loaded but never used in this function.
    val_trip = np.load('./dataset/wikikg90m_kddcup2021/processed/val_hrt_wyk.npy')
    new_train_hrt = []
    val_candi = np.load('./dataset/wikikg90m_kddcup2021/processed/val_t_candidate.npy')
    global_index = 0
    filter_topk = 10
    counter1, counter2, counter3, counter4 = 0, 0, 0, 0
    proc_num = 4
    # Pass 1: scan the reranked top-10 predictions from all 4 worker shards.
    for proc_no in range(proc_num):
        ori_res = th.load(f"./ensemble_valid_v2_backup/load_ensemble_valid_rerank_{proc_no}.pkl", map_location=th.device('cpu'))
        ori_res_pred_top10 = ori_res['h,r->t']['t_pred_top10'][:, :10].numpy()
        print(ori_res['h,r->t']['t_pred_top10'].shape)
        res_correct_index = ori_res['h,r->t']['t_correct_index'].numpy()
        for index in tqdm.tqdm(range(res_correct_index.shape[0])):
            for topk_index in range(filter_topk):
                tmp_index = val_candi[global_index][ori_res_pred_top10[index][topk_index]]
                # NOTE(review): this is a *chained* comparison, i.e.
                # (train <= t_threshold + val) and (t_threshold + val <= v_threshold).
                # If the intent was "train <= t_threshold and val <= v_threshold"
                # it needs rewriting — TODO confirm before changing behavior.
                if train_tail_score[tmp_index] <= t_threshold + val_tail_score[tmp_index] <= v_threshold:
                    val_tail_score[tmp_index] = -1
                    counter1 += 1
            global_index += 1
    global_index = 0
    # Pass 2: keep training triples with rare or flagged tails.
    for i in tqdm.tqdm(range(train_trip.shape[0])):
        tmp_train = train_trip[i]
        if (train_tail_score[tmp_train[2]] + val_tail_score[tmp_train[2]] <= t_threshold) or val_tail_score[tmp_train[2]] == -1:
            new_train_hrt.append(tmp_train)
    # Pass 3: upsample triples whose head shows up in validation queries.
    val_head_score = get_head_score_val()
    for epoch in range(head_upsample_epochs):
        for i in tqdm.tqdm(range(train_trip.shape[0])):
            tmp_train = train_trip[i]
            if val_head_score[tmp_train[0]] > 0:
                new_train_hrt.append(tmp_train)
    np.random.shuffle(new_train_hrt)
    try:
        print(len(new_train_hrt))
    except Exception:  # narrowed from a bare except (kept KeyboardInterrupt alive)
        print('errors')
    np.save('./dataset/wikikg90m_kddcup2021/processed/trian_val_topk_add_h.npy', new_train_hrt)
    return 0
def generator_new_val(include_val=True, head_upsample_epochs=2, t_threshold=None, v_threshold=None):
    """Build an upsampled set of validation triples and cache it.

    Pass 1 flags rare tails seen in the top-10 reranked validation
    predictions (sentinel -1 in val_tail_score).  Then, for
    `head_upsample_epochs` rounds, every validation triple whose tail is
    rare or flagged is appended.  The shuffled result is saved to
    upsample_on_val_wyk.npy.

    Note: `include_val` is currently unused; it is kept for interface
    compatibility with the CLI call site.
    """
    # BUG FIX: t_threshold/v_threshold had no defaults after defaulted
    # parameters, which is a SyntaxError.  They are required in practice.
    if t_threshold is None or v_threshold is None:
        raise ValueError("t_threshold and v_threshold are required")
    train_tail_score = get_tail_score_train()
    val_tail_score = get_tail_score_val()
    # NOTE(review): train_trip is loaded but never used in this function.
    train_trip = np.load('./dataset/wikikg90m_kddcup2021/processed/train_hrt.npy')
    val_trip = np.load('./dataset/wikikg90m_kddcup2021/processed/val_hrt_wyk.npy')
    new_train_hrt = []
    val_candi = np.load('./dataset/wikikg90m_kddcup2021/processed/val_t_candidate.npy')
    global_index = 0
    filter_topk = 10
    counter1, counter2, counter3, counter4 = 0, 0, 0, 0
    proc_num = 4
    # Pass 1: scan the reranked top-10 predictions from all 4 worker shards.
    for proc_no in range(proc_num):
        ori_res = th.load(f"./ensemble_valid_v2_backup/load_ensemble_valid_rerank_{proc_no}.pkl", map_location=th.device('cpu'))
        ori_res_pred_top10 = ori_res['h,r->t']['t_pred_top10'][:, :10].numpy()
        print(ori_res['h,r->t']['t_pred_top10'].shape)
        res_correct_index = ori_res['h,r->t']['t_correct_index'].numpy()
        for index in tqdm.tqdm(range(res_correct_index.shape[0])):
            for topk_index in range(filter_topk):
                tmp_index = val_candi[global_index][ori_res_pred_top10[index][topk_index]]
                # NOTE(review): chained comparison — see generator_new_train;
                # TODO confirm the intended predicate before changing it.
                if train_tail_score[tmp_index] <= t_threshold + val_tail_score[tmp_index] <= v_threshold:
                    val_tail_score[tmp_index] = -1
                    counter1 += 1
            global_index += 1
    # Upsample validation triples with rare or flagged tails.
    for epoch in range(head_upsample_epochs):
        for i in tqdm.tqdm(range(val_trip.shape[0])):
            tmp_val = val_trip[i]
            if train_tail_score[tmp_val[2]] + val_tail_score[tmp_val[2]] <= t_threshold or val_tail_score[tmp_val[2]] == -1:
                new_train_hrt.append(tmp_val)
    np.random.shuffle(new_train_hrt)
    try:
        print(len(new_train_hrt))
    except Exception:  # narrowed from a bare except (kept KeyboardInterrupt alive)
        print('errors')
    np.save('./dataset/wikikg90m_kddcup2021/processed/upsample_on_val_wyk.npy', new_train_hrt)
    return 0
def generator_for_finefune(include_val=True, head_upsample_epochs=1, t_threshold=None, v_threshold=None):
    """Build the fine-tuning triple file and cache it.

    Pass 1 flags rare tails seen in the top-10 reranked validation
    predictions (sentinel -1 in val_tail_score).  Training triples whose
    tail was flagged are kept, then every training triple whose head occurs
    in the validation queries (per the cached val_head_score file) is
    appended for `head_upsample_epochs` rounds.  The shuffled result is
    saved to trian_for_finetune.npy.

    Note: `include_val` is currently unused; it is kept for interface
    compatibility with the CLI call site.
    """
    # BUG FIX: t_threshold/v_threshold had no defaults after defaulted
    # parameters, which is a SyntaxError.  They are required in practice.
    if t_threshold is None or v_threshold is None:
        raise ValueError("t_threshold and v_threshold are required")
    train_tail_score = get_tail_score_train()
    val_tail_score = get_tail_score_val()
    train_trip = np.load('./dataset/wikikg90m_kddcup2021/processed/train_hrt.npy')
    # NOTE(review): val_trip is loaded but never used in this function.
    val_trip = np.load('./dataset/wikikg90m_kddcup2021/processed/val_hrt_wyk.npy')
    new_train_hrt = []
    val_candi = np.load('./dataset/wikikg90m_kddcup2021/processed/val_t_candidate.npy')
    global_index = 0
    filter_topk = 10
    # BUG FIX: counter1 was incremented below without ever being
    # initialized, which raised NameError on the first flagged tail.
    counter1 = 0
    proc_num = 4
    # Pass 1: scan the reranked top-10 predictions from all 4 worker shards.
    for proc_no in range(proc_num):
        ori_res = th.load(f"./ensemble_valid_v2_backup/load_ensemble_valid_rerank_{proc_no}.pkl", map_location=th.device('cpu'))
        ori_res_pred_top10 = ori_res['h,r->t']['t_pred_top10'][:, :10].numpy()
        print(ori_res['h,r->t']['t_pred_top10'].shape)
        res_correct_index = ori_res['h,r->t']['t_correct_index'].numpy()
        for index in tqdm.tqdm(range(res_correct_index.shape[0])):
            for topk_index in range(filter_topk):
                tmp_index = val_candi[global_index][ori_res_pred_top10[index][topk_index]]
                # NOTE(review): chained comparison — see generator_new_train;
                # TODO confirm the intended predicate before changing it.
                if train_tail_score[tmp_index] <= t_threshold + val_tail_score[tmp_index] <= v_threshold:
                    val_tail_score[tmp_index] = -1
                    counter1 += 1
            global_index += 1
    # Keep training triples whose tail was flagged in pass 1.
    for i in tqdm.tqdm(range(train_trip.shape[0])):
        tmp_train = train_trip[i]
        if val_tail_score[tmp_train[2]] == -1:
            new_train_hrt.append(tmp_train)
    head_apper_count = 0
    # Upsample triples whose head shows up in validation queries, using the
    # cached head-count file written by get_head_score_val().
    val_head_score = np.load('./dataset/wikikg90m_kddcup2021/processed/val_head_score_wyk.npy')
    for epoch in range(head_upsample_epochs):
        for i in tqdm.tqdm(range(train_trip.shape[0])):
            tmp_train = train_trip[i]
            if val_head_score[tmp_train[0]] > 0:
                new_train_hrt.append(tmp_train)
    np.random.shuffle(new_train_hrt)
    try:
        print(len(new_train_hrt))
    except Exception:  # narrowed from a bare except (kept KeyboardInterrupt alive)
        print('errors')
    np.save('./dataset/wikikg90m_kddcup2021/processed/trian_for_finetune.npy', new_train_hrt)
    return 0
if __name__ == '__main__':
    # CLI entry point: rebuild the validation triples and the upsampled
    # training/validation/finetune triple files.
    import argparse  # local import: argparse is not in this module's imports

    parser = argparse.ArgumentParser(description='.')
    parser.add_argument('--include_val', help='whether include val')
    parser.add_argument('--head_upsample_epochs', help='upample rate')
    parser.add_argument('--t_threshold', help='train score threshold')
    parser.add_argument('--v_threshold', help='val score threshold')
    # BUG FIX: `args` was used below but parse_args() was never called.
    args = parser.parse_args()
    # NOTE(review): argparse yields strings; the generators compare/iterate
    # these values numerically — consider type=int / type=float here.
    generator_val_hrt()
    generator_new_train(args.include_val, args.head_upsample_epochs, args.t_threshold, args.v_threshold)
    generator_new_val(args.include_val, args.head_upsample_epochs, args.t_threshold, args.v_threshold)
    generator_for_finefune(args.include_val, args.head_upsample_epochs, args.t_threshold, args.v_threshold)
| 8,800 | 0 | 184 |
49fd10c36d15ed795406716624c6084585a307d2 | 4,748 | py | Python | fuji_server/helper/metadata_collector_ore_atom.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 25 | 2020-09-22T08:28:45.000Z | 2022-02-23T07:10:28.000Z | fuji_server/helper/metadata_collector_ore_atom.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 188 | 2020-05-11T08:54:59.000Z | 2022-03-31T12:28:15.000Z | fuji_server/helper/metadata_collector_ore_atom.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 20 | 2020-05-04T13:56:26.000Z | 2022-03-02T13:39:04.000Z | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from fuji_server.helper.metadata_collector import MetaDataCollector
import feedparser
class MetaDataCollectorOreAtom(MetaDataCollector):
    """
    A class to collect the Object Reuse and Exchange (ORE) Atom metadata from the data. This class is child class of MetadataCollector.
    ...
    Attributes
    ----------
    source_name : str
        Source name of metadata
    target_url : str
        Target URL of the metadata
    Methods
    --------
    parse_metadata()
        Method to parse the ORE Atom metadata from the data.
    """
    source_name = None
    def __init__(self, loggerinst, target_url):
        """
        Parameters
        ----------
        loggerinst : logging.Logger
            Logger instance
        target_url : str
            Target URL
        """
        #self.is_pid = ispid
        self.target_url = target_url
        super().__init__(logger=loggerinst)
    def parse_metadata(self):
        """Parse the ORE Atom metadata from the data
        Returns
        ------
        str
            a string of source name
        dict
            a dictionary of ORE Atom metadata
        """
        ore_metadata = {}
        if self.target_url:
            self.source_name = self.getEnumSourceNames().OAI_ORE.value
            try:
                feed = feedparser.parse(self.target_url)
                if feed:
                    if feed.get('entries'):
                        # Only a feed with exactly one entry is handled here
                        # (the single ORE resource-map entry).
                        if len(feed.get('entries')) == 1:
                            ore_metadata['title'] = feed.get('entries')[0].get('title')
                            ore_metadata['creator'] = feed.get('entries')[0].get('author')
                            ore_metadata['publisher'] = feed.get('entries')[0].get('source')
                            ore_metadata['publication_date'] = feed.get('entries')[0].get('published')
                            # When a source feed is present, prefer its author
                            # as the publisher (overwrites the value above).
                            if feed.get('entries')[0].get('source'):
                                ore_metadata['publisher'] = feed.get('entries')[0].get('source').get('author')
                            ore_metadata['object_identifier'] = [feed.get('entries')[0].get('id')]
                            if feed.get('entries')[0].get('link'):
                                ore_metadata['object_identifier'].append(feed.get('entries')[0].get('link'))
                            # NOTE(review): this second 'link' block overwrites the
                            # object_identifier *list* built just above with a plain
                            # string whenever the entry link differs from target_url —
                            # presumably unintended; confirm whether consumers expect
                            # a list or a scalar here.
                            if feed.get('entries')[0].get('link'):
                                pid = feed.get('entries')[0].get('link')
                                if pid != self.target_url:
                                    ore_metadata['object_identifier'] = feed.get('entries')[0].get('link')
                            # Aggregated resources (ore/terms/aggregates rel links)
                            # become content identifiers with url/type/size.
                            if feed.get('entries')[0].get('links'):
                                ore_metadata['object_content_identifier'] = []
                                for link in feed.get('entries')[0].get('links'):
                                    if 'ore/terms/aggregates' in str(link.get('rel')):
                                        ore_metadata['object_content_identifier'].append({
                                            'url': str(link.get('href')),
                                            'type': str(link.get('type')),
                                            'size': str(link.get('length'))
                                        })
            except Exception as err:
                #print(err.with_traceback())
                self.logger.info('FsF-F2-01M : Failed to parse OAI ORE XML -: {}'.format(err))
        else:
            self.logger.info('FsF-F2-01M : Could not identify OAI ORE metadata')
        return self.source_name, ore_metadata
| 43.962963 | 135 | 0.555392 | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from fuji_server.helper.metadata_collector import MetaDataCollector
import feedparser
class MetaDataCollectorOreAtom(MetaDataCollector):
    """
    A class to collect the Object Reuse and Exchange (ORE) Atom metadata from the data. This class is child class of MetadataCollector.
    ...
    Attributes
    ----------
    source_name : str
        Source name of metadata
    target_url : str
        Target URL of the metadata
    Methods
    --------
    parse_metadata()
        Method to parse the ORE Atom metadata from the data.
    """
    source_name = None
    def __init__(self, loggerinst, target_url):
        """
        Parameters
        ----------
        loggerinst : logging.Logger
            Logger instance
        target_url : str
            Target URL
        """
        #self.is_pid = ispid
        self.target_url = target_url
        super().__init__(logger=loggerinst)
    def parse_metadata(self):
        """Parse the ORE Atom metadata from the data
        Returns
        ------
        str
            a string of source name
        dict
            a dictionary of ORE Atom metadata
        """
        ore_metadata = {}
        if self.target_url:
            self.source_name = self.getEnumSourceNames().OAI_ORE.value
            try:
                feed = feedparser.parse(self.target_url)
                if feed:
                    if feed.get('entries'):
                        # Only a feed with exactly one entry is handled here
                        # (the single ORE resource-map entry).
                        if len(feed.get('entries')) == 1:
                            ore_metadata['title'] = feed.get('entries')[0].get('title')
                            ore_metadata['creator'] = feed.get('entries')[0].get('author')
                            ore_metadata['publisher'] = feed.get('entries')[0].get('source')
                            ore_metadata['publication_date'] = feed.get('entries')[0].get('published')
                            # When a source feed is present, prefer its author
                            # as the publisher (overwrites the value above).
                            if feed.get('entries')[0].get('source'):
                                ore_metadata['publisher'] = feed.get('entries')[0].get('source').get('author')
                            ore_metadata['object_identifier'] = [feed.get('entries')[0].get('id')]
                            if feed.get('entries')[0].get('link'):
                                ore_metadata['object_identifier'].append(feed.get('entries')[0].get('link'))
                            # NOTE(review): this second 'link' block overwrites the
                            # object_identifier *list* built just above with a plain
                            # string whenever the entry link differs from target_url —
                            # presumably unintended; confirm whether consumers expect
                            # a list or a scalar here.
                            if feed.get('entries')[0].get('link'):
                                pid = feed.get('entries')[0].get('link')
                                if pid != self.target_url:
                                    ore_metadata['object_identifier'] = feed.get('entries')[0].get('link')
                            # Aggregated resources (ore/terms/aggregates rel links)
                            # become content identifiers with url/type/size.
                            if feed.get('entries')[0].get('links'):
                                ore_metadata['object_content_identifier'] = []
                                for link in feed.get('entries')[0].get('links'):
                                    if 'ore/terms/aggregates' in str(link.get('rel')):
                                        ore_metadata['object_content_identifier'].append({
                                            'url': str(link.get('href')),
                                            'type': str(link.get('type')),
                                            'size': str(link.get('length'))
                                        })
            except Exception as err:
                #print(err.with_traceback())
                self.logger.info('FsF-F2-01M : Failed to parse OAI ORE XML -: {}'.format(err))
        else:
            self.logger.info('FsF-F2-01M : Could not identify OAI ORE metadata')
        return self.source_name, ore_metadata
| 0 | 0 | 0 |
c69754a59c03c2beaf5c0cb947fd28754581d0cf | 4,683 | py | Python | optimal/tests/algorithms/test_crossentropy.py | Azzukhan/optimal | 75c59742b3942f12b2dfca9b4d1a282e76a394c9 | [
"MIT"
] | 36 | 2017-09-08T14:47:27.000Z | 2022-03-31T02:12:31.000Z | optimal/tests/algorithms/test_crossentropy.py | Azzukhan/optimal | 75c59742b3942f12b2dfca9b4d1a282e76a394c9 | [
"MIT"
] | 1 | 2020-03-03T04:29:57.000Z | 2020-07-06T03:42:56.000Z | optimal/tests/algorithms/test_crossentropy.py | Azzukhan/optimal | 75c59742b3942f12b2dfca9b4d1a282e76a394c9 | [
"MIT"
] | 14 | 2017-10-03T12:45:20.000Z | 2021-12-16T01:48:00.000Z | ###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2014 Justin Lovinger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import pytest
from optimal import problems, optimize, GenAlg
from optimal.algorithms import crossentropy
@pytest.mark.parametrize('solution,pdf,expected', [
([1, 1], [0.5, 0.5], 0.25),
([0, 0], [0.5, 0.5], 0.25),
([0, 0], [0.0, 0.0], 1.0),
([1, 1], [1.0, 1.0], 1.0),
([1, 1, 1], [1.0, 1.0, 1.0], 1.0),
([1, 1, 1], [0.0, 0.0, 0.0], 0.0),
([0, 0, 0], [1.0, 1.0, 1.0], 0.0),
([0, 0, 0], [0.5, 0.5, 0.5], 0.125),
([1, 1, 1], [0.5, 0.5, 0.5], 0.125),
])
@pytest.mark.parametrize('values,q,expected', [
([0.0, 0.5, 1.0], 1, 0.5),
([0.0, 0.5, 1.0], 0, 1.0),
([0.0, 0.5, 1.0], 2, 0.0),
([1.0, 0.5, 0.0], 0, 1.0),
])
@pytest.mark.parametrize('num_values,q,expected', [(10, 1.0, 0), (10, 0.0, 9),
(10, 0.5, 4)])
@pytest.mark.slowtest()
@pytest.mark.slowtest()
| 37.766129 | 81 | 0.638053 | ###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2014 Justin Lovinger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import pytest
from optimal import problems, optimize, GenAlg
from optimal.algorithms import crossentropy
@pytest.mark.parametrize('solution,pdf,expected', [
    ([1, 1], [0.5, 0.5], 0.25),
    ([0, 0], [0.5, 0.5], 0.25),
    ([0, 0], [0.0, 0.0], 1.0),
    ([1, 1], [1.0, 1.0], 1.0),
    ([1, 1, 1], [1.0, 1.0, 1.0], 1.0),
    ([1, 1, 1], [0.0, 0.0, 0.0], 0.0),
    ([0, 0, 0], [1.0, 1.0, 1.0], 0.0),
    ([0, 0, 0], [0.5, 0.5, 0.5], 0.125),
    ([1, 1, 1], [0.5, 0.5, 0.5], 0.125),
])
def test_chance(solution, pdf, expected):
    """_chance returns the probability of sampling `solution` from `pdf`."""
    probability = crossentropy._chance(solution, pdf)
    assert probability == expected
@pytest.mark.parametrize('values,q,expected', [
    ([0.0, 0.5, 1.0], 1, 0.5),
    ([0.0, 0.5, 1.0], 0, 1.0),
    ([0.0, 0.5, 1.0], 2, 0.0),
    ([1.0, 0.5, 0.0], 0, 1.0),
])
def test_quantile_cutoff(values, q, expected):
    """_get_quantile_cutoff picks the cutoff value for quantile index q."""
    cutoff = crossentropy._get_quantile_cutoff(values, q)
    assert cutoff == expected
@pytest.mark.parametrize('num_values,q,expected', [(10, 1.0, 0), (10, 0.0, 9),
                                                   (10, 0.5, 4)])
def test_get_quantile_offset(num_values, q, expected):
    """_get_quantile_offset maps a quantile q onto an index into num_values items."""
    offset = crossentropy._get_quantile_offset(num_values, q)
    assert offset == expected
def test_best_pdf():
    """_best_pdf returns the pdf paired with the best-fitness solution."""
    solutions = [[1, 1], [0, 1], [0, 0]]
    # (pdfs, fitnesses, expected best pdf) triples
    cases = [
        ([[1.0, 1.0], [0.5, 0.5], [0.0, 0.0]], [1.0, 0.5, 0.25], [1.0, 1.0]),
        ([[1.0, 1.0], [0.5, 0.5], [0.0, 0.0]], [0.25, 0.5, 1.0], [0.0, 0.0]),
        ([[1.0, 1.0], [0.5, 1.0], [0.0, 0.0]], [1.0, 0.5, 0.25], [0.5, 1.0]),
    ]
    for pdfs, fitnesses, expected in cases:
        assert crossentropy._best_pdf(pdfs, solutions, fitnesses, 0.4) == expected
def test_crossentropy_sphere():
    """CrossEntropy should solve the binary sphere problem."""
    log_every_100 = lambda *args: optimize._print_fitnesses(*args, frequency=100)
    optimizer = crossentropy.CrossEntropy(32, population_size=20)
    optimizer.optimize(problems.sphere_binary,
                       max_iterations=1000,
                       logging_func=log_every_100)
    assert optimizer.solution_found
@pytest.mark.slowtest()
def test_crossentropy_problems():
    """Attempt to solve benchmark problems and assert solutions are found.

    NOTE: since crossentropy is not very effective, it gets simpler problems.
    """
    log_every_100 = lambda *args: optimize._print_fitnesses(*args, frequency=100)
    optimizer = crossentropy.CrossEntropy(32, population_size=20)
    optimizer.optimize(problems.sphere_binary,
                       max_iterations=1000,
                       logging_func=log_every_100)
    assert optimizer.solution_found
    # TODO: test other functions
@pytest.mark.slowtest()
def test_metaoptimize_crossentropy():
    """optimize_hyperparameters should change the optimizer's hyperparameters."""
    optimizer = crossentropy.CrossEntropy(32)
    baseline_hyperparameters = optimizer._get_hyperparameters()

    # Baseline run without meta-optimization; remember iterations to solution.
    optimizer.optimize(problems.sphere_binary)
    baseline_iterations = optimizer.iteration

    # Meta-optimize, then run again and check hyperparameters changed.
    optimizer.optimize_hyperparameters(
        problems.sphere_binary,
        smoothing=1,
        max_iterations=1,
        _meta_optimizer=GenAlg(None, population_size=2))
    optimizer.optimize(problems.sphere_binary)

    assert optimizer._get_hyperparameters() != baseline_hyperparameters
    #assert optimizer.iteration < baseline_iterations # Improvements are made
| 2,380 | 0 | 156 |
7d939d18a50e10b970f034f3594585342f78e9bf | 3,570 | py | Python | countess/tests/test_module_ambivert_aligner.py | VariantEffect/Enrich2-py3 | 5f8534c8c9259d90d99d70e5bd9140fd0fdc8ea4 | [
"BSD-3-Clause"
] | 4 | 2020-01-14T19:24:07.000Z | 2020-01-16T18:11:35.000Z | countess/tests/test_module_ambivert_aligner.py | VariantEffect/CountESS | 5f8534c8c9259d90d99d70e5bd9140fd0fdc8ea4 | [
"BSD-3-Clause"
] | 3 | 2020-01-01T10:38:15.000Z | 2020-01-03T09:45:41.000Z | countess/tests/test_module_ambivert_aligner.py | VariantEffect/CountESS | 5f8534c8c9259d90d99d70e5bd9140fd0fdc8ea4 | [
"BSD-3-Clause"
] | 1 | 2022-02-20T00:35:24.000Z | 2022-02-20T00:35:24.000Z | import unittest
from ..sequence.aligner import Aligner
if __name__ == "__main__":
unittest.main()
| 34.660194 | 69 | 0.480112 | import unittest
from ..sequence.aligner import Aligner
class TestAlignerModule(unittest.TestCase):
    """Tests for the ambivert alignment backend of Aligner.

    Each test aligns two short sequences and checks the returned traceback,
    a list of (ref_index, query_index, operation, length) tuples.

    FIX: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        self.aligner = Aligner(backend="ambivert")

    def tearDown(self):
        pass

    def test_correct_alignment_insertion(self):
        trace = self.aligner.align("ATG", "ACTG")
        expected_trace = [
            (0, 0, "match", None),
            (0, 1, "insertion", 1),
            (1, 2, "match", None),
            (2, 3, "match", None),
        ]
        self.assertEqual(trace, expected_trace)

    def test_correct_alignment_deletion(self):
        trace = self.aligner.align("ACTG", "ATG")
        expected_trace = [
            (0, 0, "match", None),
            (1, 0, "deletion", 1),
            (2, 1, "match", None),
            (3, 2, "match", None),
        ]
        self.assertEqual(trace, expected_trace)

    def test_correct_alignment_mismatch(self):
        trace = self.aligner.align("ATG", "ACG")
        expected_trace = [
            (0, 0, "match", None),
            (1, 1, "mismatch", None),
            (2, 2, "match", None),
        ]
        self.assertEqual(trace, expected_trace)

    def test_correct_alignment_exact_match(self):
        trace = self.aligner.align("ATG", "ATG")
        expected_trace = [
            (0, 0, "match", None),
            (1, 1, "match", None),
            (2, 2, "match", None),
        ]
        self.assertEqual(trace, expected_trace)

    def test_typeerror_non_string_input(self):
        with self.assertRaises(TypeError):
            self.aligner.align(123, "ATG")
        with self.assertRaises(TypeError):
            self.aligner.align("ATG", 123)
        with self.assertRaises(TypeError):
            self.aligner.align("ATG", None)

    def test_valueerror_empty_input(self):
        with self.assertRaises(ValueError):
            self.aligner.align("", "ATG")
        with self.assertRaises(ValueError):
            self.aligner.align("ATG", "")
        with self.assertRaises(ValueError):
            self.aligner.align("", "")

    def test_lower_upper_string_characters_considered_equal(self):
        trace = self.aligner.align("ATG", "atg")
        expected_trace = [
            (0, 0, "match", None),
            (1, 1, "match", None),
            (2, 2, "match", None),
        ]
        self.assertEqual(trace, expected_trace)

    def test_value_error_missing_gap_penalty(self):
        # similarity matrix without a gap character row must be rejected
        simple_similarity = {
            "A": {"A": 1, "C": -1, "G": -1, "T": -1, "N": 0, "X": 0},
            "C": {"A": -1, "C": 1, "G": -1, "T": -1, "N": 0, "X": 0},
            "G": {"A": -1, "C": -1, "G": 1, "T": -1, "N": 0, "X": 0},
            "T": {"A": -1, "C": -1, "G": -1, "T": 1, "N": 0, "X": 0},
            "N": {"A": 0, "C": 0, "G": 0, "T": 0, "N": 0, "X": 0},
            "X": {"A": 0, "C": 0, "G": 0, "T": 0, "N": 0, "X": 0},
        }
        with self.assertRaises(ValueError):
            Aligner(simple_similarity)

    def test_value_error_asymmetric_scoring(self):
        # rows with differing key sets (asymmetric matrix) must be rejected
        simple_similarity = {
            "A": {"A": 1, "C": -1, "G": -1, "T": -1, "N": 0},
            "C": {"A": -1, "C": 1, "G": -1, "T": -1, "N": 0},
            "G": {"A": -1, "C": -1, "G": 1, "T": -1, "N": 0, "X": 0},
            "T": {"A": -1, "C": -1, "G": -1, "T": 1, "N": 0, "X": 0},
            "N": {"A": 0, "C": 0, "G": 0, "T": 0, "N": 0, "X": 0},
            "X": {"A": 0, "C": 0, "G": 0, "T": 0, "N": 0, "X": 0},
        }
        with self.assertRaises(ValueError):
            Aligner(simple_similarity)
if __name__ == "__main__":
unittest.main()
| 3,123 | 22 | 319 |
4a1e4997c8f99c301b78532e6e6ac79f8e9dde27 | 5,480 | py | Python | twitterAuthorization/main.py | actlaboratory/twitter-authorization | aa78fd569576d6d81fbf1ace55ad0511e8498b56 | [
"MIT"
] | null | null | null | twitterAuthorization/main.py | actlaboratory/twitter-authorization | aa78fd569576d6d81fbf1ace55ad0511e8498b56 | [
"MIT"
] | null | null | null | twitterAuthorization/main.py | actlaboratory/twitter-authorization | aa78fd569576d6d81fbf1ace55ad0511e8498b56 | [
"MIT"
] | null | null | null | import threading
import wsgiref.util
from wsgiref.simple_server import make_server
import http.server
import socketserver
import socket
import tweepy
import urllib.parse
def server_bind(self):
    """Override server_bind to fix UnicodeDecodeError when computer name has non-ascii characters."""
    # Let TCPServer do the actual bind; it refreshes self.server_address
    # with the real bound address (e.g. the OS-chosen ephemeral port).
    socketserver.TCPServer.server_bind(self)
    bound_host, bound_port = self.server_address[:2]
    try:
        resolved_name = socket.getfqdn(bound_host)
    except UnicodeDecodeError:
        # getfqdn can fail on machines whose name is not ASCII-decodable.
        resolved_name = "localhost"
    self.server_name = resolved_name
    self.server_port = bound_port
# Monkey-patch HTTPServer so every instance uses the non-ASCII-safe bind above.
http.server.HTTPServer.server_bind = server_bind
class _RedirectWSGIApp(object):
    """
    WSGI app to handle the authorization redirect.

    Stores the request URI and displays the given success message.

    BUG FIX: ``query`` is now initialised before the ``try`` block in
    ``__call__``; the original referenced it inside the ``except`` handler,
    raising a NameError whenever request parsing failed before ``query``
    was bound (e.g. a malformed WSGI environ).
    """

    def __init__(self, port, hook, failedHook):
        """
        Args:
            port (int): The port number That receive request
            hook (callable): The function when got token
            failedHook (callable): The function when authorization failed (ex: disagreed authorize)
        """
        self.successMessage="Authorization successful. Close this window and go back to your application."
        self.failedMessage="Authorization failed. Please try again."
        self.transferMessage="If the screen does not change after a while, open this page in another browser."
        self.lang = "ja"
        self.port = port
        self.hook = hook
        self.failedHook = failedHook

    def setMessage(self, lang, success, failed, transfer):
        """
        Set Message that viewd in browser

        Args:
            lang (string): The message language code (ex:ja,en,...)
            success (string): The success message
            failed (string): The failed message
            transfer (string): The transfer error message that appear in old or Javascript disabled browser
        """
        self.lang=lang
        self.successMessage=success
        self.failedMessage=failed
        self.transferMessage=transfer

    def __call__(self, environ, start_response):
        """
        Args:
            environ (Mapping[str, Any]): The WSGI environment.
            start_response (Callable[str, list]): The WSGI start_response
                callable.

        Returns:
            Iterable[bytes]: The response body.
        """
        query = ""  # pre-bind so the except handler can always reference it
        try:
            uri = wsgiref.util.request_uri(environ)
            query = urllib.parse.urlparse(uri).query
            queryDic = urllib.parse.parse_qs(query)
            # reaching this point without an exception means a valid request;
            # hand the parsed query to the server-side hook
            if query != "":
                self.hook(queryDic)
            start_response('200 OK', [('Content-type', 'text/html; charset=utf-8')])
            response=[("<html lang='"+self.lang+"'><head><title>Authorization result</title><meta charset='utf-8'></head><body>"+self.successMessage+"<script><!--\n").encode('utf-8')]
            response.append("window.close()\n".encode("utf-8"))
            response.append("--></script></body></html>".encode("utf-8"))
            return response
        except Exception as e:
            # ignore noise requests such as favicon.ico (empty query)
            if query != "":
                self.failedHook()
            start_response('400 Bad Request', [('Content-type', 'text/html; charset=utf-8')])
            return [("<html lang='"+self.lang+"'><head><title>Authorization result</title><meta charset='utf-8'></head><body>"+self.failedMessage+"</body></html>").encode('utf-8')]
| 30.444444 | 174 | 0.722628 | import threading
import wsgiref.util
from wsgiref.simple_server import make_server
import http.server
import socketserver
import socket
import tweepy
import urllib.parse
def server_bind(self):
"""Override server_bind to fix UnicodeDecodeError when computer name has non-ascii characters."""
socketserver.TCPServer.server_bind(self)
host, port = self.server_address[:2]
try:
self.server_name = socket.getfqdn(host)
except UnicodeDecodeError:
self.server_name = "localhost"
self.server_port = port
http.server.HTTPServer.server_bind = server_bind
class TwitterAuthorization:
def __init__(self,consumerKey,consumerSecret,receivePort):
"""
Args:
consumerKey (string): The consumerKey from Twitter developper portal
consumerSecret (string): The consumerSecret from Twitter developper portal
receivedPort (string): The port number to receive request
"""
self.result=None
self.key = consumerKey
self.secret = consumerSecret
self.port = receivePort
self.localServer = None
#generate request URL
self.tweepy = tweepy.OAuthHandler(self.key, self.secret,"http://localhost:%d" % self.port)
try:
self.url = self.tweepy.get_authorization_url()
except tweepy.TweepError as e:
raise Exception(e)
#start local web server
self.wsgi_app = _RedirectWSGIApp(
self.port,
self._registToken,
self._failedRequest
)
self.localServer = wsgiref.simple_server.make_server("localhost", self.port, self.wsgi_app, handler_class=_WSGIRequestHandler)
thread = threading.Thread(target=self._localServerThread,args=(self.localServer,))
thread.start()
def setMessage(self,lang,success,failed,transfer):
"""
Set Message that viewd in browser
Args:
lang (string): The message language code (ex:ja,en,...)
success (string): The success message
failed (string): The failed message
transfer (string): The transfer error message that appear in old or Javascript disabled browser
"""
self.wsgi_app.setMessage(lang,success,failed,transfer)
def getUrl(self):
"""
Get Authorization url
Returns:
AuthorizationUrl (string)
"""
return self.url
def getToken(self):
"""
Get accesstoken (success), "" (failed) or None (waiting)
If returned "" and the browser stays open, software should close that.
Returns:
tokenData (dict) or None
"""
if self.result!=None:
self.shutdown()
return self.result
def _registToken(self,result):
self.result = self.tweepy.get_access_token(result["oauth_verifier"][0])
#(result["oauth_token"][0]
def _failedRequest(self):
self.result=""
def __del__(self):
self.shutdown()
def shutdown(self):
if self.localServer:
self.localServer.shutdown()
self.localServer=None
def _localServerThread(self,server):
server.serve_forever()
class _WSGIRequestHandler(wsgiref.simple_server.WSGIRequestHandler):
    """Request handler that closes every connection and silences logging."""

    def __init__(self, *args, **argv):
        # BUG FIX: keyword arguments must be forwarded with ** -- the original
        # ``super().__init__(*args,*argv)`` unpacked the kwargs dict's *keys*
        # as extra positional arguments.
        super().__init__(*args, **argv)
        # Close the connection after each request (no keep-alive).
        # NOTE(review): BaseRequestHandler.__init__ already handled the request
        # by the time we get here -- confirm this flag still has an effect.
        self.close_connection = True

    def log_message(self, *args):
        # disable logger
        pass
class _RedirectWSGIApp(object):
    """
    WSGI app to handle the authorization redirect.

    Stores the request URI and displays the given success message.

    BUG FIX: ``query`` is now initialised before the ``try`` block in
    ``__call__``; the original referenced it inside the ``except`` handler,
    raising a NameError whenever request parsing failed before ``query``
    was bound (e.g. a malformed WSGI environ).
    """

    def __init__(self, port, hook, failedHook):
        """
        Args:
            port (int): The port number That receive request
            hook (callable): The function when got token
            failedHook (callable): The function when authorization failed (ex: disagreed authorize)
        """
        self.successMessage="Authorization successful. Close this window and go back to your application."
        self.failedMessage="Authorization failed. Please try again."
        self.transferMessage="If the screen does not change after a while, open this page in another browser."
        self.lang = "ja"
        self.port = port
        self.hook = hook
        self.failedHook = failedHook

    def setMessage(self, lang, success, failed, transfer):
        """
        Set Message that viewd in browser

        Args:
            lang (string): The message language code (ex:ja,en,...)
            success (string): The success message
            failed (string): The failed message
            transfer (string): The transfer error message that appear in old or Javascript disabled browser
        """
        self.lang=lang
        self.successMessage=success
        self.failedMessage=failed
        self.transferMessage=transfer

    def __call__(self, environ, start_response):
        """
        Args:
            environ (Mapping[str, Any]): The WSGI environment.
            start_response (Callable[str, list]): The WSGI start_response
                callable.

        Returns:
            Iterable[bytes]: The response body.
        """
        query = ""  # pre-bind so the except handler can always reference it
        try:
            uri = wsgiref.util.request_uri(environ)
            query = urllib.parse.urlparse(uri).query
            queryDic = urllib.parse.parse_qs(query)
            # reaching this point without an exception means a valid request;
            # hand the parsed query to the server-side hook
            if query != "":
                self.hook(queryDic)
            start_response('200 OK', [('Content-type', 'text/html; charset=utf-8')])
            response=[("<html lang='"+self.lang+"'><head><title>Authorization result</title><meta charset='utf-8'></head><body>"+self.successMessage+"<script><!--\n").encode('utf-8')]
            response.append("window.close()\n".encode("utf-8"))
            response.append("--></script></body></html>".encode("utf-8"))
            return response
        except Exception as e:
            # ignore noise requests such as favicon.ico (empty query)
            if query != "":
                self.failedHook()
            start_response('400 Bad Request', [('Content-type', 'text/html; charset=utf-8')])
            return [("<html lang='"+self.lang+"'><head><title>Authorization result</title><meta charset='utf-8'></head><body>"+self.failedMessage+"</body></html>").encode('utf-8')]
| 413 | 1,990 | 93 |
4c06ea43b6a000ef12083d1c9c89f27554ea72f6 | 35,045 | py | Python | f-denser/fast_denser/utils.py | fillassuncao/fast-denser3 | 5cb83d1df6b17ec8a40db41d86e4b57b0df90219 | [
"Apache-2.0"
] | 9 | 2019-11-19T12:18:15.000Z | 2021-04-21T17:15:37.000Z | f-denser/fast_denser/utils.py | fillassuncao/fast-denser3 | 5cb83d1df6b17ec8a40db41d86e4b57b0df90219 | [
"Apache-2.0"
] | 1 | 2022-02-09T23:35:00.000Z | 2022-02-09T23:35:00.000Z | f-denser/fast_denser/utils.py | fillassuncao/fast-denser3 | 5cb83d1df6b17ec8a40db41d86e4b57b0df90219 | [
"Apache-2.0"
] | 6 | 2019-11-19T12:21:00.000Z | 2022-03-23T21:58:06.000Z | # Copyright 2019 Filipe Assuncao
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import keras
from keras import backend
from time import time
import tensorflow as tf
import numpy as np
from keras.callbacks import Callback, ModelCheckpoint
import os
from fast_denser.utilities.data import load_dataset
from multiprocessing import Pool
import contextlib
#TODO: future -- impose memory constraints
# tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=50)])
DEBUG = False
class TimedStopping(keras.callbacks.Callback):
    """
    Stop training when maximum time has passed.
    Code from:
        https://github.com/keras-team/keras-contrib/issues/87

    BUG FIX: the original called ``super(keras.callbacks.Callback, self)``,
    which starts the MRO lookup *after* Callback and therefore skipped
    Callback.__init__ entirely; the zero-argument ``super()`` runs it.

    Attributes
    ----------
    start_time : float
        time when the training started
    seconds : float
        maximum time before stopping.
    verbose : bool
        verbosity mode.

    Methods
    -------
    on_train_begin(logs)
        method called upon training beginning
    on_epoch_end(epoch, logs={})
        method called after the end of each training epoch
    """

    def __init__(self, seconds=None, verbose=0):
        """
        Parameters
        ----------
        seconds : float
            maximum time before stopping.
        verbose : bool
            verbosity mode
        """
        super().__init__()
        self.start_time = 0
        self.seconds = seconds
        self.verbose = verbose

    def on_train_begin(self, logs={}):
        """
        Method called upon training beginning

        Parameters
        ----------
        logs : dict
            training logs
        """
        self.start_time = time()

    def on_epoch_end(self, epoch, logs={}):
        """
        Method called after the end of each training epoch.
        Checks if the maximum time has passed

        Parameters
        ----------
        epoch : int
            current epoch
        logs : dict
            training logs
        """
        if time() - self.start_time > self.seconds:
            self.model.stop_training = True
            if self.verbose:
                print('Stopping after %s seconds.' % self.seconds)
class Evaluator:
"""
Stores the dataset, maps the phenotype into a trainable model, and
evaluates it
Attributes
----------
dataset : dict
dataset instances and partitions
fitness_metric : function
fitness_metric (y_true, y_pred)
y_pred are the confidences
Methods
-------
get_layers(phenotype)
parses the phenotype corresponding to the layers
auxiliary function of the assemble_network function
get_learning(learning)
parses the phenotype corresponding to the learning
auxiliary function of the assemble_optimiser function
assemble_network(keras_layers, input_size)
maps the layers phenotype into a keras model
assemble_optimiser(learning)
maps the learning into a keras optimiser
evaluate(phenotype, load_prev_weights, weights_save_path, parent_weights_path,
train_time, num_epochs, datagen=None, input_size=(32, 32, 3))
evaluates the keras model using the keras optimiser
testing_performance(self, model_path)
compute testing performance of the model
"""
    def __init__(self, dataset, fitness_metric):
        """
        Creates the Evaluator instance and loads the dataset.

        Parameters
        ----------
        dataset : str
            dataset to be loaded
        fitness_metric : function
            fitness metric with signature (y_true, y_pred), where y_pred
            are the confidences
        """
        self.dataset = load_dataset(dataset)
        self.fitness_metric = fitness_metric
def get_layers(self, phenotype):
"""
Parses the phenotype corresponding to the layers.
Auxiliary function of the assemble_network function.
Parameters
----------
phenotye : str
individual layers phenotype
Returns
-------
layers : list
list of tuples (layer_type : str, node properties : dict)
"""
raw_phenotype = phenotype.split(' ')
idx = 0
first = True
node_type, node_val = raw_phenotype[idx].split(':')
layers = []
while idx < len(raw_phenotype):
if node_type == 'layer':
if not first:
layers.append((layer_type, node_properties))
else:
first = False
layer_type = node_val
node_properties = {}
else:
node_properties[node_type] = node_val.split(',')
idx += 1
if idx < len(raw_phenotype):
node_type, node_val = raw_phenotype[idx].split(':')
layers.append((layer_type, node_properties))
return layers
def get_learning(self, learning):
"""
Parses the phenotype corresponding to the learning
Auxiliary function of the assemble_optimiser function
Parameters
----------
learning : str
learning phenotype of the individual
Returns
-------
learning_params : dict
learning parameters
"""
raw_learning = learning.split(' ')
idx = 0
learning_params = {}
while idx < len(raw_learning):
param_name, param_value = raw_learning[idx].split(':')
learning_params[param_name] = param_value.split(',')
idx += 1
for _key_ in sorted(list(learning_params.keys())):
if len(learning_params[_key_]) == 1:
try:
learning_params[_key_] = eval(learning_params[_key_][0])
except NameError:
learning_params[_key_] = learning_params[_key_][0]
return learning_params
def assemble_network(self, keras_layers, input_size):
"""
Maps the layers phenotype into a keras model
Parameters
----------
keras_layers : list
output from get_layers
input_size : tuple
network input shape
Returns
-------
model : keras.models.Model
keras trainable model
"""
#input layer
inputs = keras.layers.Input(shape=input_size)
#Create layers -- ADD NEW LAYERS HERE
layers = []
for layer_type, layer_params in keras_layers:
#convolutional layer
if layer_type == 'conv':
conv_layer = keras.layers.Conv2D(filters=int(layer_params['num-filters'][0]),
kernel_size=(int(layer_params['filter-shape'][0]), int(layer_params['filter-shape'][0])),
strides=(int(layer_params['stride'][0]), int(layer_params['stride'][0])),
padding=layer_params['padding'][0],
activation=layer_params['act'][0],
use_bias=eval(layer_params['bias'][0]),
kernel_initializer='he_normal',
kernel_regularizer=keras.regularizers.l2(0.0005))
layers.append(conv_layer)
#batch-normalisation
elif layer_type == 'batch-norm':
#TODO - check because channels are not first
batch_norm = keras.layers.BatchNormalization()
layers.append(batch_norm)
#average pooling layer
elif layer_type == 'pool-avg':
pool_avg = keras.layers.AveragePooling2D(pool_size=(int(layer_params['kernel-size'][0]), int(layer_params['kernel-size'][0])),
strides=int(layer_params['stride'][0]),
padding=layer_params['padding'][0])
layers.append(pool_avg)
#max pooling layer
elif layer_type == 'pool-max':
pool_max = keras.layers.MaxPooling2D(pool_size=(int(layer_params['kernel-size'][0]), int(layer_params['kernel-size'][0])),
strides=int(layer_params['stride'][0]),
padding=layer_params['padding'][0])
layers.append(pool_max)
#fully-connected layer
elif layer_type == 'fc':
fc = keras.layers.Dense(int(layer_params['num-units'][0]),
activation=layer_params['act'][0],
use_bias=eval(layer_params['bias'][0]),
kernel_initializer='he_normal',
kernel_regularizer=keras.regularizers.l2(0.0005))
layers.append(fc)
#dropout layer
elif layer_type == 'dropout':
dropout = keras.layers.Dropout(rate=min(0.5, float(layer_params['rate'][0])))
layers.append(dropout)
#gru layer #TODO: initializers, recurrent dropout, dropout, unroll, reset_after
elif layer_type == 'gru':
gru = keras.layers.GRU(units=int(layer_params['units'][0]),
activation=layer_params['act'][0],
recurrent_activation=layer_params['rec_act'][0],
use_bias=eval(layer_params['bias'][0]))
layers.append(gru)
#lstm layer #TODO: initializers, recurrent dropout, dropout, unroll, reset_after
elif layer_type == 'lstm':
lstm = keras.layers.LSTM(units=int(layer_params['units'][0]),
activation=layer_params['act'][0],
recurrent_activation=layer_params['rec_act'][0],
use_bias=eval(layer_params['bias'][0]))
layers.append(lstm)
#rnn #TODO: initializers, recurrent dropout, dropout, unroll, reset_after
elif layer_type == 'rnn':
rnn = keras.layers.SimpleRNN(units=int(layer_params['units'][0]),
activation=layer_params['act'][0],
use_bias=eval(layer_params['bias'][0]))
layers.append(rnn)
elif layer_type == 'conv1d': #todo initializer
conv1d = keras.layers.Conv1D(filters=int(layer_params['num-filters'][0]),
kernel_size=int(layer_params['kernel-size'][0]),
strides=int(layer_params['strides'][0]),
padding=layer_params['padding'][0],
activation=layer_params['activation'][0],
use_bias=eval(layer_params['bias'][0]))
layers.add(conv1d)
#END ADD NEW LAYERS
#Connection between layers
for layer in keras_layers:
layer[1]['input'] = list(map(int, layer[1]['input']))
first_fc = True
data_layers = []
invalid_layers = []
for layer_idx, layer in enumerate(layers):
try:
if len(keras_layers[layer_idx][1]['input']) == 1:
if keras_layers[layer_idx][1]['input'][0] == -1:
data_layers.append(layer(inputs))
else:
if keras_layers[layer_idx][0] == 'fc' and first_fc:
first_fc = False
flatten = keras.layers.Flatten()(data_layers[keras_layers[layer_idx][1]['input'][0]])
data_layers.append(layer(flatten))
continue
data_layers.append(layer(data_layers[keras_layers[layer_idx][1]['input'][0]]))
else:
#Get minimum shape: when merging layers all the signals are converted to the minimum shape
minimum_shape = input_size[0]
for input_idx in keras_layers[layer_idx][1]['input']:
if input_idx != -1 and input_idx not in invalid_layers:
if data_layers[input_idx].shape[-3:][0] < minimum_shape:
minimum_shape = int(data_layers[input_idx].shape[-3:][0])
#Reshape signals to the same shape
merge_signals = []
for input_idx in keras_layers[layer_idx][1]['input']:
if input_idx == -1:
if inputs.shape[-3:][0] > minimum_shape:
actual_shape = int(inputs.shape[-3:][0])
merge_signals.append(keras.layers.MaxPooling2D(pool_size=(actual_shape-(minimum_shape-1), actual_shape-(minimum_shape-1)), strides=1)(inputs))
else:
merge_signals.append(inputs)
elif input_idx not in invalid_layers:
if data_layers[input_idx].shape[-3:][0] > minimum_shape:
actual_shape = int(data_layers[input_idx].shape[-3:][0])
merge_signals.append(keras.layers.MaxPooling2D(pool_size=(actual_shape-(minimum_shape-1), actual_shape-(minimum_shape-1)), strides=1)(data_layers[input_idx]))
else:
merge_signals.append(data_layers[input_idx])
if len(merge_signals) == 1:
merged_signal = merge_signals[0]
elif len(merge_signals) > 1:
merged_signal = keras.layers.concatenate(merge_signals)
else:
merged_signal = data_layers[-1]
data_layers.append(layer(merged_signal))
except ValueError as e:
data_layers.append(data_layers[-1])
invalid_layers.append(layer_idx)
if DEBUG:
print(keras_layers[layer_idx][0])
print(e)
model = keras.models.Model(inputs=inputs, outputs=data_layers[-1])
if DEBUG:
model.summary()
return model
def assemble_optimiser(self, learning):
"""
Maps the learning into a keras optimiser
Parameters
----------
learning : dict
output of get_learning
Returns
-------
optimiser : keras.optimizers.Optimizer
keras optimiser that will be later used to train the model
"""
if learning['learning'] == 'rmsprop':
return keras.optimizers.RMSprop(learning_rate = float(learning['lr']),
rho = float(learning['rho']),
decay = float(learning['decay']))
elif learning['learning'] == 'gradient-descent':
return keras.optimizers.SGD(learning_rate = float(learning['lr']),
momentum = float(learning['momentum']),
decay = float(learning['decay']),
nesterov = bool(learning['nesterov']))
elif learning['learning'] == 'adam':
return keras.optimizers.Adam(learning_rate = float(learning['lr']),
beta_1 = float(learning['beta1']),
beta_2 = float(learning['beta2']),
decay = float(learning['decay']))
def evaluate(self, phenotype, load_prev_weights, weights_save_path, parent_weights_path,\
train_time, num_epochs, datagen=None, datagen_test = None, input_size=(32, 32, 3)): #pragma: no cover
"""
Evaluates the keras model using the keras optimiser
Parameters
----------
phenotype : str
individual phenotype
load_prev_weights : bool
resume training from a previous train or not
weights_save_path : str
path where to save the model weights after training
parent_weights_path : str
path to the weights of the previous training
train_time : float
maximum training time
num_epochs : int
maximum number of epochs
datagen : keras.preprocessing.image.ImageDataGenerator
Data augmentation method image data generator
input_size : tuple
dataset input shape
Returns
-------
score_history : dict
training data: loss and accuracy
"""
model_phenotype, learning_phenotype = phenotype.split('learning:')
learning_phenotype = 'learning:'+learning_phenotype.rstrip().lstrip()
model_phenotype = model_phenotype.rstrip().lstrip().replace(' ', ' ')
keras_layers = self.get_layers(model_phenotype)
keras_learning = self.get_learning(learning_phenotype)
batch_size = int(keras_learning['batch_size'])
if load_prev_weights and os.path.exists(parent_weights_path.replace('.hdf5', '.h5')):
model = keras.models.load_model(parent_weights_path.replace('.hdf5', '.h5'))
else:
if load_prev_weights:
num_epochs = 0
model = self.assemble_network(keras_layers, input_size)
opt = self.assemble_optimiser(keras_learning)
model.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
#early stopping
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=int(keras_learning['early_stop']),
restore_best_weights=True)
#time based stopping
time_stop = TimedStopping(seconds=train_time, verbose=DEBUG)
#save individual with the lowest validation loss
#useful for when traaining is halted because of time
monitor = ModelCheckpoint(weights_save_path, monitor='val_loss',
verbose=DEBUG, save_best_only=True)
trainable_count = model.count_params()
if datagen is not None:
score = model.fit_generator(datagen.flow(self.dataset['evo_x_train'],
self.dataset['evo_y_train'],
batch_size=batch_size),
steps_per_epoch=(self.dataset['evo_x_train'].shape[0]//batch_size),
epochs=int(keras_learning['epochs']),
validation_data=(datagen_test.flow(self.dataset['evo_x_val'], self.dataset['evo_y_val'], batch_size=batch_size)),
validation_steps = (self.dataset['evo_x_val'].shape[0]//batch_size),
callbacks = [early_stop, time_stop, monitor],
initial_epoch = num_epochs,
verbose= DEBUG)
else:
score = model.fit(x = self.dataset['evo_x_train'],
y = self.dataset['evo_y_train'],
batch_size = batch_size,
epochs = int(keras_learning['epochs']),
steps_per_epoch=(self.dataset['evo_x_train'].shape[0]//batch_size),
validation_data=(self.dataset['evo_x_val'], self.dataset['evo_y_val']),
callbacks = [early_stop, time_stop, monitor],
initial_epoch = num_epochs,
verbose = DEBUG)
#save final moodel to file
model.save(weights_save_path.replace('.hdf5', '.h5'))
#measure test performance
if datagen_test is None:
y_pred_test = model.predict(self.dataset['evo_x_test'], batch_size=batch_size, verbose=0)
else:
y_pred_test = model.predict_generator(datagen_test.flow(self.dataset['evo_x_test'], batch_size=100, shuffle=False), steps=self.dataset['evo_x_test'].shape[0]//100, verbose=DEBUG)
accuracy_test = self.fitness_metric(self.dataset['evo_y_test'], y_pred_test)
if DEBUG:
print(phenotype, accuracy_test)
score.history['trainable_parameters'] = trainable_count
score.history['accuracy_test'] = accuracy_test
keras.backend.clear_session()
return score.history
def testing_performance(self, model_path, datagen_test): #pragma: no cover
"""
Compute testing performance of the model
Parameters
----------
model_path : str
Path to the model .h5 file
Returns
-------
accuracy : float
Model accuracy
"""
model = keras.models.load_model(model_path)
if datagen_test is None:
y_pred = model.predict(self.dataset['x_test'])
else:
y_pred = model.predict_generator(datagen_test.flow(self.dataset['x_test'], shuffle=False, batch_size=1))
accuracy = self.fitness_metric(self.dataset['y_test'], y_pred)
return accuracy
def evaluate(args): #pragma: no cover
    """
    Function used to deploy a new process to train a candidate solution.
    Each candidate solution is trained in a separate process to avoid memory problems.

    Parameters
    ----------
    args : tuple
        cnn_eval : Evaluator
            network evaluator
        phenotype : str
            individual phenotype
        load_prev_weights : bool
            resume training from a previous train or not
        weights_save_path : str
            path where to save the model weights after training
        parent_weights_path : str
            path to the weights of the previous training
        train_time : float
            maximum training time
        num_epochs : int
            maximum number of epochs
        datagen : keras.preprocessing.image.ImageDataGenerator
            train-time data augmentation generator
        datagen_test : keras.preprocessing.image.ImageDataGenerator
            validation/test data generator

    Returns
    -------
    score_history : dict or None
        training data (loss and accuracy), or None when the candidate could
        not be trained (e.g. out of GPU memory or invalid architecture)
    """

    # Imported inside the worker process: TF must be initialised per-process.
    import tensorflow as tf

    # Allocate GPU memory on demand instead of grabbing the whole device.
    # Guarded so CPU-only machines do not crash with an IndexError.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        tf.config.experimental.set_memory_growth(gpus[0], True)

    cnn_eval, phenotype, load_prev_weights, weights_save_path, parent_weights_path, train_time, num_epochs, datagen, datagen_test = args

    try:
        return cnn_eval.evaluate(phenotype, load_prev_weights, weights_save_path, parent_weights_path, train_time, num_epochs, datagen, datagen_test)
    except (tf.errors.ResourceExhaustedError, TypeError):
        # Both failure modes mark the individual as invalid: release the
        # session memory and signal failure with None.
        keras.backend.clear_session()
        return None
class Module:
    """
    Each of the units of the outer-level genotype

    Attributes
    ----------
    module : str
        non-terminal symbol
    min_expansions : int
        minimum expansions of the block
    max_expansions : int
        maximum expansions of the block
    levels_back : dict
        number of previous layers a given layer can receive as input
    layers : list
        list of layers of the module
    connections : dict
        list of connections of each layer

    Methods
    -------
    initialise(grammar, reuse, init_max)
        Randomly creates a module
    """

    def __init__(self, module, min_expansions, max_expansions, levels_back, min_expansins=None):
        """
        Parameters
        ----------
        module : str
            non-terminal symbol
        min_expansions : int
            minimum expansions of the block
        max_expansions : int
            maximum expansions of the block
        levels_back : dict
            number of previous layers a given layer can receive as input
        min_expansins : int, optional
            legacy (misspelt) fifth positional argument; historically this was
            the value actually stored while ``min_expansions`` was ignored.
            Kept for backward compatibility: when given it overrides
            ``min_expansions``, otherwise ``min_expansions`` is used.
        """
        self.module = module
        self.min_expansions = min_expansions if min_expansins is None else min_expansins
        self.max_expansions = max_expansions
        self.levels_back = levels_back
        self.layers = []
        self.connections = {}

    def initialise(self, grammar, reuse, init_max):
        """
        Randomly creates a module: samples its layers and the (feed-forward,
        possibly skip) connections between them.

        Parameters
        ----------
        grammar : Grammar
            grammar instance that stores the expansion rules
        reuse : float
            likelihood of reusing an existing layer
        init_max : dict
            for each module non-terminal, the candidate numbers of expansions
            from which the initial number of layers is sampled
        """

        num_expansions = random.choice(init_max[self.module])

        #Initialise layers
        for idx in range(num_expansions):
            if idx>0 and random.random() <= reuse:
                # Reuse: share the genotype of a previously created layer.
                r_idx = random.randint(0, idx-1)
                self.layers.append(self.layers[r_idx])
            else:
                self.layers.append(grammar.initialise(self.module))

        #Initialise connections: feed-forward and allowing skip-connections
        self.connections = {}
        for layer_idx in range(num_expansions):
            if layer_idx == 0:
                #the -1 layer is the input
                self.connections[layer_idx] = [-1,]
            else:
                # Candidate extra inputs: layers within levels_back of this one,
                # excluding the immediately previous layer (always connected).
                connection_possibilities = list(range(max(0, layer_idx-self.levels_back), layer_idx-1))
                if len(connection_possibilities) < self.levels_back-1:
                    connection_possibilities.append(-1)

                #sample the number of additional connections
                sample_size = random.randint(0, len(connection_possibilities))

                self.connections[layer_idx] = [layer_idx-1]
                if sample_size > 0:
                    self.connections[layer_idx] += random.sample(connection_possibilities, sample_size)
class Individual:
    """
    Candidate solution.

    Attributes
    ----------
    network_structure : list
        ordered list of tuples formatted as follows
        [(non-terminal, min_expansions, max_expansions), ...]
    output_rule : str
        output non-terminal symbol
    macro_rules : list
        list of non-terminals (str) with the macro rules (e.g., learning)
    modules : list
        list of Modules (genotype) of the layers
    output : dict
        output rule genotype
    macro : list
        list of Modules (genotype) for the macro rules
    phenotype : str
        phenotype of the candidate solution
    fitness : float
        fitness value of the candidate solution
    metrics : dict
        training metrics
    num_epochs : int
        number of performed epochs during training
    trainable_parameters : int
        number of trainable parameters of the network
    time : float
        network training time
    current_time : float
        performed network training time
    train_time : float
        maximum training time
    id : int
        individual unique identifier

    Methods
    -------
    initialise(grammar, levels_back, reuse, init_max)
        Randomly creates a candidate solution
    decode(grammar)
        Maps the genotype to the phenotype
    evaluate(grammar, cnn_eval, datagen, datagen_test, weights_save_path, parent_weights_path='')
        Performs the evaluation of a candidate solution
    """

    def __init__(self, network_structure, macro_rules, output_rule, ind_id):
        """
        Parameters
        ----------
        network_structure : list
            ordered list of tuples formatted as follows
            [(non-terminal, min_expansions, max_expansions), ...]
        macro_rules : list
            list of non-terminals (str) with the macro rules (e.g., learning)
        output_rule : str
            output non-terminal symbol
        ind_id : int
            individual unique identifier
        """
        self.network_structure = network_structure
        self.output_rule = output_rule
        self.macro_rules = macro_rules
        self.modules = []
        self.output = None
        self.macro = []
        self.phenotype = None
        self.fitness = None
        self.metrics = None
        self.num_epochs = 0
        self.trainable_parameters = None
        self.time = None
        self.current_time = 0
        self.train_time = 0
        self.id = ind_id

    def initialise(self, grammar, levels_back, reuse, init_max):
        """
        Randomly creates a candidate solution

        Parameters
        ----------
        grammar : Grammar
            grammar instances that stores the expansion rules
        levels_back : dict
            number of previous layers a given layer can receive as input
        reuse : float
            likelihood of reusing an existing layer
        init_max : dict
            for each module non-terminal, the candidate numbers of expansions

        Returns
        -------
        candidate_solution : Individual
            randomly created candidate solution (self, for chaining)
        """

        # One Module per entry of the outer-level structure.
        for non_terminal, min_expansions, max_expansions in self.network_structure:
            new_module = Module(non_terminal, min_expansions, max_expansions, levels_back[non_terminal], min_expansions)
            new_module.initialise(grammar, reuse, init_max)

            self.modules.append(new_module)

        #Initialise output
        self.output = grammar.initialise(self.output_rule)

        # Initialise the macro structure: learning, data augmentation, etc.
        for rule in self.macro_rules:
            self.macro.append(grammar.initialise(rule))

        return self

    def decode(self, grammar):
        """
        Maps the genotype to the phenotype

        Parameters
        ----------
        grammar : Grammar
            grammar instances that stores the expansion rules

        Returns
        -------
        phenotype : str
            phenotype of the individual to be used in the mapping to the keras model.
        """

        phenotype = ''
        offset = 0
        layer_counter = 0
        for module in self.modules:
            # Connections inside a module are local indices; offset shifts them
            # to global layer indices across the concatenated modules.
            offset = layer_counter
            for layer_idx, layer_genotype in enumerate(module.layers):
                layer_counter += 1
                phenotype += ' ' + grammar.decode(module.module, layer_genotype)+ ' input:'+",".join(map(str, np.array(module.connections[layer_idx])+offset))

        # Output layer is fed by the last layer of the last module.
        phenotype += ' '+grammar.decode(self.output_rule, self.output)+' input:'+str(layer_counter-1)

        for rule_idx, macro_rule in enumerate(self.macro_rules):
            phenotype += ' '+grammar.decode(macro_rule, self.macro[rule_idx])

        self.phenotype = phenotype.rstrip().lstrip()
        return self.phenotype

    def evaluate(self, grammar, cnn_eval, datagen, datagen_test, weights_save_path, parent_weights_path=''): #pragma: no cover
        """
        Performs the evaluation of a candidate solution

        Parameters
        ----------
        grammar : Grammar
            grammar instances that stores the expansion rules
        cnn_eval : Evaluator
            Evaluator instance used to train the networks
        datagen : keras.preprocessing.image.ImageDataGenerator
            Data augmentation method image data generator
        datagen_test : keras.preprocessing.image.ImageDataGenerator
            validation/test data generator
        weights_save_path : str
            path where to save the model weights after training
        parent_weights_path : str
            path to the weights of the previous training

        Returns
        -------
        fitness : float
            quality of the candidate solutions (-1 when training failed)
        """

        phenotype = self.decode(grammar)
        start = time()

        # current_time == 0 means the individual was never trained: start fresh.
        load_prev_weights = True
        if self.current_time == 0:
            load_prev_weights = False

        # Remaining training budget for this evaluation round.
        train_time = self.train_time - self.current_time

        # Train in a separate process so TensorFlow releases GPU/host memory
        # when the worker exits (see the module-level evaluate function).
        num_pool_workers=1
        with contextlib.closing(Pool(num_pool_workers)) as po:
            pool_results = po.map_async(evaluate, [(cnn_eval, phenotype, load_prev_weights,\
                            weights_save_path, parent_weights_path,\
                            train_time, self.num_epochs, datagen, datagen_test)])
            metrics = pool_results.get()[0]

        if metrics is not None:
            # Convert numpy scalars to plain Python floats (e.g. for JSON dumps).
            metrics['val_accuracy'] = [i.item() for i in metrics['val_accuracy']]
            metrics['loss'] = [i.item() for i in metrics['loss']]
            metrics['accuracy'] = [i.item() for i in metrics['accuracy']]
            self.metrics = metrics
            self.fitness = self.metrics['accuracy_test'].item()
            self.num_epochs += len(self.metrics['val_accuracy'])
            self.trainable_parameters = self.metrics['trainable_parameters']
            # The whole remaining budget is considered consumed.
            self.current_time += (self.train_time-self.current_time)
        else:
            # Training failed (e.g. out of memory): mark the individual invalid.
            self.metrics = None
            self.fitness = -1
            self.num_epochs = 0
            self.trainable_parameters = -1
            self.current_time = 0

        self.time = time() - start

        return self.fitness
| 35.363269 | 190 | 0.54119 | # Copyright 2019 Filipe Assuncao
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import keras
from keras import backend
from time import time
import tensorflow as tf
import numpy as np
from keras.callbacks import Callback, ModelCheckpoint
import os
from fast_denser.utilities.data import load_dataset
from multiprocessing import Pool
import contextlib
#TODO: future -- impose memory constraints
# tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=50)])
DEBUG = False
class TimedStopping(keras.callbacks.Callback):
"""
Stop training when maximum time has passed.
Code from:
https://github.com/keras-team/keras-contrib/issues/87
Attributes
----------
start_time : float
time when the training started
seconds : float
maximum time before stopping.
verbose : bool
verbosity mode.
Methods
-------
on_train_begin(logs)
method called upon training beginning
on_epoch_end(epoch, logs={})
method called after the end of each training epoch
"""
def __init__(self, seconds=None, verbose=0):
"""
Parameters
----------
seconds : float
maximum time before stopping.
vebose : bool
verbosity mode
"""
super(keras.callbacks.Callback, self).__init__()
self.start_time = 0
self.seconds = seconds
self.verbose = verbose
def on_train_begin(self, logs={}):
"""
Method called upon training beginning
Parameters
----------
logs : dict
training logs
"""
self.start_time = time()
def on_epoch_end(self, epoch, logs={}):
"""
Method called after the end of each training epoch.
Checks if the maximum time has passed
Parameters
----------
epoch : int
current epoch
logs : dict
training logs
"""
if time() - self.start_time > self.seconds:
self.model.stop_training = True
if self.verbose:
print('Stopping after %s seconds.' % self.seconds)
class Evaluator:
"""
Stores the dataset, maps the phenotype into a trainable model, and
evaluates it
Attributes
----------
dataset : dict
dataset instances and partitions
fitness_metric : function
fitness_metric (y_true, y_pred)
y_pred are the confidences
Methods
-------
get_layers(phenotype)
parses the phenotype corresponding to the layers
auxiliary function of the assemble_network function
get_learning(learning)
parses the phenotype corresponding to the learning
auxiliary function of the assemble_optimiser function
assemble_network(keras_layers, input_size)
maps the layers phenotype into a keras model
assemble_optimiser(learning)
maps the learning into a keras optimiser
evaluate(phenotype, load_prev_weights, weights_save_path, parent_weights_path,
train_time, num_epochs, datagen=None, input_size=(32, 32, 3))
evaluates the keras model using the keras optimiser
testing_performance(self, model_path)
compute testing performance of the model
"""
def __init__(self, dataset, fitness_metric):
"""
Creates the Evaluator instance and loads the dataset.
Parameters
----------
dataset : str
dataset to be loaded
"""
self.dataset = load_dataset(dataset)
self.fitness_metric = fitness_metric
def get_layers(self, phenotype):
"""
Parses the phenotype corresponding to the layers.
Auxiliary function of the assemble_network function.
Parameters
----------
phenotye : str
individual layers phenotype
Returns
-------
layers : list
list of tuples (layer_type : str, node properties : dict)
"""
raw_phenotype = phenotype.split(' ')
idx = 0
first = True
node_type, node_val = raw_phenotype[idx].split(':')
layers = []
while idx < len(raw_phenotype):
if node_type == 'layer':
if not first:
layers.append((layer_type, node_properties))
else:
first = False
layer_type = node_val
node_properties = {}
else:
node_properties[node_type] = node_val.split(',')
idx += 1
if idx < len(raw_phenotype):
node_type, node_val = raw_phenotype[idx].split(':')
layers.append((layer_type, node_properties))
return layers
def get_learning(self, learning):
"""
Parses the phenotype corresponding to the learning
Auxiliary function of the assemble_optimiser function
Parameters
----------
learning : str
learning phenotype of the individual
Returns
-------
learning_params : dict
learning parameters
"""
raw_learning = learning.split(' ')
idx = 0
learning_params = {}
while idx < len(raw_learning):
param_name, param_value = raw_learning[idx].split(':')
learning_params[param_name] = param_value.split(',')
idx += 1
for _key_ in sorted(list(learning_params.keys())):
if len(learning_params[_key_]) == 1:
try:
learning_params[_key_] = eval(learning_params[_key_][0])
except NameError:
learning_params[_key_] = learning_params[_key_][0]
return learning_params
def assemble_network(self, keras_layers, input_size):
"""
Maps the layers phenotype into a keras model
Parameters
----------
keras_layers : list
output from get_layers
input_size : tuple
network input shape
Returns
-------
model : keras.models.Model
keras trainable model
"""
#input layer
inputs = keras.layers.Input(shape=input_size)
#Create layers -- ADD NEW LAYERS HERE
layers = []
for layer_type, layer_params in keras_layers:
#convolutional layer
if layer_type == 'conv':
conv_layer = keras.layers.Conv2D(filters=int(layer_params['num-filters'][0]),
kernel_size=(int(layer_params['filter-shape'][0]), int(layer_params['filter-shape'][0])),
strides=(int(layer_params['stride'][0]), int(layer_params['stride'][0])),
padding=layer_params['padding'][0],
activation=layer_params['act'][0],
use_bias=eval(layer_params['bias'][0]),
kernel_initializer='he_normal',
kernel_regularizer=keras.regularizers.l2(0.0005))
layers.append(conv_layer)
#batch-normalisation
elif layer_type == 'batch-norm':
#TODO - check because channels are not first
batch_norm = keras.layers.BatchNormalization()
layers.append(batch_norm)
#average pooling layer
elif layer_type == 'pool-avg':
pool_avg = keras.layers.AveragePooling2D(pool_size=(int(layer_params['kernel-size'][0]), int(layer_params['kernel-size'][0])),
strides=int(layer_params['stride'][0]),
padding=layer_params['padding'][0])
layers.append(pool_avg)
#max pooling layer
elif layer_type == 'pool-max':
pool_max = keras.layers.MaxPooling2D(pool_size=(int(layer_params['kernel-size'][0]), int(layer_params['kernel-size'][0])),
strides=int(layer_params['stride'][0]),
padding=layer_params['padding'][0])
layers.append(pool_max)
#fully-connected layer
elif layer_type == 'fc':
fc = keras.layers.Dense(int(layer_params['num-units'][0]),
activation=layer_params['act'][0],
use_bias=eval(layer_params['bias'][0]),
kernel_initializer='he_normal',
kernel_regularizer=keras.regularizers.l2(0.0005))
layers.append(fc)
#dropout layer
elif layer_type == 'dropout':
dropout = keras.layers.Dropout(rate=min(0.5, float(layer_params['rate'][0])))
layers.append(dropout)
#gru layer #TODO: initializers, recurrent dropout, dropout, unroll, reset_after
elif layer_type == 'gru':
gru = keras.layers.GRU(units=int(layer_params['units'][0]),
activation=layer_params['act'][0],
recurrent_activation=layer_params['rec_act'][0],
use_bias=eval(layer_params['bias'][0]))
layers.append(gru)
#lstm layer #TODO: initializers, recurrent dropout, dropout, unroll, reset_after
elif layer_type == 'lstm':
lstm = keras.layers.LSTM(units=int(layer_params['units'][0]),
activation=layer_params['act'][0],
recurrent_activation=layer_params['rec_act'][0],
use_bias=eval(layer_params['bias'][0]))
layers.append(lstm)
#rnn #TODO: initializers, recurrent dropout, dropout, unroll, reset_after
elif layer_type == 'rnn':
rnn = keras.layers.SimpleRNN(units=int(layer_params['units'][0]),
activation=layer_params['act'][0],
use_bias=eval(layer_params['bias'][0]))
layers.append(rnn)
elif layer_type == 'conv1d': #todo initializer
conv1d = keras.layers.Conv1D(filters=int(layer_params['num-filters'][0]),
kernel_size=int(layer_params['kernel-size'][0]),
strides=int(layer_params['strides'][0]),
padding=layer_params['padding'][0],
activation=layer_params['activation'][0],
use_bias=eval(layer_params['bias'][0]))
layers.add(conv1d)
#END ADD NEW LAYERS
#Connection between layers
for layer in keras_layers:
layer[1]['input'] = list(map(int, layer[1]['input']))
first_fc = True
data_layers = []
invalid_layers = []
for layer_idx, layer in enumerate(layers):
try:
if len(keras_layers[layer_idx][1]['input']) == 1:
if keras_layers[layer_idx][1]['input'][0] == -1:
data_layers.append(layer(inputs))
else:
if keras_layers[layer_idx][0] == 'fc' and first_fc:
first_fc = False
flatten = keras.layers.Flatten()(data_layers[keras_layers[layer_idx][1]['input'][0]])
data_layers.append(layer(flatten))
continue
data_layers.append(layer(data_layers[keras_layers[layer_idx][1]['input'][0]]))
else:
#Get minimum shape: when merging layers all the signals are converted to the minimum shape
minimum_shape = input_size[0]
for input_idx in keras_layers[layer_idx][1]['input']:
if input_idx != -1 and input_idx not in invalid_layers:
if data_layers[input_idx].shape[-3:][0] < minimum_shape:
minimum_shape = int(data_layers[input_idx].shape[-3:][0])
#Reshape signals to the same shape
merge_signals = []
for input_idx in keras_layers[layer_idx][1]['input']:
if input_idx == -1:
if inputs.shape[-3:][0] > minimum_shape:
actual_shape = int(inputs.shape[-3:][0])
merge_signals.append(keras.layers.MaxPooling2D(pool_size=(actual_shape-(minimum_shape-1), actual_shape-(minimum_shape-1)), strides=1)(inputs))
else:
merge_signals.append(inputs)
elif input_idx not in invalid_layers:
if data_layers[input_idx].shape[-3:][0] > minimum_shape:
actual_shape = int(data_layers[input_idx].shape[-3:][0])
merge_signals.append(keras.layers.MaxPooling2D(pool_size=(actual_shape-(minimum_shape-1), actual_shape-(minimum_shape-1)), strides=1)(data_layers[input_idx]))
else:
merge_signals.append(data_layers[input_idx])
if len(merge_signals) == 1:
merged_signal = merge_signals[0]
elif len(merge_signals) > 1:
merged_signal = keras.layers.concatenate(merge_signals)
else:
merged_signal = data_layers[-1]
data_layers.append(layer(merged_signal))
except ValueError as e:
data_layers.append(data_layers[-1])
invalid_layers.append(layer_idx)
if DEBUG:
print(keras_layers[layer_idx][0])
print(e)
model = keras.models.Model(inputs=inputs, outputs=data_layers[-1])
if DEBUG:
model.summary()
return model
def assemble_optimiser(self, learning):
"""
Maps the learning into a keras optimiser
Parameters
----------
learning : dict
output of get_learning
Returns
-------
optimiser : keras.optimizers.Optimizer
keras optimiser that will be later used to train the model
"""
if learning['learning'] == 'rmsprop':
return keras.optimizers.RMSprop(learning_rate = float(learning['lr']),
rho = float(learning['rho']),
decay = float(learning['decay']))
elif learning['learning'] == 'gradient-descent':
return keras.optimizers.SGD(learning_rate = float(learning['lr']),
momentum = float(learning['momentum']),
decay = float(learning['decay']),
nesterov = bool(learning['nesterov']))
elif learning['learning'] == 'adam':
return keras.optimizers.Adam(learning_rate = float(learning['lr']),
beta_1 = float(learning['beta1']),
beta_2 = float(learning['beta2']),
decay = float(learning['decay']))
def evaluate(self, phenotype, load_prev_weights, weights_save_path, parent_weights_path,\
train_time, num_epochs, datagen=None, datagen_test = None, input_size=(32, 32, 3)): #pragma: no cover
"""
Evaluates the keras model using the keras optimiser
Parameters
----------
phenotype : str
individual phenotype
load_prev_weights : bool
resume training from a previous train or not
weights_save_path : str
path where to save the model weights after training
parent_weights_path : str
path to the weights of the previous training
train_time : float
maximum training time
num_epochs : int
maximum number of epochs
datagen : keras.preprocessing.image.ImageDataGenerator
Data augmentation method image data generator
input_size : tuple
dataset input shape
Returns
-------
score_history : dict
training data: loss and accuracy
"""
model_phenotype, learning_phenotype = phenotype.split('learning:')
learning_phenotype = 'learning:'+learning_phenotype.rstrip().lstrip()
model_phenotype = model_phenotype.rstrip().lstrip().replace(' ', ' ')
keras_layers = self.get_layers(model_phenotype)
keras_learning = self.get_learning(learning_phenotype)
batch_size = int(keras_learning['batch_size'])
if load_prev_weights and os.path.exists(parent_weights_path.replace('.hdf5', '.h5')):
model = keras.models.load_model(parent_weights_path.replace('.hdf5', '.h5'))
else:
if load_prev_weights:
num_epochs = 0
model = self.assemble_network(keras_layers, input_size)
opt = self.assemble_optimiser(keras_learning)
model.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
#early stopping
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=int(keras_learning['early_stop']),
restore_best_weights=True)
#time based stopping
time_stop = TimedStopping(seconds=train_time, verbose=DEBUG)
#save individual with the lowest validation loss
#useful for when traaining is halted because of time
monitor = ModelCheckpoint(weights_save_path, monitor='val_loss',
verbose=DEBUG, save_best_only=True)
trainable_count = model.count_params()
if datagen is not None:
score = model.fit_generator(datagen.flow(self.dataset['evo_x_train'],
self.dataset['evo_y_train'],
batch_size=batch_size),
steps_per_epoch=(self.dataset['evo_x_train'].shape[0]//batch_size),
epochs=int(keras_learning['epochs']),
validation_data=(datagen_test.flow(self.dataset['evo_x_val'], self.dataset['evo_y_val'], batch_size=batch_size)),
validation_steps = (self.dataset['evo_x_val'].shape[0]//batch_size),
callbacks = [early_stop, time_stop, monitor],
initial_epoch = num_epochs,
verbose= DEBUG)
else:
score = model.fit(x = self.dataset['evo_x_train'],
y = self.dataset['evo_y_train'],
batch_size = batch_size,
epochs = int(keras_learning['epochs']),
steps_per_epoch=(self.dataset['evo_x_train'].shape[0]//batch_size),
validation_data=(self.dataset['evo_x_val'], self.dataset['evo_y_val']),
callbacks = [early_stop, time_stop, monitor],
initial_epoch = num_epochs,
verbose = DEBUG)
#save final moodel to file
model.save(weights_save_path.replace('.hdf5', '.h5'))
#measure test performance
if datagen_test is None:
y_pred_test = model.predict(self.dataset['evo_x_test'], batch_size=batch_size, verbose=0)
else:
y_pred_test = model.predict_generator(datagen_test.flow(self.dataset['evo_x_test'], batch_size=100, shuffle=False), steps=self.dataset['evo_x_test'].shape[0]//100, verbose=DEBUG)
accuracy_test = self.fitness_metric(self.dataset['evo_y_test'], y_pred_test)
if DEBUG:
print(phenotype, accuracy_test)
score.history['trainable_parameters'] = trainable_count
score.history['accuracy_test'] = accuracy_test
keras.backend.clear_session()
return score.history
def testing_performance(self, model_path, datagen_test): #pragma: no cover
"""
Compute testing performance of the model
Parameters
----------
model_path : str
Path to the model .h5 file
Returns
-------
accuracy : float
Model accuracy
"""
model = keras.models.load_model(model_path)
if datagen_test is None:
y_pred = model.predict(self.dataset['x_test'])
else:
y_pred = model.predict_generator(datagen_test.flow(self.dataset['x_test'], shuffle=False, batch_size=1))
accuracy = self.fitness_metric(self.dataset['y_test'], y_pred)
return accuracy
def evaluate(args): #pragma: no cover
"""
Function used to deploy a new process to train a candidate solution.
Each candidate solution is trained in a separe process to avoid memory problems.
Parameters
----------
args : tuple
cnn_eval : Evaluator
network evaluator
phenotype : str
individual phenotype
load_prev_weights : bool
resume training from a previous train or not
weights_save_path : str
path where to save the model weights after training
parent_weights_path : str
path to the weights of the previous training
train_time : float
maximum training time
num_epochs : int
maximum number of epochs
Returns
-------
score_history : dict
training data: loss and accuracy
"""
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
cnn_eval, phenotype, load_prev_weights, weights_save_path, parent_weights_path, train_time, num_epochs, datagen, datagen_test = args
try:
return cnn_eval.evaluate(phenotype, load_prev_weights, weights_save_path, parent_weights_path, train_time, num_epochs, datagen, datagen_test)
except tf.errors.ResourceExhaustedError as e:
keras.backend.clear_session()
return None
except TypeError as e2:
keras.backend.clear_session()
return None
class Module:
    """
    Each of the units of the outer-level genotype.
    Attributes
    ----------
    module : str
        non-terminal symbol
    min_expansions : int
        minimum expansions of the block
    max_expansions : int
        maximum expansions of the block
    levels_back : dict
        number of previous layers a given layer can receive as input
    layers : list
        list of layers of the module
    connections : dict
        list of connections of each layer
    Methods
    -------
    initialise(grammar, reuse, init_max)
        Randomly creates a module
    """
    def __init__(self, module, min_expansions, max_expansions, levels_back, min_expansins):
        """
        Parameters
        ----------
        module : str
            non-terminal symbol
        min_expansions : int
            minimum expansions of the block
        max_expansions : int
            maximum expansions of the block
        levels_back : dict
            number of previous layers a given layer can receive as input
        """
        # NOTE(review): the signature takes both `min_expansions` and the
        # misspelled `min_expansins`, and only the misspelled one is stored;
        # the caller (Individual.initialise) passes the same value for both.
        # Looks like a typo that was worked around — confirm before renaming.
        self.module = module
        self.min_expansions = min_expansins
        self.max_expansions = max_expansions
        self.levels_back = levels_back
        self.layers = []
        self.connections = {}
    def initialise(self, grammar, reuse, init_max):
        """
        Randomly creates a module (its layer genotypes and connections).
        Parameters
        ----------
        grammar : Grammar
            grammar instance that stores the expansion rules
        reuse : float
            likelihood of reusing an existing layer
        init_max : dict
            per-non-terminal candidate expansion counts; one is drawn at
            random for this module
        """
        num_expansions = random.choice(init_max[self.module])
        #Initialise layers
        for idx in range(num_expansions):
            if idx>0 and random.random() <= reuse:
                # Reuse: share the genotype of a previously created layer.
                r_idx = random.randint(0, idx-1)
                self.layers.append(self.layers[r_idx])
            else:
                self.layers.append(grammar.initialise(self.module))
        #Initialise connections: feed-forward and allowing skip-connections
        self.connections = {}
        for layer_idx in range(num_expansions):
            if layer_idx == 0:
                #the -1 layer is the input
                self.connections[layer_idx] = [-1,]
            else:
                # Candidate skip-connection sources within `levels_back`;
                # the immediate predecessor is always connected below.
                connection_possibilities = list(range(max(0, layer_idx-self.levels_back), layer_idx-1))
                if len(connection_possibilities) < self.levels_back-1:
                    connection_possibilities.append(-1)
                sample_size = random.randint(0, len(connection_possibilities))
                self.connections[layer_idx] = [layer_idx-1]
                if sample_size > 0:
                    self.connections[layer_idx] += random.sample(connection_possibilities, sample_size)
class Individual:
    """
    Candidate solution.
    Attributes
    ----------
    network_structure : list
        ordered list of tuples formatted as follows
        [(non-terminal, min_expansions, max_expansions), ...]
    output_rule : str
        output non-terminal symbol
    macro_rules : list
        list of non-terminals (str) with the macro rules (e.g., learning)
    modules : list
        list of Modules (genotype) of the layers
    output : dict
        output rule genotype
    macro : list
        list of Modules (genotype) for the macro rules
    phenotype : str
        phenotype of the candidate solution
    fitness : float
        fitness value of the candidate solution
    metrics : dict
        training metrics
    num_epochs : int
        number of performed epochs during training
    trainable_parameters : int
        number of trainable parameters of the network
    time : float
        network training time
    current_time : float
        performed network training time
    train_time : float
        maximum training time
    id : int
        individual unique identifier
    Methods
    -------
    initialise(grammar, levels_back, reuse)
        Randomly creates a candidate solution
    decode(grammar)
        Maps the genotype to the phenotype
    evaluate(grammar, cnn_eval, weights_save_path, parent_weights_path='')
        Performs the evaluation of a candidate solution
    """
    def __init__(self, network_structure, macro_rules, output_rule, ind_id):
        """
        Parameters
        ----------
        network_structure : list
            ordered list of tuples formatted as follows
            [(non-terminal, min_expansions, max_expansions), ...]
        macro_rules : list
            list of non-terminals (str) with the macro rules (e.g., learning)
        output_rule : str
            output non-terminal symbol
        ind_id : int
            individual unique identifier
        """
        self.network_structure = network_structure
        self.output_rule = output_rule
        self.macro_rules = macro_rules
        self.modules = []
        self.output = None
        self.macro = []
        self.phenotype = None
        self.fitness = None
        self.metrics = None
        self.num_epochs = 0
        self.trainable_parameters = None
        self.time = None
        self.current_time = 0
        self.train_time = 0
        self.id = ind_id
    def initialise(self, grammar, levels_back, reuse, init_max):
        """
        Randomly creates a candidate solution
        Parameters
        ----------
        grammar : Grammar
            grammar instances that stores the expansion rules
        levels_back : dict
            number of previous layers a given layer can receive as input
        reuse : float
            likelihood of reusing an existing layer
        Returns
        -------
        candidate_solution : Individual
            randomly created candidate solution (self, for chaining)
        """
        for non_terminal, min_expansions, max_expansions in self.network_structure:
            # Module takes the minimum-expansion value twice (see the note
            # on Module.__init__ about the misspelled parameter).
            new_module = Module(non_terminal, min_expansions, max_expansions, levels_back[non_terminal], min_expansions)
            new_module.initialise(grammar, reuse, init_max)
            self.modules.append(new_module)
        #Initialise output
        self.output = grammar.initialise(self.output_rule)
        # Initialise the macro structure: learning, data augmentation, etc.
        for rule in self.macro_rules:
            self.macro.append(grammar.initialise(rule))
        return self
    def decode(self, grammar):
        """
        Maps the genotype to the phenotype
        Parameters
        ----------
        grammar : Grammar
            grammar instances that stores the expansion rules
        Returns
        -------
        phenotype : str
            phenotype of the individual to be used in the mapping to the keras model.
        """
        phenotype = ''
        offset = 0
        layer_counter = 0
        for module in self.modules:
            # Connection indices inside a module are module-local; shift
            # them by the number of layers decoded so far.
            offset = layer_counter
            for layer_idx, layer_genotype in enumerate(module.layers):
                layer_counter += 1
                phenotype += ' ' + grammar.decode(module.module, layer_genotype)+ ' input:'+",".join(map(str, np.array(module.connections[layer_idx])+offset))
        # Output layer consumes the last decoded layer.
        phenotype += ' '+grammar.decode(self.output_rule, self.output)+' input:'+str(layer_counter-1)
        for rule_idx, macro_rule in enumerate(self.macro_rules):
            phenotype += ' '+grammar.decode(macro_rule, self.macro[rule_idx])
        self.phenotype = phenotype.rstrip().lstrip()
        return self.phenotype
    def evaluate(self, grammar, cnn_eval, datagen, datagen_test, weights_save_path, parent_weights_path=''): #pragma: no cover
        """
        Performs the evaluation of a candidate solution
        Parameters
        ----------
        grammar : Grammar
            grammar instances that stores the expansion rules
        cnn_eval : Evaluator
            Evaluator instance used to train the networks
        datagen : keras.preprocessing.image.ImageDataGenerator
            Data augmentation method image data generator (training)
        datagen_test : keras.preprocessing.image.ImageDataGenerator
            image data generator used for the test data
        weights_save_path : str
            path where to save the model weights after training
        parent_weights_path : str
            path to the weights of the previous training
        Returns
        -------
        fitness : float
            quality of the candidate solutions (-1 when training failed)
        """
        phenotype = self.decode(grammar)
        start = time()
        # Resume from the parent's weights only if this individual already
        # consumed some of its training-time budget.
        load_prev_weights = True
        if self.current_time == 0:
            load_prev_weights = False
        train_time = self.train_time - self.current_time
        # Training runs in a single-worker pool via the module-level
        # `evaluate` helper — presumably so TF/GPU resources are released
        # when the worker exits (the helper clears the keras session on
        # failure); confirm before changing.
        num_pool_workers=1
        with contextlib.closing(Pool(num_pool_workers)) as po:
            pool_results = po.map_async(evaluate, [(cnn_eval, phenotype, load_prev_weights,\
                                                    weights_save_path, parent_weights_path,\
                                                    train_time, self.num_epochs, datagen, datagen_test)])
            metrics = pool_results.get()[0]
        if metrics is not None:
            # Convert numpy scalars to plain Python numbers.
            metrics['val_accuracy'] = [i.item() for i in metrics['val_accuracy']]
            metrics['loss'] = [i.item() for i in metrics['loss']]
            metrics['accuracy'] = [i.item() for i in metrics['accuracy']]
            self.metrics = metrics
            self.fitness = self.metrics['accuracy_test'].item()
            self.num_epochs += len(self.metrics['val_accuracy'])
            self.trainable_parameters = self.metrics['trainable_parameters']
            self.current_time += (self.train_time-self.current_time)
        else:
            # Worker returned None (resource exhaustion or build error).
            self.metrics = None
            self.fitness = -1
            self.num_epochs = 0
            self.trainable_parameters = -1
            self.current_time = 0
        self.time = time() - start
        return self.fitness
| 0 | 0 | 0 |
7c232b7f6dc413dd8c126415ffad5cd085e55720 | 895 | py | Python | topomc/symbols/earthhill.py | AB-Spud/topomc | 7349e9d6ceb6e98a4e854e60377837d2bfca1575 | [
"MIT"
] | null | null | null | topomc/symbols/earthhill.py | AB-Spud/topomc | 7349e9d6ceb6e98a4e854e60377837d2bfca1575 | [
"MIT"
] | null | null | null | topomc/symbols/earthhill.py | AB-Spud/topomc | 7349e9d6ceb6e98a4e854e60377837d2bfca1575 | [
"MIT"
] | null | null | null | from topomc.common.coordinates import Coordinates
from topomc.processes.topomap import Depression, Hill, TopoMap
from topomc.symbol import PointSymbol
from topomc import app
| 42.619048 | 95 | 0.620112 | from topomc.common.coordinates import Coordinates
from topomc.processes.topomap import Depression, Hill, TopoMap
from topomc.symbol import PointSymbol
from topomc import app
class EarthHill(PointSymbol):
def __init__(self, processes):
self.topomap = super().__init__(processes, klass=TopoMap)
self.set_properties(color="#BA5E1A")
def render(self):
for isoline in self.topomap.closed_isolines:
if isoline.small_feature and isinstance(isoline, Hill):
if isoline.first_small_feature:
if isoline.depth >= app.settings["Steep features threshold"]:
self.plot(Coordinates(
sum([p.x for p in isoline.vertices]) / len(isoline.vertices) + 0.5,
sum([p.y for p in isoline.vertices]) / len(isoline.vertices) + 0.5
)) | 633 | 8 | 80 |
d57e9e769bcd69bb60e7a40cb86165d570884c10 | 628 | py | Python | kyu_8/find_the_first_non_consecutive_number/first_non_consecutive.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 1 | 2022-02-12T05:56:04.000Z | 2022-02-12T05:56:04.000Z | kyu_8/find_the_first_non_consecutive_number/first_non_consecutive.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 182 | 2020-04-30T00:51:36.000Z | 2021-09-07T04:15:05.000Z | kyu_8/find_the_first_non_consecutive_number/first_non_consecutive.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 4 | 2020-04-29T22:04:20.000Z | 2021-07-13T20:04:14.000Z | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def first_non_consecutive(arr: list):
    """
    Return the first element of *arr* that does not follow its
    predecessor by exactly one.
    E.g. in [1, 2, 3, 4, 6, 7, 8] the values 1..4 are consecutive but 6
    is not, so 6 is returned.  When the whole array is consecutive (or
    it has fewer than two elements) the function falls through and
    returns None implicitly.
    :param arr: list of integers to scan
    :return: first non-consecutive element, or None
    """
    for previous, current in zip(arr, arr[1:]):
        if current != previous + 1:
            return current
| 28.545455 | 66 | 0.636943 | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def first_non_consecutive(arr: list):
    """
    Find the first element of the array that is not consecutive with
    the element before it, e.g. 6 in [1, 2, 3, 4, 6, 7, 8].
    Returns None (implicitly) when every element follows its
    predecessor by one, including empty and single-element arrays.
    :param arr: list of integers to scan
    :return: first non-consecutive element, or None
    """
    position = 1
    while position < len(arr):
        if arr[position] != arr[position - 1] + 1:
            return arr[position]
        position += 1
| 0 | 0 | 0 |
cebc13802b394cdeae6c353cc8e4771e2d2b6a06 | 420 | py | Python | lesson_5/overtime.py | librity/ossu_p4e | 53586f5022416a0d0a107c2511ca6f4836318bd2 | [
"MIT"
] | null | null | null | lesson_5/overtime.py | librity/ossu_p4e | 53586f5022416a0d0a107c2511ca6f4836318bd2 | [
"MIT"
] | null | null | null | lesson_5/overtime.py | librity/ossu_p4e | 53586f5022416a0d0a107c2511ca6f4836318bd2 | [
"MIT"
] | null | null | null | OVERTIME_LIMIT = 40.0
hours = input("Enter Hours:")
hours_f = float(hours)
rate = input("Enter Rate:")
rate_f = float(rate)
pay = computepay(hours_f, rate_f)
print("Pay", pay)
| 16.8 | 39 | 0.683333 | OVERTIME_LIMIT = 40.0
hours = input("Enter Hours:")
hours_f = float(hours)
rate = input("Enter Rate:")
rate_f = float(rate)
def computepay(hours, rate):
    """Return gross pay for the week.

    Hours up to OVERTIME_LIMIT are paid at *rate*; every hour past the
    limit is paid at time-and-a-half.
    """
    if hours <= OVERTIME_LIMIT:
        return hours * rate
    overtime_hours = hours - OVERTIME_LIMIT
    base_pay = (hours - overtime_hours) * rate
    overtime_pay = overtime_hours * 1.5 * rate
    return base_pay + overtime_pay
pay = computepay(hours_f, rate_f)
print("Pay", pay)
| 216 | 0 | 23 |
c6fadce755a83c5d1f6e88a47bb7cc998835eaf8 | 400 | py | Python | modules/users/routes.py | jirenmaa/twitter-clone | de211a7d73ef455f5759eba69cdceb4b51f5a9b0 | [
"MIT"
] | 5 | 2021-10-12T06:40:51.000Z | 2022-02-23T13:37:40.000Z | modules/users/routes.py | jirenmaa/twitter-clone | de211a7d73ef455f5759eba69cdceb4b51f5a9b0 | [
"MIT"
] | null | null | null | modules/users/routes.py | jirenmaa/twitter-clone | de211a7d73ef455f5759eba69cdceb4b51f5a9b0 | [
"MIT"
] | 1 | 2022-02-02T22:36:00.000Z | 2022-02-02T22:36:00.000Z | from django.urls import path
from modules.users.index import (
user_info,
user_tweets,
user_medias,
user_replies,
user_likes,
)
# Routes for a single user; every path resolves relative to the prefix
# this urlconf is included under.
urlpatterns = [
    path("", user_info, name="info"),  # <prefix>/ -> user_info
    path("tweets", user_tweets, name="tweets"),  # <prefix>/tweets -> user_tweets
    path("medias", user_medias, name="medias"),  # <prefix>/medias -> user_medias
    path("likes", user_likes, name="likes"),  # <prefix>/likes -> user_likes
    path("comments", user_replies, name="comments"),  # <prefix>/comments -> user_replies
]
| 22.222222 | 52 | 0.655 | from django.urls import path
from modules.users.index import (
user_info,
user_tweets,
user_medias,
user_replies,
user_likes,
)
urlpatterns = [
path("", user_info, name="info"),
path("tweets", user_tweets, name="tweets"),
path("medias", user_medias, name="medias"),
path("likes", user_likes, name="likes"),
path("comments", user_replies, name="comments"),
]
| 0 | 0 | 0 |
814608889206cded837b3a999c20a64def082823 | 6,291 | py | Python | booking/raft_states.py | johnstcn/cs7ns6groupF | df073591e9558c4734a602f809545e7e64a5985c | [
"MIT"
] | null | null | null | booking/raft_states.py | johnstcn/cs7ns6groupF | df073591e9558c4734a602f809545e7e64a5985c | [
"MIT"
] | null | null | null | booking/raft_states.py | johnstcn/cs7ns6groupF | df073591e9558c4734a602f809545e7e64a5985c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import datetime
import json
import logging
import os
from typing import Optional, List, Dict
from raft_peer import Peer
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
class LeaderVolatileState(object):
"""
Volatile state on leaders: (Reinitialized after election)
nextIndex[]: for each server, index of the next log entry to send to that server (initialized to leader last log index + 1)
matchIndex[]: for each server, index of highest log entry known to be replicated on server (initialized to 0, increases monotonically)
"""
class NodeVolatileState(object):
"""
Volatile state on all servers:
commitIndex: index of highest log entry known to be committed (initialized to 0, increases monotonically)
lastApplied: index of highest log entry applied to state machine (initialized to 0, increases monotonically)
"""
class NodePersistentState(object):
"""
Persistent state on all servers: (Updated on stable storage before responding to RPCs)
currentTerm: latest term server has seen (initialized to 0 on first boot, increases monotonically)
votedFor: candidateId that received vote in current term (or null if none)
log[]: log entries; each entry contains command for state machine, and term when entry was received by leader (first index is 1)
"""
@classmethod
def load(cls, fpath):
"""
load persistent state from a file
:param fpath: path of state. Created if it does not already exist.
"""
if not os.path.exists(fpath):
open(fpath, 'a').close()
with open(fpath, 'r') as f:
json_str = f.read()
json_obj = json.loads(json_str or '{}')
current_term = json_obj.get('current_term', 0)
voted_for = json_obj.get('voted_for', None)
logs = []
for l in json_obj.get('logs', []):
entry = Entry.from_bytes(bytes(l, encoding='utf-8'))
logs.append(entry)
return NodePersistentState(fpath, current_term, voted_for, logs)
class BookingData(object):
"""
BookingData represents a room booking to be stored in the Raft log.
"""
@classmethod
class Entry(object):
"""
Entry represents a single log entry.
"""
@classmethod
| 31.298507 | 142 | 0.627881 | #!/usr/bin/env python
import datetime
import json
import logging
import os
from typing import Optional, List, Dict
from raft_peer import Peer
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
class LeaderVolatileState(object):
    """
    Volatile state kept only on leaders (reinitialized after election).
    nextIndex[]: for each server, index of the next log entry to send to
        that server (initialized to leader last log index + 1)
    matchIndex[]: for each server, index of highest log entry known to be
        replicated on that server (initialized to 0, increases monotonically)
    """
    def __init__(self, last_log_index: int, known_peers: List[Peer]):
        # next_idx: Dict[Peer, int]; match_idx: Dict[Peer, int]
        first_to_send = last_log_index + 1
        self._next_idx = {}
        self._match_idx = {}
        for peer in known_peers:
            self._next_idx[peer] = first_to_send
            self._match_idx[peer] = 0
    def set_next_idx(self, k: Peer, v: int):
        self._next_idx[k] = v
    def get_next_idx(self, k: Peer) -> int:
        return self._next_idx[k]
    def set_match_idx(self, k: Peer, v: int):
        self._match_idx[k] = v
    def get_match_idx(self, k: Peer) -> int:
        return self._match_idx[k]
    def __str__(self):
        return "nextIndex:%s matchIndex:%s" % (self._next_idx, self._match_idx)
class NodeVolatileState(object):
    """
    Volatile state kept on every server.
    commitIndex: index of highest log entry known to be committed
        (initialized to 0, increases monotonically)
    lastApplied: index of highest log entry applied to state machine
        (initialized to 0, increases monotonically)
    """
    def __init__(self):
        # Both indices restart at zero on every boot; nothing here is
        # persisted to stable storage.
        self._commit_idx = 0
        self._last_applied = 0
    def set_commit_idx(self, idx: int):
        self._commit_idx = idx
    def set_last_applied(self, idx: int):
        self._last_applied = idx
    def get_commit_idx(self) -> int:
        return self._commit_idx
    def get_last_applied(self) -> int:
        return self._last_applied
class NodePersistentState(object):
    """
    Persistent state on all servers: (Updated on stable storage before responding to RPCs)
    currentTerm: latest term server has seen (initialized to 0 on first boot, increases monotonically)
    votedFor: candidateId that received vote in current term (or null if none)
    log[]: log entries; each entry contains command for state machine, and term when entry was received by leader (first index is 1)
    """
    @classmethod
    def load(cls, fpath):
        """
        load persistent state from a file
        :param fpath: path of state. Created if it does not already exist.
        """
        if not os.path.exists(fpath):
            # First boot: create an empty state file.
            open(fpath, 'a').close()
        with open(fpath, 'r') as f:
            json_str = f.read()
            # An empty/new file deserializes to default state.
            json_obj = json.loads(json_str or '{}')
            current_term = json_obj.get('current_term', 0)
            voted_for = json_obj.get('voted_for', None)
            logs = []
            for l in json_obj.get('logs', []):
                # Logs are stored as the Entry wire format ('<term> <data>').
                entry = Entry.from_bytes(bytes(l, encoding='utf-8'))
                logs.append(entry)
            return NodePersistentState(fpath, current_term, voted_for, logs)
    def __init__(self, fpath: str, current_term: int, voted_for: int, logs: List['Entry']):
        self._fpath: str = fpath
        self._current_term: int = current_term
        self._voted_for: int = voted_for
        self._logs: List['Entry'] = logs
    def __str__(self) -> str:
        # JSON serialization; this exact string is what _save() persists.
        obj = {
            'current_term': self._current_term,
            'voted_for': self._voted_for,
            'logs': [str(l) for l in self._logs],
        }
        return json.dumps(obj)
    def get_term(self) -> int:
        return self._current_term
    def set_term(self, new_term):
        self._current_term = new_term
        self._save()
    def increment_term(self) -> int:
        """Bump currentTerm by one, persist, and return the new term."""
        self._current_term += 1
        self._save()
        return self._current_term
    def get_voted_for(self) -> Optional[int]:
        return self._voted_for
    def set_voted_for(self, voted_for=None):
        self._voted_for = voted_for
        self._save()
    def get_logs(self) -> List['Entry']:
        return self._logs
    def append_log(self, log) -> int:
        """Append an entry, persist, and return its 1-based log index."""
        curr_log_idx = len(self._logs)
        self._logs.append(log)
        self._save()
        return curr_log_idx + 1
    def set_logs(self, logs):
        # Copy the iterable so later caller-side mutation can't bypass _save().
        self._logs = [l for l in logs]
        self._save()
    def get_last_log(self) -> (int, 'Entry'):
        """Return (1-based index, entry) of the last log entry, or (0, None)."""
        try:
            idx = len(self._logs)
            return idx, self._logs[idx-1]
        except IndexError:
            return 0, None
    def _save(self):
        # Rewrite the whole state file on every mutation ("updated on
        # stable storage before responding to RPCs").
        with open(self._fpath, 'w') as f:
            f.write(str(self))
class BookingData(object):
    """
    BookingData represents a room booking to be stored in the Raft log.

    Wire format (see ``__str__`` / ``from_bytes``):
    ``'<room_id> <unix_timestamp>'`` with the timestamp in UTC, truncated
    to whole seconds.
    """
    def __init__(self, room_id: int, booking_time: datetime.datetime):
        """
        :param room_id: identifier of the booked room
        :param booking_time: naive datetime of the booking, interpreted as UTC
        """
        self._room_id: int = room_id
        self._booking_time: datetime.datetime = booking_time
    def get_room_id(self) -> int:
        """Return the identifier of the booked room."""
        return self._room_id
    def get_booking_time(self) -> datetime.datetime:
        """Return the (naive, UTC) datetime of the booking."""
        return self._booking_time
    @classmethod
    def from_bytes(cls, bytes: bytes):
        """Parse the wire format back into a BookingData (UTC decode)."""
        # Only the first space separates the two fields.
        # (Parameter name shadows the builtin `bytes`; kept for API compat.)
        room_id_bytes, booking_time_bytes = bytes.split(b' ', maxsplit=1)
        room_id = int(room_id_bytes)
        booking_time = datetime.datetime.utcfromtimestamp(int(booking_time_bytes))
        return cls(room_id, booking_time)
    def __str__(self):
        # Pin the naive datetime to UTC so the timestamp is the exact
        # inverse of from_bytes()/utcfromtimestamp().  The previous
        # strftime('%s') was non-portable (not in the C standard, breaks
        # on Windows) and used *local* time, so values did not round-trip
        # on hosts whose timezone is not UTC.
        utc_time = self._booking_time.replace(tzinfo=datetime.timezone.utc)
        return '%d %d' % (self._room_id, int(utc_time.timestamp()))
class Entry(object):
    """
    Entry represents a single log entry.

    Wire format: ``b'<term> <payload>'`` — the term as ASCII decimal,
    one space, then the raw payload bytes (which may contain spaces).
    """
    def __init__(self, term: int, data: bytes):
        """
        :param term: election term in which the entry was created
        :param data: opaque command payload for the state machine
        """
        self._term: int = term
        self._data: bytes = data
    def __bytes__(self) -> bytes:
        return b'%d %s' % (self._term, self._data)
    def __str__(self) -> str:
        return self.__bytes__().decode('utf-8')
    def __eq__(self, o: object) -> bool:
        if not isinstance(o, Entry):
            return False
        return (self._term == o._term) and (self._data == o._data)
    def __hash__(self) -> int:
        # Paired with __eq__: defining __eq__ alone sets __hash__ to None,
        # which silently made entries unhashable (unusable in sets/dicts).
        return hash((self._term, self._data))
    @classmethod
    def from_bytes(cls, _bytes):
        """Parse ``b'<term> <payload>'`` back into an Entry.

        Only the first space is significant, so payloads containing
        spaces round-trip correctly.
        """
        # The first parameter of a classmethod receives the class; it was
        # previously misnamed `self`.
        term_bytes, data_bytes = _bytes.split(b' ', maxsplit=1)
        term = int(term_bytes)
        return cls(term, data_bytes)
| 3,037 | 0 | 889 |
5db3762b036c1daaae3e45b6c4a15f286f61a8be | 192 | py | Python | skdd/main.py | rlurye/skdd | 2366d155e78dabed991e4c11f93c399f797ca5e3 | [
"MIT"
] | null | null | null | skdd/main.py | rlurye/skdd | 2366d155e78dabed991e4c11f93c399f797ca5e3 | [
"MIT"
] | null | null | null | skdd/main.py | rlurye/skdd | 2366d155e78dabed991e4c11f93c399f797ca5e3 | [
"MIT"
] | null | null | null | import sys
import skdd.core as core
if __name__ == "__main__":
    # Use the first command-line argument as the input file, falling back
    # to the bundled 'test.xlsx' when the script is run without arguments.
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        filename = 'test.xlsx'
    core.analysis(filename)
| 16 | 30 | 0.604167 | import sys
import skdd.core as core
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = 'test.xlsx'
core.analysis(filename)
| 0 | 0 | 0 |
9cd87f55c8d9956464fe241dfd6cfcefdb1e0f57 | 4,185 | py | Python | Source/SchemaClass.py | leonerdo037/Templatize | 4599b7d4050f63ff9e5095dbbff4e598152f6437 | [
"Apache-2.0"
] | null | null | null | Source/SchemaClass.py | leonerdo037/Templatize | 4599b7d4050f63ff9e5095dbbff4e598152f6437 | [
"Apache-2.0"
] | null | null | null | Source/SchemaClass.py | leonerdo037/Templatize | 4599b7d4050f63ff9e5095dbbff4e598152f6437 | [
"Apache-2.0"
] | null | null | null | import os
import Errors as err
import Settings as props
import FileHandler as fl
import JSONHandler as js
from ProjectClass import Project | 40.631068 | 144 | 0.65687 | import os
import Errors as err
import Settings as props
import FileHandler as fl
import JSONHandler as js
from ProjectClass import Project
class Schema(Project):
homeDIR=os.path.join(os.path.dirname(os.path.realpath(__file__)), "Projects")
schemaMetaData=None
schemaName=None
schemaPath=None
#def __init__(self, projectName, schemaName):
# self.schemaName=schemaName
# super(Schema, self).__init__(projectName)
# self.schemaPath=os.path.join(self.projectPath, schemaName)
def __ValidateArgs(self):
if self.schemaName==None:
raise err.Conflict("Schema arguments are missing !")
return None
@classmethod
def InitSchema(self, projectName=None, schemaName=None):
self.schemaName=schemaName
super(Schema, self).InitProject(projectName)
self.schemaPath=os.path.join(self.projectPath, schemaName)
self.schemaMetaData=os.path.join(self.schemaPath, "metadata.json")
def CreateSchema(self, schemaDescription, groupCount):
self.__ValidateArgs()
try:
if self.OpenSchema() is not None:
raise err.Conflict("A Schema with the name '{0}' already exists !".format(self.schemaName))
except err.Conflict as ex:
if "Unable to find a Project" in str(ex): return None
if "already exists" in str(ex):
raise err.Conflict("A Schema with the name '{0}' already exists !".format(self.schemaName))
return None
# Creating Directory & File
try:
os.makedirs(self.schemaPath)
jsonContent=js.Load(fl.Read(self.projectMetaData))
jsonContent["Schemas"].append(self.schemaName)
fl.Write(self.projectMetaData, js.Dump(jsonContent), True)
# Creating Schema Metadata
jsonContent=js.SchemaJSON(self.schemaName, schemaDescription, groupCount)
fl.Write(self.schemaMetaData, js.Dump(jsonContent), True)
return "Schema '{0}' created successfully !".format(self.schemaName)
except WindowsError:
raise err.Conflict("There are errors in the metadata file. Synchronize the data to fix them !")
except OSError:
raise err.Conflict("There are errors in the metadata file. Synchronize the data to fix them !")
if os.path.exists(self.schemaPath):
os.removedirs(self.schemaPath)
return None
def OpenSchema(self):
self.__ValidateArgs()
schemas=self.GetSchemaList()
# Opening Schema
if self.schemaName in schemas:
return js.Load(fl.Read(self.schemaMetaData))
else:
raise err.Conflict("Unable to find a Schema with the name '{0}'".format(self.schemaName))
return None
def GetSchemaDescription(self):
jsonContent=self.OpenSchema()
return jsonContent["SchemaDescription"]
def GetModuleList(self):
jsonContent=self.OpenSchema()
return jsonContent["Modules"]
def GetTemplateList(self):
jsonContent=self.OpenSchema()
return jsonContent["Templates"]
def GetGroupCount(self):
jsonContent=self.OpenSchema()
return jsonContent["GroupCount"]
def GetSchemaVariables(self):
jsonContent=self.OpenSchema()
return jsonContent["SchemaVariables"]
def CreateSchemaVariable(self, variableName, variableDescription, variableType, variableMode, value=None):
self.__ValidateArgs()
# Setting Value
if variableMode != "Static":
value = None
jsonContent=self.OpenSchema()
# Validating Uniquness
if variableName in jsonContent["SchemaVariables"]:
raise err.Conflict("A Schema Variable with the name '{0}' already exists !".format(variableName))
return None
else:
jsonContent["SchemaVariables"][variableName]=(js.VariableJSON(variableName, variableDescription, variableType, variableMode, value))
fl.Write(self.schemaMetaData, js.Dump(jsonContent), True)
return "Variable '{0}' created successfully !".format(variableName) | 3,371 | 653 | 23 |
19beefd39a42aea178141b1fb59c8bba9338b421 | 4,779 | py | Python | carpet.py | rafaelhenrique/poor-jobs-analyzer | aee101443f2c9b0658bbd9a151c0f15ce0d00dac | [
"MIT"
] | 1 | 2020-10-18T19:08:23.000Z | 2020-10-18T19:08:23.000Z | carpet.py | jm0216/poor-jobs-analyzer | aee101443f2c9b0658bbd9a151c0f15ce0d00dac | [
"MIT"
] | null | null | null | carpet.py | jm0216/poor-jobs-analyzer | aee101443f2c9b0658bbd9a151c0f15ce0d00dac | [
"MIT"
] | 2 | 2020-10-18T19:08:07.000Z | 2021-08-14T14:31:18.000Z | import csv
import os
import re
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor
from html.parser import HTMLParser
from operator import itemgetter
from urllib import request
from urllib.error import HTTPError
def csv_to_list(filename: str) -> list:
"""Receive an csv filename and returns rows of file with an list"""
with open(filename) as csv_file:
reader = csv.DictReader(csv_file)
csv_data = [line for line in reader]
return csv_data
def get_files_in_directory(directory: str) -> list:
"""Receive an directory and returns an list of filenames in directory"""
full_filenames = []
for root, dirs, files in os.walk(directory):
for file in files:
filename = os.path.join(root, file)
full_filenames.append(filename)
return full_filenames
def evaluate_job_file(filename: str, metrics: list) -> tuple:
    """
    Score a job-description file against a list of metrics.

    Each metric is a dict with a 'Terms' key (the term to look for,
    matched case-insensitively as a whole word) and a 'Poor level' key
    (an integer weight, given as int or str).

    :param filename: path of the job-description text file
    :param metrics: list of {'Terms': str, 'Poor level': int-like} dicts
    :return: (total poor level, list of matched terms)
    """
    poor_level = 0
    words = []
    with open(filename) as file:
        content = file.read()
    # Lowercase once instead of once per metric.
    lower_content = content.lower()
    for metric in metrics:
        lower_term = metric['Terms'].lower()
        # re.escape() keeps terms such as 'C++' or 'C#' from being read as
        # regex syntax (the unescaped 'c++' even raises re.error).  \b does
        # not work at the edge of a non-word character, so use explicit
        # "not adjacent to a word character" look-arounds instead.
        pattern = r'(?<!\w){}(?!\w)'.format(re.escape(lower_term))
        if re.search(pattern, lower_content):
            poor_level += int(metric['Poor level'])
            words.append(metric['Terms'])
    return poor_level, words
def order_by_key(results_list: list, order_key: str) -> list:
"""Receive an list of dicts and return ordered list by order_key"""
reordered_results = sorted(results_list, key=itemgetter(order_key))
return reordered_results
def get_pyjob_codes(url='http://www.pyjobs.com.br/', page=1) -> list:
"""Receive and url and page of pyjobs and return list of codes of jobs"""
job_codes = []
full_url = '{}?page={}'.format(url, page)
try:
response = request.urlopen(full_url)
except HTTPError as exc:
print('Error "{}" when get "{}"'.format(exc.msg, full_url))
return job_codes
pattern = r'href="/job/([0-9]+)/"'
for line in response:
decoded_line = line.decode('utf-8')
match = re.search(pattern, decoded_line)
if match:
job_code = match.group(1)
job_codes.append(job_code)
return job_codes
def get_pyjob_content(pyjob_code: str) -> str:
"""Get an pyjob_code and return your description"""
job_url = 'http://www.pyjobs.com.br/job/{}/'
url = job_url.format(pyjob_code)
try:
response = request.urlopen(url)
except HTTPError as exc:
print('Error "{}" when get "{}"'.format(exc.msg, url))
return (pyjob_code, None)
response_content = response.read().decode('utf-8')
parser = ParsePyjobsHTML()
parser.feed(response_content)
return (pyjob_code, parser.parsed_content)
def async_get_pyjob_codes(initial_page: int, final_page: int, max_workers=10) -> list:
"""Get initial_page and final_page of pyjobs and return a list pyjob_codes from pages"""
print('Running async_get_pyjob_codes...')
pyjob_codes = []
pages = range(initial_page, final_page + 1)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
to_do_map = {}
for page in pages:
future = executor.submit(get_pyjob_codes, page=page)
to_do_map[future] = page
done_iter = futures.as_completed(to_do_map)
for future in done_iter:
pyjob_codes += future.result()
return pyjob_codes
def async_get_pyjob_content(pyjob_codes: list, max_workers=10) -> list:
"""Get pyjob_codes, get content of pyjob and return a list of contents"""
print('Running async_get_pyjob_content...')
contets = []
with ThreadPoolExecutor(max_workers=max_workers) as executor:
to_do_map = {}
for pyjob_code in pyjob_codes:
future = executor.submit(get_pyjob_content, pyjob_code=pyjob_code)
to_do_map[future] = pyjob_code
done_iter = futures.as_completed(to_do_map)
for future in done_iter:
pyjob_code, content = future.result()
contets.append((pyjob_code, content))
return contets
| 30.634615 | 92 | 0.65746 | import csv
import os
import re
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor
from html.parser import HTMLParser
from operator import itemgetter
from urllib import request
from urllib.error import HTTPError
def csv_to_list(filename: str) -> list:
    """Read *filename* as CSV and return its rows as a list of dicts
    (one dict per row, keyed by the header fields)."""
    with open(filename) as csv_file:
        return list(csv.DictReader(csv_file))
def get_files_in_directory(directory: str) -> list:
    """Walk *directory* recursively and return the full paths of every
    file found (directories themselves are not included)."""
    return [
        os.path.join(parent, name)
        for parent, _subdirs, names in os.walk(directory)
        for name in names
    ]
def evaluate_job_file(filename: str, metrics: list) -> tuple:
    """
    Score a job-description file against a list of metrics.

    Each metric is a dict with a 'Terms' key (the term to look for,
    matched case-insensitively as a whole word) and a 'Poor level' key
    (an integer weight, given as int or str).

    :param filename: path of the job-description text file
    :param metrics: list of {'Terms': str, 'Poor level': int-like} dicts
    :return: (total poor level, list of matched terms)
    """
    poor_level = 0
    words = []
    with open(filename) as file:
        content = file.read()
    # Lowercase once instead of once per metric.
    lower_content = content.lower()
    for metric in metrics:
        lower_term = metric['Terms'].lower()
        # re.escape() keeps terms such as 'C++' or 'C#' from being read as
        # regex syntax (the unescaped 'c++' even raises re.error).  \b does
        # not work at the edge of a non-word character, so use explicit
        # "not adjacent to a word character" look-arounds instead.
        pattern = r'(?<!\w){}(?!\w)'.format(re.escape(lower_term))
        if re.search(pattern, lower_content):
            poor_level += int(metric['Poor level'])
            words.append(metric['Terms'])
    return poor_level, words
def order_by_key(results_list: list, order_key: str) -> list:
    """Return a new list with the dicts of *results_list* sorted in
    ascending order by the value stored under *order_key*."""
    return sorted(results_list, key=itemgetter(order_key))
def get_pyjob_codes(url='http://www.pyjobs.com.br/', page=1) -> list:
    """
    Fetch one pyjobs listing page and return the job codes found on it.

    :param url: base URL of the pyjobs site
    :param page: page number appended as a query-string parameter
    :return: list of job codes (str); empty list on HTTP error
    """
    job_codes = []
    full_url = '{}?page={}'.format(url, page)
    try:
        response = request.urlopen(full_url)
    except HTTPError as exc:
        # Best-effort: report the failure and treat the page as empty.
        print('Error "{}" when get "{}"'.format(exc.msg, full_url))
        return job_codes
    # Job links look like href="/job/<numeric code>/"
    pattern = r'href="/job/([0-9]+)/"'
    for line in response:
        decoded_line = line.decode('utf-8')
        match = re.search(pattern, decoded_line)
        if match:
            job_code = match.group(1)
            job_codes.append(job_code)
    return job_codes
class ParsePyjobsHTML(HTMLParser):
    """Collect the text found inside <section> tags of a pyjobs page.

    After feed(), the concatenated text is available in
    ``parsed_content``; text outside <section> elements is ignored.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.parsed_content = ""
        self.capture_content = False
    def handle_starttag(self, tag, attrs):
        if tag != 'section':
            return
        # Start buffering text as soon as a <section> opens.
        self.capture_content = True
    def handle_endtag(self, tag):
        if tag != 'section':
            return
        self.capture_content = False
    def handle_data(self, data):
        if not self.capture_content:
            return
        self.parsed_content += data
def get_pyjob_content(pyjob_code: str) -> tuple:
    """
    Download one pyjobs job page and extract its description text.

    :param pyjob_code: numeric job code as returned by get_pyjob_codes
    :return: (pyjob_code, description) tuple; description is None when
        the HTTP request fails
    """
    job_url = 'http://www.pyjobs.com.br/job/{}/'
    url = job_url.format(pyjob_code)
    try:
        response = request.urlopen(url)
    except HTTPError as exc:
        print('Error "{}" when get "{}"'.format(exc.msg, url))
        return (pyjob_code, None)
    response_content = response.read().decode('utf-8')
    # ParsePyjobsHTML accumulates the text found inside <section> tags.
    parser = ParsePyjobsHTML()
    parser.feed(response_content)
    return (pyjob_code, parser.parsed_content)
def async_get_pyjob_codes(initial_page: int, final_page: int, max_workers=10) -> list:
    """
    Fetch listing pages initial_page..final_page concurrently and return
    every job code found (order depends on completion order of the pages).

    :param initial_page: first page number (inclusive)
    :param final_page: last page number (inclusive)
    :param max_workers: size of the thread pool used for the downloads
    :return: flat list of job codes from all pages
    """
    print('Running async_get_pyjob_codes...')
    pyjob_codes = []
    pages = range(initial_page, final_page + 1)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        to_do_map = {}
        for page in pages:
            future = executor.submit(get_pyjob_codes, page=page)
            to_do_map[future] = page
        done_iter = futures.as_completed(to_do_map)
        for future in done_iter:
            # Extend with that page's codes as soon as it finishes.
            pyjob_codes += future.result()
    return pyjob_codes
def async_get_pyjob_content(pyjob_codes: list, max_workers=10) -> list:
    """Download the content of every given job code concurrently.

    :param pyjob_codes: Job codes to fetch, as produced by the code scrapers.
    :param max_workers: Size of the thread pool.
    :returns: List of ``(pyjob_code, content)`` tuples in completion order.
    """
    print('Running async_get_pyjob_content...')
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Map each future to the job code it is downloading.
        future_to_code = {
            executor.submit(get_pyjob_content, pyjob_code=code): code
            for code in pyjob_codes
        }
        for done in futures.as_completed(future_to_code):
            results.append(done.result())
    return results
| 371 | 13 | 130 |
b9c5a166e7812fd36719f27baba5cd7863731b68 | 970 | py | Python | lib/parsers/udger-tests/udgerWrap.py | levabd/http-ml-antifraud | c511c8b2cc760088298dec90ed43816c17d2bb82 | [
"MIT"
] | null | null | null | lib/parsers/udger-tests/udgerWrap.py | levabd/http-ml-antifraud | c511c8b2cc760088298dec90ed43816c17d2bb82 | [
"MIT"
] | null | null | null | lib/parsers/udger-tests/udgerWrap.py | levabd/http-ml-antifraud | c511c8b2cc760088298dec90ed43816c17d2bb82 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
from lib import Udger
def is_crawler(client_ip):
    """Return True when Udger classifies *client_ip* as a crawler.

    :param client_ip: IP address string to classify.
    :return: crawler or not
    """
    # Udger database files live next to this module.
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    udger = Udger(data_dir)
    # The comparison already yields a bool; `True if ... else False` was redundant.
    return udger.parse_ip(client_ip)['ip_classification_code'] == 'crawler'
def get_ua(client_ua):
    """
    :return: dict {ua_family_code, ua_version, ua_class_code, device_class_code,
                   os_family_code, os_code}
    """
    # Udger database files live next to this module.
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    parsed = Udger(data_dir).parse_ua(client_ua)
    # Copy only the fields callers rely on, in this fixed order.
    wanted = (
        'ua_family_code',
        'ua_version',
        'ua_class_code',
        'device_class_code',
        'os_family_code',
        'os_code',
    )
    return {key: parsed[key] for key in wanted}
| 24.25 | 94 | 0.665979 | #!/usr/bin/env python
import os
from lib import Udger
def is_crawler(client_ip):
    """Return True when Udger classifies *client_ip* as a crawler.

    :param client_ip: IP address string to classify.
    :return: crawler or not
    """
    # Udger database files live next to this module.
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    udger = Udger(data_dir)
    # The comparison already yields a bool; `True if ... else False` was redundant.
    return udger.parse_ip(client_ip)['ip_classification_code'] == 'crawler'
def get_ua(client_ua):
    """
    :return: dict {ua_family_code, ua_version, ua_class_code, device_class_code,
                   os_family_code, os_code}
    """
    # Udger database files live next to this module.
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    parsed = Udger(data_dir).parse_ua(client_ua)
    # Copy only the fields callers rely on, in this fixed order.
    wanted = (
        'ua_family_code',
        'ua_version',
        'ua_class_code',
        'device_class_code',
        'os_family_code',
        'os_code',
    )
    return {key: parsed[key] for key in wanted}
| 0 | 0 | 0 |
ffdff16d2b0207ba135ba0305ab5b0bb11f18acb | 839 | py | Python | tests/test_utils.py | dmitry-viskov/django-lti1.3 | 7474685aecc35d528b944caf05b28ea66ac6d204 | [
"MIT"
] | null | null | null | tests/test_utils.py | dmitry-viskov/django-lti1.3 | 7474685aecc35d528b944caf05b28ea66ac6d204 | [
"MIT"
] | null | null | null | tests/test_utils.py | dmitry-viskov/django-lti1.3 | 7474685aecc35d528b944caf05b28ea66ac6d204 | [
"MIT"
] | null | null | null | import unittest
from pylti1p3.utils import add_param_to_url
| 44.157895 | 108 | 0.690107 | import unittest
from pylti1p3.utils import add_param_to_url
class TestUtils(unittest.TestCase):
    """Unit tests for the ``add_param_to_url`` helper."""

    def test_add_param_to_url(self):
        base = 'https://lms.example.com/class/2923/groups/sets'
        # No existing query string: the parameter is simply appended.
        self.assertEqual(add_param_to_url(base, 'user_id', 123),
                         base + '?user_id=123')
        # Existing query string: merged, but parameter order is unspecified.
        merged = add_param_to_url(base + '?some=xxx', 'user_id', 123)
        self.assertIn(merged, [
            base + '?some=xxx&user_id=123',
            base + '?user_id=123&some=xxx',
        ])
        # Existing value for the same key is replaced.
        self.assertEqual(add_param_to_url(base + '?user_id=456', 'user_id', 123),
                         base + '?user_id=123')
10c19f97339b84034471da0186d6eadfad6e4990 | 840 | py | Python | jaraco/clipboard/__init__.py | jaraco/jaraco.clipboard | 5865bea79b4c24b95b5c7a81665de817427b7049 | [
"MIT"
] | 14 | 2016-12-05T18:21:33.000Z | 2021-02-19T00:17:13.000Z | jaraco/clipboard/__init__.py | jaraco/jaraco.clipboard | 5865bea79b4c24b95b5c7a81665de817427b7049 | [
"MIT"
] | 8 | 2017-12-16T23:58:09.000Z | 2020-11-27T21:10:02.000Z | jaraco/clipboard/__init__.py | jaraco/jaraco.clipboard | 5865bea79b4c24b95b5c7a81665de817427b7049 | [
"MIT"
] | 4 | 2018-03-20T06:30:20.000Z | 2020-02-05T04:19:37.000Z | import platform
import importlib
import itertools
# Populate this module's namespace with platform-specific copy/paste
# functions at import time.
# NOTE(review): ``_init`` is not defined in this fragment — presumably
# provided elsewhere in the file; confirm before relying on it.
_init()
# Export every public callable that _init installed.
__all__ = [
	name
	for name, func in globals().items()
	if callable(func) and not name.startswith('_')
]
| 24 | 83 | 0.67619 | import platform
import importlib
import itertools
def _not_implemented(*args, **kwargs):
raise NotImplementedError("format not supported")
def _init():
    """Install platform-specific copy/paste functions into this module.

    Loads the submodule named after ``platform.system()`` and publishes
    ``copy_text``/``paste_text``/... plus ``copy``/``paste`` aliases;
    routines the backend lacks fall back to ``_not_implemented``.
    """
    backend = importlib.import_module('.' + platform.system(), 'jaraco.clipboard')
    namespace = globals()
    # support copy and paste of text, html, and image.
    for mode, fmt in itertools.product(('copy', 'paste'), ('text', 'html', 'image')):
        name = '_'.join((mode, fmt))
        namespace[name] = getattr(backend, name, _not_implemented)
    # Plain copy/paste are aliases for the text variants.
    namespace['copy'] = namespace['copy_text']
    namespace['paste'] = namespace['paste_text']
# Run at import time so the module immediately exposes the platform's
# clipboard functions.
_init()
# Export every public callable that _init installed above.
__all__ = [
	name
	for name, func in globals().items()
	if callable(func) and not name.startswith('_')
]
| 616 | 0 | 46 |
cd0cb1a517da46fdf4ad0572fd64c19255c44bce | 679 | py | Python | c/acerca_de.py | yo-alan/personal | 2f711a9f5dd5a16fbb3ab2a6f9b89069894ce40c | [
"MIT"
] | null | null | null | c/acerca_de.py | yo-alan/personal | 2f711a9f5dd5a16fbb3ab2a6f9b89069894ce40c | [
"MIT"
] | 10 | 2015-01-12T12:57:09.000Z | 2015-03-30T13:39:23.000Z | c/acerca_de.py | yo-alan/personal | 2f711a9f5dd5a16fbb3ab2a6f9b89069894ce40c | [
"MIT"
] | null | null | null | # coding=utf-8
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from v.ui_acerca_de import Ui_Acerca_de
| 18.861111 | 66 | 0.705449 | # coding=utf-8
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from v.ui_acerca_de import Ui_Acerca_de
class Acerca_de(QDialog):
    """'About' dialog: localises the OK button and can centre itself."""

    def __init__(self, principal):
        QDialog.__init__(self, principal)
        self.ui = Ui_Acerca_de()
        self.ui.setupUi(self)
        # Spanish label for the standard OK button.
        self.ui.buttonBox.button(QDialogButtonBox.Ok).setText("Aceptar")

    def center(self):
        """Move the dialog to the centre of the available screen area."""
        frame = self.frameGeometry()
        frame.moveCenter(QDesktopWidget().availableGeometry().center())
        self.move(frame.topLeft())

    def mostrar(self):
        """Show the dialog, then centre it on screen."""
        self.show()
        self.center()

    def closeEvent(self, event):
        # Intentionally no extra handling; the default close proceeds.
        pass

    def reject(self):
        self.done(QDialog.Rejected)

    def accept(self):
        self.done(QDialog.Accepted)
| 392 | 4 | 174 |
9069a1163b328a9f4bb1df6280d0604bb5d84102 | 1,544 | py | Python | setup.py | nicolossus/neuromodels | 82f95a8670116ef26b71c02f9c94626c502bc989 | [
"MIT"
] | null | null | null | setup.py | nicolossus/neuromodels | 82f95a8670116ef26b71c02f9c94626c502bc989 | [
"MIT"
] | null | null | null | setup.py | nicolossus/neuromodels | 82f95a8670116ef26b71c02f9c94626c502bc989 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

from setuptools import find_packages, setup

# Directory containing this setup.py; used to locate the version file.
here = os.path.abspath(os.path.dirname(__file__))

# Package meta-data.
NAME = "neuromodels"
DESCRIPTION = "Computational neuroscience models and model tools."
URL = "https://github.com/nicolossus/neuromodels"
EMAIL = "prof.haug@gmail.com"
AUTHOR = "Nicolai Haug"
REQUIRES_PYTHON = '>=3.8.0'

# Runtime dependencies installed with the package.
REQUIRES_INSTALL = [
    "numpy",
    "matplotlib",
    "scipy",
    "pandas",
    "seaborn",
    "neo",
    "quantities",
    "elephant",
    "viziphant"
]

# Optional dependency groups (installed via ``pip install neuromodels[dev]``).
REQUIRES_EXTRAS = {
    "dev": [
        "pytest",
        "pytest-cov",
        "flake8>=3.9.2",
        "isort",
        "twine",
    ],
}

# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    LONG_DESCRIPTION = fh.read()

# Read the version by executing <package>/__version__.py into a dict,
# avoiding an import of the (possibly not yet installed) package.
about = {}
with open(os.path.join(here, NAME, "__version__.py")) as f:
    exec(f.read(), about)
VERSION = about['__version__']

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description_content_type="text/markdown",
    long_description=LONG_DESCRIPTION,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    packages=find_packages(exclude=["tests", ]),
    python_requires=REQUIRES_PYTHON,
    install_requires=REQUIRES_INSTALL,
    extras_require=REQUIRES_EXTRAS,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 21.444444 | 66 | 0.630829 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

from setuptools import find_packages, setup

# Directory containing this setup.py; used to locate the version file.
here = os.path.abspath(os.path.dirname(__file__))

# Package meta-data.
NAME = "neuromodels"
DESCRIPTION = "Computational neuroscience models and model tools."
URL = "https://github.com/nicolossus/neuromodels"
EMAIL = "prof.haug@gmail.com"
AUTHOR = "Nicolai Haug"
REQUIRES_PYTHON = '>=3.8.0'

# Runtime dependencies installed with the package.
REQUIRES_INSTALL = [
    "numpy",
    "matplotlib",
    "scipy",
    "pandas",
    "seaborn",
    "neo",
    "quantities",
    "elephant",
    "viziphant"
]

# Optional dependency groups (installed via ``pip install neuromodels[dev]``).
REQUIRES_EXTRAS = {
    "dev": [
        "pytest",
        "pytest-cov",
        "flake8>=3.9.2",
        "isort",
        "twine",
    ],
}

# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    LONG_DESCRIPTION = fh.read()

# Read the version by executing <package>/__version__.py into a dict,
# avoiding an import of the (possibly not yet installed) package.
about = {}
with open(os.path.join(here, NAME, "__version__.py")) as f:
    exec(f.read(), about)
VERSION = about['__version__']

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description_content_type="text/markdown",
    long_description=LONG_DESCRIPTION,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    packages=find_packages(exclude=["tests", ]),
    python_requires=REQUIRES_PYTHON,
    install_requires=REQUIRES_INSTALL,
    extras_require=REQUIRES_EXTRAS,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 0 | 0 | 0 |
d92e1a0cd9b4c7a618b059e56dc9b1efab956c8c | 3,671 | py | Python | coordinatesystem/utility.py | jlawcordova/coordinatesystem | f2cd3b85e4edd4db17c00ed7d45584e997960e83 | [
"MIT"
] | null | null | null | coordinatesystem/utility.py | jlawcordova/coordinatesystem | f2cd3b85e4edd4db17c00ed7d45584e997960e83 | [
"MIT"
] | null | null | null | coordinatesystem/utility.py | jlawcordova/coordinatesystem | f2cd3b85e4edd4db17c00ed7d45584e997960e83 | [
"MIT"
] | null | null | null | """
Contains classes which serve to provide functionalities that handle calculations
involving coordinate systems.
"""
import sys
from math import atan, pi
from coordinatesystem.component import Point2D
from coordinatesystem.component import Line2D
from coordinatesystem.exceptions import ZeroDistanceError
class EquivalentCoordinate:
    """
    A class handling conversion from a cartesian coordinate system to another
    scaled or rotated equivalent cartesian coordinate system. Two reference points must be
    provided for each coordinate system in order to plot a point from the original
    coordinate to the equivalent coordinate.

    NOTE(review): this fragment references private attributes
    (``__distancescale``, ``__origlineref``, ``__origperpensloperef``,
    ``__is_reflected_*``) that are presumably initialised in an ``__init__``
    not shown here — confirm against the full class.

    :param origpointref1: First reference point from the original coordinate system.
    :param origpointref2: Second reference point from the original coordinate system.
    :param equipointref1: First reference point from the equivalent coordinate system.
    :param equipointref2: Second reference point from the equivalent coordinate system.
    """
    def get_equivalent_point(self, origpoint):
        """
        Gets the point in the equivalent coordinate system corresponding
        to a point in the original coordinate system.

        NOTE(review): ``equipoint = self.equipointref1`` aliases the stored
        reference point and ``offset`` then appears to mutate it in place, so
        repeated calls would corrupt the instance state — confirm against
        Point2D's ``offset`` semantics.

        :param origpoint: Point in the original coordinate system.
        :returns: Point in the equivalent coordinate system.
        """
        # Polar form of origpoint relative to the first original reference point.
        origdistance = self.origpointref1.get_distance(origpoint)
        origline = Line2D.from_two_points(self.origpointref1, origpoint)
        # Get the equivalent distance.
        equidistance = self.__distancescale * origdistance
        # Get the equivalent angle. Add 180 degrees if the point is
        # located below the line which is perpendicular to the original line reference.
        anglebetween = origline.get_angle_between(self.__origlineref)
        origperpenline = Line2D.from_point_slope(self.__origperpensloperef, self.origpointref1)
        if(origperpenline.is_above_point(origpoint) != origperpenline.is_above_point(self.origpointref2)):
            anglebetween += pi
        equipointrelative = Point2D.from_polar_coordinate(equidistance, anglebetween)
        # Undo the rotation of the original reference line.
        equipointrelative.rotate(atan(-self.__origlineref.slope))
        # Reflect if references are reflected.
        if(self.__is_reflected_horizontally):
            equipointrelative.x *= -1
        if(self.__is_reflected_vertically):
            equipointrelative.y *= -1
        # Offset the first equivalent point reference.
        equipoint = self.equipointref1
        equipoint.offset(equipointrelative.x, equipointrelative.y)
        return equipoint
return equipoint | 43.702381 | 120 | 0.727867 | """
Contains classes which serve to provide functionalities that handle calculations
involving coordinate systems.
"""
import sys
from math import atan, pi
from coordinatesystem.component import Point2D
from coordinatesystem.component import Line2D
from coordinatesystem.exceptions import ZeroDistanceError
class EquivalentCoordinate:
    """
    A class handling conversion from a cartesian coordinate system to another
    scaled or rotated equivalent cartesian coordinate system. Two reference points must be
    provided for each coordinate system in order to plot a point from the original
    coordinate to the equivalent coordinate.

    :param origpointref1: First reference point from the original coordinate system.
    :param origpointref2: Second reference point from the original coordinate system.
    :param equipointref1: First reference point from the equivalent coordinate system.
    :param equipointref2: Second reference point from the equivalent coordinate system.
    :raises ZeroDistanceError: If the two original reference points coincide.
    """
    def __init__(self, origpointref1, origpointref2, equipointref1, equipointref2):
        self.origpointref1 = origpointref1
        self.origpointref2 = origpointref2
        self.equipointref1 = equipointref1
        self.equipointref2 = equipointref2
        # Ratio between distances in the equivalent and original systems.
        try:
            self.__distancescale = equipointref1.get_distance(equipointref2) / origpointref1.get_distance(origpointref2)
        except ZeroDivisionError:
            raise ZeroDistanceError()
        self.__origlineref = Line2D.from_two_points(origpointref1, origpointref2)
        # Get the slope of the line which is perpendicular to the
        # original line reference; a tiny denominator stands in for a zero slope.
        if(self.__origlineref.slope != 0):
            self.__origperpensloperef = -1 / self.__origlineref.slope
        else:
            self.__origperpensloperef = -1 / sys.float_info.min
        self.__equilineref = Line2D.from_two_points(equipointref1, equipointref2)
        # Whether the equivalent axes run opposite to the original axes.
        self.__is_reflected_horizontally = equipointref1.x > equipointref2.x
        self.__is_reflected_vertically = equipointref1.y > equipointref2.y

    def get_equivalent_point(self, origpoint):
        """
        Gets the point in the equivalent coordinate system corresponding
        to a point in the original coordinate system.

        :param origpoint: Point in the original coordinate system.
        :returns: New point in the equivalent coordinate system; the stored
            reference points are left untouched.
        """
        origdistance = self.origpointref1.get_distance(origpoint)
        origline = Line2D.from_two_points(self.origpointref1, origpoint)
        # Get the equivalent distance.
        equidistance = self.__distancescale * origdistance
        # Get the equivalent angle. Add 180 degrees if the point is
        # located below the line which is perpendicular to the original line reference.
        anglebetween = origline.get_angle_between(self.__origlineref)
        origperpenline = Line2D.from_point_slope(self.__origperpensloperef, self.origpointref1)
        if(origperpenline.is_above_point(origpoint) != origperpenline.is_above_point(self.origpointref2)):
            anglebetween += pi
        equipointrelative = Point2D.from_polar_coordinate(equidistance, anglebetween)
        # Undo the rotation of the original reference line.
        equipointrelative.rotate(atan(-self.__origlineref.slope))
        # Reflect if references are reflected.
        if(self.__is_reflected_horizontally):
            equipointrelative.x *= -1
        if(self.__is_reflected_vertically):
            equipointrelative.y *= -1
        # Bug fix: the previous code did `equipoint = self.equipointref1` and
        # then mutated it via offset(), corrupting the stored reference point
        # for every later call. Offsetting the freshly built relative point
        # by the reference's coordinates yields the same result without
        # touching instance state.
        equipointrelative.offset(self.equipointref1.x, self.equipointref1.y)
        return equipointrelative
return equipoint | 1,056 | 0 | 27 |
84b05be94cc55d868f5cc8fc0ca1115e9d29653e | 503 | py | Python | backendapp/travels/migrations/0038_auto_20200123_1535.py | finebrush/takeatripsFB | 85a5be1a2ee68531f04f2601a3f69ddc608d4d27 | [
"BSD-3-Clause"
] | null | null | null | backendapp/travels/migrations/0038_auto_20200123_1535.py | finebrush/takeatripsFB | 85a5be1a2ee68531f04f2601a3f69ddc608d4d27 | [
"BSD-3-Clause"
] | 13 | 2020-02-12T03:05:15.000Z | 2022-02-10T14:26:50.000Z | backendapp/travels/migrations/0038_auto_20200123_1535.py | finebrush/takeatripsFB | 85a5be1a2ee68531f04f2601a3f69ddc608d4d27 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.7 on 2020-01-23 06:35
from django.db import migrations, models
import django.db.models.deletion
| 25.15 | 130 | 0.652087 | # Generated by Django 2.2.7 on 2020-01-23 06:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: re-declares the ``infotravel``
    # foreign key on ``likeit`` with cascade delete and the reverse
    # accessor name "likeits".

    dependencies = [
        ('travels', '0037_auto_20200115_1738'),
    ]
    operations = [
        migrations.AlterField(
            model_name='likeit',
            name='infotravel',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likeits', to='travels.InfoTravel'),
        ),
    ]
| 0 | 356 | 23 |