hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eacd81437dbcbc037e8b4df23a508d3943eb12fe | 1,565 | py | Python | restructure_graphs.py | mheydasch/GC_analyzer_helpfiles | 693bca6a451d80db27f434a1482ab1a692533cd3 | [
"MIT"
] | null | null | null | restructure_graphs.py | mheydasch/GC_analyzer_helpfiles | 693bca6a451d80db27f434a1482ab1a692533cd3 | [
"MIT"
] | null | null | null | restructure_graphs.py | mheydasch/GC_analyzer_helpfiles | 693bca6a451d80db27f434a1482ab1a692533cd3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 15:52:16 2019
@author: max
"""
import os
import re
import argparse
import shutil
#%%
if __name__ == '__main__':
args=parseArguments()
path=args.dir
copy_file(path)
print(args)
#%% | 23.014706 | 103 | 0.577636 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 15:52:16 2019
@author: max
"""
import os
import re
import argparse
import shutil
#%%
def parseArguments():
    """Build the command-line parser and return the parsed arguments.

    The single required option -d/--dir names the hierarchical cluster
    folder to scan.
    """
    arg_parser = argparse.ArgumentParser(description='Get tags from all files in a directory.')
    arg_parser.add_argument('-d', '--dir', type=str, help='The hierarchical cluster folder', required=True)
    return arg_parser.parse_args()
def createFolder(directory):
    """Create *directory* (including parents) if it does not exist yet.

    Failures are reported on stdout instead of raising, matching the
    original best-effort behaviour.
    """
    if os.path.exists(directory):
        return
    try:
        os.makedirs(directory)
    except OSError:
        print('Error: Creating directory. ' + directory)
def get_folders(path):
    """Walk *path* recursively and map each .png's full path to its basename."""
    png_map = {}
    for dirpath, _dirnames, filenames in os.walk(path):
        png_map.update(
            {os.path.join(dirpath, name): name
             for name in filenames if name.endswith('.png')})
    return png_map
def copy_file(path):
    """Copy every .png found under *path* into a <path>/Collection folder.

    Files already present at the destination (same file) are skipped with
    a message rather than raising.
    """
    png_files = get_folders(path)
    collection_dir = os.path.join(path, 'Collection')
    createFolder(collection_dir)
    for source, basename in png_files.items():
        destination = os.path.join(collection_dir, basename)
        try:
            shutil.copyfile(source, destination)
            print(source, 'copied to', destination)
        except shutil.SameFileError:
            print(basename, 'exists already at this location')
if __name__ == '__main__':
    # Entry point: collect all .png graphs under the directory given with
    # -d/--dir into a single 'Collection' subfolder.
    args=parseArguments()
    path=args.dir
    copy_file(path)
    print(args)
#%% | 1,060 | 0 | 95 |
4102a635a2bfa689a3def92d450bca6b2ded2ded | 3,833 | py | Python | hardware_state.py | Lukas-Fuchs/alarm | 33dff7e7b09ecd0b073997f43b544d3a8fb2c559 | [
"MIT"
] | null | null | null | hardware_state.py | Lukas-Fuchs/alarm | 33dff7e7b09ecd0b073997f43b544d3a8fb2c559 | [
"MIT"
] | 3 | 2020-12-22T15:53:44.000Z | 2020-12-23T16:09:47.000Z | hardware_state.py | Lukas-Fuchs/alarm | 33dff7e7b09ecd0b073997f43b544d3a8fb2c559 | [
"MIT"
] | null | null | null | import datetime
from datetime import datetime as dt
import threading
from threading import Lock
import os
# pretty simple data class to describe actions to be taken as rule consequences
| 31.162602 | 108 | 0.559092 | import datetime
from datetime import datetime as dt
import threading
from threading import Lock
import os
# pretty simple data class to describe actions to be taken as rule consequences
class action:
    """Rule consequence: a value to be written to a target when triggered.

    Plain data holder; instances are registered with
    hardware_state.add_action and executed by perform_action.
    """

    # unique identifier for this action; used to reference it further
    id = ""
    # fifo to write to
    target = ""
    # what to write to the fifo;
    # this will usually be something a process at the other end of the fifo can understand.
    value = ""
class hardware_state:
    """In-memory model of the attached hardware.

    Tracks last-known sensor values, open FIFOs to other processes, and
    registered actions. Every mutation is persisted to hardware_state.dat
    as a replayable command script (see save()).

    NOTE(review): all attributes below are class-level, so every instance
    shares the same state — presumably this class is used as a singleton;
    confirm before creating multiple instances.
    """

    # holds all sensor states, the key being the sensor's id
    # and the value the last signal encountered.
    sensors = {}
    # all sensor identifiers (registered or not) that have been encountered
    encountered_sensors = []
    # all fifos to get information from
    fifos = {}
    # all registered actions
    actions = {}
    # locks that block reading and writing respectively
    lock_read = threading.Lock()
    lock_write = threading.Lock()

    def add_sensor(self, id):
        # Register a sensor with an initial value of 0 and remember that
        # its identifier has been seen; persist the new state.
        with self.lock_read:
            self.sensors[id] = 0
            if id not in self.encountered_sensors:
                self.encountered_sensors.append(id)
            self.save()

    def add_fifo(self, ff_name):
        # Open an existing FIFO non-blocking for read/write and register it.
        # Returns True on success, False if it could not be opened.
        with self.lock_read:
            try:
                fifo_fd = os.open(ff_name, os.O_RDWR)
                os.set_blocking(fifo_fd, False)
                # unbuffered binary stream wrapped around the raw fd
                fifo = os.fdopen(fifo_fd, "rb+", 0)
                if fifo:
                    self.fifos[ff_name] = fifo
                    self.save()
                    return True
            except IOError as e:
                print(e)
                return False
        return False

    def delete_fifo(self, ff_name):
        # Close and deregister a FIFO; silently does nothing if unknown.
        with self.lock_read, self.lock_write:
            if ff_name in self.fifos:
                self.fifos[ff_name].close()
                del self.fifos[ff_name]
                self.save()

    # This is intended for parallel-thread use.
    def write_fifo(self, ff_name, message):
        # Write *message* plus a newline (UTF-8) to the named FIFO,
        # if it is registered; serialized by lock_write.
        if ff_name in self.fifos:
            with self.lock_write:
                self.fifos[ff_name].write(bytes(message + "\n", "utf-8"))
                self.fifos[ff_name].flush()

    def add_action(self, act):
        # Register an action object (see class action) under its id.
        self.actions[act.id] = act
        self.save()

    def delete_action(self, id):
        # Remove a registered action; returns True if it existed.
        if id in self.actions:
            with self.lock_read, self.lock_write:
                del self.actions[id]
                self.save()
            return True
        return False

    def perform_action(self, id):
        # Execute a registered action: a FIFO target is written to in a
        # background thread, a sensor target is set directly (int value).
        if id in self.actions:
            act = self.actions[id]
            try:
                if act.target in self.fifos:
                    writing_thread = threading.Thread(target=self.write_fifo, args=(act.target, act.value,))
                    writing_thread.start()
                elif act.target in self.sensors:
                    int_value = int(act.value)
                    self.sensors[act.target] = int_value
            except:
                # there are a number of things that can go wrong here, none of which matter really
                pass
        return

    def clear(self):
        # Drop all sensor state and close every registered FIFO.
        with self.lock_read, self.lock_write:
            self.sensors = {}
            for key in self.fifos:
                self.fifos[key].close()
            self.fifos = {}

    # Saves the hardware state as a sequence of commands that would recreate that state
    def save(self):
        # NOTE(review): block_saving is never defined in this class; it is
        # presumably assigned externally before save() is first called,
        # otherwise this raises AttributeError — TODO confirm.
        if self.block_saving:
            return
        out = open("hardware_state.dat", "w")
        for id in self.sensors:
            out.write("sensor add " + id + "\n")
        for ff_name in self.fifos:
            out.write("fifo add " + ff_name + "\n")
        for act in self.actions.values():
            out.write("action add " + act.id + " " + act.target + " " + act.value + "\n")
        out.close()
| 2,508 | 1,092 | 45 |
62851c76623c24d5ae5d1266b1718b9002223aae | 7,088 | py | Python | examples/ConfigActuation/tests/test_config_actuation.py | craig8/volttron | 2a954311d323effa3b79c2a53f6e8c3bb9664e1c | [
"Apache-2.0",
"BSD-2-Clause"
] | 1 | 2020-06-08T16:54:28.000Z | 2020-06-08T16:54:28.000Z | examples/ConfigActuation/tests/test_config_actuation.py | craig8/volttron | 2a954311d323effa3b79c2a53f6e8c3bb9664e1c | [
"Apache-2.0",
"BSD-2-Clause"
] | 8 | 2016-10-07T22:49:28.000Z | 2022-02-23T00:57:58.000Z | examples/ConfigActuation/tests/test_config_actuation.py | craig8/volttron | 2a954311d323effa3b79c2a53f6e8c3bb9664e1c | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""
Pytest test cases for testing actuator agent using rpc calls.
"""
from datetime import datetime, timedelta
import gevent
import gevent.subprocess as subprocess
import pytest
from gevent.subprocess import Popen
from mock import MagicMock
from volttron.platform import get_services_core, get_examples, jsonapi
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.messaging import topics
from volttron.platform.agent.known_identities import PLATFORM_DRIVER, CONFIGURATION_STORE
REQUEST_CANCEL_SCHEDULE = 'request_cancel_schedule'
REQUEST_NEW_SCHEDULE = 'request_new_schedule'
PLATFORM_ACTUATOR = 'platform.actuator'
TEST_AGENT = 'test-agent'
PRIORITY_LOW = 'LOW'
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'
@pytest.fixture(scope="module")
def publish_agent(request, volttron_instance):
"""
Fixture used for setting up the environment.
1. Creates fake driver configs
2. Starts the master driver agent with the created fake driver agents
3. Starts the actuator agent
4. Creates an instance Agent class for publishing and returns it
:param request: pytest request object
:param volttron_instance: instance of volttron in which test cases are run
:return: an instance of fake agent used for publishing
"""
# Reset master driver config store
cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']
process = Popen(cmd, env=volttron_instance.env,
cwd='scripts/scalability-testing',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, error) = process.communicate()
assert process.returncode == 0
# Add master driver configuration files to config store.
cmd = ['volttron-ctl', 'config', 'store',PLATFORM_DRIVER,
'fake.csv', 'fake_unit_testing.csv', '--csv']
process = Popen(cmd, env=volttron_instance.env,
cwd='scripts/scalability-testing',
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
result = process.wait()
assert result == 0
config_name = "devices/fakedriver"
cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
config_name, 'fake_unit_testing.config', '--json']
process = Popen(cmd, env=volttron_instance.env,
cwd='scripts/scalability-testing',
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
result = process.wait()
assert result == 0
# Start the master driver agent which would intern start the fake driver
# using the configs created above
master_uuid = volttron_instance.install_agent(
agent_dir=get_services_core("MasterDriverAgent"),
config_file={},
start=True)
print("agent id: ", master_uuid)
gevent.sleep(2) # wait for the agent to start and start the devices
# Start the actuator agent through which publish agent should communicate
# to fake device. Start the master driver agent which would intern start
# the fake driver using the configs created above
actuator_uuid = volttron_instance.install_agent(
agent_dir=get_services_core("ActuatorAgent"),
config_file=get_services_core("ActuatorAgent/tests/actuator.config"),
start=True)
print("agent id: ", actuator_uuid)
gevent.sleep(2)
example_uuid = volttron_instance.install_agent(
agent_dir=get_examples("ConfigActuation"),
config_file={},
vip_identity="config_actuation")
gevent.sleep(2)
# 3: Start a fake agent to publish to message bus
publish_agent = volttron_instance.build_agent(identity=TEST_AGENT)
capabilities = {'edit_config_store': {'identity': "config_actuation"}}
volttron_instance.add_capabilities(publish_agent.core.publickey, capabilities)
# 4: add a tear down method to stop sqlhistorian agent and the fake agent
# \that published to message bus
request.addfinalizer(stop_agent)
return publish_agent
| 41.694118 | 92 | 0.705277 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""
Pytest test cases for testing actuator agent using rpc calls.
"""
from datetime import datetime, timedelta
import gevent
import gevent.subprocess as subprocess
import pytest
from gevent.subprocess import Popen
from mock import MagicMock
from volttron.platform import get_services_core, get_examples, jsonapi
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.messaging import topics
from volttron.platform.agent.known_identities import PLATFORM_DRIVER, CONFIGURATION_STORE
REQUEST_CANCEL_SCHEDULE = 'request_cancel_schedule'
REQUEST_NEW_SCHEDULE = 'request_new_schedule'
PLATFORM_ACTUATOR = 'platform.actuator'
TEST_AGENT = 'test-agent'
PRIORITY_LOW = 'LOW'
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'
@pytest.fixture(scope="module")
def publish_agent(request, volttron_instance):
    """
    Fixture used for setting up the environment.
    1. Creates fake driver configs
    2. Starts the master driver agent with the created fake driver agents
    3. Starts the actuator agent
    4. Creates an instance Agent class for publishing and returns it

    :param request: pytest request object
    :param volttron_instance: instance of volttron in which test cases are run
    :return: an instance of fake agent used for publishing
    """
    # Reset master driver config store
    cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']
    process = Popen(cmd, env=volttron_instance.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, error) = process.communicate()
    assert process.returncode == 0
    # Add master driver configuration files to config store.
    cmd = ['volttron-ctl', 'config', 'store',PLATFORM_DRIVER,
           'fake.csv', 'fake_unit_testing.csv', '--csv']
    process = Popen(cmd, env=volttron_instance.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    result = process.wait()
    assert result == 0
    config_name = "devices/fakedriver"
    cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
           config_name, 'fake_unit_testing.config', '--json']
    process = Popen(cmd, env=volttron_instance.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    result = process.wait()
    assert result == 0
    # Start the master driver agent which would intern start the fake driver
    # using the configs created above
    master_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    print("agent id: ", master_uuid)
    gevent.sleep(2)  # wait for the agent to start and start the devices
    # Start the actuator agent through which publish agent should communicate
    # to fake device. Start the master driver agent which would intern start
    # the fake driver using the configs created above
    actuator_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("ActuatorAgent"),
        config_file=get_services_core("ActuatorAgent/tests/actuator.config"),
        start=True)
    print("agent id: ", actuator_uuid)
    gevent.sleep(2)
    # Install the ConfigActuation example agent under test.
    example_uuid = volttron_instance.install_agent(
        agent_dir=get_examples("ConfigActuation"),
        config_file={},
        vip_identity="config_actuation")
    gevent.sleep(2)
    # 3: Start a fake agent to publish to message bus
    publish_agent = volttron_instance.build_agent(identity=TEST_AGENT)
    # Grant the test agent permission to edit the example agent's config store.
    capabilities = {'edit_config_store': {'identity': "config_actuation"}}
    volttron_instance.add_capabilities(publish_agent.core.publickey, capabilities)
    # 4: add a tear down method to stop sqlhistorian agent and the fake agent
    # that published to message bus
    def stop_agent():
        # Teardown: stop and remove all installed agents, then stop the
        # fake publishing agent itself.
        print("In teardown method of module")
        volttron_instance.stop_agent(actuator_uuid)
        volttron_instance.stop_agent(master_uuid)
        volttron_instance.stop_agent(example_uuid)
        volttron_instance.remove_agent(actuator_uuid)
        volttron_instance.remove_agent(master_uuid)
        volttron_instance.remove_agent(example_uuid)
        publish_agent.core.stop()
    request.addfinalizer(stop_agent)
    return publish_agent
def test_thing(publish_agent):
    """Round-trip test: writing a point value through the ConfigActuation
    agent's config store should change the value read via the actuator."""
    # Fake driver starts with SampleWritableFloat1 == 10.0.
    value = publish_agent.vip.rpc.call(PLATFORM_ACTUATOR,
                                       "get_point",
                                       "fakedriver/SampleWritableFloat1").get()
    assert value == 10.0
    # Storing a config triggers the ConfigActuation agent to set the point.
    publish_agent.vip.rpc.call(CONFIGURATION_STORE,
                               "manage_store",
                               "config_actuation",
                               "fakedriver",
                               jsonapi.dumps({"SampleWritableFloat1": 42.0}),
                               "json").get()
    value = publish_agent.vip.rpc.call(PLATFORM_ACTUATOR,
                                       "get_point",
                                       "fakedriver/SampleWritableFloat1").get()
    assert value == 42.0
| 1,147 | 0 | 49 |
213dcc7326f3a5b529d6fbffd0b4ff9e0c5c13cf | 746 | py | Python | snakes.py | ZMBailey/resolving-merge-conflicts | b6425491ccd09810eace5218b805134236580f36 | [
"MIT"
] | null | null | null | snakes.py | ZMBailey/resolving-merge-conflicts | b6425491ccd09810eace5218b805134236580f36 | [
"MIT"
] | null | null | null | snakes.py | ZMBailey/resolving-merge-conflicts | b6425491ccd09810eace5218b805134236580f36 | [
"MIT"
] | null | null | null | class Snake:
"""A dangerous and/or harmless serpent."""
pass
class Cobra(Snake):
"""Definitely dangerous, yup."""
def bite(self, other):
"""Deliver a dose of venom."""
if other.immune == False:
other.poisoned == True
other.poison_timer = 10 * self.venom
class BoaConstrictor(Snake):
"""This one gives really good hugs."""
def squeeze(self, other):
"""Give a hug."""
self.sqeezing = true
other.status = grappled
class BoatConstrictor(BoaConstrictor):
"""Loose snakes sink ships?"""
def __init__(self):
"""Create a new BoatConstrictor"""
super().__init__()
self.size = "enormous"
class Snake:
    """A dangerous and/or harmless serpent."""

    # Base type for the subclasses below; carries no behaviour itself.
    pass
class Cobra(Snake):
    """Definitely dangerous, yup."""

    def __init__(self, venom):
        """Create a cobra whose bite strength scales with *venom*."""
        self.venom = venom

    def bite(self, other):
        """Deliver a dose of venom.

        Poisons *other* unless it is immune; the poison timer is set to
        10 * self.venom.
        """
        if not other.immune:
            # BUG FIX: the original used '==' here — a no-op comparison —
            # so the target was never actually marked as poisoned.
            other.poisoned = True
            other.poison_timer = 10 * self.venom
class BoaConstrictor(Snake):
    """This one gives really good hugs."""

    def squeeze(self, other):
        """Give a hug, grappling *other*."""
        # BUG FIX: the original assigned the bare name 'true', which is a
        # NameError in Python; the boolean literal is True.
        # (The attribute name 'sqeezing' looks like a typo for 'squeezing'
        # but is kept as-is so external readers of it keep working.)
        self.sqeezing = True
        # BUG FIX: bare 'grappled' was an undefined name. Using the string
        # "grappled" as the status value — TODO confirm the intended
        # status representation against the rest of the game code.
        other.status = "grappled"
class BoatConstrictor(BoaConstrictor):
    """Loose snakes sink ships?"""

    def __init__(self):
        """Create a new BoatConstrictor"""
        super().__init__()
        # size is presumably consumed by display/combat code elsewhere;
        # nothing in this class reads it — confirm with callers.
        self.size = "enormous"
| 24 | 0 | 30 |
7bb04b77685aaae83c05b0c21390584f7a553a1f | 878 | py | Python | src/piece/DPieceAI.py | fireclawthefox/AnkandoraLight | 05b71e1a2919141cce02cb1aade95fbac682614b | [
"BSD-2-Clause"
] | 3 | 2020-07-31T10:27:06.000Z | 2022-01-11T20:28:55.000Z | src/piece/DPieceAI.py | fireclawthefox/AnkandoraLight | 05b71e1a2919141cce02cb1aade95fbac682614b | [
"BSD-2-Clause"
] | null | null | null | src/piece/DPieceAI.py | fireclawthefox/AnkandoraLight | 05b71e1a2919141cce02cb1aade95fbac682614b | [
"BSD-2-Clause"
] | 1 | 2020-07-30T08:23:28.000Z | 2020-07-30T08:23:28.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Fireclaw the Fox"
__license__ = """
Simplified BSD (BSD 2-Clause) License.
See License.txt or http://opensource.org/licenses/BSD-2-Clause for more info
"""
from direct.distributed.DistributedSmoothNodeAI import DistributedSmoothNodeAI
| 28.322581 | 83 | 0.705011 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Fireclaw the Fox"
__license__ = """
Simplified BSD (BSD 2-Clause) License.
See License.txt or http://opensource.org/licenses/BSD-2-Clause for more info
"""
from direct.distributed.DistributedSmoothNodeAI import DistributedSmoothNodeAI
class DPieceAI(DistributedSmoothNodeAI):
    """AI-side distributed object representing a single game piece."""

    def __init__(self, air):
        DistributedSmoothNodeAI.__init__(self, air)
        # name of the model to display for this piece (set via setModel)
        self.modelName = ""
        # owning player; presumably assigned externally before
        # getNameForNameTag is called — TODO confirm
        self.player = None

    def generate(self):
        """Called when the object enters the distributed world."""
        DistributedSmoothNodeAI.generate(self)
        # begin broadcasting position/orientation updates to clients
        self.startPosHprBroadcast()

    def setModel(self, modelName):
        """Store the model name for this piece."""
        self.modelName = modelName

    def getModel(self):
        """Return the stored model name."""
        return self.modelName

    def getNameForNameTag(self):
        """Send the owning player's name back to the requesting avatar only."""
        requesterId = self.air.getAvatarIdFromSender()
        self.sendUpdateToAvatarId(requesterId, "createNameTag", [self.player.name])
| 409 | 19 | 157 |
b43bf6c3c5e5492074ca4e552ad314193f8fcc46 | 3,941 | py | Python | openslides_backend/action/actions/poll/update.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | openslides_backend/action/actions/poll/update.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | openslides_backend/action/actions/poll/update.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | from typing import Any, Dict
from ....models.models import Poll
from ....shared.exceptions import ActionException
from ....shared.patterns import FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .base import base_check_100_percent_base
@register_action("poll.update")
class PollUpdateAction(UpdateAction):
"""
Action to update a poll.
"""
model = Poll()
schema = DefaultSchema(Poll()).get_update_schema(
optional_properties=[
"pollmethod",
"min_votes_amount",
"max_votes_amount",
"global_yes",
"global_no",
"global_abstain",
"entitled_group_ids",
"title",
"description",
"onehundred_percent_base",
"majority_method",
"votesvalid",
"votesinvalid",
"votescast",
],
additional_optional_fields={
"publish_immediately": {"type": "boolean"},
},
)
| 32.04065 | 86 | 0.543263 | from typing import Any, Dict
from ....models.models import Poll
from ....shared.exceptions import ActionException
from ....shared.patterns import FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .base import base_check_100_percent_base
@register_action("poll.update")
class PollUpdateAction(UpdateAction):
    """
    Action to update a poll.

    Which fields may be changed depends on the poll's current state and
    type; update_instance() rejects disallowed fields and handles the
    analog-poll state transitions (finished / published).
    """
    model = Poll()
    schema = DefaultSchema(Poll()).get_update_schema(
        optional_properties=[
            "pollmethod",
            "min_votes_amount",
            "max_votes_amount",
            "global_yes",
            "global_no",
            "global_abstain",
            "entitled_group_ids",
            "title",
            "description",
            "onehundred_percent_base",
            "majority_method",
            "votesvalid",
            "votesinvalid",
            "votescast",
        ],
        additional_optional_fields={
            # not a model field: requests immediate publishing of an
            # analog poll once it is finished
            "publish_immediately": {"type": "boolean"},
        },
    )

    def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
        """Validate the update against the poll's state/type and apply
        state transitions; raises ActionException on disallowed fields."""
        # Fetch current state/type to decide what the update may touch.
        poll = self.datastore.get(
            FullQualifiedId(self.model.collection, instance["id"]), ["state", "type"]
        )
        state_change = self.check_state_change(instance, poll)
        self.check_100_percent_base(instance)
        not_allowed = []
        # Configuration fields may only change while the poll is in the
        # created state.
        if not poll.get("state") == Poll.STATE_CREATED:
            for key in (
                "pollmethod",
                "type",
                "min_votes_amount",
                "max_votes_amount",
                "global_yes",
                "global_no",
                "global_abstain",
            ):
                if key in instance:
                    not_allowed.append(key)
        # Entitled groups are fixed once started, and never apply to
        # analog polls.
        if (
            poll.get("state") != Poll.STATE_CREATED
            or poll.get("type") == Poll.TYPE_ANALOG
        ):
            if "entitled_group_ids" in instance:
                not_allowed.append("entitled_group_ids")
        # Manual vote totals are only meaningful for analog polls.
        if not poll.get("type") == Poll.TYPE_ANALOG:
            for key in (
                "votesvalid",
                "votesinvalid",
                "votescast",
            ):
                if key in instance:
                    not_allowed.append(key)
        if not_allowed:
            raise ActionException(
                "Following options are not allowed in this state and type: "
                + ", ".join(not_allowed)
            )
        # Entering vote totals moves a created analog poll to finished.
        if state_change:
            instance["state"] = Poll.STATE_FINISHED
        # Optionally publish a finished analog poll right away.
        if (
            poll["type"] == Poll.TYPE_ANALOG
            and (
                instance.get("state") == Poll.STATE_FINISHED
                or poll["state"] == Poll.STATE_FINISHED
            )
            and instance.get("publish_immediately")
        ):
            instance["state"] = Poll.STATE_PUBLISHED
        return instance

    def check_100_percent_base(self, instance: Dict[str, Any]) -> None:
        """Validate onehundred_percent_base against the (new or stored)
        poll method."""
        onehundred_percent_base = instance.get("onehundred_percent_base")
        if "pollmethod" in instance:
            pollmethod = instance["pollmethod"]
        else:
            # Fall back to the stored poll method when not being changed.
            poll = self.datastore.get(
                FullQualifiedId(self.model.collection, instance["id"]), ["pollmethod"]
            )
            pollmethod = poll.get("pollmethod")
        base_check_100_percent_base(pollmethod, onehundred_percent_base)

    def check_state_change(
        self, instance: Dict[str, Any], poll: Dict[str, Any]
    ) -> bool:
        """Return True if this update supplies vote totals for a created
        analog poll, i.e. the poll should transition to finished."""
        if poll.get("type") != Poll.TYPE_ANALOG:
            return False
        if poll.get("state") != Poll.STATE_CREATED:
            return False
        check_fields = (
            "votesvalid",
            "votesinvalid",
            "votescast",
        )
        for field in check_fields:
            if instance.get(field):
                return True
        return False
| 2,769 | 0 | 81 |
c203b50774eb0d12c796abfc68b15d2534607102 | 1,517 | py | Python | DataBases/Userinfodata.py | PinkLittlePig/tabletop-bot | 5c731d09fdd6f996fd6c48ccdf560a96457745b2 | [
"MIT"
] | null | null | null | DataBases/Userinfodata.py | PinkLittlePig/tabletop-bot | 5c731d09fdd6f996fd6c48ccdf560a96457745b2 | [
"MIT"
] | null | null | null | DataBases/Userinfodata.py | PinkLittlePig/tabletop-bot | 5c731d09fdd6f996fd6c48ccdf560a96457745b2 | [
"MIT"
] | null | null | null | from DataBases.DataBaseClass import Database
from datetime import datetime
class UserInfoDatabase(Database):
"""Stores information on users including currency, user ids, and items"""
| 42.138889 | 168 | 0.63151 | from DataBases.DataBaseClass import Database
from datetime import datetime
class UserInfoDatabase(Database):
    """Stores information on users including currency, user ids, and items"""

    def __init__(self, tablename='UserInformation'):
        """Open UserInfo.db and ensure the user table exists."""
        super().__init__(filename='UserInfo.db')
        self.tablename = tablename
        self.trycreatetable('user integer PRIMARY KEY, currency integer, itemkey integer, totalgamesplayed integer, wins integer, dailystreak integer, lastdaily TEXT')
        # in-memory map of item key -> list of items for that user
        self.useritems = {}

    def cratestartinfo(self, id):
        """Return the default row for a brand-new user id.

        Columns: user, currency, itemkey, totalgamesplayed, wins,
        dailystreak, lastdaily. lastdaily is set to one day ago —
        presumably so the first daily reward can be claimed immediately.
        """
        return [id, 500, self.randomString(), 0, 0, 0, self.getdatetime("""DATETIME('now', 'localtime', '-1 day')""")]

    def updateuser(self, userid, **userchanges):
        """Apply column=value changes to the given user's row."""
        changestatment = self.createquerysql(userchanges, connector=', ')
        # NOTE(review): userid and the change values are interpolated
        # straight into the SQL string — injection risk if any of them are
        # untrusted; consider a parameterized query.
        self.data_navigatior.execute(f"""UPDATE {self.tablename} Set {changestatment} Where user={userid}""")
        self.data.commit()

    def checkadduser(self, userids):
        """Return the stored row for each id, creating missing users first."""
        checkedandadded = []
        for id in userids:
            # walrus: founduser is the existing row, falsy when absent
            if not(founduser := self.checkforentery(user=id)[0]):
                itemkey = self.randomString()
                self.useritems[itemkey] = []
                # NOTE(review): cratestartinfo generates its own item key,
                # distinct from the 'itemkey' registered in useritems just
                # above — looks inconsistent; confirm which key is meant.
                new = self.cratestartinfo(id)
                self.addentery(*new)
                checkedandadded.append(new)
                continue
            checkedandadded.append(founduser)
        return checkedandadded

    def getitems(self, key):
        """Return the item list stored under *key* (KeyError if absent)."""
        return self.useritems[key]
| 1,179 | 0 | 145 |
a591bc1cf059796b5c6b9b23aa7308bee5a85c7d | 3,137 | py | Python | ascii_chess/ascii_board.py | butwhywhy/ASCIIchess | 133b54cacd8ba3b96a93d4cf4822e85fc7a4b4b1 | [
"MIT"
] | null | null | null | ascii_chess/ascii_board.py | butwhywhy/ASCIIchess | 133b54cacd8ba3b96a93d4cf4822e85fc7a4b4b1 | [
"MIT"
] | 3 | 2015-02-16T08:24:37.000Z | 2015-02-16T08:29:52.000Z | ascii_chess/ascii_board.py | butwhywhy/ASCIIchess | 133b54cacd8ba3b96a93d4cf4822e85fc7a4b4b1 | [
"MIT"
] | null | null | null | from ascii_drawing import Canvas, Square, figure_from_string, GeneralColorConversor, ScaleConversor
from .chess_rules import KING, QUEEN, ROOK, BISHOP, KNIGHT, PAWN
PIECE_NAMES = {KING: 'king', QUEEN: 'queen', ROOK: 'rook', BISHOP: 'bishop',
KNIGHT: 'knight', PAWN: 'pawn'}
from os import path
from pkg_resources import resource_string
ascii_pieces = {p : figure_from_string(resource_string('ascii_chess', path.join('ascii_chess_pieces', PIECE_NAMES[p])))
for p in (PAWN, BISHOP, KNIGHT, ROOK, QUEEN, KING)}
| 35.247191 | 119 | 0.623207 | from ascii_drawing import Canvas, Square, figure_from_string, GeneralColorConversor, ScaleConversor
class ChessBoard(object):
    """ASCII rendering of a chess board on a Canvas.

    Python 2 code (uses xrange and dict.iteritems).
    """

    def __init__(self, side, white_darkness, black_darkness):
        """Build an empty board; *side* is the square size in canvas cells."""
        # margin (in canvas cells) around the 8x8 grid
        self.extra = 4
        self.side = side
        self.white_darkness = white_darkness
        self.black_darkness = black_darkness
        # which player sits at the bottom of the rendering
        self.perspective = 'white'
        self.white_square = Square(self.side, self.side, self.white_darkness, 1)
        self.black_square = Square(self.side, self.side, self.black_darkness, 1)
        # total canvas edge: 8 squares plus the margin on both sides
        ll = self.side * 8 + 2 * self.extra
        self.canvas = Canvas(ll, ll)
        self.clean_board()

    def add_piece(self, figure, row, column):
        """Draw *figure* on the given square (repaints the square first)."""
        self.clean_square(row, column)
        self.canvas.add_figure(figure, *self.get_square(row, column))

    def set_perspective(self, player):
        """Render from 'white' or 'black' point of view."""
        if player in ('white', 'black'):
            self.perspective = player
        else:
            raise ValueError("Unrecognised perspective " + player)

    def set_position(self, white_set, black_set, position=None):
        """Draw a full position; defaults to the initial chess position."""
        from .chess_rules import parse_square  # NOTE(review): imported but unused here
        if position is None:
            from .chess_rules import INITIAL
            position = INITIAL
        self.clean_board()
        for square, (piece, is_black) in position.iteritems():
            p = black_set[piece] if is_black else white_set[piece]
            self.add_piece(p, *square)

    def clean_square(self, row, column):
        """Repaint one square with its background shade."""
        if (column + row) % 2 != 0:
            self.canvas.add_figure(self.white_square, *self.get_square(row, column))
        else:
            self.canvas.add_figure(self.black_square, *self.get_square(row, column))

    def clean_board(self):
        """Repaint every square, erasing all pieces."""
        for i in xrange(8):
            for j in xrange(8):
                self.clean_square(i, j)

    def get_square(self, row, col):
        """Translate board (row, col) to canvas offsets, honouring the
        current perspective (board is flipped for black)."""
        if self.perspective == 'white':
            x = col
            y = 7 - row
        else:
            x = 7 - col
            y = row
        return (self.extra + y * self.side, self.extra + x * self.side)

    def __repr__(self):
        # The painted canvas *is* the board's textual representation.
        return self.canvas.paint()
class ChessPiecesSet(object):
    """One player's piece set: the shared ASCII art re-shaded and rescaled."""

    def __init__(self, side, max_darkness):
        # Scale each pixel's darkness by max_darkness; a darkness-0 pixel
        # keeps a 0 second component (mirroring the 'opacity' parameter —
        # presumably meaning it stays transparent; confirm in ascii_drawing).
        def pix_transform(darkness, opacity):
            if darkness == 0:
                return (darkness, 0)
            return (darkness * max_darkness, 1)
        color_conv = GeneralColorConversor(pix_transform)
        scale_conv = ScaleConversor(side, side)
        self.pieces = {}
        # Re-shade first, then scale each bundled piece figure.
        for pname in ascii_pieces:
            self.pieces[pname] = scale_conv.convert(
                color_conv.convert(ascii_pieces[pname]))

    def __getitem__(self, piece_name):
        """Return the prepared figure for *piece_name*."""
        return self.pieces[piece_name]
from .chess_rules import KING, QUEEN, ROOK, BISHOP, KNIGHT, PAWN
# Resource file name for each piece constant.
PIECE_NAMES = {KING: 'king', QUEEN: 'queen', ROOK: 'rook', BISHOP: 'bishop',
        KNIGHT: 'knight', PAWN: 'pawn'}
from os import path
from pkg_resources import resource_string
# Base (unscaled, unshaded) figure for every piece, loaded from the
# package's data files at import time.
ascii_pieces = {p : figure_from_string(resource_string('ascii_chess', path.join('ascii_chess_pieces', PIECE_NAMES[p])))
        for p in (PAWN, BISHOP, KNIGHT, ROOK, QUEEN, KING)}
| 2,271 | 12 | 324 |
a9de616a234815a30eff3f4f68d051ee59ecfb45 | 1,802 | py | Python | egyptianFractions.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | 1 | 2021-06-08T06:53:50.000Z | 2021-06-08T06:53:50.000Z | egyptianFractions.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | null | null | null | egyptianFractions.py | Existence-glitch/PythonCodes | a49121855364a5e0ac2ff227ef91f19086a52f09 | [
"MIT"
] | null | null | null | from math import gcd, ceil
main() | 43.95122 | 142 | 0.615982 | from math import gcd, ceil
def fibonacci(a, b):
    """Greedy (Fibonacci-Sylvester) Egyptian-fraction expansion of a/b.

    Returns the list of unit-fraction denominators, as strings, such that
    a/b == sum(1/d for d in the result).  Expects a proper fraction,
    0 < a < b (the caller, main(), enforces this).

    The original implementation tested termination with float arithmetic
    (``a/b - 1/luf_den == 0``), which can terminate incorrectly for large
    numerators/denominators; this version uses exact integer arithmetic.
    """
    # Denominator of the largest unit fraction 1/d <= a/b, i.e. ceil(b/a),
    # computed purely with integers so big fractions stay exact.
    luf_den = -(-b // a)
    # Exact numerator of the remainder a/b - 1/luf_den over b*luf_den.
    rem_num = a * luf_den - b
    if rem_num == 0:
        # a/b reduced to a unit fraction: the expansion is complete.
        return [str(luf_den)]
    rem_den = b * luf_den
    # Reduce the remainder to lowest terms and recurse on it.
    g = gcd(rem_den, rem_num)
    return [str(luf_den)] + fibonacci(rem_num // g, rem_den // g)
def main():
    """Read n fractions from stdin and print each one's Egyptian-fraction
    expansion, rejecting improper fractions and numerators <= 1."""
    # First line: how many fractions follow.
    n = int(input())
    for _ in range(n):
        # Each subsequent line carries "<numerator> <denominator>".
        num_str, den_str = input().split()
        num = int(num_str)
        den = int(den_str)
        if num <= 1:
            # Unit fractions (or smaller numerators) are not accepted.
            print("El numerador insertado es inválido")
            continue
        if num >= den:
            # Improper fractions (>= 1) are not accepted.
            print("La fracción insertada no es propia")
            continue
        print(f"{num}/{den}:", ", ".join(fibonacci(num, den)))
main() | 1,734 | 0 | 50 |
b7857ccc0fb4a35bd036b819cf6fde09b5d47feb | 1,379 | py | Python | pineapple-py-main/src/backend.py | yujiecong/yjcL | 6d6dc4ad3611cb34c07192a1a3038a1ac3f67d6c | [
"MIT"
] | null | null | null | pineapple-py-main/src/backend.py | yujiecong/yjcL | 6d6dc4ad3611cb34c07192a1a3038a1ac3f67d6c | [
"MIT"
] | null | null | null | pineapple-py-main/src/backend.py | yujiecong/yjcL | 6d6dc4ad3611cb34c07192a1a3038a1ac3f67d6c | [
"MIT"
] | null | null | null | import sys
from definition import Variable, Statement, Assignment, Print, SourceCode
from lexer import Lexer
from parser import parse
if __name__ == '__main__':
main()
| 28.729167 | 87 | 0.672951 | import sys
from definition import Variable, Statement, Assignment, Print, SourceCode
from lexer import Lexer
from parser import parse
class Interpreter:
    """Tree-walking interpreter: lexes and parses the source once, then
    executes the resulting AST statement by statement."""

    def __init__(self, source_code: str):
        self.lexer = Lexer(source_code)
        self.ast = parse(self.lexer)
        # Runtime variable environment: name -> string value.
        self.variables = {}

    def resolve_print(self, print_statement: Print) -> None:
        # Echo the variable's current value to stdout.
        print(self.variables[print_statement.variable.name])

    def resolve_assignment(self, assignment: Assignment) -> None:
        # Bind the literal string to the variable name.
        self.variables[assignment.variable.name] = assignment.string

    def resolve_statement(self, statement: Statement) -> None:
        # Dispatch on the concrete statement type.
        if isinstance(statement, Print):
            self.resolve_print(statement)
            return
        if isinstance(statement, Assignment):
            self.resolve_assignment(statement)
            return
        raise RuntimeError(
            f'resolve_statement(): unexpected statement type: {statement}')

    def resolve_source_code(self, ast: SourceCode) -> None:
        for stmt in ast.statements:
            self.resolve_statement(stmt)

    def execute(self) -> None:
        """Run the whole parsed program."""
        self.resolve_source_code(self.ast)
def main():
    """Entry point: interpret the source file named on the command line."""
    script_path = sys.argv[1]
    with open(script_path) as handle:
        program_text = handle.read()
    Interpreter(program_text).execute()
if __name__ == '__main__':
main()
| 997 | -3 | 208 |
8561179a7caf34130d4ba161d933cfb6e7685eb6 | 359 | py | Python | core/migrations/0006_rename_choice_choices_name.py | morethanmin/fooorm | 770894ce20ececcfc635b28948f69819b72834a0 | [
"MIT"
] | 1 | 2021-09-10T07:08:16.000Z | 2021-09-10T07:08:16.000Z | core/migrations/0006_rename_choice_choices_name.py | morethanmin/fooorm | 770894ce20ececcfc635b28948f69819b72834a0 | [
"MIT"
] | null | null | null | core/migrations/0006_rename_choice_choices_name.py | morethanmin/fooorm | 770894ce20ececcfc635b28948f69819b72834a0 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-07 03:52
from django.db import migrations
| 18.894737 | 47 | 0.579387 | # Generated by Django 3.2.5 on 2021-08-07 03:52
from django.db import migrations
class Migration(migrations.Migration):
    # Schema-only change: renames the field Choices.choice to Choices.name.
    dependencies = [
        ('core', '0005_auto_20210806_2255'),
    ]
    operations = [
        migrations.RenameField(
            model_name='choices',
            old_name='choice',
            new_name='name',
        ),
    ]
| 0 | 253 | 23 |
0abcc7d18ee60c3f88f686aca48f7459c7c0d8b6 | 997 | py | Python | 04-functions/lottery.py | sn-lvpthe/CirquePy | a22cd1c76e650ab1752cbd6a3edfcfd50d9d58dc | [
"Unlicense"
] | null | null | null | 04-functions/lottery.py | sn-lvpthe/CirquePy | a22cd1c76e650ab1752cbd6a3edfcfd50d9d58dc | [
"Unlicense"
] | null | null | null | 04-functions/lottery.py | sn-lvpthe/CirquePy | a22cd1c76e650ab1752cbd6a3edfcfd50d9d58dc | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
if __name__ == "__main__": # this means that if somebody ran this Python file, execute only the code below
main()
| 26.945946 | 129 | 0.656971 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
def generate_lottery_numbers(quantity):
    """Return `quantity` distinct random lottery numbers between 1 and 50.

    The original rejection-sampling loop never terminated when quantity
    exceeded 50 (only 50 distinct values exist); this version validates
    the request and delegates to random.sample, which draws unique values
    uniformly in a single call.

    Raises:
        ValueError: if quantity is negative or greater than 50.
    """
    if not 0 <= quantity <= 50:
        raise ValueError("quantity must be between 0 and 50")
    return random.sample(range(1, 51), quantity)
def main():
    """Prompt for a count and print that many unique random lottery numbers."""
    print("Maak je loterijnummers automatisch aan!\n")
    answer = input("Hoeveel willekeurige getallen wil je: ")
    try:
        count = int(answer)
        print(generate_lottery_numbers(count))
    except ValueError:
        # Non-numeric input.
        print("Opgelet... Er is een aantal nodig!.")
    print("\n\t- EINDE. -")
if __name__ == "__main__": # this means that if somebody ran this Python file, execute only the code below
main()
| 767 | 0 | 46 |
adff595ea33874ae835033317deae43e33df658a | 3,700 | py | Python | graphSAGE/src/dataCenter.py | ShaoaAllen/CogQA | 82b932f4fd9dad33efb7eaaffa7ee29594397932 | [
"MIT"
] | null | null | null | graphSAGE/src/dataCenter.py | ShaoaAllen/CogQA | 82b932f4fd9dad33efb7eaaffa7ee29594397932 | [
"MIT"
] | null | null | null | graphSAGE/src/dataCenter.py | ShaoaAllen/CogQA | 82b932f4fd9dad33efb7eaaffa7ee29594397932 | [
"MIT"
] | null | null | null | import sys
import os
from collections import defaultdict
import numpy as np
class DataCenter(object):
"""docstring for DataCenter"""
| 32.45614 | 92 | 0.678919 | import sys
import os
from collections import defaultdict
import numpy as np
class DataCenter(object):
	"""Loads citation-graph datasets ('cora' or 'pubmed') from disk and
	exposes node features, labels, adjacency lists and random
	train/val/test index splits as attributes named '<dataset>_<field>'
	(e.g. 'cora_feats', 'cora_adj_lists').
	"""
	def __init__(self, config):
		super(DataCenter, self).__init__()
		# config maps file-path keys (e.g. 'file_path.cora_content') to paths.
		self.config = config
	def load_dataSet(self, dataSet='cora'):
		# Parse the raw dataset files and attach results to self via setattr.
		if dataSet == 'cora':
			cora_content_file = self.config['file_path.cora_content']
			cora_cite_file = self.config['file_path.cora_cite']
			feat_data = []
			labels = [] # label sequence of node
			node_map = {} # map node to Node_ID
			label_map = {} # map label to Label_ID
			# Content file: one line per paper, "<id> <word features...> <class label>".
			with open(cora_content_file) as fp:
				for i,line in enumerate(fp):
					info = line.strip().split()
					feat_data.append([float(x) for x in info[1:-1]])
					node_map[info[0]] = i
					if not info[-1] in label_map:
						# Assign label ids in order of first appearance.
						label_map[info[-1]] = len(label_map)
					labels.append(label_map[info[-1]])
			feat_data = np.asarray(feat_data)
			labels = np.asarray(labels, dtype=np.int64)
			adj_lists = defaultdict(set)
			# Cite file: one "<id> <id>" edge per line; edges made symmetric.
			with open(cora_cite_file) as fp:
				for i,line in enumerate(fp):
					info = line.strip().split()
					assert len(info) == 2
					paper1 = node_map[info[0]]
					paper2 = node_map[info[1]]
					adj_lists[paper1].add(paper2)
					adj_lists[paper2].add(paper1)
			# Every node is expected to have at least one edge.
			assert len(feat_data) == len(labels) == len(adj_lists)
			test_indexs, val_indexs, train_indexs = self._split_data(feat_data.shape[0])
			setattr(self, dataSet+'_test', test_indexs)
			setattr(self, dataSet+'_val', val_indexs)
			setattr(self, dataSet+'_train', train_indexs)
			setattr(self, dataSet+'_feats', feat_data)
			setattr(self, dataSet+'_labels', labels)
			setattr(self, dataSet+'_adj_lists', adj_lists)
		elif dataSet == 'pubmed':
			pubmed_content_file = self.config['file_path.pubmed_paper']
			pubmed_cite_file = self.config['file_path.pubmed_cites']
			feat_data = []
			labels = [] # label sequence of node
			node_map = {} # map node to Node_ID
			with open(pubmed_content_file) as fp:
				# Skip the first header line; the second line describes the
				# feature columns as "numeric:<word>:..." entries.
				fp.readline()
				feat_map = {entry.split(":")[1]:i-1 for i,entry in enumerate(fp.readline().split("\t"))}
				for i, line in enumerate(fp):
					# Tab-separated: "<id>\tlabel=<k>\t<word>=<weight>...\t<summary>".
					info = line.split("\t")
					node_map[info[0]] = i
					labels.append(int(info[1].split("=")[1])-1)
					tmp_list = np.zeros(len(feat_map)-2)
					for word_info in info[2:-1]:
						word_info = word_info.split("=")
						tmp_list[feat_map[word_info[0]]] = float(word_info[1])
					feat_data.append(tmp_list)
			feat_data = np.asarray(feat_data)
			labels = np.asarray(labels, dtype=np.int64)
			adj_lists = defaultdict(set)
			with open(pubmed_cite_file) as fp:
				# Two header lines precede the edge list.
				fp.readline()
				fp.readline()
				for line in fp:
					info = line.strip().split("\t")
					paper1 = node_map[info[1].split(":")[1]]
					paper2 = node_map[info[-1].split(":")[1]]
					adj_lists[paper1].add(paper2)
					adj_lists[paper2].add(paper1)
			assert len(feat_data) == len(labels) == len(adj_lists)
			test_indexs, val_indexs, train_indexs = self._split_data(feat_data.shape[0])
			setattr(self, dataSet+'_test', test_indexs)
			setattr(self, dataSet+'_val', val_indexs)
			setattr(self, dataSet+'_train', train_indexs)
			setattr(self, dataSet+'_feats', feat_data)
			setattr(self, dataSet+'_labels', labels)
			setattr(self, dataSet+'_adj_lists', adj_lists)
	def _split_data(self, num_nodes, test_split = 3, val_split = 6):
		# Random permutation split: 1/test_split of nodes for test,
		# 1/val_split for validation, the remainder for training.
		rand_indices = np.random.permutation(num_nodes)
		test_size = num_nodes // test_split
		val_size = num_nodes // val_split
		train_size = num_nodes - (test_size + val_size)
		test_indexs = rand_indices[:test_size]
		val_indexs = rand_indices[test_size:(test_size+val_size)]
		train_indexs = rand_indices[(test_size+val_size):]
		return test_indexs, val_indexs, train_indexs
| 3,488 | 0 | 73 |
3f372f778c46e09e9295a501d1163b5145f2f35e | 6,535 | py | Python | update-version.py | JOCKCERJON/TEMP | 3fa77e0cf5c8b9cdec36f916759ae8ea46076537 | [
"BSD-3-Clause"
] | 1,402 | 2015-01-23T02:50:26.000Z | 2022-03-26T09:42:31.000Z | update-version.py | snobalfyte/HashCheck | 3fa77e0cf5c8b9cdec36f916759ae8ea46076537 | [
"BSD-3-Clause"
] | 68 | 2015-08-29T17:10:23.000Z | 2021-09-27T16:33:09.000Z | update-version.py | snobalfyte/HashCheck | 3fa77e0cf5c8b9cdec36f916759ae8ea46076537 | [
"BSD-3-Clause"
] | 214 | 2015-02-04T04:17:50.000Z | 2022-03-31T11:43:01.000Z | #!/usr/bin/python3
#
# Version updater
# Copyright (C) 2016 Christopher Gurnee. All rights reserved.
#
# Please refer to readme.md for information about this source code.
# Please refer to license.txt for details about distribution and modification.
#
# Updates various version constants based on HASHCHECK_VERSION_STR in version.h
import sys, os, os.path, re, contextlib, atexit
from warnings import warn
atexit.register(lambda: input('Press Enter to exit ...'))
# When used in a 'with' statement, renames filename to filename.orig and opens filename for
# writing. If an uncaught exception is raised, restores filename.orig, otherwise deletes it.
@contextlib.contextmanager
os.chdir(os.path.dirname(__file__))
# Get the "authoritative" version string from HASHCHECK_VERSION_STR in version.h
match = None
with open('version.h', encoding='utf-8') as file:
for line in file:
match = re.match(r'#define\s+HASHCHECK_VERSION_STR\s+"(\d+)\.(\d+)\.(\d+)\.(\d+)((?:-\w+)?)"', line)
if match:
break
if not match:
sys.exit('Valid version not found in version.h')
major = match.group(1)
minor = match.group(2)
patch = match.group(3)
build = match.group(4)
prerelease = match.group(5)
print('v' + full_version())
# Compare the authoritative version with the one in appveyor.yml; since this file
# is updated last, it will be the same iff the authoritative version wasn't changed
match = None
with open('appveyor.yml', encoding='utf-8') as file:
for line in file:
match = re.match(r'version:\s*(\S+)\s*$', line)
if match:
if match.group(1) == full_version():
if input('Version is unchanged, increment build number (Y/n)? ').strip().lower() == 'n':
sys.exit(0)
build = str(int(build) + 1)
print('v' + full_version())
break
# Update the 3 version constants in version.h
found_version_full = 0
found_version_str = 0
found_linker_version = 0
with overwrite('version.h', encoding='utf-8', newline='') as out_file:
with open('version.h.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^#define\s+HASHCHECK_VERSION_FULL\s+[\d,]+',
'#define HASHCHECK_VERSION_FULL ' + ','.join((major, minor, patch, build)), line)
found_version_full += subs
(line, subs) = re.subn(r'^#define\s+HASHCHECK_VERSION_STR\s+"[\d.\w-]*"',
'#define HASHCHECK_VERSION_STR "' + full_version() + '"', line)
found_version_str += subs
(line, subs) = re.subn(r'^#pragma\s+comment\s*\(\s*linker\s*,\s*"/version:[\d+.]+"\s*\)',
'#pragma comment(linker, "/version:{}.{}")'.format(major, minor), line)
found_linker_version += subs
out_file.write(line)
if found_version_full != 1:
warn('found {} HASHCHECK_VERSION_FULL defines in version.h'.format(found_version_full))
if found_version_str != 1:
warn('found {} HASHCHECK_VERSION_STR defines in version.h'.format(found_version_str))
if found_linker_version != 1:
warn('found {} linker /version lines in version.h'.format(found_linker_version))
# Update the 4 version constants in HashCheck.nsi
found_outfile = 0
found_product_version = 0
found_version_key_product = 0
found_version_key_file = 0
with overwrite(r'installer\HashCheck.nsi', encoding='utf-8', newline='') as out_file:
with open(r'installer\HashCheck.nsi.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^OutFile\s*"HashCheckSetup-v[\d.\w-]+.exe"',
'OutFile "HashCheckSetup-v' + full_version() + '.exe"', line)
found_outfile += subs
(line, subs) = re.subn(r'^VIProductVersion\s+"[\d.\w-]+"',
'VIProductVersion "' + full_version() + '"', line)
found_product_version += subs
(line, subs) = re.subn(r'^VIAddVersionKey\s+/LANG=\${LANG_ENGLISH}\s+"ProductVersion"\s+"[\d.\w-]+"',
'VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductVersion" "' + full_version() + '"', line)
found_version_key_product += subs
(line, subs) = re.subn(r'VIAddVersionKey\s+/LANG=\${LANG_ENGLISH}\s+"FileVersion"\s+"[\d.\w-]+"',
'VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "' + full_version() + '"', line)
found_version_key_file += subs
out_file.write(line)
if found_outfile != 1:
warn('found {} OutFile statements in HashCheck.nsi'.format(found_outfile))
if found_product_version != 1:
warn('found {} VIProductVersion\'s in HashCheck.nsi'.format(found_product_version))
if found_version_key_product != 1:
warn('found {} ProductVersion VIAddVersionKeys defines in HashCheck.nsi'.format(found_version_key_product))
if found_version_key_file != 1:
warn('found {} FileVersion VIAddVersionKeys defines in HashCheck.nsi'.format(found_version_key_file))
# Lastly, update the one version line in appveyor
found_version = 0
with overwrite('appveyor.yml', encoding='utf-8', newline='') as out_file:
with open('appveyor.yml.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^version:\s*\S+', 'version: ' + full_version(), line)
found_version += subs
out_file.write(line)
if found_version != 1:
warn('found {} version lines in appveyor.yml'.format(found_version))
print('Done.')
| 44.455782 | 126 | 0.605662 | #!/usr/bin/python3
#
# Version updater
# Copyright (C) 2016 Christopher Gurnee. All rights reserved.
#
# Please refer to readme.md for information about this source code.
# Please refer to license.txt for details about distribution and modification.
#
# Updates various version constants based on HASHCHECK_VERSION_STR in version.h
import sys, os, os.path, re, contextlib, atexit
from warnings import warn
atexit.register(lambda: input('Press Enter to exit ...'))
# When used in a 'with' statement, renames filename to filename.orig and opens filename for
# writing. If an uncaught exception is raised, restores filename.orig, otherwise deletes it.
@contextlib.contextmanager
def overwrite(filename, mode='w', *args, **kwargs):
    """Safely rewrite *filename* in place.

    Renames the existing file to *filename*.orig (if it exists) and opens a
    fresh *filename* for writing, yielding the open file object.  If the
    body raises, the new file is removed and the backup restored; on
    success the backup is deleted.
    """
    backup = filename + '.orig'
    try:
        os.rename(filename, backup)
    except FileNotFoundError:
        backup = None  # target did not exist yet; nothing to back up
    try:
        handle = open(filename, mode, *args, **kwargs)
    except BaseException:
        # Could not open the replacement: put the original back.
        if backup:
            os.rename(backup, filename)
        raise
    try:
        yield handle
    except BaseException:
        # Body failed: discard the partial file and restore the backup.
        handle.close()
        os.remove(filename)
        if backup:
            os.rename(backup, filename)
        raise
    handle.close()
    if backup:
        os.remove(backup)
os.chdir(os.path.dirname(__file__))
# Get the "authoritative" version string from HASHCHECK_VERSION_STR in version.h
match = None
with open('version.h', encoding='utf-8') as file:
for line in file:
match = re.match(r'#define\s+HASHCHECK_VERSION_STR\s+"(\d+)\.(\d+)\.(\d+)\.(\d+)((?:-\w+)?)"', line)
if match:
break
if not match:
sys.exit('Valid version not found in version.h')
major = match.group(1)
minor = match.group(2)
patch = match.group(3)
build = match.group(4)
prerelease = match.group(5)
def full_version():
    """Assemble the dotted four-part version string plus any prerelease tag."""
    return f"{major}.{minor}.{patch}.{build}{prerelease}"
print('v' + full_version())
# Compare the authoritative version with the one in appveyor.yml; since this file
# is updated last, it will be the same iff the authoritative version wasn't changed
match = None
with open('appveyor.yml', encoding='utf-8') as file:
for line in file:
match = re.match(r'version:\s*(\S+)\s*$', line)
if match:
if match.group(1) == full_version():
if input('Version is unchanged, increment build number (Y/n)? ').strip().lower() == 'n':
sys.exit(0)
build = str(int(build) + 1)
print('v' + full_version())
break
# Update the 3 version constants in version.h
found_version_full = 0
found_version_str = 0
found_linker_version = 0
with overwrite('version.h', encoding='utf-8', newline='') as out_file:
with open('version.h.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^#define\s+HASHCHECK_VERSION_FULL\s+[\d,]+',
'#define HASHCHECK_VERSION_FULL ' + ','.join((major, minor, patch, build)), line)
found_version_full += subs
(line, subs) = re.subn(r'^#define\s+HASHCHECK_VERSION_STR\s+"[\d.\w-]*"',
'#define HASHCHECK_VERSION_STR "' + full_version() + '"', line)
found_version_str += subs
(line, subs) = re.subn(r'^#pragma\s+comment\s*\(\s*linker\s*,\s*"/version:[\d+.]+"\s*\)',
'#pragma comment(linker, "/version:{}.{}")'.format(major, minor), line)
found_linker_version += subs
out_file.write(line)
if found_version_full != 1:
warn('found {} HASHCHECK_VERSION_FULL defines in version.h'.format(found_version_full))
if found_version_str != 1:
warn('found {} HASHCHECK_VERSION_STR defines in version.h'.format(found_version_str))
if found_linker_version != 1:
warn('found {} linker /version lines in version.h'.format(found_linker_version))
# Update the 4 version constants in HashCheck.nsi
found_outfile = 0
found_product_version = 0
found_version_key_product = 0
found_version_key_file = 0
with overwrite(r'installer\HashCheck.nsi', encoding='utf-8', newline='') as out_file:
with open(r'installer\HashCheck.nsi.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^OutFile\s*"HashCheckSetup-v[\d.\w-]+.exe"',
'OutFile "HashCheckSetup-v' + full_version() + '.exe"', line)
found_outfile += subs
(line, subs) = re.subn(r'^VIProductVersion\s+"[\d.\w-]+"',
'VIProductVersion "' + full_version() + '"', line)
found_product_version += subs
(line, subs) = re.subn(r'^VIAddVersionKey\s+/LANG=\${LANG_ENGLISH}\s+"ProductVersion"\s+"[\d.\w-]+"',
'VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductVersion" "' + full_version() + '"', line)
found_version_key_product += subs
(line, subs) = re.subn(r'VIAddVersionKey\s+/LANG=\${LANG_ENGLISH}\s+"FileVersion"\s+"[\d.\w-]+"',
'VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "' + full_version() + '"', line)
found_version_key_file += subs
out_file.write(line)
if found_outfile != 1:
warn('found {} OutFile statements in HashCheck.nsi'.format(found_outfile))
if found_product_version != 1:
warn('found {} VIProductVersion\'s in HashCheck.nsi'.format(found_product_version))
if found_version_key_product != 1:
warn('found {} ProductVersion VIAddVersionKeys defines in HashCheck.nsi'.format(found_version_key_product))
if found_version_key_file != 1:
warn('found {} FileVersion VIAddVersionKeys defines in HashCheck.nsi'.format(found_version_key_file))
# Lastly, update the one version line in appveyor
found_version = 0
with overwrite('appveyor.yml', encoding='utf-8', newline='') as out_file:
with open('appveyor.yml.orig', encoding='utf-8', newline='') as in_file:
for line in in_file:
(line, subs) = re.subn(r'^version:\s*\S+', 'version: ' + full_version(), line)
found_version += subs
out_file.write(line)
if found_version != 1:
warn('found {} version lines in appveyor.yml'.format(found_version))
print('Done.')
| 623 | 0 | 46 |
a3ebfa1c510844dfb90ae19113ead659e155cc9b | 2,337 | py | Python | youtube_dl_cli/download_youtube_video.py | CarsonSlovoka/youtube-dl-cli | 80de0ef2500f58292723532d372b740a964635f5 | [
"Apache-2.0"
] | 1 | 2020-10-26T13:51:39.000Z | 2020-10-26T13:51:39.000Z | youtube_dl_cli/download_youtube_video.py | CarsonSlovoka/youtube-dl-cli | 80de0ef2500f58292723532d372b740a964635f5 | [
"Apache-2.0"
] | null | null | null | youtube_dl_cli/download_youtube_video.py | CarsonSlovoka/youtube-dl-cli | 80de0ef2500f58292723532d372b740a964635f5 | [
"Apache-2.0"
] | 1 | 2020-04-02T07:26:49.000Z | 2020-04-02T07:26:49.000Z | __all__ = ('YoutubeKeeper', 'YLFormat')
import youtube_dl
import os
from sys import stderr
from pathlib import Path
from .structured import YLFormat
from typing import List, Tuple, Sequence, Iterable
from tkinter.messagebox import askokcancel
from tkinter import Tk
if 'withdraw tk':
try:
Tk().withdraw()
except:
...
| 35.953846 | 129 | 0.521181 | __all__ = ('YoutubeKeeper', 'YLFormat')
import youtube_dl
import os
from sys import stderr
from pathlib import Path
from .structured import YLFormat
from typing import List, Tuple, Sequence, Iterable
from tkinter.messagebox import askokcancel
from tkinter import Tk
if 'withdraw tk':
try:
Tk().withdraw()
except:
...
class YoutubeKeeper:
__slots__ = ()
@staticmethod
def download(url: str, options: dict):
with youtube_dl.YoutubeDL(options) as ydl:
ydl.download([url])
@staticmethod
def start(download_list: Iterable[Tuple[str, Sequence[YLFormat]]],
output_dir: Path = None,
**options):
if output_dir is None:
output_dir = Path(os.environ["USERPROFILE"]) / Path('Music/my_music')
output_dir.mkdir(exist_ok=True)
elif not output_dir.exists():
print(f'output_dir not exists: {output_dir}')
return
for cur_data in download_list:
if not isinstance(cur_data, (tuple, list)) and len(cur_data) < 2:
continue
cur_url, tuple_format = cur_data
for format_info in tuple_format:
if not isinstance(format_info, YLFormat):
print(f'the format is not correct. format: {format_info}')
continue
fmt_name, fmt = format_info.name, format_info.value
try:
YoutubeKeeper.download(cur_url, dict(format=fmt,
# outtmpl=f"{Path(output_dir) / Path(f'%(title)s-{fmt_name}.%(ext)s')}",
outtmpl=f"{Path(output_dir) / Path(f'%(title)s.%(ext)s')}",
writethumbnail=options.get('writethumbnail'),
# ignoreerrors=True,
quiet=options.get('quiet')
))
except youtube_dl.utils.DownloadError:
stderr.write(f'download error: {cur_url} | {fmt_name}')
if askokcancel('All done!', 'Open the music folder?'):
os.startfile(output_dir)
return
| 1,859 | 108 | 23 |
1df9a7cb640b2a0805ab34095d6a3e9d2d288e1e | 4,778 | py | Python | data/convert_data_ukbb2964.py | vukadinovic936/ukbb_cardiac | ea2840fca2df2ba4ca6c6d386a2493c3d8c92bde | [
"Apache-2.0"
] | 82 | 2018-04-07T19:29:25.000Z | 2022-03-03T02:48:17.000Z | data/convert_data_ukbb2964.py | vukadinovic936/ukbb_cardiac | ea2840fca2df2ba4ca6c6d386a2493c3d8c92bde | [
"Apache-2.0"
] | 12 | 2018-04-03T09:43:15.000Z | 2021-11-17T20:04:29.000Z | data/convert_data_ukbb2964.py | vukadinovic936/ukbb_cardiac | ea2840fca2df2ba4ca6c6d386a2493c3d8c92bde | [
"Apache-2.0"
] | 42 | 2018-01-07T18:55:30.000Z | 2022-02-24T13:55:31.000Z | # Copyright 2017, Wenjia Bai. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The data converting script for UK Biobank Application 2964, which contributes
the manual annotations of 5,000 subjects.
This script assumes that the images and annotations have already been downloaded
as zip files. It decompresses the zip files, sort the DICOM files into subdirectories
according to the information provided in the manifest.csv spreadsheet, parse manual
annotated contours from the cvi42 xml files, read the matching DICOM and cvi42 contours
and finally save them as nifti images.
"""
import os
import csv
import glob
import re
import time
import pandas as pd
import dateutil.parser
from biobank_utils import *
import parse_cvi42_xml
if __name__ == '__main__':
# Path to the downloaded data
data_path = '/vol/vipdata/data/biobank/cardiac/Application_2964/data_path'
# For each subdirectory
for sub_path in sorted(os.listdir(data_path)):
sub_path = os.path.join(data_path, sub_path)
# For each subject in the subdirectory
for eid in sorted(os.listdir(sub_path)):
data_dir = os.path.join(sub_path, eid)
# Only convert data if there is manual annotation, i.e. cvi42 files
if os.path.exists(os.path.join(data_dir, '{0}_cvi42.zip'.format(eid))):
# Check the annotator's name
s = os.popen('unzip -c {0}/{1}_cvi42.zip "*.cvi42wsx" '
'| grep OwnerUserName'.format(data_dir, eid)).read()
annotator = (s.split('>')[1]).split('<')[0]
# Decompress the zip files in this directory
files = glob.glob('{0}/{1}_*.zip'.format(data_dir, eid))
dicom_dir = os.path.join(data_dir, 'dicom')
if not os.path.exists(dicom_dir):
os.mkdir(dicom_dir)
for f in files:
if os.path.basename(f) == '{0}_cvi42.zip'.format(eid):
os.system('unzip -o {0} -d {1}'.format(f, data_dir))
else:
os.system('unzip -o {0} -d {1}'.format(f, dicom_dir))
# Process the manifest file
process_manifest(os.path.join(dicom_dir, 'manifest.csv'),
os.path.join(dicom_dir, 'manifest2.csv'))
df2 = pd.read_csv(os.path.join(dicom_dir, 'manifest2.csv'), error_bad_lines=False)
# Organise the dicom files
# Group the files into subdirectories for each imaging series
for series_name, series_df in df2.groupby('series discription'):
series_dir = os.path.join(dicom_dir, series_name)
if not os.path.exists(series_dir):
os.mkdir(series_dir)
series_files = [os.path.join(dicom_dir, x) for x in series_df['filename']]
os.system('mv {0} {1}'.format(' '.join(series_files), series_dir))
# Parse cvi42 xml file
cvi42_contours_dir = os.path.join(data_dir, 'cvi42_contours')
if not os.path.exists(cvi42_contours_dir):
os.mkdir(cvi42_contours_dir)
xml_name = os.path.join(data_dir, '{0}_cvi42.cvi42wsx'.format(eid))
parse_cvi42_xml.parseFile(xml_name, cvi42_contours_dir)
# Rare cases when no dicom file exists
# e.g. 12xxxxx/1270299
if not os.listdir(dicom_dir):
print('Warning: empty dicom directory! Skip this one.')
continue
# Convert dicom files and annotations into nifti images
dset = Biobank_Dataset(dicom_dir, cvi42_contours_dir)
dset.read_dicom_images()
dset.convert_dicom_to_nifti(data_dir)
# Remove intermediate files
os.system('rm -rf {0} {1}'.format(dicom_dir, cvi42_contours_dir))
os.system('rm -f {0}'.format(xml_name))
| 47.78 | 106 | 0.589787 | # Copyright 2017, Wenjia Bai. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The data converting script for UK Biobank Application 2964, which contributes
the manual annotations of 5,000 subjects.
This script assumes that the images and annotations have already been downloaded
as zip files. It decompresses the zip files, sort the DICOM files into subdirectories
according to the information provided in the manifest.csv spreadsheet, parse manual
annotated contours from the cvi42 xml files, read the matching DICOM and cvi42 contours
and finally save them as nifti images.
"""
import os
import csv
import glob
import re
import time
import pandas as pd
import dateutil.parser
from biobank_utils import *
import parse_cvi42_xml
if __name__ == '__main__':
    # Convert UK Biobank cardiac MR data: for every subject that has manual
    # cvi42 annotations, unzip images + annotations, sort DICOMs by series,
    # parse the cvi42 contours, convert everything to nifti, and clean up.
    # NOTE: relies on the external `unzip`, `mv` and `rm` shell commands.

    # Path to the downloaded data
    data_path = '/vol/vipdata/data/biobank/cardiac/Application_2964/data_path'
    # For each subdirectory
    for sub_path in sorted(os.listdir(data_path)):
        sub_path = os.path.join(data_path, sub_path)
        # For each subject in the subdirectory
        for eid in sorted(os.listdir(sub_path)):
            data_dir = os.path.join(sub_path, eid)
            # Only convert data if there is manual annotation, i.e. cvi42 files
            if os.path.exists(os.path.join(data_dir, '{0}_cvi42.zip'.format(eid))):
                # Check the annotator's name (extracted from the
                # <OwnerUserName> XML tag inside the zipped workspace file)
                s = os.popen('unzip -c {0}/{1}_cvi42.zip "*.cvi42wsx" '
                             '| grep OwnerUserName'.format(data_dir, eid)).read()
                # NOTE(review): `annotator` is extracted but never used below
                annotator = (s.split('>')[1]).split('<')[0]
                # Decompress the zip files in this directory: the cvi42
                # workspace goes into data_dir, the images into dicom_dir
                files = glob.glob('{0}/{1}_*.zip'.format(data_dir, eid))
                dicom_dir = os.path.join(data_dir, 'dicom')
                if not os.path.exists(dicom_dir):
                    os.mkdir(dicom_dir)
                for f in files:
                    if os.path.basename(f) == '{0}_cvi42.zip'.format(eid):
                        os.system('unzip -o {0} -d {1}'.format(f, data_dir))
                    else:
                        os.system('unzip -o {0} -d {1}'.format(f, dicom_dir))
                # Process the manifest file
                process_manifest(os.path.join(dicom_dir, 'manifest.csv'),
                                 os.path.join(dicom_dir, 'manifest2.csv'))
                # NOTE(review): error_bad_lines was deprecated in pandas 1.3
                # (replacement: on_bad_lines='skip') -- confirm pinned version
                df2 = pd.read_csv(os.path.join(dicom_dir, 'manifest2.csv'), error_bad_lines=False)
                # Organise the dicom files
                # Group the files into subdirectories for each imaging series
                # ('series discription' [sic] is the column name as written in
                # the manifest produced by process_manifest)
                for series_name, series_df in df2.groupby('series discription'):
                    series_dir = os.path.join(dicom_dir, series_name)
                    if not os.path.exists(series_dir):
                        os.mkdir(series_dir)
                    series_files = [os.path.join(dicom_dir, x) for x in series_df['filename']]
                    os.system('mv {0} {1}'.format(' '.join(series_files), series_dir))
                # Parse cvi42 xml file
                cvi42_contours_dir = os.path.join(data_dir, 'cvi42_contours')
                if not os.path.exists(cvi42_contours_dir):
                    os.mkdir(cvi42_contours_dir)
                xml_name = os.path.join(data_dir, '{0}_cvi42.cvi42wsx'.format(eid))
                parse_cvi42_xml.parseFile(xml_name, cvi42_contours_dir)
                # Rare cases when no dicom file exists
                # e.g. 12xxxxx/1270299
                if not os.listdir(dicom_dir):
                    print('Warning: empty dicom directory! Skip this one.')
                    continue
                # Convert dicom files and annotations into nifti images
                dset = Biobank_Dataset(dicom_dir, cvi42_contours_dir)
                dset.read_dicom_images()
                dset.convert_dicom_to_nifti(data_dir)
                # Remove intermediate files
                os.system('rm -rf {0} {1}'.format(dicom_dir, cvi42_contours_dir))
                os.system('rm -f {0}'.format(xml_name))
| 0 | 0 | 0 |
38a6c61c2b962bdeef42096f6686ad596fe0b0bc | 757 | py | Python | a-practical-introduction-to-python-programming-brian-heinold/chapter-02/exercise-09.py | elarabyelaidy19/awesome-reading | 5c01a4272ba58e4f7ea665aab14b4c0aa252ea89 | [
"MIT"
] | 31 | 2021-11-02T19:51:13.000Z | 2022-02-17T10:55:26.000Z | a-practical-introduction-to-python-programming-brian-heinold/chapter-02/exercise-09.py | MosTafaHoSamm/awesome-reading | 469408fefc049d78ed53a2b2331b5d5cecdc6c06 | [
"MIT"
] | 1 | 2022-01-18T12:27:54.000Z | 2022-01-18T12:27:54.000Z | a-practical-introduction-to-python-programming-brian-heinold/chapter-02/exercise-09.py | MosTafaHoSamm/awesome-reading | 469408fefc049d78ed53a2b2331b5d5cecdc6c06 | [
"MIT"
] | 3 | 2022-01-11T05:01:34.000Z | 2022-02-05T14:36:29.000Z | # The Fibonacci numbers are the sequence below, where the first two numbers are 1, and each
# number thereafter is the sum of the two preceding numbers. Write a program that asks the
# user how many Fibonacci numbers to print and then prints that many.
# 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 ...
# How many Fibonacci numbers to print? 10
times = eval(input('How many Fibonacci numbers to print? '))
first = 1
second = 1
if times <= 2:
print(first)
if times == 2:
print(second)
else:
print(first)
print(second)
for i in range(2, times):
now = first + second
print(now)
first = second
second = now
# To handle the case if input <= 2, you should use 'if conditions' or 'lists'
| 28.037037 | 91 | 0.632761 | # The Fibonacci numbers are the sequence below, where the first two numbers are 1, and each
# number thereafter is the sum of the two preceding numbers. Write a program that asks the
# user how many Fibonacci numbers to print and then prints that many.
# 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 ...
# How many Fibonacci numbers to print? 10
times = eval(input('How many Fibonacci numbers to print? '))
first = 1
second = 1
if times <= 2:
print(first)
if times == 2:
print(second)
else:
print(first)
print(second)
for i in range(2, times):
now = first + second
print(now)
first = second
second = now
# To handle the case if input <= 2, you should use 'if conditions' or 'lists'
| 0 | 0 | 0 |
dbfd2b863ebf8a65643357ce4ed11a1cf0283a7d | 60,178 | py | Python | src/qdyn/pulse.py | goerz/qdynpylib | 89f76cdb07e149435fcdd7153afe3156b444b9a8 | [
"BSD-3-Clause"
] | 3 | 2016-05-09T03:21:32.000Z | 2018-04-12T08:42:50.000Z | src/qdyn/pulse.py | qucontrol/qdynpylib | 89f76cdb07e149435fcdd7153afe3156b444b9a8 | [
"BSD-3-Clause"
] | 10 | 2019-04-19T16:22:10.000Z | 2021-01-19T04:37:03.000Z | src/qdyn/pulse.py | qucontrol/qdynpylib | 89f76cdb07e149435fcdd7153afe3156b444b9a8 | [
"BSD-3-Clause"
] | 1 | 2019-06-28T18:47:32.000Z | 2019-06-28T18:47:32.000Z | """Module containing the :class:`Pulse` class and functions for initializing
pulse shapes."""
import logging
import re
from collections.abc import MutableMapping
import matplotlib.pyplot as plt
import numpy as np
import scipy.fftpack
from matplotlib.gridspec import GridSpec
from numpy.fft import fft, fftfreq
from scipy import signal
from scipy.interpolate import UnivariateSpline
from .io import open_file, writetotxt
from .linalg import iscomplexobj
from .units import UnitConvert, UnitFloat
class _PulseConfigAttribs(MutableMapping):
"""Custom ordered dict of config file attributes of pulses.
The 'type' key is fixed to the value 'file', and the keys listed in
`synchronized_keys` are linked to the corresponding attribute of
the parent pulse. Furthermore, the value of the 'is_complex' key is linked
to the type of the amplitude attribute of the parent pulse.
Args:
parent (Pulse): The pulse to which the settings apply
"""
_synchronized_keys = ['time_unit', 'ampl_unit']
_read_only_keys = ['type', 'is_complex']
_required_keys = [
'id',
'type',
'filename',
'time_unit',
'ampl_unit',
'is_complex',
]
def copy(self):
"""Shallow copy of object"""
c = _PulseConfigAttribs(self._parent)
c._data = self._data.copy()
c._keys = list(self._keys)
return c
class Pulse:
"""Numerical real or complex control pulse
Args:
tgrid (numpy.ndarray(float)):
Time grid values
amplitude (numpy.ndarray(float), numpy.ndarray(complex)):
Amplitude values. If not given, amplitude will be zero
time_unit (str): Unit of values in `tgrid`. Will be ignored when
reading from file.
ampl_unit (str): Unit of values in `amplitude`. Will be ignored when
reading from file.
freq_unit (str): Unit of frequencies when calculating spectrum. If not
given, an appropriate unit based on `time_unit` will be chosen, if
possible (or a `TypeError` will be raised.
Attributes:
tgrid (numpy.ndarray(float)): time points at which the pulse values
are defined, from ``t0 + dt/2`` to ``T - dt/2``.
amplitude (numpy.ndarray(float), numpy.ndarray(complex)): array
of real or complex pulse values.
time_unit (str): Unit of values in `tgrid`
ampl_unit (str): Unit of values in `amplitude`
freq_unit (str): Unit to use for frequency when calculating the
spectrum
preamble (list): List of lines that are written before the header
when writing the pulse to file. Each line should start with '# '
postamble (list): List of lines that are written after all data
lines. Each line should start with '# '
config_attribs (dict): Additional config data, for when generating a
QDYN config file section describing the pulse (e.g.
`{'oct_shape': 'flattop', 't_rise': '10_ns'}`)
Class Attributes:
unit_convert (QDYN.units.UnitConvert): converter to be used for any
unit conversion within any methods
Example:
>>> tgrid = pulse_tgrid(10, 100)
>>> amplitude = 100 * gaussian(tgrid, 50, 10)
>>> p = Pulse(tgrid=tgrid, amplitude=amplitude, time_unit='ns',
... ampl_unit='MHz')
>>> p.write('pulse.dat')
>>> p2 = Pulse.read('pulse.dat')
>>> from os import unlink; unlink('pulse.dat')
Notes:
It is important to remember that the QDYN Fortran library considers
pulses to be defined on the intervals of the propagation time grid
(i.e. for a time grid with n time steps of dt, the pulse will have n-1
points, defined at points shifted by dt/2)
The `pulse_tgrid` and `tgrid_from_config` routine may be used to obtain
the proper pulse time grid from the propagation time grid::
>>> import numpy as np
>>> p = Pulse(tgrid=pulse_tgrid(10, 100), ampl_unit='MHz',
... time_unit='ns')
>>> len(p.tgrid)
99
>>> print(str(p.dt))
0.10101_ns
>>> p.t0
0
>>> print("%.5f" % p.tgrid[0])
0.05051
>>> print(str(p.T))
10_ns
>>> print("%.5f" % p.tgrid[-1])
9.94949
The type of the `amplitude` (not whether there is a non-zero
imaginary part) decide whether the pulse is considered real or complex.
Complex pulses are not allowed to couple to Hermitian operators, and
in an optimization, both the real and imaginary part of the pulse are
modified.
"""
unit_convert = UnitConvert()
freq_units = { # map time_unit to most suitable freq_unit
'ns': 'GHz',
'ps': 'cminv',
'fs': 'eV',
'microsec': 'MHz',
'au': 'au',
'iu': 'iu',
'unitless': 'unitless',
'dimensionless': 'dimensionless',
}
def __eq__(self, other):
"""Compare two pulses, within a precision of 1e-12"""
if not isinstance(other, self.__class__):
return False
public_attribs = [
'is_complex',
'time_unit',
'ampl_unit',
'freq_unit',
'preamble',
'postamble',
'config_attribs',
]
for attr in public_attribs:
if getattr(self, attr) != getattr(other, attr):
return False
try:
if np.max(np.abs(self.tgrid - other.tgrid)) > 1.0e-12:
return False
if np.max(np.abs(self.amplitude - other.amplitude)) > 1.0e-12:
return False
except ValueError:
return False
return True
def copy(self):
"""Return a copy of the pulse"""
return self.__class__(
self.tgrid,
self.amplitude,
time_unit=self.time_unit,
ampl_unit=self.ampl_unit,
freq_unit=self.freq_unit,
config_attribs=self.config_attribs,
)
    def _check(self):
        """Assert self-consistency of pulse"""
        # NOTE: the order of the assertions is significant -- earlier checks
        # guard later ones (units are only checked on an initialized pulse)
        assert self.tgrid is not None, "Pulse is not initialized"
        assert self.amplitude is not None, "Pulse is not initialized"
        assert isinstance(self.tgrid, np.ndarray), "tgrid must be numpy array"
        assert isinstance(
            self.amplitude, np.ndarray
        ), "amplitude must be numpy array"
        assert (
            self.tgrid.dtype.type is np.float64
        ), "tgrid must be double precision"
        # amplitude may be real or complex, but always double precision
        assert self.amplitude.dtype.type in [
            np.float64,
            np.complex128,
        ], "amplitude must be double precision"
        assert len(self.tgrid) == len(
            self.amplitude
        ), "length of tgrid and amplitudes do not match"
        # all units must be known to the class-level unit converter
        assert self.ampl_unit in self.unit_convert.units, (
            "Unknown ampl_unit %s" % self.ampl_unit
        )
        assert self.time_unit in self.unit_convert.units, (
            "Unknown time_unit %s" % self.time_unit
        )
        assert self.freq_unit in self.unit_convert.units, (
            "Unknown freq_unit %s" % self.freq_unit
        )
    @classmethod
    def read(
        cls,
        filename,
        time_unit=None,
        ampl_unit=None,
        freq_unit=None,
        ignore_header=False,
    ):
        """Read a pulse from file, in the format generated by the QDYN
        ``write_pulse`` routine.

        Parameters:
            filename (str): Path and name of file from which to read the pulse
            time_unit (str or None): The unit of the time grid
            ampl_unit (str or None): The unit of the pulse amplitude.
            freq_unit (str or None): Intended value for the `freq_unit`
                attribute. If None, a `freq_unit` will be chosen automatically,
                if possible (or a `TypeError` will be raised)
            ignore_header (bool): If True, the file header will be ignored.

        Note:
            By default, the file is assumed to contain a header that
            identifies the columns and their units, as a comment line
            immediately preceding the data. If `time_unit` or `ampl_unit` are
            None, and `ignore_header` is False, the respective units are
            extracted from the header line. If `time_unit` or `ampl_unit` are
            not None, the respective values will be converted from the unit
            specified in the file header. If `ignore_header` is True, both
            `time_unit` and `ampl_unit` must be given. This can be used to read
            in pulses that were not generated by the QDYN ``write_pulse``
            routine. Note that if `ignore_header` is True, *all* comment lines
            preceding the data will be included in the `preamble` attribute.
            The `write` method allows to restore *exactly* the original pulse
            file.
        """
        logger = logging.getLogger(__name__)
        # one header regex per known column layout; each extracts the time
        # and amplitude units from the bracketed expressions
        header_rx = {
            'complex': re.compile(
                r'''
            ^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
            Re\((ampl|E)\) \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*
            Im\((ampl|E)\) \s* \[(\w+)\]\s*$''',
                re.X | re.I,
            ),
            'real': re.compile(
                r'''
            ^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
            Re\((ampl|E)\) \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*$''',
                re.X | re.I,
            ),
            'abs': re.compile(
                r'''
            ^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
            (Abs\()?(ampl|E)(\))? \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*$''',
                re.X | re.I,
            ),
        }
        # three data columns => complex amplitude; two columns => real
        try:
            t, x, y = np.genfromtxt(filename, unpack=True, dtype=np.float64)
        except ValueError:
            t, x = np.genfromtxt(filename, unpack=True, dtype=np.float64)
            y = None
        # collect the comment lines before (preamble) and after (postamble)
        # the data lines
        preamble = []
        postamble = []
        with open_file(filename) as in_fh:
            in_preamble = True
            for line in in_fh:
                if line.startswith('#'):
                    if in_preamble:
                        preamble.append(line.strip())
                    else:
                        postamble.append(line.strip())
                else:
                    if in_preamble:
                        in_preamble = False
        # the last line of the preamble *must* be the header line. We will
        # process it and remove it from preamble
        mode = None
        file_time_unit = None
        file_ampl_unit = None
        if ignore_header:
            mode = 'complex'
            if y is None:
                mode = 'real'
        else:
            try:
                header_line = preamble.pop()
            except IndexError:
                raise IOError("Pulse file does not contain a preamble")
            for file_mode, pattern in header_rx.items():
                match = pattern.match(header_line)
                if match:
                    mode = file_mode
                    file_time_unit = match.group('time_unit')
                    file_ampl_unit = match.group('ampl_unit')
                    break
            if mode is None:
                # fall back: guess the mode from the number of columns, and
                # the units from any two bracketed expressions in the header
                logger.warning(
                    "Non-standard header in pulse file."
                    "Check that pulse was read with correct units"
                )
                if y is None:
                    mode = 'real'
                else:
                    mode = 'complex'
                free_pattern = re.compile(
                    r'''
                    ^\# .* \[\s*(?P<time_unit>\w+)\s*\]
                    .* \[\s*(?P<ampl_unit>\w+)\s*\]''',
                    re.X,
                )
                match = free_pattern.search(header_line)
                if match:
                    file_time_unit = match.group('time_unit')
                    file_ampl_unit = match.group('ampl_unit')
                    logger.info("Identify time_unit = %s", file_time_unit)
                    logger.info("Identify ampl_unit = %s", file_ampl_unit)
                if file_time_unit is None or file_ampl_unit is None:
                    raise ValueError("Could not identify units from header")
        if mode == 'abs':
            amplitude = x
        elif mode == 'real':
            amplitude = x
        elif mode == 'complex':
            amplitude = x + 1j * y
        else:
            raise ValueError("mode must be 'abs', 'real', or 'complex'")
        if not ignore_header:
            # if units were given explicitly, convert from the units declared
            # in the file header; otherwise, adopt the file's units
            if time_unit is None:
                time_unit = file_time_unit
            else:
                t = cls.unit_convert.convert(t, file_time_unit, time_unit)
            if ampl_unit is None:
                ampl_unit = file_ampl_unit
            else:
                amplitude = cls.unit_convert.convert(
                    amplitude, file_ampl_unit, ampl_unit
                )
        pulse = cls(
            tgrid=t,
            amplitude=amplitude,
            time_unit=time_unit,
            ampl_unit=ampl_unit,
            freq_unit=freq_unit,
        )
        pulse.preamble = preamble
        pulse.postamble = postamble
        return pulse
@classmethod
def from_func(
cls,
tgrid,
func,
time_unit=None,
ampl_unit=None,
freq_unit=None,
config_attribs=None,
):
"""Instantiate a pulse from an amplitude function `func`.
All other parameters are passed on to `__init__`
"""
amplitude = [func(t) for t in tgrid]
return cls(
tgrid,
amplitude=amplitude,
time_unit=time_unit,
ampl_unit=ampl_unit,
freq_unit=freq_unit,
config_attribs=config_attribs,
)
@property
def dt(self):
"""Time grid step, as instance of `UnitFloat`"""
return UnitFloat(self.tgrid[1] - self.tgrid[0], unit=self.time_unit)
@property
def t0(self):
"""Time at which the pulse begins (dt/2 before the first point in the
pulse), as instance of `UnitFloat`
"""
result = self.tgrid[0] - 0.5 * float(self.dt)
if abs(result) < 1.0e-15 * self.tgrid[-1]:
result = 0.0
return UnitFloat(result, self.time_unit)
@property
def states_tgrid(self):
"""Time grid values for the states propagated under the numerical pulse
values, as numpy array in units of :attr:`time_unit`.
The returned time grid has one point more than :attr:`tgrid`, and
extends from :attr:`t0` to :attr:`T` (inclusive).
"""
return np.linspace(float(self.t0), float(self.T), len(self.tgrid) + 1)
@property
def w_max(self):
"""Maximum frequency that can be represented with the
current sampling rate.
"""
n = len(self.tgrid)
dt = float(self.unit_convert.convert(self.dt, self.time_unit, 'iu'))
if n % 2 == 1:
# odd
w_max = ((n - 1) * np.pi) / (n * dt)
else:
# even
w_max = np.pi / dt
return self.unit_convert.convert(w_max, 'iu', self.freq_unit)
@property
def dw(self):
"""Step width in the spectrum (i.e. the spectral resolution)
based on the current pulse duration, as an instance of
:class:`~qdyn.units.UnitFloat`.
"""
n = len(self.tgrid)
w_max = self.w_max
if n % 2 == 1:
# odd
return 2.0 * w_max / float(n - 1)
else:
# even
return 2.0 * w_max / float(n)
@property
def T(self):
"""Time at which the pulse ends (dt/2 after the last point in the
pulse), as an instance of :class:`~qdyn.units.UnitFloat`.
"""
result = self.tgrid[-1] + 0.5 * float(self.dt)
if abs(round(result) - result) < (1.0e-15 * result):
result = round(result)
return UnitFloat(result, unit=self.time_unit)
    @property
    def is_complex(self):
        """Is the pulse amplitude of complex type?"""
        # determined by the dtype of the amplitude array, not by the presence
        # of nonzero imaginary parts
        return iscomplexobj(self.amplitude)
    def as_func(self, interpolation='linear', allow_args=False):
        """Callable that evaluates the pulse for a given time value.

        Possible values for `interpolation` are 'linear' and 'piecewise'.

        The resulting function takes an argument `t` that must be a float
        in the range [:attr:`t0`, :attr:`T`] and in units of
        :attr:`time_unit`). It returns the
        (interpolated) pulse amplitude as a float, in units of
        :attr:`ampl_unit`

        If `allow_args` is True, the resulting function takes a second argument
        `args` that is ignored. This is for compatibility with qutip, see
        http://qutip.org/docs/latest/guide/dynamics/dynamics-time.html.

        Raises:
            ValueError: if `interpolation` is invalid, or (from the returned
                function) if `t` is outside ``[t0, T]``
        """
        t0 = float(self.t0)
        T = float(self.T)
        dt = float(self.dt)
        # pulse values sit at interval midpoints: tgrid[n] = offset + n*dt
        offset = t0 + 0.5 * dt

        def func_linear(t):
            """linear interpolation of pulse amplitude"""
            if t0 <= float(t) <= T:
                t = float(t) - offset
                # index of the pulse point at or to the left of t (clipped
                # at 0 for t in the leading half-interval)
                n = max(int(t / dt), 0)
                # fractional position between points n and n+1
                delta = max(t - n * dt, 0.0) / dt
                try:
                    return (1 - delta) * self.amplitude[
                        n
                    ] + delta * self.amplitude[n + 1]
                except IndexError:  # last n
                    return self.amplitude[n]
            else:
                raise ValueError(
                    "Value t=%g not in range [%g, %g]" % (t, t0, T)
                )

        def func_piecewise(t):
            """piecewise interpolation of pulse amplitude"""
            if t0 <= float(t) <= T:
                t = float(t) - offset
                n = max(int(t / dt), 0)
                delta = max(t - n * dt, 0.0) / dt
                # return the value of the nearest of the two neighboring
                # pulse points
                if delta < 0.5:
                    return self.amplitude[n]
                else:
                    try:
                        return self.amplitude[n + 1]
                    except IndexError:  # last n
                        return self.amplitude[n]
            else:
                raise ValueError(
                    "Value t=%g not in range [%g, %g]" % (t, t0, T)
                )

        func_map = {'linear': func_linear, 'piecewise': func_piecewise}
        try:
            if allow_args:
                # wrap in a two-argument signature for qutip compatibility
                return _attach_args(func_map[interpolation])
            else:
                return func_map[interpolation]
        except KeyError:
            raise ValueError(
                "Invalid interpolation not in %s: %s"
                % (str(list(func_map.keys())), interpolation)
            )
def convert(self, time_unit=None, ampl_unit=None, freq_unit=None):
"""Convert the pulse data to different units"""
if time_unit is not None:
factor = self.unit_convert.convert(1.0, self.time_unit, time_unit)
self.tgrid *= factor
self.time_unit = time_unit
if ampl_unit is not None:
factor = self.unit_convert.convert(1.0, self.ampl_unit, ampl_unit)
self.amplitude *= factor
self.ampl_unit = ampl_unit
if freq_unit is not None:
self.freq_unit = freq_unit
self._check()
def get_timegrid_point(self, t, move="left"):
"""Return the next point to the left (or right) of the given `t` which
is on the pulse time grid
"""
t_start = self.tgrid[0]
t_stop = self.tgrid[-1]
dt = self.dt
if t < t_start:
return t_start
if t > t_stop:
return t_stop
if move == "left":
n = np.floor((t - t_start) / dt)
else:
n = np.ceil((t - t_start) / dt)
return t_start + n * dt
@property
def fluence(self):
"""Fluence (integrated pulse energy) for the pulse
.. math:: \\int_{-\\infty}^{\\infty} \\vert|E(t)\\vert^2 dt
"""
return np.sum(self.amplitude ** 2) * float(self.dt)
@property
def oct_iter(self):
"""OCT iteration number from the pulse preamble, if available. If not
available, 0"""
iter_rx = re.compile(r'OCT iter[\s:]*(\d+)', re.I)
for line in self.preamble:
iter_match = iter_rx.search(line)
if iter_match:
return int(iter_match.group(1))
return 0
def spectrum(self, freq_unit=None, mode='complex', sort=False):
"""Calculate the spectrum of the pulse
Parameters:
freq_unit (str): Desired unit of the `freq` output array.
Can Hz (GHz, Mhz, etc) to obtain frequencies, or any energy
unit, using the correspondence ``f = E/h``. If not given,
defaults to the `freq_unit` attribute
mode (str): Wanted mode for `spectrum` output array.
Possible values are 'complex', 'abs', 'real', 'imag'
sort (bool): Sort the output `freq` array (and the output
`spectrum` array) so that frequecies are ordered from
``-w_max .. 0 .. w_max``, instead of the direct output from the
FFT. This is good for plotting, but does not allow to do an
inverse Fourier transform afterwards
Returns:
numpy.ndarray(float), numpy.ndarray(complex): Frequency values
associated with the amplitude values in `spectrum`, i.e. the x-axis
of the spectrogram. The values are in the unit `freq_unit`.
Real (`mode in ['abs', 'real', 'imag']`) or complex
(`mode='complex'`) amplitude of each frequency component.
Notes:
If `sort=False` and `mode='complex'`, the original pulse
values can be obtained by simply calling `np.fft.ifft`
The spectrum is not normalized (Scipy follows the convention of
doing the normalization on the backward transform). You might want
to normalized by 1/n for plotting.
"""
s = fft(self.amplitude) # spectrum amplitude
f = self.fftfreq(freq_unit=freq_unit)
modifier = {
'abs': lambda s: np.abs(s),
'real': lambda s: np.real(s),
'imag': lambda s: np.imag(s),
'complex': lambda s: s,
}
if sort:
order = np.argsort(f)
f = f[order]
s = s[order]
return f, modifier[mode](s)
def fftfreq(self, freq_unit=None):
"""Return the FFT frequencies associated with the pulse. Cf.
`numpy.fft.fftfreq`
Parameters:
freq_unit (str): Desired unit of the output array.
If not given, defaults to the `freq_unit` attribute
Returns:
numpy.ndarray(float): Frequency values associated with
the pulse time grid.
The first half of the `freq` array contains the
positive frequencies, the second half the negative frequencies
"""
if freq_unit is None:
freq_unit = self.freq_unit
n = len(self.amplitude)
dt = float(self.unit_convert.convert(self.dt, self.time_unit, 'iu'))
return self.unit_convert.convert(
fftfreq(n, d=dt / (2.0 * np.pi)), 'iu', freq_unit
)
    def derivative(self):
        """Calculate the derivative of the current pulse and return it as a new
        pulse. Note that the derivative is in units of `ampl_unit`/`time_unit`,
        but will be marked as 'unitless'.
        """
        # temporarily move self onto the unshifted time grid (one extra
        # point); this is undone by the _shift() call below
        self._unshift()
        T = self.tgrid[-1] - self.tgrid[0]
        # spectral differentiation; scipy.fftpack.diff assumes a period of
        # 2*pi, hence the rescaling to the actual duration T
        deriv = scipy.fftpack.diff(self.amplitude) * (2.0 * np.pi / T)
        deriv_pulse = Pulse(
            tgrid=self.tgrid,
            amplitude=deriv,
            time_unit=self.time_unit,
            ampl_unit='unitless',
        )
        # restore self, and move the derivative onto the shifted grid as well
        self._shift()
        deriv_pulse._shift()
        return deriv_pulse
def phase(self, unwrap=False, s=None, derivative=False, freq_unit=None):
"""Return the pulse's complex phase, or derivative of the phase
Parameters:
unwrap (bool): If False, report the phase in ``[-pi:pi]``. If True,
the phase may take any real value, avoiding the discontinuous
jumps introduced by limiting the phase to a 2 pi interval.
s (float or None): smoothing parameter, see
:py:class:`scipy.interpolate.UnivariateSpline`. If None, no
smoothing is performed.
derivative (bool): If False, return the (smoothed) phase directly.
If True, return the derivative of the (smoothed) phase.
freq_unit (str or None): If `derivative` is True, the unit in which
the derivative should be calculated. If None, `self.freq_unit`
is used.
Note:
When calculating the derivative, some smoothing is generally
required. By specifying a smoothing parameter `s`, the phase is
smoothed through univeriate splines before calculating the
derivative.
When calculating the phase directly (instead of the derivative),
smoothing should only be used when also unwrapping the phase.
"""
phase = np.angle(self.amplitude)
if unwrap or derivative:
phase = np.unwrap(phase)
tgrid = self.unit_convert.convert(self.tgrid, self.time_unit, 'iu')
if derivative:
if freq_unit is None:
freq_unit = self.freq_unit
if s is None:
s = 1
spl = UnivariateSpline(tgrid, phase, s=s)
deriv = spl.derivative()(tgrid)
return self.unit_convert.convert(deriv, 'iu', self.freq_unit)
else: # direct phase
if s is not None:
spl = UnivariateSpline(tgrid, phase, s=s)
return spl(tgrid)
else:
return phase
def write(self, filename, mode=None):
"""Write a pulse to file, in the same format as the QDYN `write_pulse`
routine
Parameters:
filename (str): Name of file to which to write the pulse
mode (str): Mode in which to write files. Possible values
are 'abs', 'real', or 'complex'. The former two result in a
two-column file, the latter in a three-column file. If not
given, 'real' or 'complex' is used, depending on the type of
:attr:`amplitude`
"""
if mode is None:
if iscomplexobj(self.amplitude):
mode = 'complex'
else:
mode = 'real'
self._check()
preamble = self.preamble
if not hasattr(preamble, '__getitem__'):
preamble = [str(preamble)]
postamble = self.postamble
if not hasattr(postamble, '__getitem__'):
postamble = [str(postamble)]
buffer = ''
# preamble
for line in preamble:
line = str(line).strip()
if line.startswith('#'):
buffer += "%s\n" % line
else:
buffer += '# %s\n' % line
# header and data
time_header = "time [%s]" % self.time_unit
ampl_re_header = "Re(ampl) [%s]" % self.ampl_unit
ampl_im_header = "Im(ampl) [%s]" % self.ampl_unit
ampl_abs_header = "Abs(ampl) [%s]" % self.ampl_unit
if mode == 'abs':
buffer += "# %23s%25s\n" % (time_header, ampl_abs_header)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E\n" % (t, abs(self.amplitude[i]))
elif mode == 'real':
buffer += "# %23s%25s\n" % (time_header, ampl_re_header)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E\n" % (t, self.amplitude.real[i])
elif mode == 'complex':
buffer += "# %23s%25s%25s\n" % (
time_header,
ampl_re_header,
ampl_im_header,
)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E%25.17E\n" % (
t,
self.amplitude.real[i],
self.amplitude.imag[i],
)
else:
raise ValueError("mode must be 'abs', 'real', or 'complex'")
# postamble
for line in self.postamble:
line = str(line).strip()
if line.startswith('#'):
buffer += "%s\n" % line
else:
buffer += '# %s' % line
with open_file(filename, 'w') as out_fh:
out_fh.write(buffer)
def write_oct_spectral_filter(self, filename, filter_func, freq_unit=None):
"""Evaluate a spectral filter function and write the result to the file
with a given `filename`, in a format such that the file may be used for
the `oct_spectral_filter` field of a pulse in a QDYN config file. The
file will have two columns: The pulse frequencies (see `fftfreq`
method), and the value of the filter function in the range [0, 1]
Args:
filename (str): Filename of the output file
filter_func (callable): A function that takes a frequency values
(in units of `freq_unit`) and returns a filter value in the
range [0, 1]
freq_unit (str): Unit of frequencies that `filter_func`
assumes. If not given, defaults to the `freq_unit` attribute.
Note:
The `filter_func` function may return any values that numpy
considers equivalent to floats in the range [0, 1]. This
includes boolean values, where True is equivalent to 1.0 and
False is equivalent to 0.0
"""
if freq_unit is None:
freq_unit = self.freq_unit
freqs = self.fftfreq(freq_unit=freq_unit)
filter = np.array([filter_func(f) for f in freqs], dtype=np.float64)
if not (0 <= np.min(filter) <= 1 and 0 <= np.max(filter) <= 1):
raise ValueError("filter values must be in the range [0, 1]")
header = "%15s%15s" % ("freq [%s]" % freq_unit, 'filter')
writetotxt(filename, freqs, filter, fmt='%15.7e%15.12f', header=header)
    def apply_spectral_filter(self, filter_func, freq_unit=None):
        """Apply a spectral filter function to the pulse (in place)

        Args:
            filter_func (callable): A function that takes a frequency values
                (in units of `freq_unit`) and returns a filter value in the
                range [0, 1]
            freq_unit (str): Unit of frequencies that `filter_func`
                assumes. If not given, defaults to the `freq_unit` attribute.
        """
        # unsorted spectrum, so that np.fft.ifft can invert it directly
        freqs, spec = self.spectrum(freq_unit=freq_unit)
        # NOTE(review): `filter` shadows the builtin of the same name
        filter = np.array([filter_func(f) for f in freqs], dtype=np.float64)
        if not (0 <= np.min(filter) <= 1 and 0 <= np.max(filter) <= 1):
            raise ValueError("filter values must be in the range [0, 1]")
        spec *= filter
        # NOTE(review): ifft always returns a complex-typed array, so a real
        # pulse becomes complex after filtering (affects `is_complex`) --
        # confirm this is intended
        self.amplitude = np.fft.ifft(spec)
        return self
def apply_smoothing(self, **kwargs):
"""Smooth the pulse amplitude (in place) through univariate splining.
All keyword arguments are passed directly to
:py:class:`scipy.interpolate.UnivariateSpline`. This especially
includes the smoothing parameter `s`.
"""
if iscomplexobj(self.amplitude):
splx = UnivariateSpline(self.tgrid, self.amplitude.real, **kwargs)
sply = UnivariateSpline(self.tgrid, self.amplitude.imag, **kwargs)
self.amplitude = splx(self.tgrid) + 1.0j * sply(self.tgrid)
else:
spl = UnivariateSpline(self.tgrid, self.amplitude, **kwargs)
self.amplitude = spl(self.tgrid)
return self
def _unshift(self):
"""Move the pulse onto the unshifted time grid. This increases the
number of points by one"""
tgrid_new = np.linspace(
float(self.t0), float(self.T), len(self.tgrid) + 1
)
pulse_new = np.zeros(
len(self.amplitude) + 1, dtype=self.amplitude.dtype.type
)
pulse_new[0] = self.amplitude[0]
for i in range(1, len(pulse_new) - 1):
pulse_new[i] = 0.5 * (self.amplitude[i - 1] + self.amplitude[i])
pulse_new[-1] = self.amplitude[-1]
self.tgrid = tgrid_new
self.amplitude = pulse_new
self._check()
    def _shift(self, data=None):
        """Inverse of _unshift"""
        dt = float(self.dt)
        # midpoints of the current (unshifted) grid: one point fewer
        tgrid_new = np.linspace(
            self.tgrid[0] + dt / 2.0,
            self.tgrid[-1] - dt / 2.0,
            len(self.tgrid) - 1,
        )
        # operate either on the pulse itself (data=None) or on an external
        # array that lives on the same grid
        if data is None:
            data_old = self.amplitude
        else:
            data_old = data
        data_new = np.zeros(len(data_old) - 1, dtype=data_old.dtype.type)
        data_new[0] = data_old[0]
        # sequential recurrence inverting the midpoint averaging of _unshift;
        # each value depends on the previous one, so this cannot be vectorized
        for i in range(1, len(data_new) - 1):
            data_new[i] = 2.0 * data_old[i] - data_new[i - 1]
        data_new[-1] = data_old[-1]
        if data is None:
            # in-place mode: update the pulse itself
            self.tgrid = tgrid_new
            self.amplitude = data_new
            self._check()
        else:
            # functional mode: return the shifted copy of `data`
            return data_new
def resample(self, upsample=None, downsample=None, num=None, window=None):
"""Resample the pulse, either by giving an upsample ratio, a downsample
ration, or a number of sampling points
Parameters:
upsample (int): Factor by which to increase the number of
samples. Afterwards, those points extending beyond the original
end point of the pulse are discarded.
downsample (int): For ``downsample=n``, keep only every
n'th point of the original pulse. This may cause the resampled
pulse to end earlier than the original pulse
num (int): Resample with `num` sampling points. This may
case the end point of the resampled pulse to change
window (list, numpy.ndarray, callable, str, float, or tuple):
Specifies the window applied to the signal in the Fourier
domain. See `sympy.signal.resample`.
Notes:
Exactly one of `upsample`, `downsample`, or `num` must be given.
Upsampling will maintain the pulse start and end point (as returned
by the `T` and `t0` properties), up to some rounding errors.
Downsampling, or using an arbitrary number will change the end
point of the pulse in general.
"""
self._unshift()
nt = len(self.tgrid)
if sum([(x is not None) for x in [upsample, downsample, num]]) != 1:
raise ValueError(
"Exactly one of upsample, downsample, or num must be given"
)
if num is None:
if upsample is not None:
upsample = int(upsample)
num = nt * upsample
elif downsample is not None:
downsample = int(downsample)
assert downsample > 0, "downsample must be > 0"
num = nt / downsample
else:
num = nt
else:
num = num + 1 # to account for shifting
a, t = signal.resample(self.amplitude, num, self.tgrid, window=window)
if upsample is not None:
# discard last (upsample-1) elements
self.amplitude = a[: -(upsample - 1)]
self.tgrid = t[: -(upsample - 1)]
else:
self.amplitude = a
self.tgrid = t
self._shift()
def render_pulse(self, ax, label='pulse'):
    """Render the pulse amplitude on the given axes.

    If the amplitude has any non-vanishing imaginary part, its absolute
    value is plotted; otherwise its real part is plotted (with a zero
    reference line when the amplitude changes sign).
    """
    has_imag_part = np.max(np.abs(self.amplitude.imag)) > 0.0
    if has_imag_part:
        ax.plot(self.tgrid, np.abs(self.amplitude), label=label)
        ax.set_ylabel("abs(pulse) (%s)" % self.ampl_unit)
    else:
        real_ampl = self.amplitude.real
        if np.min(real_ampl) < 0:
            # zero-line as visual reference for sign changes
            ax.axhline(y=0.0, ls='-', color='black')
        ax.plot(self.tgrid, real_ampl, label=label)
        ax.set_ylabel("pulse (%s)" % (self.ampl_unit))
    ax.set_xlabel("time (%s)" % self.time_unit)
def render_phase(self, ax, label='phase'):
    """Render the complex phase of the pulse on the given axes, in units
    of pi."""
    # zero-line as visual reference for the phase
    ax.axhline(y=0.0, ls='-', color='black')
    phase = np.angle(self.amplitude) / np.pi
    ax.plot(self.tgrid, phase, ls='-', color='black', label=label)
    ax.set_ylabel(r'phase ($\pi$)')
    ax.set_xlabel("time (%s)" % self.time_unit)
def render_spectrum(
    self,
    ax,
    zoom=True,
    wmin=None,
    wmax=None,
    spec_scale=None,
    spec_max=None,
    freq_unit=None,
    mark_freqs=None,
    mark_freq_points=None,
    label='spectrum',
):
    """Render spectrum onto the given axis, see `plot` for arguments.

    The absolute value of the (sorted) spectrum is plotted, normalized by
    the number of sampling points. If both `wmin` and `wmax` are given,
    they define the frequency window and `zoom` is ignored.
    """
    freq, spectrum = self.spectrum(
        mode='abs', sort=True, freq_unit=freq_unit
    )
    # normalizing the spectrum makes it independent of the number of
    # sampling points. That is, the spectrum of a signal that is simply
    # resampled will be the same as that of the original signal. Scipy
    # follows the convention of doing the normalization in the inverse
    # transform
    spectrum *= 1.0 / len(spectrum)
    if wmax is not None and wmin is not None:
        # an explicit frequency window overrides automatic zooming
        zoom = False
    if zoom:
        # figure out the range of the spectrum
        max_amp = np.amax(spectrum)
        if self.is_complex:
            # we center the spectrum around zero, and extend
            # symmetrically in both directions as far as there is
            # significant amplitude
            # (wmin/wmax start inverted on purpose, so any significant
            # point updates them on the first iteration)
            wmin = np.max(freq)
            wmax = np.min(freq)
            for i, w in enumerate(freq):
                if spectrum[i] > 0.001 * max_amp:
                    if w > wmax:
                        wmax = w
                    if w < wmin:
                        wmin = w
            wmax = max(abs(wmin), abs(wmax))
            wmin = -wmax
        else:
            # we show only the positive part of the spectrum (under the
            # assumption that the spectrum is symmetric) and zoom in
            # only on the region that was significant amplitude
            wmin = 0.0
            wmax = 0.0
            for i, w in enumerate(freq):
                if spectrum[i] > 0.001 * max_amp:
                    if wmin == 0 and w > 0:
                        wmin = w
                    wmax = w
        # 10% margin on either side of the significant region
        buffer = (wmax - wmin) * 0.1
    # plot spectrum
    if zoom:
        ax.set_xlim((wmin - buffer), (wmax + buffer))
    else:
        if wmin is not None and wmax is not None:
            ax.set_xlim(wmin, wmax)
    ax.set_xlabel("frequency (%s)" % freq_unit)
    ax.set_ylabel("abs(spec) (arb. un.)")
    if spec_scale is None:
        spec_scale = 1.0
    ax.plot(
        freq, spec_scale * spectrum, marker=mark_freq_points, label=label
    )
    if spec_max is not None:
        ax.set_ylim(0, spec_max)
    if mark_freqs is not None:
        # NOTE(review): the loop variable re-binds `freq` (the frequency
        # array plotted above); harmless here since the array is no
        # longer needed, but beware when extending this method
        for freq in mark_freqs:
            kwargs = {'ls': '--', 'color': 'black'}
            try:
                # entries may be (frequency, axvline-kwargs) tuples
                freq, kwargs = freq
            except TypeError:
                pass  # plain float entry: keep default line style
            ax.axvline(x=float(freq), **kwargs)
def plot(
    self,
    fig=None,
    show_pulse=True,
    show_spectrum=True,
    zoom=True,
    wmin=None,
    wmax=None,
    spec_scale=None,
    spec_max=None,
    freq_unit=None,
    mark_freqs=None,
    mark_freq_points=None,
    **figargs
):
    """Generate a plot of the pulse on a given figure

    Parameters:
        fig (matplotlib.figure.Figure): The figure onto which to plot. If
            not given, create a new figure from `matplotlib.pyplot.figure`
        show_pulse (bool): Include a plot of the pulse amplitude? If the
            pulse has a vanishing imaginary part, the plot will show the
            real part of the amplitude, otherwise, there will be one plot
            for the absolute value of the amplitude and one showing the
            complex phase in units of pi
        show_spectrum (bool): Include a plot of the spectrum?
        zoom (bool): If `True`, only show the part of the spectrum that has
            amplitude of at least 0.1% of the maximum peak in the spectrum.
            For real pulses, only the positive part of the spectrum is
            shown
        wmin (float): Lowest frequency to show. Overrides zoom options.
            Must be given together with `wmax`.
        wmax (float): Highest frequency to show. Overrides zoom options.
            Must be given together with `wmin`.
        spec_scale (float): Factor by which to scale the amplitudes in the
            spectrum
        spec_max (float): Maximum amplitude in the spectrum, after
            spec_scale has been applied
        freq_unit (str): Unit in which to show the frequency axis in the
            spectrum. If not given, use the `freq_unit` attribute
        mark_freqs (None, list(float), list((float, dict))):
            Array of frequencies to mark in spectrum as vertical dashed
            lines. If list of tuples (float, dict), the float value is the
            frequency to mark, and the dict gives the keyword arguments
            that are passed to the matplotlib `axvline` method.
        mark_freq_points (None, ~matplotlib.markers.MarkerStyle): Marker to
            be used to indicate the individual points in the spectrum.

    The remaining figargs are passed to `matplotlib.pyplot.figure` to
    create a new figure if `fig` is None.
    """
    if fig is None:
        fig = plt.figure(**figargs)
    if freq_unit is None:
        freq_unit = self.freq_unit
    self._check()
    pulse_is_complex = self.is_complex
    # do the layout: decide how many subplot rows are needed, depending
    # on which panels are requested and whether the pulse is complex
    if show_pulse and show_spectrum:
        if pulse_is_complex:
            # show abs(pulse), phase(pulse), abs(spectrum)
            gs = GridSpec(3, 1, height_ratios=[2, 1, 2])
        else:
            # show real(pulse), abs(spectrum)
            gs = GridSpec(2, 1, height_ratios=[1, 1])
    else:
        if show_pulse:
            if pulse_is_complex:
                # show abs(pulse), phase(pulse)
                gs = GridSpec(2, 1, height_ratios=[2, 1])
            else:
                # show real(pulse)
                gs = GridSpec(1, 1)
        else:
            gs = GridSpec(1, 1)
    if show_spectrum:
        # the spectrum always occupies the bottom row of the grid
        ax_spectrum = fig.add_subplot(gs[-1], label='spectrum')
        self.render_spectrum(
            ax_spectrum,
            zoom,
            wmin,
            wmax,
            spec_scale,
            spec_max,
            freq_unit,
            mark_freqs,
            mark_freq_points,
        )
    if show_pulse:
        # plot pulse amplitude
        ax_pulse = fig.add_subplot(gs[0], label='pulse')
        self.render_pulse(ax_pulse)
        if pulse_is_complex:
            # plot pulse phase
            ax_phase = fig.add_subplot(gs[1], label='phase')
            self.render_phase(ax_phase)
    fig.subplots_adjust(hspace=0.3)
def show(self, **kwargs):
    """Display a plot of the pulse and its spectrum.

    All keyword arguments are forwarded to the `plot` method.
    """
    self.plot(**kwargs)  # plot() creates a new figure via plt.figure()
    plt.show()
def show_pulse(self, **kwargs):
    """Display a plot of the pulse amplitude only.

    Alias for ``show(show_spectrum=False)``; all other keyword arguments
    are forwarded to the `show` method.
    """
    self.show(show_spectrum=False, **kwargs)
def show_spectrum(self, zoom=True, freq_unit=None, **kwargs):
    """Display a plot of the pulse spectrum only.

    Alias for ``show(show_pulse=False, zoom=zoom, freq_unit=freq_unit)``;
    all other keyword arguments are forwarded to the `show` method.
    """
    self.show(show_pulse=False, zoom=zoom, freq_unit=freq_unit, **kwargs)
def pulse_tgrid(T, nt, t0=0.0):
    """Return a pulse time grid suitable for an equidistant time grid of
    the states between `t0` and `T` with `nt` intervals.

    The values of the pulse are defined in the intervals of the states
    time grid, so the pulse time grid is shifted by dt/2 with respect to
    the time grid of the states, and has ``nt - 1`` points:

    >>> print(", ".join([("%.2f" % t) for t in pulse_tgrid(1.5, nt=4)]))
    0.25, 0.75, 1.25

    The limits of the states time grid are defined as the starting and end
    points of the pulse, however:

    >>> p = Pulse(tgrid=pulse_tgrid(1.5, 4), time_unit='ns', ampl_unit='MHz')
    >>> p.t0
    0
    >>> p.T
    1.5_ns
    """
    dt = float(T - t0) / (nt - 1)
    # pulse points sit in the center of each interval of the states grid
    first_point = float(t0) + 0.5 * dt
    last_point = float(T) - 0.5 * dt
    return np.linspace(first_point, last_point, nt - 1)
def tgrid_from_config(tgrid_dict, time_unit, pulse_grid=True):
    """Extract the time grid from the given config file

    The `tgrid_dict` must specify at least three of the four values
    `t_start`, `t_stop`, `nt`, `dt`; any missing value is derived from
    the other three. If `pulse_grid` is True (default), the returned
    array is the *pulse* time grid (points shifted by dt/2, one point
    fewer than the states grid); otherwise it is the states time grid.

    >>> tgrid_dict = dict([('t_start', 0.0), ('t_stop', UnitFloat(10.0, 'ns')),
    ...     ('dt', UnitFloat(20, 'ps')), ('fixed', True)])
    >>> tgrid = tgrid_from_config(tgrid_dict, time_unit='ns')
    >>> print("%.2f" % tgrid[0])
    0.01
    >>> print("%.2f" % tgrid[-1])
    9.99
    """
    if time_unit is None:
        time_unit = 'unitless'
    t_start = None
    t_stop = None
    nt = None
    dt = None
    if 't_start' in tgrid_dict:
        t_start = tgrid_dict['t_start']
    if 't_stop' in tgrid_dict:
        t_stop = tgrid_dict['t_stop']
    if 'nt' in tgrid_dict:
        nt = tgrid_dict['nt']
    if 'dt' in tgrid_dict:
        dt = tgrid_dict['dt']
    # derive the one missing value (if any) from the other three
    if t_start is None:
        assert (
            (t_stop is not None) and (dt is not None) and (nt is not None)
        ), "tgrid not fully specified in config"
        t_start = t_stop - (nt - 1) * dt
    if t_stop is None:
        assert (
            (t_start is not None) and (dt is not None) and (nt is not None)
        ), "tgrid not fully specified in config"
        t_stop = t_start + (nt - 1) * dt
    if nt is None:
        assert (
            (t_start is not None) and (dt is not None) and (t_stop is not None)
        ), "tgrid not fully specified in config"
        # NOTE(review): int() truncates; this assumes dt evenly divides
        # the interval (up to float rounding) -- confirm
        nt = int((t_stop - t_start) / dt) + 1
    if dt is None:
        assert (
            (t_start is not None) and (nt is not None) and (t_stop is not None)
        ), "tgrid not fully specified in config"
        dt = (t_stop - t_start) / float(nt - 1)
    # normalize all values to the requested time unit
    t_start = UnitFloat(t_start).convert(time_unit)
    t_stop = UnitFloat(t_stop).convert(time_unit)
    dt = UnitFloat(dt).convert(time_unit)
    if pulse_grid:
        # convert to pulse parameters: pulse points sit in the middle of
        # the intervals of the states grid, so there is one point fewer
        t_start += 0.5 * dt
        t_stop -= 0.5 * dt
        nt -= 1
    tgrid = np.linspace(float(t_start), float(t_stop), nt)
    return tgrid
###############################################################################
# Shape functions
###############################################################################
def carrier(
    t, time_unit, freq, freq_unit, weights=None, phases=None, complex=False
):
    r'''Create the "carrier" of the pulse as a weighted superposition of
    cosines at different frequencies.

    Parameters:
        t (numpy.ndarray(float)): Time value or time grid
        time_unit (str): Unit of `t`
        freq (numpy.ndarray(float)): Carrier frequency or frequencies
        freq_unit (str): Unit of `freq`
        weights (numpy.ndarray): If `freq` is an array, weights for
            the different frequencies. If not given, all weights are 1. The
            weights are normalized to sum to one. Any weight smaller than
            machine precision is assumed zero.
        phases (numpy.ndarray): If `phases` is an array, phase shift
            for each frequency component, in units of pi. If not given, all
            phases are 0.
        complex (bool): If `True`, oscillate in the complex plane

    Returns:
        numpy.ndarray(complex): Depending on whether
        `complex` is `True` or `False`,

        .. math::
            s(t) = \sum_j w_j * \cos(\omega_j * t + \phi_j) \\
            s(t) = \sum_j w_j * \exp(i*(\omega_j * t + \phi_j))

        with :math:`\omega_j = 2 * \pi * f_j`, and frequency
        :math:`f_j` where :math:`f_j` is the j'th value in `freq`. The
        value of :math:`\phi_j` is the j'th value in `phases`

        `signal` is a scalar if `t` is a scalar, and and array if `t`
        is an array

    Notes:
        `freq_unit` can be Hz (GHz, MHz, etc), describing the frequency
        directly, or any energy unit, in which case the energy value E
        (given through the freq parameter) is converted to an actual
        frequency as

        .. math::
            f = E / (\hbar * 2 * \pi)
    '''
    convert = UnitConvert().convert
    if np.isscalar(t):
        sig = 0.0
    else:
        sig = np.zeros(len(t), dtype=np.complex128)
        assert isinstance(t, np.ndarray), "t must be numpy array"
        assert t.dtype.type is np.float64, "t must be double precision real"
    # conversion factor so that freq * t comes out in internal units
    c = convert(1, time_unit, 'iu') * convert(1, freq_unit, 'iu')
    if np.isscalar(freq):
        if complex:
            sig += np.exp(1j * c * freq * t)  # element-wise
        else:
            sig += np.cos(c * freq * t)  # element-wise
    else:
        eps = 1.0e-16  # machine precision
        if weights is None:
            weights = np.ones(len(freq))
        if phases is None:
            phases = np.zeros(len(freq))
        norm = float(sum(weights))
        if norm > eps:
            for w, weight, phase in zip(freq, weights, phases):
                if weight <= eps:
                    # weights below machine precision count as zero
                    continue
                weight = weight / norm
                arg = c * w * t + phase * np.pi
                if complex:
                    sig += weight * np.exp(1j * arg)
                else:
                    sig += weight * np.cos(arg)
    return sig
def CRAB_carrier(
    t, time_unit, freq, freq_unit, a, b, normalize=False, complex=False
):
    r"""Construct a "carrier" based on the CRAB formula

    .. math::
        E(t) = \sum_{n} (a_n \cos(\omega_n t) + b_n \cos(\omega_n t))

    where :math:`a_n` is the n'th element of `a`, :math:`b_n` is the n'th
    element of `b`, and :math:`\omega_n` is the n'th element of freq.

    Args:
        t (numpy.ndarray): time grid values
        time_unit (str): Unit of `t`
        freq (numpy.ndarray): Carrier frequency or frequencies
        freq_unit (str): Unit of `freq`
        a (numpy.ndarray): Coefficients for cosines
        b (numpy.ndarray): Coefficients for sines
        normalize (bool): If True, normalize the resulting carrier
            such that its values are in [-1,1]
        complex (bool): If True, oscillate in the complex
            plane

            .. math::
                E(t) = \sum_{n} (a_n - i b_n) \exp(i \omega_n t)

    Notes:
        `freq_unit` can be Hz (GHz, MHz, etc), describing the frequency
        directly, or any energy unit, in which case the energy value E
        (given through the freq parameter) is converted to an actual
        frequency as

        .. math::
            f = E / (\hbar * 2 * \pi)
    """
    convert = UnitConvert().convert
    # conversion factor so that freq * t comes out in internal units
    c = convert(1, time_unit, 'iu') * convert(1, freq_unit, 'iu')
    assert (
        len(a) == len(b) == len(freq)
    ), "freq, a, b must all be of the same length"
    if complex:
        signal = np.zeros(len(t), dtype=np.complex128)
        for w_n, a_n, b_n in zip(freq, a, b):
            signal += (a_n - 1j * b_n) * np.exp(1j * c * w_n * t)
    else:
        signal = np.zeros(len(t), dtype=np.float64)
        for w_n, a_n, b_n in zip(freq, a, b):
            signal += a_n * np.cos(c * w_n * t) + b_n * np.sin(c * w_n * t)
    if normalize:
        nrm = np.abs(signal).max()
        if nrm > 1.0e-16:
            signal *= 1.0 / nrm
    return signal
def gaussian(t, t0, sigma):
    """Evaluate a Gaussian shape with peak amplitude 1.0 at `t0`.

    Parameters:
        t (float, numpy.ndarray): time value or grid
        t0 (float): center of peak
        sigma (float): width of Gaussian

    Returns:
        (float, numpy.ndarray): Gaussian shape of same type as `t`
    """
    deviation = t - t0
    return np.exp(-(deviation ** 2) / (2 * sigma ** 2))
@np.vectorize
def box(t, t_start, t_stop):
    """Return a box-shape (Theta-function) that is one on the closed
    interval [`t_start`, `t_stop`] and zero elsewhere.

    Parameters:
        t (scalar, numpy.ndarray): Time point or time grid
        t_start (scalar): First value of `t` for which the box has value 1
        t_stop (scalar): Last value of `t` for which the box has value 1

    Returns:
        box_shape (numpy.ndarray(float)): If `t` is an array, `box_shape`
        is an array of the same size as `t` If `t` is scalar, `box_shape`
        is an array of size 1 (which for all intents and purposes can be
        used like a float)
    """
    if t < t_start or t > t_stop:
        return 0.0
    return 1.0
def blackman(t, t_start, t_stop, a=0.16):
    """Return a Blackman function between `t_start` and `t_stop`,
    see http://en.wikipedia.org/wiki/Window_function#Blackman_windows

    A Blackman shape looks nearly identical to a Gaussian with a 6-sigma
    interval between start and stop. Unlike the Gaussian, however, it goes
    exactly to zero at the edges, which often makes Blackman pulses
    preferable to Gaussians.

    Parameters:
        t (float, numpy.ndarray): Time point or time grid
        t_start (float): Starting point of Blackman shape
        t_stop (float): End point of Blackman shape

    Returns:
        (float, numpy.ndarray(float)):
        If `t` is a scalar, the scalar value of the Blackman shape at `t`.
        If `t` is an array, an array of same size as `t`, containing the
        values for the Blackman shape (zero before `t_start` and after
        `t_stop`)

    See Also:
        numpy.blackman
    """
    T = t_stop - t_start
    # the multiplication with box() zeroes everything outside
    # [t_start, t_stop]
    shape = 0.5 * (
        1.0
        - a
        - np.cos(2.0 * np.pi * (t - t_start) / T)
        + a * np.cos(4.0 * np.pi * (t - t_start) / T)
    )
    return shape * box(t, t_start, t_stop)
@np.vectorize
def flattop(t, t_start, t_stop, t_rise, t_fall=None):
    """Return flattop shape, starting at `t_start` with a sine-squared
    ramp that goes to 1 within `t_rise`, and ramps down to 0 again within
    `t_fall` from `t_stop`

    Parameters:
        t (scalar, numpy.ndarray): Time point or time grid
        t_start (scalar): Start of flattop window
        t_stop (scalar): Stop of flattop window
        t_rise (scalar): Duration of ramp-up, starting at `t_start`
        t_fall (scalar): Duration of ramp-down, ending at `t_stop`.
            If not given, `t_fall=t_rise`.

    Returns:
        flattop_shape (numpy.ndarray(float)): If `t` is an array,
        `flattop_shape` is an array of the same size as `t` If `t` is
        scalar, `flattop_shape` is an array of size 1 (which for all
        intents and purposes can be used like a float)
    """
    if not (t_start <= t <= t_stop):
        return 0.0
    if t_fall is None:
        t_fall = t_rise
    if t <= t_start + t_rise:
        # sine-squared ramp up from t_start
        return np.sin(np.pi * (t - t_start) / (2.0 * t_rise)) ** 2
    if t >= t_stop - t_fall:
        # sine-squared ramp down towards t_stop
        return np.sin(np.pi * (t - t_stop) / (2.0 * t_fall)) ** 2
    return 1.0
| 37.10111 | 79 | 0.548057 | """Module containing the :class:`Pulse` class and functions for initializing
pulse shapes."""
import logging
import re
from collections.abc import MutableMapping
import matplotlib.pyplot as plt
import numpy as np
import scipy.fftpack
from matplotlib.gridspec import GridSpec
from numpy.fft import fft, fftfreq
from scipy import signal
from scipy.interpolate import UnivariateSpline
from .io import open_file, writetotxt
from .linalg import iscomplexobj
from .units import UnitConvert, UnitFloat
class _PulseConfigAttribs(MutableMapping):
"""Custom ordered dict of config file attributes of pulses.
The 'type' key is fixed to the value 'file', and the keys listed in
`synchronized_keys` are linked to the corresponding attribute of
the parent pulse. Furthermore, the value of the 'is_complex' key is linked
to the type of the amplitude attribute of the parent pulse.
Args:
parent (Pulse): The pulse to which the settings apply
"""
_synchronized_keys = ['time_unit', 'ampl_unit']
_read_only_keys = ['type', 'is_complex']
_required_keys = [
'id',
'type',
'filename',
'time_unit',
'ampl_unit',
'is_complex',
]
def __init__(self, parent):
self._parent = parent
self._keys = list(self._required_keys) # copy
# the 'filename' and 'id' values are set to an "invalid" value on
# purpose: if written to config file without overriding them, Fortran
# will complain
self._data = {'id': -1, 'type': 'file', 'filename': ''}
def __setitem__(self, key, value):
if key in self._read_only_keys:
if value != self[key]:
# Not allowing to reset read-only-keys to their same value
# would make serialization difficult
raise ValueError("'%s' setting is read-only" % key)
elif key in self._synchronized_keys:
if value != self[key]:
setattr(self._parent, key, value)
else:
if key not in self._data:
self._keys.append(key)
self._data[key] = value
def __getitem__(self, key):
if key == 'is_complex':
return self._parent.is_complex
elif key in self._synchronized_keys:
return getattr(self._parent, key)
else:
return self._data[key]
def __delitem__(self, key):
if key in self._required_keys:
raise ValueError("Cannot delete key %s" % key)
else:
del self._data[key]
self._keys.remove(key)
def __iter__(self):
return iter(self._keys)
def __len__(self):
return len(self._keys)
def __str__(self):
items = ["(%r, %r)" % (key, val) for (key, val) in self.items()]
return "[%s]" % (", ".join(items))
def copy(self):
"""Shallow copy of object"""
c = _PulseConfigAttribs(self._parent)
c._data = self._data.copy()
c._keys = list(self._keys)
return c
def __copy__(self):
return self.copy()
class Pulse:
"""Numerical real or complex control pulse
Args:
tgrid (numpy.ndarray(float)):
Time grid values
amplitude (numpy.ndarray(float), numpy.ndarray(complex)):
Amplitude values. If not given, amplitude will be zero
time_unit (str): Unit of values in `tgrid`. Will be ignored when
reading from file.
ampl_unit (str): Unit of values in `amplitude`. Will be ignored when
reading from file.
freq_unit (str): Unit of frequencies when calculating spectrum. If not
given, an appropriate unit based on `time_unit` will be chosen, if
possible (or a `TypeError` will be raised.
Attributes:
tgrid (numpy.ndarray(float)): time points at which the pulse values
are defined, from ``t0 + dt/2`` to ``T - dt/2``.
amplitude (numpy.ndarray(float), numpy.ndarray(complex)): array
of real or complex pulse values.
time_unit (str): Unit of values in `tgrid`
ampl_unit (str): Unit of values in `amplitude`
freq_unit (str): Unit to use for frequency when calculating the
spectrum
preamble (list): List of lines that are written before the header
when writing the pulse to file. Each line should start with '# '
postamble (list): List of lines that are written after all data
lines. Each line should start with '# '
config_attribs (dict): Additional config data, for when generating a
QDYN config file section describing the pulse (e.g.
`{'oct_shape': 'flattop', 't_rise': '10_ns'}`)
Class Attributes:
unit_convert (QDYN.units.UnitConvert): converter to be used for any
unit conversion within any methods
Example:
>>> tgrid = pulse_tgrid(10, 100)
>>> amplitude = 100 * gaussian(tgrid, 50, 10)
>>> p = Pulse(tgrid=tgrid, amplitude=amplitude, time_unit='ns',
... ampl_unit='MHz')
>>> p.write('pulse.dat')
>>> p2 = Pulse.read('pulse.dat')
>>> from os import unlink; unlink('pulse.dat')
Notes:
It is important to remember that the QDYN Fortran library considers
pulses to be defined on the intervals of the propagation time grid
(i.e. for a time grid with n time steps of dt, the pulse will have n-1
points, defined at points shifted by dt/2)
The `pulse_tgrid` and `tgrid_from_config` routine may be used to obtain
the proper pulse time grid from the propagation time grid::
>>> import numpy as np
>>> p = Pulse(tgrid=pulse_tgrid(10, 100), ampl_unit='MHz',
... time_unit='ns')
>>> len(p.tgrid)
99
>>> print(str(p.dt))
0.10101_ns
>>> p.t0
0
>>> print("%.5f" % p.tgrid[0])
0.05051
>>> print(str(p.T))
10_ns
>>> print("%.5f" % p.tgrid[-1])
9.94949
The type of the `amplitude` (not whether there is a non-zero
imaginary part) decide whether the pulse is considered real or complex.
Complex pulses are not allowed to couple to Hermitian operators, and
in an optimization, both the real and imaginary part of the pulse are
modified.
"""
unit_convert = UnitConvert()
freq_units = { # map time_unit to most suitable freq_unit
'ns': 'GHz',
'ps': 'cminv',
'fs': 'eV',
'microsec': 'MHz',
'au': 'au',
'iu': 'iu',
'unitless': 'unitless',
'dimensionless': 'dimensionless',
}
def __init__(
self,
tgrid,
amplitude=None,
time_unit=None,
ampl_unit=None,
freq_unit=None,
config_attribs=None,
):
tgrid = np.array(tgrid, dtype=np.float64)
if amplitude is None:
amplitude = np.zeros(len(tgrid))
if iscomplexobj(amplitude):
amplitude = np.array(amplitude, dtype=np.complex128)
else:
amplitude = np.array(amplitude, dtype=np.float64)
self.tgrid = tgrid
self.amplitude = amplitude
if time_unit is None:
raise TypeError("time_unit must be given as a string")
else:
self.time_unit = time_unit
if ampl_unit is None:
raise TypeError("ampl_unit must be given as a string")
else:
self.ampl_unit = ampl_unit
self.preamble = []
self.postamble = []
self.freq_unit = freq_unit
if freq_unit is None:
try:
self.freq_unit = self.freq_units[self.time_unit]
except KeyError:
raise TypeError("freq_unit must be specified")
self.config_attribs = _PulseConfigAttribs(self)
if config_attribs is not None:
for key in config_attribs:
self.config_attribs[key] = config_attribs[key]
self._check()
def __eq__(self, other):
"""Compare two pulses, within a precision of 1e-12"""
if not isinstance(other, self.__class__):
return False
public_attribs = [
'is_complex',
'time_unit',
'ampl_unit',
'freq_unit',
'preamble',
'postamble',
'config_attribs',
]
for attr in public_attribs:
if getattr(self, attr) != getattr(other, attr):
return False
try:
if np.max(np.abs(self.tgrid - other.tgrid)) > 1.0e-12:
return False
if np.max(np.abs(self.amplitude - other.amplitude)) > 1.0e-12:
return False
except ValueError:
return False
return True
def copy(self):
"""Return a copy of the pulse"""
return self.__class__(
self.tgrid,
self.amplitude,
time_unit=self.time_unit,
ampl_unit=self.ampl_unit,
freq_unit=self.freq_unit,
config_attribs=self.config_attribs,
)
def __copy__(self):
return self.copy()
def _check(self):
"""Assert self-consistency of pulse"""
assert self.tgrid is not None, "Pulse is not initialized"
assert self.amplitude is not None, "Pulse is not initialized"
assert isinstance(self.tgrid, np.ndarray), "tgrid must be numpy array"
assert isinstance(
self.amplitude, np.ndarray
), "amplitude must be numpy array"
assert (
self.tgrid.dtype.type is np.float64
), "tgrid must be double precision"
assert self.amplitude.dtype.type in [
np.float64,
np.complex128,
], "amplitude must be double precision"
assert len(self.tgrid) == len(
self.amplitude
), "length of tgrid and amplitudes do not match"
assert self.ampl_unit in self.unit_convert.units, (
"Unknown ampl_unit %s" % self.ampl_unit
)
assert self.time_unit in self.unit_convert.units, (
"Unknown time_unit %s" % self.time_unit
)
assert self.freq_unit in self.unit_convert.units, (
"Unknown freq_unit %s" % self.freq_unit
)
@classmethod
def read(
cls,
filename,
time_unit=None,
ampl_unit=None,
freq_unit=None,
ignore_header=False,
):
"""Read a pulse from file, in the format generated by the QDYN
``write_pulse`` routine.
Parameters:
filename (str): Path and name of file from which to read the pulse
time_unit (str or None): The unit of the time grid
ampl_unit (str or None): The unit of the pulse amplitude.
freq_unit (str or None): Intended value for the `freq_unit`
attribute. If None, a `freq_unit` will be chosen automatically,
if possible (or a `TypeError` will be raised)
ignore_header (bool): If True, the file header will be ignored.
Note:
By default, the file is assumed to contain a header that
identifies the columns and their units, as a comment line
immediately preceding the data. If `time_unit` or `ampl_unit` are
None, and `ignore_header` is False, the respective unites are
extracted from the header line. If `time_unit` or `ampl_unit` are
not None, the respective values will be converted from the unit
specified in the file header. If `ignore_header` is True, both
`time_unit` and `ampl_unit` must be given. This can be used to read
in pulses that were not generated by the QDYN ``write_pulse``
routine. Note that if `ignore_header` is True, *all* comment lines
preceding the data will be included in the `preamble` attribute.
The `write` method allows to restore *exactly* the original pulse
file.
"""
logger = logging.getLogger(__name__)
header_rx = {
'complex': re.compile(
r'''
^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
Re\((ampl|E)\) \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*
Im\((ampl|E)\) \s* \[(\w+)\]\s*$''',
re.X | re.I,
),
'real': re.compile(
r'''
^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
Re\((ampl|E)\) \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*$''',
re.X | re.I,
),
'abs': re.compile(
r'''
^\#\s*t(ime)? \s* \[\s*(?P<time_unit>\w+)\s*\]\s*
(Abs\()?(ampl|E)(\))? \s* \[\s*(?P<ampl_unit>\w+)\s*\]\s*$''',
re.X | re.I,
),
}
try:
t, x, y = np.genfromtxt(filename, unpack=True, dtype=np.float64)
except ValueError:
t, x = np.genfromtxt(filename, unpack=True, dtype=np.float64)
y = None
preamble = []
postamble = []
with open_file(filename) as in_fh:
in_preamble = True
for line in in_fh:
if line.startswith('#'):
if in_preamble:
preamble.append(line.strip())
else:
postamble.append(line.strip())
else:
if in_preamble:
in_preamble = False
# the last line of the preamble *must* be the header line. We will
# process it and remove it from preamble
mode = None
file_time_unit = None
file_ampl_unit = None
if ignore_header:
mode = 'complex'
if y is None:
mode = 'real'
else:
try:
header_line = preamble.pop()
except IndexError:
raise IOError("Pulse file does not contain a preamble")
for file_mode, pattern in header_rx.items():
match = pattern.match(header_line)
if match:
mode = file_mode
file_time_unit = match.group('time_unit')
file_ampl_unit = match.group('ampl_unit')
break
if mode is None:
logger.warning(
"Non-standard header in pulse file."
"Check that pulse was read with correct units"
)
if y is None:
mode = 'real'
else:
mode = 'complex'
free_pattern = re.compile(
r'''
^\# .* \[\s*(?P<time_unit>\w+)\s*\]
.* \[\s*(?P<ampl_unit>\w+)\s*\]''',
re.X,
)
match = free_pattern.search(header_line)
if match:
file_time_unit = match.group('time_unit')
file_ampl_unit = match.group('ampl_unit')
logger.info("Identify time_unit = %s", file_time_unit)
logger.info("Identify ampl_unit = %s", file_ampl_unit)
if file_time_unit is None or file_ampl_unit is None:
raise ValueError("Could not identify units from header")
if mode == 'abs':
amplitude = x
elif mode == 'real':
amplitude = x
elif mode == 'complex':
amplitude = x + 1j * y
else:
raise ValueError("mode must be 'abs', 'real', or 'complex'")
if not ignore_header:
if time_unit is None:
time_unit = file_time_unit
else:
t = cls.unit_convert.convert(t, file_time_unit, time_unit)
if ampl_unit is None:
ampl_unit = file_ampl_unit
else:
amplitude = cls.unit_convert.convert(
amplitude, file_ampl_unit, ampl_unit
)
pulse = cls(
tgrid=t,
amplitude=amplitude,
time_unit=time_unit,
ampl_unit=ampl_unit,
freq_unit=freq_unit,
)
pulse.preamble = preamble
pulse.postamble = postamble
return pulse
@classmethod
def from_func(
cls,
tgrid,
func,
time_unit=None,
ampl_unit=None,
freq_unit=None,
config_attribs=None,
):
"""Instantiate a pulse from an amplitude function `func`.
All other parameters are passed on to `__init__`
"""
amplitude = [func(t) for t in tgrid]
return cls(
tgrid,
amplitude=amplitude,
time_unit=time_unit,
ampl_unit=ampl_unit,
freq_unit=freq_unit,
config_attribs=config_attribs,
)
@property
def dt(self):
"""Time grid step, as instance of `UnitFloat`"""
return UnitFloat(self.tgrid[1] - self.tgrid[0], unit=self.time_unit)
@property
def t0(self):
"""Time at which the pulse begins (dt/2 before the first point in the
pulse), as instance of `UnitFloat`
"""
result = self.tgrid[0] - 0.5 * float(self.dt)
if abs(result) < 1.0e-15 * self.tgrid[-1]:
result = 0.0
return UnitFloat(result, self.time_unit)
@property
def states_tgrid(self):
"""Time grid values for the states propagated under the numerical pulse
values, as numpy array in units of :attr:`time_unit`.
The returned time grid has one point more than :attr:`tgrid`, and
extends from :attr:`t0` to :attr:`T` (inclusive).
"""
return np.linspace(float(self.t0), float(self.T), len(self.tgrid) + 1)
@property
def w_max(self):
"""Maximum frequency that can be represented with the
current sampling rate.
"""
n = len(self.tgrid)
dt = float(self.unit_convert.convert(self.dt, self.time_unit, 'iu'))
if n % 2 == 1:
# odd
w_max = ((n - 1) * np.pi) / (n * dt)
else:
# even
w_max = np.pi / dt
return self.unit_convert.convert(w_max, 'iu', self.freq_unit)
@property
def dw(self):
"""Step width in the spectrum (i.e. the spectral resolution)
based on the current pulse duration, as an instance of
:class:`~qdyn.units.UnitFloat`.
"""
n = len(self.tgrid)
w_max = self.w_max
if n % 2 == 1:
# odd
return 2.0 * w_max / float(n - 1)
else:
# even
return 2.0 * w_max / float(n)
@property
def T(self):
"""Time at which the pulse ends (dt/2 after the last point in the
pulse), as an instance of :class:`~qdyn.units.UnitFloat`.
"""
result = self.tgrid[-1] + 0.5 * float(self.dt)
if abs(round(result) - result) < (1.0e-15 * result):
result = round(result)
return UnitFloat(result, unit=self.time_unit)
@property
def is_complex(self):
"""Is the pulse amplitude of complex type?"""
return iscomplexobj(self.amplitude)
    def as_func(self, interpolation='linear', allow_args=False):
        """Callable that evaluates the pulse for a given time value.
        Possible values for `interpolation` are 'linear' and 'piecewise'.
        The resulting function takes an argument `t` that must be a float
        in the range [:attr:`t0`, :attr:`T`] and in units of
        :attr:`time_unit`). It returns the
        (interpolated) pulse amplitude as a float, in units of
        :attr:`ampl_unit`
        If `allow_args` is True, the resulting function takes a second argument
        `args` that is ignored. This is for compatibility with qutip, see
        http://qutip.org/docs/latest/guide/dynamics/dynamics-time.html.
        """
        t0 = float(self.t0)
        T = float(self.T)
        dt = float(self.dt)
        # The pulse time grid is shifted by dt/2 relative to the states' time
        # grid; `offset` is the time of the first pulse sample
        offset = t0 + 0.5 * dt
        def func_linear(t):
            """linear interpolation of pulse amplitude"""
            if t0 <= float(t) <= T:
                t = float(t) - offset
                # index of the pulse sample at or before t (clamped to 0 for
                # values before the first sample)
                n = max(int(t / dt), 0)
                # fractional position of t between samples n and n+1
                delta = max(t - n * dt, 0.0) / dt
                try:
                    return (1 - delta) * self.amplitude[
                        n
                    ] + delta * self.amplitude[n + 1]
                except IndexError:  # last n
                    return self.amplitude[n]
            else:
                raise ValueError(
                    "Value t=%g not in range [%g, %g]" % (t, t0, T)
                )
        def func_piecewise(t):
            """piecewise interpolation of pulse amplitude"""
            if t0 <= float(t) <= T:
                t = float(t) - offset
                n = max(int(t / dt), 0)
                delta = max(t - n * dt, 0.0) / dt
                # return the nearest sample (round down to n or up to n+1)
                if delta < 0.5:
                    return self.amplitude[n]
                else:
                    try:
                        return self.amplitude[n + 1]
                    except IndexError:  # last n
                        return self.amplitude[n]
            else:
                raise ValueError(
                    "Value t=%g not in range [%g, %g]" % (t, t0, T)
                )
        def _attach_args(func):
            # wrap `func` so that it accepts (and ignores) a second `args`
            # parameter, as expected by qutip's time-dependent interface
            def func_with_args(t, args):
                return func(t)
            return func_with_args
        func_map = {'linear': func_linear, 'piecewise': func_piecewise}
        try:
            if allow_args:
                return _attach_args(func_map[interpolation])
            else:
                return func_map[interpolation]
        except KeyError:
            raise ValueError(
                "Invalid interpolation not in %s: %s"
                % (str(list(func_map.keys())), interpolation)
            )
def convert(self, time_unit=None, ampl_unit=None, freq_unit=None):
"""Convert the pulse data to different units"""
if time_unit is not None:
factor = self.unit_convert.convert(1.0, self.time_unit, time_unit)
self.tgrid *= factor
self.time_unit = time_unit
if ampl_unit is not None:
factor = self.unit_convert.convert(1.0, self.ampl_unit, ampl_unit)
self.amplitude *= factor
self.ampl_unit = ampl_unit
if freq_unit is not None:
self.freq_unit = freq_unit
self._check()
def get_timegrid_point(self, t, move="left"):
"""Return the next point to the left (or right) of the given `t` which
is on the pulse time grid
"""
t_start = self.tgrid[0]
t_stop = self.tgrid[-1]
dt = self.dt
if t < t_start:
return t_start
if t > t_stop:
return t_stop
if move == "left":
n = np.floor((t - t_start) / dt)
else:
n = np.ceil((t - t_start) / dt)
return t_start + n * dt
@property
def fluence(self):
"""Fluence (integrated pulse energy) for the pulse
.. math:: \\int_{-\\infty}^{\\infty} \\vert|E(t)\\vert^2 dt
"""
return np.sum(self.amplitude ** 2) * float(self.dt)
@property
def oct_iter(self):
"""OCT iteration number from the pulse preamble, if available. If not
available, 0"""
iter_rx = re.compile(r'OCT iter[\s:]*(\d+)', re.I)
for line in self.preamble:
iter_match = iter_rx.search(line)
if iter_match:
return int(iter_match.group(1))
return 0
def spectrum(self, freq_unit=None, mode='complex', sort=False):
"""Calculate the spectrum of the pulse
Parameters:
freq_unit (str): Desired unit of the `freq` output array.
Can Hz (GHz, Mhz, etc) to obtain frequencies, or any energy
unit, using the correspondence ``f = E/h``. If not given,
defaults to the `freq_unit` attribute
mode (str): Wanted mode for `spectrum` output array.
Possible values are 'complex', 'abs', 'real', 'imag'
sort (bool): Sort the output `freq` array (and the output
`spectrum` array) so that frequecies are ordered from
``-w_max .. 0 .. w_max``, instead of the direct output from the
FFT. This is good for plotting, but does not allow to do an
inverse Fourier transform afterwards
Returns:
numpy.ndarray(float), numpy.ndarray(complex): Frequency values
associated with the amplitude values in `spectrum`, i.e. the x-axis
of the spectrogram. The values are in the unit `freq_unit`.
Real (`mode in ['abs', 'real', 'imag']`) or complex
(`mode='complex'`) amplitude of each frequency component.
Notes:
If `sort=False` and `mode='complex'`, the original pulse
values can be obtained by simply calling `np.fft.ifft`
The spectrum is not normalized (Scipy follows the convention of
doing the normalization on the backward transform). You might want
to normalized by 1/n for plotting.
"""
s = fft(self.amplitude) # spectrum amplitude
f = self.fftfreq(freq_unit=freq_unit)
modifier = {
'abs': lambda s: np.abs(s),
'real': lambda s: np.real(s),
'imag': lambda s: np.imag(s),
'complex': lambda s: s,
}
if sort:
order = np.argsort(f)
f = f[order]
s = s[order]
return f, modifier[mode](s)
def fftfreq(self, freq_unit=None):
"""Return the FFT frequencies associated with the pulse. Cf.
`numpy.fft.fftfreq`
Parameters:
freq_unit (str): Desired unit of the output array.
If not given, defaults to the `freq_unit` attribute
Returns:
numpy.ndarray(float): Frequency values associated with
the pulse time grid.
The first half of the `freq` array contains the
positive frequencies, the second half the negative frequencies
"""
if freq_unit is None:
freq_unit = self.freq_unit
n = len(self.amplitude)
dt = float(self.unit_convert.convert(self.dt, self.time_unit, 'iu'))
return self.unit_convert.convert(
fftfreq(n, d=dt / (2.0 * np.pi)), 'iu', freq_unit
)
    def derivative(self):
        """Calculate the derivative of the current pulse and return it as a new
        pulse. Note that the derivative is in units of `ampl_unit`/`time_unit`,
        but will be marked as 'unitless'.
        """
        # temporarily move self onto the unshifted time grid (one extra
        # point), which is what the spectral differentiation below expects
        self._unshift()
        T = self.tgrid[-1] - self.tgrid[0]
        # FFT-based differentiation; scipy.fftpack.diff assumes a period of
        # 2*pi, hence the rescaling by 2*pi/T
        deriv = scipy.fftpack.diff(self.amplitude) * (2.0 * np.pi / T)
        deriv_pulse = Pulse(
            tgrid=self.tgrid,
            amplitude=deriv,
            time_unit=self.time_unit,
            ampl_unit='unitless',
        )
        # restore self to the shifted (pulse) time grid, and shift the
        # derivative pulse onto the same grid before returning it
        self._shift()
        deriv_pulse._shift()
        return deriv_pulse
def phase(self, unwrap=False, s=None, derivative=False, freq_unit=None):
"""Return the pulse's complex phase, or derivative of the phase
Parameters:
unwrap (bool): If False, report the phase in ``[-pi:pi]``. If True,
the phase may take any real value, avoiding the discontinuous
jumps introduced by limiting the phase to a 2 pi interval.
s (float or None): smoothing parameter, see
:py:class:`scipy.interpolate.UnivariateSpline`. If None, no
smoothing is performed.
derivative (bool): If False, return the (smoothed) phase directly.
If True, return the derivative of the (smoothed) phase.
freq_unit (str or None): If `derivative` is True, the unit in which
the derivative should be calculated. If None, `self.freq_unit`
is used.
Note:
When calculating the derivative, some smoothing is generally
required. By specifying a smoothing parameter `s`, the phase is
smoothed through univeriate splines before calculating the
derivative.
When calculating the phase directly (instead of the derivative),
smoothing should only be used when also unwrapping the phase.
"""
phase = np.angle(self.amplitude)
if unwrap or derivative:
phase = np.unwrap(phase)
tgrid = self.unit_convert.convert(self.tgrid, self.time_unit, 'iu')
if derivative:
if freq_unit is None:
freq_unit = self.freq_unit
if s is None:
s = 1
spl = UnivariateSpline(tgrid, phase, s=s)
deriv = spl.derivative()(tgrid)
return self.unit_convert.convert(deriv, 'iu', self.freq_unit)
else: # direct phase
if s is not None:
spl = UnivariateSpline(tgrid, phase, s=s)
return spl(tgrid)
else:
return phase
def write(self, filename, mode=None):
"""Write a pulse to file, in the same format as the QDYN `write_pulse`
routine
Parameters:
filename (str): Name of file to which to write the pulse
mode (str): Mode in which to write files. Possible values
are 'abs', 'real', or 'complex'. The former two result in a
two-column file, the latter in a three-column file. If not
given, 'real' or 'complex' is used, depending on the type of
:attr:`amplitude`
"""
if mode is None:
if iscomplexobj(self.amplitude):
mode = 'complex'
else:
mode = 'real'
self._check()
preamble = self.preamble
if not hasattr(preamble, '__getitem__'):
preamble = [str(preamble)]
postamble = self.postamble
if not hasattr(postamble, '__getitem__'):
postamble = [str(postamble)]
buffer = ''
# preamble
for line in preamble:
line = str(line).strip()
if line.startswith('#'):
buffer += "%s\n" % line
else:
buffer += '# %s\n' % line
# header and data
time_header = "time [%s]" % self.time_unit
ampl_re_header = "Re(ampl) [%s]" % self.ampl_unit
ampl_im_header = "Im(ampl) [%s]" % self.ampl_unit
ampl_abs_header = "Abs(ampl) [%s]" % self.ampl_unit
if mode == 'abs':
buffer += "# %23s%25s\n" % (time_header, ampl_abs_header)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E\n" % (t, abs(self.amplitude[i]))
elif mode == 'real':
buffer += "# %23s%25s\n" % (time_header, ampl_re_header)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E\n" % (t, self.amplitude.real[i])
elif mode == 'complex':
buffer += "# %23s%25s%25s\n" % (
time_header,
ampl_re_header,
ampl_im_header,
)
for i, t in enumerate(self.tgrid):
buffer += "%25.17E%25.17E%25.17E\n" % (
t,
self.amplitude.real[i],
self.amplitude.imag[i],
)
else:
raise ValueError("mode must be 'abs', 'real', or 'complex'")
# postamble
for line in self.postamble:
line = str(line).strip()
if line.startswith('#'):
buffer += "%s\n" % line
else:
buffer += '# %s' % line
with open_file(filename, 'w') as out_fh:
out_fh.write(buffer)
def write_oct_spectral_filter(self, filename, filter_func, freq_unit=None):
"""Evaluate a spectral filter function and write the result to the file
with a given `filename`, in a format such that the file may be used for
the `oct_spectral_filter` field of a pulse in a QDYN config file. The
file will have two columns: The pulse frequencies (see `fftfreq`
method), and the value of the filter function in the range [0, 1]
Args:
filename (str): Filename of the output file
filter_func (callable): A function that takes a frequency values
(in units of `freq_unit`) and returns a filter value in the
range [0, 1]
freq_unit (str): Unit of frequencies that `filter_func`
assumes. If not given, defaults to the `freq_unit` attribute.
Note:
The `filter_func` function may return any values that numpy
considers equivalent to floats in the range [0, 1]. This
includes boolean values, where True is equivalent to 1.0 and
False is equivalent to 0.0
"""
if freq_unit is None:
freq_unit = self.freq_unit
freqs = self.fftfreq(freq_unit=freq_unit)
filter = np.array([filter_func(f) for f in freqs], dtype=np.float64)
if not (0 <= np.min(filter) <= 1 and 0 <= np.max(filter) <= 1):
raise ValueError("filter values must be in the range [0, 1]")
header = "%15s%15s" % ("freq [%s]" % freq_unit, 'filter')
writetotxt(filename, freqs, filter, fmt='%15.7e%15.12f', header=header)
def apply_spectral_filter(self, filter_func, freq_unit=None):
"""Apply a spectral filter function to the pulse (in place)
Args:
filter_func (callable): A function that takes a frequency values
(in units of `freq_unit`) and returns a filter value in the
range [0, 1]
freq_unit (str): Unit of frequencies that `filter_func`
assumes. If not given, defaults to the `freq_unit` attribute.
"""
freqs, spec = self.spectrum(freq_unit=freq_unit)
filter = np.array([filter_func(f) for f in freqs], dtype=np.float64)
if not (0 <= np.min(filter) <= 1 and 0 <= np.max(filter) <= 1):
raise ValueError("filter values must be in the range [0, 1]")
spec *= filter
self.amplitude = np.fft.ifft(spec)
return self
def apply_smoothing(self, **kwargs):
"""Smooth the pulse amplitude (in place) through univariate splining.
All keyword arguments are passed directly to
:py:class:`scipy.interpolate.UnivariateSpline`. This especially
includes the smoothing parameter `s`.
"""
if iscomplexobj(self.amplitude):
splx = UnivariateSpline(self.tgrid, self.amplitude.real, **kwargs)
sply = UnivariateSpline(self.tgrid, self.amplitude.imag, **kwargs)
self.amplitude = splx(self.tgrid) + 1.0j * sply(self.tgrid)
else:
spl = UnivariateSpline(self.tgrid, self.amplitude, **kwargs)
self.amplitude = spl(self.tgrid)
return self
def _unshift(self):
"""Move the pulse onto the unshifted time grid. This increases the
number of points by one"""
tgrid_new = np.linspace(
float(self.t0), float(self.T), len(self.tgrid) + 1
)
pulse_new = np.zeros(
len(self.amplitude) + 1, dtype=self.amplitude.dtype.type
)
pulse_new[0] = self.amplitude[0]
for i in range(1, len(pulse_new) - 1):
pulse_new[i] = 0.5 * (self.amplitude[i - 1] + self.amplitude[i])
pulse_new[-1] = self.amplitude[-1]
self.tgrid = tgrid_new
self.amplitude = pulse_new
self._check()
    def _shift(self, data=None):
        """Inverse of _unshift: move from the unshifted (states) time grid
        back onto the pulse time grid, shifted by dt/2 and with one point
        less. If `data` is None, shift `self.amplitude` and `self.tgrid` in
        place; otherwise leave `self` unchanged and return the shifted
        version of `data`."""
        dt = float(self.dt)
        # new grid: midpoints of the old grid intervals (one point less)
        tgrid_new = np.linspace(
            self.tgrid[0] + dt / 2.0,
            self.tgrid[-1] - dt / 2.0,
            len(self.tgrid) - 1,
        )
        if data is None:
            data_old = self.amplitude
        else:
            data_old = data
        data_new = np.zeros(len(data_old) - 1, dtype=data_old.dtype.type)
        data_new[0] = data_old[0]
        # invert the pairwise averaging done in _unshift; each value depends
        # on the previously computed one, so this recurrence is inherently
        # sequential and cannot simply be vectorized
        for i in range(1, len(data_new) - 1):
            data_new[i] = 2.0 * data_old[i] - data_new[i - 1]
        data_new[-1] = data_old[-1]
        if data is None:
            self.tgrid = tgrid_new
            self.amplitude = data_new
            self._check()
        else:
            return data_new
def resample(self, upsample=None, downsample=None, num=None, window=None):
"""Resample the pulse, either by giving an upsample ratio, a downsample
ration, or a number of sampling points
Parameters:
upsample (int): Factor by which to increase the number of
samples. Afterwards, those points extending beyond the original
end point of the pulse are discarded.
downsample (int): For ``downsample=n``, keep only every
n'th point of the original pulse. This may cause the resampled
pulse to end earlier than the original pulse
num (int): Resample with `num` sampling points. This may
case the end point of the resampled pulse to change
window (list, numpy.ndarray, callable, str, float, or tuple):
Specifies the window applied to the signal in the Fourier
domain. See `sympy.signal.resample`.
Notes:
Exactly one of `upsample`, `downsample`, or `num` must be given.
Upsampling will maintain the pulse start and end point (as returned
by the `T` and `t0` properties), up to some rounding errors.
Downsampling, or using an arbitrary number will change the end
point of the pulse in general.
"""
self._unshift()
nt = len(self.tgrid)
if sum([(x is not None) for x in [upsample, downsample, num]]) != 1:
raise ValueError(
"Exactly one of upsample, downsample, or num must be given"
)
if num is None:
if upsample is not None:
upsample = int(upsample)
num = nt * upsample
elif downsample is not None:
downsample = int(downsample)
assert downsample > 0, "downsample must be > 0"
num = nt / downsample
else:
num = nt
else:
num = num + 1 # to account for shifting
a, t = signal.resample(self.amplitude, num, self.tgrid, window=window)
if upsample is not None:
# discard last (upsample-1) elements
self.amplitude = a[: -(upsample - 1)]
self.tgrid = t[: -(upsample - 1)]
else:
self.amplitude = a
self.tgrid = t
self._shift()
def render_pulse(self, ax, label='pulse'):
"""Render the pulse amplitude on the given axes."""
if np.max(np.abs(self.amplitude.imag)) > 0.0:
ax.plot(self.tgrid, np.abs(self.amplitude), label=label)
ax.set_ylabel("abs(pulse) (%s)" % self.ampl_unit)
else:
if np.min(self.amplitude.real) < 0:
ax.axhline(y=0.0, ls='-', color='black')
ax.plot(self.tgrid, self.amplitude.real, label=label)
ax.set_ylabel("pulse (%s)" % (self.ampl_unit))
ax.set_xlabel("time (%s)" % self.time_unit)
def render_phase(self, ax, label='phase'):
"""Render the complex phase of the pulse on the given axes."""
ax.axhline(y=0.0, ls='-', color='black')
ax.plot(
self.tgrid,
np.angle(self.amplitude) / np.pi,
ls='-',
color='black',
label=label,
)
ax.set_ylabel(r'phase ($\pi$)')
ax.set_xlabel("time (%s)" % self.time_unit)
def render_spectrum(
self,
ax,
zoom=True,
wmin=None,
wmax=None,
spec_scale=None,
spec_max=None,
freq_unit=None,
mark_freqs=None,
mark_freq_points=None,
label='spectrum',
):
"""Render spectrum onto the given axis, see `plot` for arguments"""
freq, spectrum = self.spectrum(
mode='abs', sort=True, freq_unit=freq_unit
)
# normalizing the spectrum makes it independent of the number of
# sampling points. That is, the spectrum of a signal that is simply
# resampled will be the same as that of the original signal. Scipy
# follows the convention of doing the normalization in the inverse
# transform
spectrum *= 1.0 / len(spectrum)
if wmax is not None and wmin is not None:
zoom = False
if zoom:
# figure out the range of the spectrum
max_amp = np.amax(spectrum)
if self.is_complex:
# we center the spectrum around zero, and extend
# symmetrically in both directions as far as there is
# significant amplitude
wmin = np.max(freq)
wmax = np.min(freq)
for i, w in enumerate(freq):
if spectrum[i] > 0.001 * max_amp:
if w > wmax:
wmax = w
if w < wmin:
wmin = w
wmax = max(abs(wmin), abs(wmax))
wmin = -wmax
else:
# we show only the positive part of the spectrum (under the
# assumption that the spectrum is symmetric) and zoom in
# only on the region that was significant amplitude
wmin = 0.0
wmax = 0.0
for i, w in enumerate(freq):
if spectrum[i] > 0.001 * max_amp:
if wmin == 0 and w > 0:
wmin = w
wmax = w
buffer = (wmax - wmin) * 0.1
# plot spectrum
if zoom:
ax.set_xlim((wmin - buffer), (wmax + buffer))
else:
if wmin is not None and wmax is not None:
ax.set_xlim(wmin, wmax)
ax.set_xlabel("frequency (%s)" % freq_unit)
ax.set_ylabel("abs(spec) (arb. un.)")
if spec_scale is None:
spec_scale = 1.0
ax.plot(
freq, spec_scale * spectrum, marker=mark_freq_points, label=label
)
if spec_max is not None:
ax.set_ylim(0, spec_max)
if mark_freqs is not None:
for freq in mark_freqs:
kwargs = {'ls': '--', 'color': 'black'}
try:
freq, kwargs = freq
except TypeError:
pass
ax.axvline(x=float(freq), **kwargs)
    def plot(
        self,
        fig=None,
        show_pulse=True,
        show_spectrum=True,
        zoom=True,
        wmin=None,
        wmax=None,
        spec_scale=None,
        spec_max=None,
        freq_unit=None,
        mark_freqs=None,
        mark_freq_points=None,
        **figargs
    ):
        """Generate a plot of the pulse on a given figure

        Parameters:
            fig (matplotlib.figure.Figure): The figure onto which to plot. If
                not given, create a new figure from `matplotlib.pyplot.figure`
            show_pulse (bool): Include a plot of the pulse amplitude? If the
                pulse has a vanishing imaginary part, the plot will show the
                real part of the amplitude, otherwise, there will be one plot
                for the absolute value of the amplitude and one showing the
                complex phase in units of pi
            show_spectrum (bool): Include a plot of the spectrum?
            zoom (bool): If `True`, only show the part of the spectrum that has
                amplitude of at least 0.1% of the maximum peak in the spectrum.
                For real pulses, only the positive part of the spectrum is
                shown
            wmin (float): Lowest frequency to show. Overrides zoom options.
                Must be given together with `wmax`.
            wmax (float): Highest frequency to show. Overrides zoom options.
                Must be given together with `wmin`.
            spec_scale (float): Factor by which to scale the amplitudes in the
                spectrum
            spec_max (float): Maximum amplitude in the spectrum, after
                spec_scale has been applied
            freq_unit (str): Unit in which to show the frequency axis in the
                spectrum. If not given, use the `freq_unit` attribute
            mark_freqs (None, list(float), list((float, dict))):
                Array of frequencies to mark in spectrum as vertical dashed
                lines. If list of tuples (float, dict), the float value is the
                frequency to mark, and the dict gives the keyword arguments
                that are passed to the matplotlib `axvline` method.
            mark_freq_points (None, ~matplotlib.markers.MarkerStyle): Marker to
                be used to indicate the individual points in the spectrum.

        The remaining figargs are passed to `matplotlib.pyplot.figure` to
        create a new figure if `fig` is None.
        """
        if fig is None:
            fig = plt.figure(**figargs)
        if freq_unit is None:
            freq_unit = self.freq_unit
        self._check()
        pulse_is_complex = self.is_complex
        # do the layout
        if show_pulse and show_spectrum:
            if pulse_is_complex:
                # show abs(pulse), phase(pulse), abs(spectrum)
                gs = GridSpec(3, 1, height_ratios=[2, 1, 2])
            else:
                # show real(pulse), abs(spectrum)
                gs = GridSpec(2, 1, height_ratios=[1, 1])
        else:
            if show_pulse:
                if pulse_is_complex:
                    # show abs(pulse), phase(pulse)
                    gs = GridSpec(2, 1, height_ratios=[2, 1])
                else:
                    # show real(pulse)
                    gs = GridSpec(1, 1)
            else:
                gs = GridSpec(1, 1)
        if show_spectrum:
            # the spectrum always occupies the last (bottom) grid cell
            ax_spectrum = fig.add_subplot(gs[-1], label='spectrum')
            self.render_spectrum(
                ax_spectrum,
                zoom,
                wmin,
                wmax,
                spec_scale,
                spec_max,
                freq_unit,
                mark_freqs,
                mark_freq_points,
            )
        if show_pulse:
            # plot pulse amplitude
            ax_pulse = fig.add_subplot(gs[0], label='pulse')
            self.render_pulse(ax_pulse)
            if pulse_is_complex:
                # plot pulse phase
                ax_phase = fig.add_subplot(gs[1], label='phase')
                self.render_phase(ax_phase)
        # leave some vertical space between the stacked subplots
        fig.subplots_adjust(hspace=0.3)
def show(self, **kwargs):
"""Show a plot of the pulse and its spectrum. All arguments will be
passed to the plot method
"""
self.plot(**kwargs) # uses plt.figure()
plt.show()
def show_pulse(self, **kwargs):
"""Show a plot of the pulse amplitude; alias for
`show(show_spectrum=False)`. All other arguments will be passed to the
`show` method
"""
self.show(show_spectrum=False, **kwargs)
def show_spectrum(self, zoom=True, freq_unit=None, **kwargs):
"""Show a plot of the pulse spectrum; alias for
`show(show_pulse=False, zoom=zoom, freq_unit=freq_unit)`. All other
arguments will be passed to the `show` method
"""
self.show(show_pulse=False, zoom=zoom, freq_unit=freq_unit, **kwargs)
def pulse_tgrid(T, nt, t0=0.0):
    """Return a pulse time grid suitable for an equidistant time grid of the
    states between t0 and T with nt intervals. The values of the pulse are
    defined in the intervals of the time grid, so the pulse time grid will be
    shifted by dt/2 with respect to the time grid of the states. Also, the
    pulse time grid will have nt-1 points:

    >>> print(", ".join([("%.2f" % t) for t in pulse_tgrid(1.5, nt=4)]))
    0.25, 0.75, 1.25

    The limits of the states time grid are defined as the starting and end
    points of the pulse, however:

    >>> p = Pulse(tgrid=pulse_tgrid(1.5, 4), time_unit='ns', ampl_unit='MHz')
    >>> p.t0
    0
    >>> p.T
    1.5_ns
    """
    dt = float(T - t0) / (nt - 1)
    # pulse samples sit on the midpoints of the states' grid intervals
    first_point = float(t0) + 0.5 * dt
    last_point = float(T) - 0.5 * dt
    return np.linspace(first_point, last_point, nt - 1)
def tgrid_from_config(tgrid_dict, time_unit, pulse_grid=True):
    """Extract the time grid from the given config file

    >>> tgrid_dict = dict([('t_start', 0.0), ('t_stop', UnitFloat(10.0, 'ns')),
    ...     ('dt', UnitFloat(20, 'ps')), ('fixed', True)])
    >>> tgrid = tgrid_from_config(tgrid_dict, time_unit='ns')
    >>> print("%.2f" % tgrid[0])
    0.01
    >>> print("%.2f" % tgrid[-1])
    9.99
    """
    if time_unit is None:
        time_unit = 'unitless'
    # any three of (t_start, t_stop, nt, dt) determine the fourth
    t_start = tgrid_dict.get('t_start')
    t_stop = tgrid_dict.get('t_stop')
    nt = tgrid_dict.get('nt')
    dt = tgrid_dict.get('dt')
    if t_start is None:
        assert (
            (t_stop is not None) and (dt is not None) and (nt is not None)
        ), "tgrid not fully specified in config"
        t_start = t_stop - (nt - 1) * dt
    if t_stop is None:
        assert (
            (t_start is not None) and (dt is not None) and (nt is not None)
        ), "tgrid not fully specified in config"
        t_stop = t_start + (nt - 1) * dt
    if nt is None:
        assert (
            (t_start is not None) and (dt is not None) and (t_stop is not None)
        ), "tgrid not fully specified in config"
        nt = int((t_stop - t_start) / dt) + 1
    if dt is None:
        assert (
            (t_start is not None) and (nt is not None) and (t_stop is not None)
        ), "tgrid not fully specified in config"
        dt = (t_stop - t_start) / float(nt - 1)
    t_start = UnitFloat(t_start).convert(time_unit)
    t_stop = UnitFloat(t_stop).convert(time_unit)
    dt = UnitFloat(dt).convert(time_unit)
    if pulse_grid:
        # convert the states' grid parameters to pulse grid parameters
        # (shift by dt/2, one point less)
        t_start += 0.5 * dt
        t_stop -= 0.5 * dt
        nt -= 1
    return np.linspace(float(t_start), float(t_stop), nt)
###############################################################################
# Shape functions
###############################################################################
def carrier(
    t, time_unit, freq, freq_unit, weights=None, phases=None, complex=False
):
    r'''Create the "carrier" of the pulse as a weighted superposition of
    cosines at different frequencies.

    Parameters:
        t (numpy.ndarray(float)): Time value or time grid
        time_unit (str): Unit of `t`
        freq (numpy.ndarray(float)): Carrier frequency or frequencies
        freq_unit (str): Unit of `freq`
        weights (numpy.ndarray): If `freq` is an array, weights for the
            different frequencies. If not given, all weights are 1. The
            weights are normalized to sum to one. Any weight smaller than
            machine precision is assumed zero.
        phases (numpy.ndarray): If given, phase shift for each frequency
            component, in units of pi. If not given, all phases are 0.
        complex (bool): If `True`, oscillate in the complex plane

    Returns:
        numpy.ndarray(complex): Depending on whether `complex` is `True` or
        `False`,

        .. math::
            s(t) = \sum_j w_j * \cos(\omega_j * t + \phi_j) \\
            s(t) = \sum_j w_j * \exp(i*(\omega_j * t + \phi_j))

        with :math:`\omega_j = 2 * \pi * f_j` for the j'th value in `freq`
        and :math:`\phi_j` the j'th value in `phases`. The result is a
        scalar if `t` is a scalar, and an array if `t` is an array.

    Notes:
        `freq_unit` can be Hz (GHz, MHz, etc), describing the frequency
        directly, or any energy unit, in which case the energy value E (given
        through the freq parameter) is converted to an actual frequency as

        .. math::
            f = E / (\hbar * 2 * \pi)
    '''
    unit_convert = UnitConvert()
    if np.isscalar(t):
        signal = 0.0
    else:
        signal = np.zeros(len(t), dtype=np.complex128)
        assert isinstance(t, np.ndarray), "t must be numpy array"
        assert t.dtype.type is np.float64, "t must be double precision real"
    # combined conversion factor from (time_unit * freq_unit) into the
    # dimensionless phase argument
    conv = unit_convert.convert(1, time_unit, 'iu') * unit_convert.convert(
        1, freq_unit, 'iu'
    )
    if np.isscalar(freq):
        if complex:
            signal += np.exp(1j * conv * freq * t)  # element-wise
        else:
            signal += np.cos(conv * freq * t)  # element-wise
    else:
        eps = 1.0e-16  # machine precision
        if weights is None:
            weights = np.ones(len(freq))
        if phases is None:
            phases = np.zeros(len(freq))
        norm = float(sum(weights))
        if norm > eps:
            for (w_j, weight, phi) in zip(freq, weights, phases):
                if weight > eps:
                    weight = weight / norm
                    arg = conv * w_j * t + phi * np.pi
                    if complex:
                        signal += weight * np.exp(1j * arg)
                    else:
                        signal += weight * np.cos(arg)
    return signal
def CRAB_carrier(
    t, time_unit, freq, freq_unit, a, b, normalize=False, complex=False
):
    r"""Construct a "carrier" based on the CRAB formula

    .. math::
        E(t) = \sum_{n} (a_n \cos(\omega_n t) + b_n \cos(\omega_n t))

    where :math:`a_n` is the n'th element of `a`, :math:`b_n` is the n'th
    element of `b`, and :math:`\omega_n` is the n'th element of freq.

    Args:
        t (numpy.ndarray): time grid values
        time_unit (str): Unit of `t`
        freq (numpy.ndarray): Carrier frequency or frequencies
        freq_unit (str): Unit of `freq`
        a (numpy.ndarray): Coefficients for cosines
        b (numpy.ndarray): Coefficients for sines
        normalize (bool): If True, normalize the resulting carrier such that
            its values are in [-1,1]
        complex (bool): If True, oscillate in the complex plane

            .. math::
                E(t) = \sum_{n} (a_n - i b_n) \exp(i \omega_n t)

    Notes:
        `freq_unit` can be Hz (GHz, MHz, etc), describing the frequency
        directly, or any energy unit, in which case the energy value E (given
        through the freq parameter) is converted to an actual frequency as

        .. math::
            f = E / (\hbar * 2 * \pi)
    """
    unit_convert = UnitConvert()
    conv = unit_convert.convert(1, time_unit, 'iu') * unit_convert.convert(
        1, freq_unit, 'iu'
    )
    assert (
        len(a) == len(b) == len(freq)
    ), "freq, a, b must all be of the same length"
    dtype = np.complex128 if complex else np.float64
    signal = np.zeros(len(t), dtype=dtype)
    for w_n, a_n, b_n in zip(freq, a, b):
        arg = conv * w_n * t
        if complex:
            signal += (a_n - 1j * b_n) * np.exp(1j * arg)
        else:
            signal += a_n * np.cos(arg) + b_n * np.sin(arg)
    if normalize:
        peak = np.abs(signal).max()
        if peak > 1.0e-16:
            signal *= 1.0 / peak
    return signal
def gaussian(t, t0, sigma):
    """Return a Gaussian shape with peak amplitude 1.0

    Parameters:
        t (float, numpy.ndarray): time value or grid
        t0 (float): center of peak
        sigma (float): width of Gaussian

    Returns:
        (float, numpy.ndarray): Gaussian shape of same type as `t`
    """
    offset = t - t0
    return np.exp(-(offset ** 2) / (2 * sigma ** 2))
@np.vectorize
def box(t, t_start, t_stop):
    """Return a box-shape (Theta-function) that is zero before `t_start` and
    after `t_stop` and one elsewhere (edges inclusive).

    Parameters:
        t (scalar, numpy.ndarray): Time point or time grid
        t_start (scalar): First value of `t` for which the box has value 1
        t_stop (scalar): Last value of `t` for which the box has value 1

    Returns:
        box_shape (numpy.ndarray(float)): If `t` is an array, `box_shape` is
        an array of the same size as `t`. If `t` is scalar, `box_shape` is an
        array of size 1 (which for all intents and purposes can be used like
        a float)
    """
    inside = (t >= t_start) and (t <= t_stop)
    return 1.0 if inside else 0.0
def blackman(t, t_start, t_stop, a=0.16):
    """Return a Blackman function between `t_start` and `t_stop`,
    see http://en.wikipedia.org/wiki/Window_function#Blackman_windows

    A Blackman shape looks nearly identical to a Gaussian with a 6-sigma
    interval between start and stop. Unlike the Gaussian, however, it goes
    exactly to zero at the edges, which often makes Blackman pulses
    preferable to Gaussians.

    Parameters:
        t (float, numpy.ndarray): Time point or time grid
        t_start (float): Starting point of Blackman shape
        t_stop (float): End point of Blackman shape
        a (float): Blackman alpha-parameter; the default 0.16 gives the
            conventional ("not very serious") Blackman window

    Returns:
        (float, numpy.ndarray(float)):
        If `t` is a scalar, the scalar value of the Blackman shape at `t`.
        If `t` is an array, an array of the same size as `t` with the values
        of the Blackman shape (zero before `t_start` and after `t_stop`)

    See Also:
        numpy.blackman
    """
    T = t_stop - t_start
    window = (
        1.0
        - a
        - np.cos(2.0 * np.pi * (t - t_start) / T)
        + a * np.cos(4.0 * np.pi * (t - t_start) / T)
    )
    # multiply by the box shape to cut off the periodic continuation
    return 0.5 * window * box(t, t_start, t_stop)
@np.vectorize
def flattop(t, t_start, t_stop, t_rise, t_fall=None):
    """Return a flattop shape, starting at `t_start` with a sine-squared ramp
    that reaches 1 within `t_rise`, and ramping down to 0 again within
    `t_fall` before `t_stop`

    Parameters:
        t (scalar, numpy.ndarray): Time point or time grid
        t_start (scalar): Start of flattop window
        t_stop (scalar): Stop of flattop window
        t_rise (scalar): Duration of ramp-up, starting at `t_start`
        t_fall (scalar): Duration of ramp-down, ending at `t_stop`.
            If not given, `t_fall=t_rise`.

    Returns:
        flattop_shape (numpy.ndarray(float)): If `t` is an array,
        `flattop_shape` is an array of the same size as `t`. If `t` is
        scalar, it is an array of size 1 (which for all intents and purposes
        can be used like a float)
    """
    if t < t_start or t > t_stop:
        return 0.0
    if t_fall is None:
        t_fall = t_rise
    if t <= t_start + t_rise:
        # sine-squared ramp up
        return np.sin(np.pi * (t - t_start) / (2.0 * t_rise)) ** 2
    if t >= t_stop - t_fall:
        # sine-squared ramp down
        return np.sin(np.pi * (t - t_stop) / (2.0 * t_fall)) ** 2
    return 1.0
| 2,940 | 0 | 301 |
580fe2ae21bafab826eedcd80fc69eafda308613 | 1,950 | py | Python | towhee/dag/utils/callstack.py | jeffoverflow/towhee | c576d22a4cdfc3909a3323b0d1decab87e83d26c | [
"Apache-2.0"
] | 1 | 2021-08-31T12:32:26.000Z | 2021-08-31T12:32:26.000Z | towhee/dag/utils/callstack.py | jeffoverflow/towhee | c576d22a4cdfc3909a3323b0d1decab87e83d26c | [
"Apache-2.0"
] | null | null | null | towhee/dag/utils/callstack.py | jeffoverflow/towhee | c576d22a4cdfc3909a3323b0d1decab87e83d26c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 29.545455 | 87 | 0.618974 | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Callstack:
    """Abstract interface for collecting and inspecting the frames of a
    callstack. Concrete subclasses must implement every method below.
    """

    def collect(self):
        """Collect the frames of the current callstack."""
        raise NotImplementedError

    def num_frames(self) -> int:
        """Return the number of collected frames."""
        raise NotImplementedError

    def find_func(self, func_name: str) -> int:
        """Find the first frame matching a function name, searching from the
        outermost frame.

        Args:
            func_name: the function name to find.

        Returns:
            The index of the first matching frame.
        """
        raise NotImplementedError

    def hash(self, start: int = None, end: int = None, items: list[str] = None) -> str:
        """Compute a hash value over a range of frames.

        Args:
            start: the index of the start frame.
            end: the index of the end frame.
            items: the items to be hashed. Supported items are {filename,
                lineno, funcname, codectx, index, lasti}, where codectx
                denotes the current line of code of the context, index
                denotes the frame's index in the callstack, and lasti denotes
                the index of the last attempted instruction in bytecode.

        Returns:
            The hash value.
        """
        raise NotImplementedError
| 0 | 1,334 | 23 |
5ad644287dc5486c8f56f3950ef25fd87286b222 | 81 | py | Python | juliaset/__init__.py | PageotD/juliaset | 7c1f98020eeff291fcf040cfcdf25a89e72f46a9 | [
"BSD-3-Clause"
] | null | null | null | juliaset/__init__.py | PageotD/juliaset | 7c1f98020eeff291fcf040cfcdf25a89e72f46a9 | [
"BSD-3-Clause"
] | null | null | null | juliaset/__init__.py | PageotD/juliaset | 7c1f98020eeff291fcf040cfcdf25a89e72f46a9 | [
"BSD-3-Clause"
] | 1 | 2021-08-09T06:45:43.000Z | 2021-08-09T06:45:43.000Z | from .juliaset import julia
from .juliaset import JuliaSet
__version__ = "0.3.0" | 20.25 | 30 | 0.777778 | from .juliaset import julia
from .juliaset import JuliaSet
__version__ = "0.3.0" | 0 | 0 | 0 |
e8d570012d3d6f62ce2e2349d25b09b740429217 | 24,567 | py | Python | src/oci/opsi/models/host_configuration_summary.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/opsi/models/host_configuration_summary.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/opsi/models/host_configuration_summary.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class HostConfigurationSummary(object):
    """
    Summary of a host configuration for a resource.

    Polymorphic base model: the JSON discriminator ``entitySource`` selects
    the concrete subtype (see :meth:`get_subtype`).
    """

    #: A constant which can be used with the entity_source property.
    #: This constant has a value of "MACS_MANAGED_EXTERNAL_HOST"
    ENTITY_SOURCE_MACS_MANAGED_EXTERNAL_HOST = "MACS_MANAGED_EXTERNAL_HOST"

    #: A constant which can be used with the entity_source property.
    #: This constant has a value of "EM_MANAGED_EXTERNAL_HOST"
    ENTITY_SOURCE_EM_MANAGED_EXTERNAL_HOST = "EM_MANAGED_EXTERNAL_HOST"

    #: A constant which can be used with the platform_type property.
    #: This constant has a value of "LINUX"
    PLATFORM_TYPE_LINUX = "LINUX"

    def __init__(self, **kwargs):
        """
        Initializes a new HostConfigurationSummary object with values from
        keyword arguments. When using this model as input to a service
        operation, favor the subclasses
        :class:`~oci.opsi.models.MacsManagedExternalHostConfigurationSummary`
        and
        :class:`~oci.opsi.models.EmManagedExternalHostConfigurationSummary`
        over this base class.

        Supported keyword arguments (one per attribute of this class):

        :param host_insight_id: OCID of the host insight resource (str)
        :param entity_source: source of the host entity (str); allowed values
            are "MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST";
            unrecognized service values map to 'UNKNOWN_ENUM_VALUE'
        :param compartment_id: OCID of the compartment (str)
        :param host_name: the host name (str)
        :param platform_type: platform type (str); allowed value is "LINUX";
            unrecognized service values map to 'UNKNOWN_ENUM_VALUE'
        :param platform_version: platform version (str)
        :param platform_vendor: platform vendor (str)
        :param total_cpus: total CPU on this host (int)
        :param total_memory_in_gbs: usable physical memory in gibabytes (float)
        :param cpu_architecture: CPU architecture (str)
        :param cpu_cache_in_mbs: size of cache memory in megabytes (float)
        :param cpu_vendor: name of the CPU vendor (str)
        :param cpu_frequency_in_mhz: processor clock frequency in MHz (float)
        :param cpu_implementation: model name of the processor (str)
        :param cores_per_socket: number of cores per socket (int)
        :param total_sockets: number of total sockets (int)
        :param threads_per_socket: number of threads per socket (int)
        :param is_hyper_threading_enabled: whether hyper-threading is on (bool)
        :param defined_tags: defined tags (dict(str, dict(str, object)))
        :param freeform_tags: free-form tags (dict(str, str))
        """
        self.swagger_types = {
            'host_insight_id': 'str',
            'entity_source': 'str',
            'compartment_id': 'str',
            'host_name': 'str',
            'platform_type': 'str',
            'platform_version': 'str',
            'platform_vendor': 'str',
            'total_cpus': 'int',
            'total_memory_in_gbs': 'float',
            'cpu_architecture': 'str',
            'cpu_cache_in_mbs': 'float',
            'cpu_vendor': 'str',
            'cpu_frequency_in_mhz': 'float',
            'cpu_implementation': 'str',
            'cores_per_socket': 'int',
            'total_sockets': 'int',
            'threads_per_socket': 'int',
            'is_hyper_threading_enabled': 'bool',
            'defined_tags': 'dict(str, dict(str, object))',
            'freeform_tags': 'dict(str, str)'
        }

        self.attribute_map = {
            'host_insight_id': 'hostInsightId',
            'entity_source': 'entitySource',
            'compartment_id': 'compartmentId',
            'host_name': 'hostName',
            'platform_type': 'platformType',
            'platform_version': 'platformVersion',
            'platform_vendor': 'platformVendor',
            'total_cpus': 'totalCpus',
            'total_memory_in_gbs': 'totalMemoryInGBs',
            'cpu_architecture': 'cpuArchitecture',
            'cpu_cache_in_mbs': 'cpuCacheInMBs',
            'cpu_vendor': 'cpuVendor',
            'cpu_frequency_in_mhz': 'cpuFrequencyInMhz',
            'cpu_implementation': 'cpuImplementation',
            'cores_per_socket': 'coresPerSocket',
            'total_sockets': 'totalSockets',
            'threads_per_socket': 'threadsPerSocket',
            'is_hyper_threading_enabled': 'isHyperThreadingEnabled',
            'defined_tags': 'definedTags',
            'freeform_tags': 'freeformTags'
        }

        # Every backing slot starts out as None; the
        # init_model_state_from_kwargs decorator then applies whichever
        # keyword arguments the caller supplied via the property setters.
        for _name in self.swagger_types:
            setattr(self, '_' + _name, None)

    @staticmethod
    def get_subtype(object_dictionary):
        """
        Given the hash representation of a subtype of this class, use the
        ``entitySource`` discriminator to return the class name of the
        subtype; unknown discriminator values resolve to the base class.
        """
        subtype_by_source = {
            'MACS_MANAGED_EXTERNAL_HOST': 'MacsManagedExternalHostConfigurationSummary',
            'EM_MANAGED_EXTERNAL_HOST': 'EmManagedExternalHostConfigurationSummary',
        }
        return subtype_by_source.get(object_dictionary['entitySource'],
                                     'HostConfigurationSummary')

    # -- property factories -------------------------------------------------
    # The twenty model attributes below are all trivial get/set pairs around
    # a ``_<name>`` backing slot, so they are generated by the two helpers
    # below instead of being spelled out one by one.  Both helpers are
    # removed from the class namespace at the end of the class body.

    def _plain_property(name, doc):
        # Build a simple get/set property backed by ``_<name>``.
        def _get(self):
            return getattr(self, '_' + name)

        def _set(self, value):
            setattr(self, '_' + name, value)

        return property(_get, _set, doc=doc)

    def _enum_property(name, allowed, doc):
        # Like _plain_property, but the setter collapses any value outside
        # ``allowed`` (other than None / the none-sentinel) to
        # 'UNKNOWN_ENUM_VALUE', mirroring service enum handling.
        def _get(self):
            return getattr(self, '_' + name)

        def _set(self, value):
            if not value_allowed_none_or_none_sentinel(value, allowed):
                value = 'UNKNOWN_ENUM_VALUE'
            setattr(self, '_' + name, value)

        return property(_get, _set, doc=doc)

    host_insight_id = _plain_property(
        'host_insight_id',
        "**[Required]** The OCID of the host insight resource. :rtype: str")

    entity_source = _enum_property(
        'entity_source',
        ["MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST"],
        "**[Required]** Source of the host entity; unrecognized service "
        "values are mapped to 'UNKNOWN_ENUM_VALUE'. :rtype: str")

    compartment_id = _plain_property(
        'compartment_id',
        "**[Required]** The OCID of the compartment. :rtype: str")

    host_name = _plain_property(
        'host_name',
        "**[Required]** The host name, unique amongst the hosts managed by "
        "the same management agent. :rtype: str")

    platform_type = _enum_property(
        'platform_type',
        ["LINUX"],
        "**[Required]** Platform type; unrecognized service values are "
        "mapped to 'UNKNOWN_ENUM_VALUE'. :rtype: str")

    platform_version = _plain_property(
        'platform_version',
        "**[Required]** Platform version. :rtype: str")

    platform_vendor = _plain_property(
        'platform_vendor',
        "**[Required]** Platform vendor. :rtype: str")

    total_cpus = _plain_property(
        'total_cpus',
        "**[Required]** Total CPU on this host. :rtype: int")

    total_memory_in_gbs = _plain_property(
        'total_memory_in_gbs',
        "**[Required]** Total amount of usable physical memory in "
        "gibabytes. :rtype: float")

    cpu_architecture = _plain_property(
        'cpu_architecture',
        "**[Required]** CPU architecture. :rtype: str")

    cpu_cache_in_mbs = _plain_property(
        'cpu_cache_in_mbs',
        "**[Required]** Size of cache memory in megabytes. :rtype: float")

    cpu_vendor = _plain_property(
        'cpu_vendor',
        "**[Required]** Name of the CPU vendor. :rtype: str")

    cpu_frequency_in_mhz = _plain_property(
        'cpu_frequency_in_mhz',
        "**[Required]** Clock frequency of the processor in megahertz. "
        ":rtype: float")

    cpu_implementation = _plain_property(
        'cpu_implementation',
        "**[Required]** Model name of processor. :rtype: str")

    cores_per_socket = _plain_property(
        'cores_per_socket',
        "**[Required]** Number of cores per socket. :rtype: int")

    total_sockets = _plain_property(
        'total_sockets',
        "**[Required]** Number of total sockets. :rtype: int")

    threads_per_socket = _plain_property(
        'threads_per_socket',
        "**[Required]** Number of threads per socket. :rtype: int")

    is_hyper_threading_enabled = _plain_property(
        'is_hyper_threading_enabled',
        "**[Required]** Indicates if hyper-threading is enabled or not. "
        ":rtype: bool")

    defined_tags = _plain_property(
        'defined_tags',
        "**[Required]** Defined tags for this resource; each key is "
        "predefined and scoped to a namespace. "
        ":rtype: dict(str, dict(str, object))")

    freeform_tags = _plain_property(
        'freeform_tags',
        "**[Required]** Simple key-value pairs applied without any "
        "predefined name, type or scope. :rtype: dict(str, str)")

    del _plain_property, _enum_property
| 34.073509 | 245 | 0.666138 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class HostConfigurationSummary(object):
"""
Summary of a host configuration for a resource.
"""
#: A constant which can be used with the entity_source property of a HostConfigurationSummary.
#: This constant has a value of "MACS_MANAGED_EXTERNAL_HOST"
ENTITY_SOURCE_MACS_MANAGED_EXTERNAL_HOST = "MACS_MANAGED_EXTERNAL_HOST"
#: A constant which can be used with the entity_source property of a HostConfigurationSummary.
#: This constant has a value of "EM_MANAGED_EXTERNAL_HOST"
ENTITY_SOURCE_EM_MANAGED_EXTERNAL_HOST = "EM_MANAGED_EXTERNAL_HOST"
#: A constant which can be used with the platform_type property of a HostConfigurationSummary.
#: This constant has a value of "LINUX"
PLATFORM_TYPE_LINUX = "LINUX"
def __init__(self, **kwargs):
"""
Initializes a new HostConfigurationSummary object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.opsi.models.MacsManagedExternalHostConfigurationSummary`
* :class:`~oci.opsi.models.EmManagedExternalHostConfigurationSummary`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param host_insight_id:
The value to assign to the host_insight_id property of this HostConfigurationSummary.
:type host_insight_id: str
:param entity_source:
The value to assign to the entity_source property of this HostConfigurationSummary.
Allowed values for this property are: "MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type entity_source: str
:param compartment_id:
The value to assign to the compartment_id property of this HostConfigurationSummary.
:type compartment_id: str
:param host_name:
The value to assign to the host_name property of this HostConfigurationSummary.
:type host_name: str
:param platform_type:
The value to assign to the platform_type property of this HostConfigurationSummary.
Allowed values for this property are: "LINUX", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type platform_type: str
:param platform_version:
The value to assign to the platform_version property of this HostConfigurationSummary.
:type platform_version: str
:param platform_vendor:
The value to assign to the platform_vendor property of this HostConfigurationSummary.
:type platform_vendor: str
:param total_cpus:
The value to assign to the total_cpus property of this HostConfigurationSummary.
:type total_cpus: int
:param total_memory_in_gbs:
The value to assign to the total_memory_in_gbs property of this HostConfigurationSummary.
:type total_memory_in_gbs: float
:param cpu_architecture:
The value to assign to the cpu_architecture property of this HostConfigurationSummary.
:type cpu_architecture: str
:param cpu_cache_in_mbs:
The value to assign to the cpu_cache_in_mbs property of this HostConfigurationSummary.
:type cpu_cache_in_mbs: float
:param cpu_vendor:
The value to assign to the cpu_vendor property of this HostConfigurationSummary.
:type cpu_vendor: str
:param cpu_frequency_in_mhz:
The value to assign to the cpu_frequency_in_mhz property of this HostConfigurationSummary.
:type cpu_frequency_in_mhz: float
:param cpu_implementation:
The value to assign to the cpu_implementation property of this HostConfigurationSummary.
:type cpu_implementation: str
:param cores_per_socket:
The value to assign to the cores_per_socket property of this HostConfigurationSummary.
:type cores_per_socket: int
:param total_sockets:
The value to assign to the total_sockets property of this HostConfigurationSummary.
:type total_sockets: int
:param threads_per_socket:
The value to assign to the threads_per_socket property of this HostConfigurationSummary.
:type threads_per_socket: int
:param is_hyper_threading_enabled:
The value to assign to the is_hyper_threading_enabled property of this HostConfigurationSummary.
:type is_hyper_threading_enabled: bool
:param defined_tags:
The value to assign to the defined_tags property of this HostConfigurationSummary.
:type defined_tags: dict(str, dict(str, object))
:param freeform_tags:
The value to assign to the freeform_tags property of this HostConfigurationSummary.
:type freeform_tags: dict(str, str)
"""
self.swagger_types = {
'host_insight_id': 'str',
'entity_source': 'str',
'compartment_id': 'str',
'host_name': 'str',
'platform_type': 'str',
'platform_version': 'str',
'platform_vendor': 'str',
'total_cpus': 'int',
'total_memory_in_gbs': 'float',
'cpu_architecture': 'str',
'cpu_cache_in_mbs': 'float',
'cpu_vendor': 'str',
'cpu_frequency_in_mhz': 'float',
'cpu_implementation': 'str',
'cores_per_socket': 'int',
'total_sockets': 'int',
'threads_per_socket': 'int',
'is_hyper_threading_enabled': 'bool',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)'
}
self.attribute_map = {
'host_insight_id': 'hostInsightId',
'entity_source': 'entitySource',
'compartment_id': 'compartmentId',
'host_name': 'hostName',
'platform_type': 'platformType',
'platform_version': 'platformVersion',
'platform_vendor': 'platformVendor',
'total_cpus': 'totalCpus',
'total_memory_in_gbs': 'totalMemoryInGBs',
'cpu_architecture': 'cpuArchitecture',
'cpu_cache_in_mbs': 'cpuCacheInMBs',
'cpu_vendor': 'cpuVendor',
'cpu_frequency_in_mhz': 'cpuFrequencyInMhz',
'cpu_implementation': 'cpuImplementation',
'cores_per_socket': 'coresPerSocket',
'total_sockets': 'totalSockets',
'threads_per_socket': 'threadsPerSocket',
'is_hyper_threading_enabled': 'isHyperThreadingEnabled',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags'
}
self._host_insight_id = None
self._entity_source = None
self._compartment_id = None
self._host_name = None
self._platform_type = None
self._platform_version = None
self._platform_vendor = None
self._total_cpus = None
self._total_memory_in_gbs = None
self._cpu_architecture = None
self._cpu_cache_in_mbs = None
self._cpu_vendor = None
self._cpu_frequency_in_mhz = None
self._cpu_implementation = None
self._cores_per_socket = None
self._total_sockets = None
self._threads_per_socket = None
self._is_hyper_threading_enabled = None
self._defined_tags = None
self._freeform_tags = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['entitySource']
if type == 'MACS_MANAGED_EXTERNAL_HOST':
return 'MacsManagedExternalHostConfigurationSummary'
if type == 'EM_MANAGED_EXTERNAL_HOST':
return 'EmManagedExternalHostConfigurationSummary'
else:
return 'HostConfigurationSummary'
@property
def host_insight_id(self):
"""
**[Required]** Gets the host_insight_id of this HostConfigurationSummary.
The `OCID`__ of the host insight resource.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The host_insight_id of this HostConfigurationSummary.
:rtype: str
"""
return self._host_insight_id
@host_insight_id.setter
def host_insight_id(self, host_insight_id):
"""
Sets the host_insight_id of this HostConfigurationSummary.
The `OCID`__ of the host insight resource.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param host_insight_id: The host_insight_id of this HostConfigurationSummary.
:type: str
"""
self._host_insight_id = host_insight_id
@property
def entity_source(self):
"""
**[Required]** Gets the entity_source of this HostConfigurationSummary.
Source of the host entity.
Allowed values for this property are: "MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The entity_source of this HostConfigurationSummary.
:rtype: str
"""
return self._entity_source
@entity_source.setter
def entity_source(self, entity_source):
"""
Sets the entity_source of this HostConfigurationSummary.
Source of the host entity.
:param entity_source: The entity_source of this HostConfigurationSummary.
:type: str
"""
allowed_values = ["MACS_MANAGED_EXTERNAL_HOST", "EM_MANAGED_EXTERNAL_HOST"]
if not value_allowed_none_or_none_sentinel(entity_source, allowed_values):
entity_source = 'UNKNOWN_ENUM_VALUE'
self._entity_source = entity_source
@property
def compartment_id(self) -> str:
    """
    **[Required]** Gets the compartment_id of this HostConfigurationSummary.
    The `OCID`__ of the compartment.

    __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :return: The compartment_id of this HostConfigurationSummary.
    :rtype: str
    """
    return self._compartment_id

@compartment_id.setter
def compartment_id(self, compartment_id: str):
    """
    Sets the compartment_id of this HostConfigurationSummary.
    The `OCID`__ of the compartment.

    __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

    :param compartment_id: The compartment_id of this HostConfigurationSummary.
    :type: str
    """
    self._compartment_id = compartment_id

@property
def host_name(self) -> str:
    """
    **[Required]** Gets the host_name of this HostConfigurationSummary.
    The host name. The host name is unique amongst the hosts managed by the same management agent.

    :return: The host_name of this HostConfigurationSummary.
    :rtype: str
    """
    return self._host_name

@host_name.setter
def host_name(self, host_name: str):
    """
    Sets the host_name of this HostConfigurationSummary.
    The host name. The host name is unique amongst the hosts managed by the same management agent.

    :param host_name: The host_name of this HostConfigurationSummary.
    :type: str
    """
    self._host_name = host_name
@property
def platform_type(self):
    """
    **[Required]** The platform type of this HostConfigurationSummary.

    Allowed values are "LINUX" and 'UNKNOWN_ENUM_VALUE'; any unrecognized
    value returned by the service is mapped to 'UNKNOWN_ENUM_VALUE'.

    :return: The platform_type of this HostConfigurationSummary.
    :rtype: str
    """
    return self._platform_type

@platform_type.setter
def platform_type(self, platform_type):
    """
    Set the platform_type of this HostConfigurationSummary.

    Values outside the allowed set are coerced to 'UNKNOWN_ENUM_VALUE'.

    :param platform_type: The platform_type of this HostConfigurationSummary.
    :type: str
    """
    permitted = ["LINUX"]
    if value_allowed_none_or_none_sentinel(platform_type, permitted):
        self._platform_type = platform_type
    else:
        self._platform_type = 'UNKNOWN_ENUM_VALUE'
@property
def platform_version(self) -> str:
    """
    **[Required]** Gets the platform_version of this HostConfigurationSummary.
    Platform version.

    :return: The platform_version of this HostConfigurationSummary.
    :rtype: str
    """
    return self._platform_version

@platform_version.setter
def platform_version(self, platform_version: str):
    """
    Sets the platform_version of this HostConfigurationSummary.
    Platform version.

    :param platform_version: The platform_version of this HostConfigurationSummary.
    :type: str
    """
    self._platform_version = platform_version

@property
def platform_vendor(self) -> str:
    """
    **[Required]** Gets the platform_vendor of this HostConfigurationSummary.
    Platform vendor.

    :return: The platform_vendor of this HostConfigurationSummary.
    :rtype: str
    """
    return self._platform_vendor

@platform_vendor.setter
def platform_vendor(self, platform_vendor: str):
    """
    Sets the platform_vendor of this HostConfigurationSummary.
    Platform vendor.

    :param platform_vendor: The platform_vendor of this HostConfigurationSummary.
    :type: str
    """
    self._platform_vendor = platform_vendor

@property
def total_cpus(self) -> int:
    """
    **[Required]** Gets the total_cpus of this HostConfigurationSummary.
    Total CPU on this host.

    :return: The total_cpus of this HostConfigurationSummary.
    :rtype: int
    """
    return self._total_cpus

@total_cpus.setter
def total_cpus(self, total_cpus: int):
    """
    Sets the total_cpus of this HostConfigurationSummary.
    Total CPU on this host.

    :param total_cpus: The total_cpus of this HostConfigurationSummary.
    :type: int
    """
    self._total_cpus = total_cpus

@property
def total_memory_in_gbs(self) -> float:
    """
    **[Required]** Gets the total_memory_in_gbs of this HostConfigurationSummary.
    Total amount of usable physical memory in gibibytes.

    :return: The total_memory_in_gbs of this HostConfigurationSummary.
    :rtype: float
    """
    return self._total_memory_in_gbs

@total_memory_in_gbs.setter
def total_memory_in_gbs(self, total_memory_in_gbs: float):
    """
    Sets the total_memory_in_gbs of this HostConfigurationSummary.
    Total amount of usable physical memory in gibibytes.

    :param total_memory_in_gbs: The total_memory_in_gbs of this HostConfigurationSummary.
    :type: float
    """
    self._total_memory_in_gbs = total_memory_in_gbs
@property
def cpu_architecture(self) -> str:
    """
    **[Required]** Gets the cpu_architecture of this HostConfigurationSummary.
    CPU architecture.

    :return: The cpu_architecture of this HostConfigurationSummary.
    :rtype: str
    """
    return self._cpu_architecture

@cpu_architecture.setter
def cpu_architecture(self, cpu_architecture: str):
    """
    Sets the cpu_architecture of this HostConfigurationSummary.
    CPU architecture.

    :param cpu_architecture: The cpu_architecture of this HostConfigurationSummary.
    :type: str
    """
    self._cpu_architecture = cpu_architecture

@property
def cpu_cache_in_mbs(self) -> float:
    """
    **[Required]** Gets the cpu_cache_in_mbs of this HostConfigurationSummary.
    Size of cache memory in megabytes.

    :return: The cpu_cache_in_mbs of this HostConfigurationSummary.
    :rtype: float
    """
    return self._cpu_cache_in_mbs

@cpu_cache_in_mbs.setter
def cpu_cache_in_mbs(self, cpu_cache_in_mbs: float):
    """
    Sets the cpu_cache_in_mbs of this HostConfigurationSummary.
    Size of cache memory in megabytes.

    :param cpu_cache_in_mbs: The cpu_cache_in_mbs of this HostConfigurationSummary.
    :type: float
    """
    self._cpu_cache_in_mbs = cpu_cache_in_mbs

@property
def cpu_vendor(self) -> str:
    """
    **[Required]** Gets the cpu_vendor of this HostConfigurationSummary.
    Name of the CPU vendor.

    :return: The cpu_vendor of this HostConfigurationSummary.
    :rtype: str
    """
    return self._cpu_vendor

@cpu_vendor.setter
def cpu_vendor(self, cpu_vendor: str):
    """
    Sets the cpu_vendor of this HostConfigurationSummary.
    Name of the CPU vendor.

    :param cpu_vendor: The cpu_vendor of this HostConfigurationSummary.
    :type: str
    """
    self._cpu_vendor = cpu_vendor
@property
def cpu_frequency_in_mhz(self) -> float:
    """
    **[Required]** Gets the cpu_frequency_in_mhz of this HostConfigurationSummary.
    Clock frequency of the processor in megahertz.

    :return: The cpu_frequency_in_mhz of this HostConfigurationSummary.
    :rtype: float
    """
    return self._cpu_frequency_in_mhz

@cpu_frequency_in_mhz.setter
def cpu_frequency_in_mhz(self, cpu_frequency_in_mhz: float):
    """
    Sets the cpu_frequency_in_mhz of this HostConfigurationSummary.
    Clock frequency of the processor in megahertz.

    :param cpu_frequency_in_mhz: The cpu_frequency_in_mhz of this HostConfigurationSummary.
    :type: float
    """
    self._cpu_frequency_in_mhz = cpu_frequency_in_mhz

@property
def cpu_implementation(self) -> str:
    """
    **[Required]** Gets the cpu_implementation of this HostConfigurationSummary.
    Model name of processor.

    :return: The cpu_implementation of this HostConfigurationSummary.
    :rtype: str
    """
    return self._cpu_implementation

@cpu_implementation.setter
def cpu_implementation(self, cpu_implementation: str):
    """
    Sets the cpu_implementation of this HostConfigurationSummary.
    Model name of processor.

    :param cpu_implementation: The cpu_implementation of this HostConfigurationSummary.
    :type: str
    """
    self._cpu_implementation = cpu_implementation

@property
def cores_per_socket(self) -> int:
    """
    **[Required]** Gets the cores_per_socket of this HostConfigurationSummary.
    Number of cores per socket.

    :return: The cores_per_socket of this HostConfigurationSummary.
    :rtype: int
    """
    return self._cores_per_socket

@cores_per_socket.setter
def cores_per_socket(self, cores_per_socket: int):
    """
    Sets the cores_per_socket of this HostConfigurationSummary.
    Number of cores per socket.

    :param cores_per_socket: The cores_per_socket of this HostConfigurationSummary.
    :type: int
    """
    self._cores_per_socket = cores_per_socket
@property
def total_sockets(self) -> int:
    """
    **[Required]** Gets the total_sockets of this HostConfigurationSummary.
    Number of total sockets.

    :return: The total_sockets of this HostConfigurationSummary.
    :rtype: int
    """
    return self._total_sockets

@total_sockets.setter
def total_sockets(self, total_sockets: int):
    """
    Sets the total_sockets of this HostConfigurationSummary.
    Number of total sockets.

    :param total_sockets: The total_sockets of this HostConfigurationSummary.
    :type: int
    """
    self._total_sockets = total_sockets

@property
def threads_per_socket(self) -> int:
    """
    **[Required]** Gets the threads_per_socket of this HostConfigurationSummary.
    Number of threads per socket.

    :return: The threads_per_socket of this HostConfigurationSummary.
    :rtype: int
    """
    return self._threads_per_socket

@threads_per_socket.setter
def threads_per_socket(self, threads_per_socket: int):
    """
    Sets the threads_per_socket of this HostConfigurationSummary.
    Number of threads per socket.

    :param threads_per_socket: The threads_per_socket of this HostConfigurationSummary.
    :type: int
    """
    self._threads_per_socket = threads_per_socket

@property
def is_hyper_threading_enabled(self) -> bool:
    """
    **[Required]** Gets the is_hyper_threading_enabled of this HostConfigurationSummary.
    Indicates if hyper-threading is enabled or not.

    :return: The is_hyper_threading_enabled of this HostConfigurationSummary.
    :rtype: bool
    """
    return self._is_hyper_threading_enabled

@is_hyper_threading_enabled.setter
def is_hyper_threading_enabled(self, is_hyper_threading_enabled: bool):
    """
    Sets the is_hyper_threading_enabled of this HostConfigurationSummary.
    Indicates if hyper-threading is enabled or not.

    :param is_hyper_threading_enabled: The is_hyper_threading_enabled of this HostConfigurationSummary.
    :type: bool
    """
    self._is_hyper_threading_enabled = is_hyper_threading_enabled
@property
def defined_tags(self) -> dict:
    """
    **[Required]** Gets the defined_tags of this HostConfigurationSummary.
    Defined tags for this resource. Each key is predefined and scoped to a namespace.
    Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

    :return: The defined_tags of this HostConfigurationSummary.
    :rtype: dict(str, dict(str, object))
    """
    return self._defined_tags

@defined_tags.setter
def defined_tags(self, defined_tags: dict):
    """
    Sets the defined_tags of this HostConfigurationSummary.
    Defined tags for this resource. Each key is predefined and scoped to a namespace.
    Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

    :param defined_tags: The defined_tags of this HostConfigurationSummary.
    :type: dict(str, dict(str, object))
    """
    self._defined_tags = defined_tags

@property
def freeform_tags(self) -> dict:
    """
    **[Required]** Gets the freeform_tags of this HostConfigurationSummary.
    Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
    Example: `{\"bar-key\": \"value\"}`

    :return: The freeform_tags of this HostConfigurationSummary.
    :rtype: dict(str, str)
    """
    return self._freeform_tags

@freeform_tags.setter
def freeform_tags(self, freeform_tags: dict):
    """
    Sets the freeform_tags of this HostConfigurationSummary.
    Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
    Example: `{\"bar-key\": \"value\"}`

    :param freeform_tags: The freeform_tags of this HostConfigurationSummary.
    :type: dict(str, str)
    """
    self._freeform_tags = freeform_tags
def __repr__(self):
    """Return a human-readable dump of all model attributes."""
    return formatted_flat_dict(self)

def __eq__(self, other):
    """Two instances are equal when every attribute matches."""
    return other is not None and self.__dict__ == other.__dict__

def __ne__(self, other):
    """Logical negation of ``__eq__``."""
    return not self == other
| 177 | 0 | 81 |
7bc96d131fe36d89c52069b4fce808a34c1e9e63 | 2,220 | py | Python | tests/DealtHandTests.py | cjrgreen/Bridge | c87c5f3f853f0dfbea9b41e2f7847dd4ef4f5410 | [
"MIT"
] | null | null | null | tests/DealtHandTests.py | cjrgreen/Bridge | c87c5f3f853f0dfbea9b41e2f7847dd4ef4f5410 | [
"MIT"
] | null | null | null | tests/DealtHandTests.py | cjrgreen/Bridge | c87c5f3f853f0dfbea9b41e2f7847dd4ef4f5410 | [
"MIT"
] | null | null | null | from context import Card
import unittest
from functools import reduce
runner = unittest.TextTestRunner()
suite = DealtHandsUnitTestSuite()
runner.run(suite)
| 50.454545 | 153 | 0.588288 | from context import Card
import unittest
from functools import reduce
class DealtHandsUnitTests(unittest.TestCase):
    """Sanity checks on a freshly shuffled and dealt deck of cards.

    The check methods are deliberately not ``test_*``-prefixed; they are
    collected explicitly by :class:`DealtHandsUnitTestSuite`.

    Fix: the deprecated ``assert_`` alias (removed in Python 3.12) is
    replaced with ``assertTrue``; map/filter/reduce chains are replaced
    with equivalent comprehensions and ``sum``.
    """

    def setUp(self):
        # A new shuffled deck (with its dealt hands) for every check.
        self.deck = Card.ShuffledDeck()

    def allCardsInDeck(self):
        # The full deck must contain exactly 52 cards.
        self.assertTrue(len(self.deck.cards) == 52, "not 52 cards in deck")

    def allCardsInHands(self):
        # All four hands must hold exactly 13 cards each.
        fullHands = [1 if len(h.cards) == 13 else 0 for h in self.deck.hands]
        self.assertTrue(sum(fullHands) == 4, "not 13 cards in some hand")

    def noDupsInHands(self):
        # A hand of 13 distinct cards collapses to a 13-element set.
        withoutDups = [1 if len(set(h.cards)) == 13 else 0 for h in self.deck.hands]
        self.assertTrue(sum(withoutDups) == 4, "duplicates in some hand")

    def noDupsBetweenHands(self):
        # All 6 unordered pairs of the 4 hands must be disjoint.
        hl = [1, 1, 1, 2, 2, 3]
        hr = [2, 3, 4, 3, 4, 4]
        withoutDups = [
            1 if len(set(self.deck.hands[h1 - 1].cards) & set(self.deck.hands[h2 - 1].cards)) == 0 else 0
            for h1, h2 in zip(hl, hr)
        ]
        self.assertTrue(sum(withoutDups) == 6, "duplicates between some hands")

    def noForeignCards(self):
        # Every card in every hand must come from this deck.
        cardsInDeck = [1 if len(set(h.cards) & set(self.deck.cards)) == 13 else 0 for h in self.deck.hands]
        self.assertTrue(sum(cardsInDeck) == 4, "foreign cards in some hands")

    def validPointCounts(self):
        # High-card points across the four hands must total 40, and no
        # single hand may exceed 37 (the maximum reachable in 13 cards).
        hcpCounts = [h.hcp_count() for h in self.deck.hands]
        self.assertTrue(sum(hcpCounts) == 40, "high card points do not add up to 40")
        self.assertTrue(len([p for p in hcpCounts if p < 0 or p > 37]) == 0, "too many high card points in some hand")
class DealtHandsUnitTestSuite(unittest.TestSuite):
    """Suite that explicitly collects every dealt-hand check by name."""

    _CHECKS = (
        "allCardsInDeck",
        "allCardsInHands",
        "noDupsInHands",
        "noDupsBetweenHands",
        "noForeignCards",
        "validPointCounts",
    )

    def __init__(self):
        # Instantiate one DealtHandsUnitTests per check method.
        super().__init__(DealtHandsUnitTests(name) for name in self._CHECKS)
# Build the suite and run it immediately when the module is executed.
suite = DealtHandsUnitTestSuite()
runner = unittest.TextTestRunner()
runner.run(suite)
| 1,728 | 53 | 266 |
4bbb5e6467c534130112f7f6d603cc901dbf5787 | 3,661 | py | Python | tests/test_models_functional.py | jic-dtool/dtool-gui-tk | 05b73daad05ad20f2f18b692051b257384f61698 | [
"MIT"
] | 1 | 2020-08-28T00:30:23.000Z | 2020-08-28T00:30:23.000Z | tests/test_models_functional.py | jic-dtool/dtool-gui | 05b73daad05ad20f2f18b692051b257384f61698 | [
"MIT"
] | 27 | 2020-12-17T11:33:15.000Z | 2021-02-24T14:37:34.000Z | tests/test_models_functional.py | jic-dtool/dtool-gui | 05b73daad05ad20f2f18b692051b257384f61698 | [
"MIT"
] | null | null | null | "Functional tests showing how models can be used to create/edit datasets."
import os
import dtoolcore.utils
from . import tmp_dir_fixture # NOQA
import pytest
| 34.214953 | 92 | 0.738596 | "Functional tests showing how models can be used to create/edit datasets."
import os
import dtoolcore.utils
from . import tmp_dir_fixture # NOQA
import pytest
def test_create_dataset(tmp_dir_fixture):  # NOQA
    """End-to-end test of dataset creation via the GUI models.

    Walks the full workflow: prepare an input directory with one item,
    configure base-URI/metadata models, verify that ``create()`` raises
    the expected error at every incomplete step, then create the dataset
    and check its annotations, README and item content.
    """
    import dtool_gui_tk.models

    # Input directory containing a single data item.
    input_directory = os.path.join(tmp_dir_fixture, "input_directory")
    os.mkdir(input_directory)
    item_content = "my data in a file"
    with open(os.path.join(input_directory, "data.txt"), "w") as fh:
        fh.write(item_content)

    # Base URI (where datasets get written) and its persisted config.
    base_uri_directory = os.path.join(tmp_dir_fixture, "datasets")
    os.mkdir(base_uri_directory)
    config_path = os.path.join(tmp_dir_fixture, "dtool-gui.json")
    base_uri_model = dtool_gui_tk.models.LocalBaseURIModel(config_path)
    base_uri_model.put_base_uri(base_uri_directory)

    proto_dataset_model = dtool_gui_tk.models.ProtoDataSetModel()

    # Metadata schema: "project" is required, "age" is optional.
    metadata_model = dtool_gui_tk.models.MetadataModel()
    metadata_model.add_metadata_property(
        name="project",
        schema={"type": "string", "maxLength": 10},
        required=True
    )
    metadata_model.add_metadata_property(
        name="age",
        schema={"type": "integer", "maximum": 10},
        required=False
    )

    # Each missing prerequisite raises its own specific error.
    with pytest.raises(dtool_gui_tk.models.MissingDataSetNameError):
        proto_dataset_model.create()
    proto_dataset_model.set_name("my-dataset")
    with pytest.raises(dtool_gui_tk.models.MissingInputDirectoryError):
        proto_dataset_model.create()
    proto_dataset_model.set_input_directory(input_directory)
    with pytest.raises(dtool_gui_tk.models.MissingBaseURIModelError):
        proto_dataset_model.create()
    proto_dataset_model.set_base_uri_model(base_uri_model)
    with pytest.raises(dtool_gui_tk.models.MissingMetadataModelError):
        proto_dataset_model.create()
    proto_dataset_model.set_metadata_model(metadata_model)
    with pytest.raises(dtool_gui_tk.models.MissingRequiredMetadataError):
        proto_dataset_model.create()
    proto_dataset_model.metadata_model.set_value("project", "dtool-gui")  # NOQA
    proto_dataset_model.metadata_model.set_value("age", "not-an-integer")  # NOQA
    proto_dataset_model.metadata_model.select_optional_item("age")
    # Raises because "age" is not an integer.
    with pytest.raises(dtool_gui_tk.models.MetadataValidationError):
        proto_dataset_model.create()
    proto_dataset_model.metadata_model.set_value("project", "too-long-project-name")  # NOQA
    proto_dataset_model.metadata_model.set_value("age", 5)  # NOQA
    # Raises because the "project" name is too long.
    with pytest.raises(dtool_gui_tk.models.MetadataValidationError):
        proto_dataset_model.create()
    proto_dataset_model.metadata_model.set_value("project", "dtool-gui")
    proto_dataset_model.create()

    expected_uri = dtoolcore.utils.sanitise_uri(
        os.path.join(base_uri_directory, "my-dataset")
    )
    assert proto_dataset_model.uri == expected_uri

    # Check that the dataset has the expected annotations.
    ds = dtoolcore.DataSet.from_uri(expected_uri)
    assert ds.get_annotation("project") == "dtool-gui"
    assert ds.get_annotation("age") == 5

    # Check that the dataset has "special" annotation for the metadata schema.
    expected_schema = proto_dataset_model.metadata_model.get_master_schema()
    assert ds.get_annotation("_metadata_schema") == expected_schema

    expected_readme = """---
project: dtool-gui
age: 5"""
    assert ds.get_readme_content() == expected_readme

    # Exactly one item, with the content written above.
    assert len(ds.identifiers) == 1
    identifier = list(ds.identifiers)[0]
    with open(ds.item_content_abspath(identifier)) as fh:
        assert item_content == fh.read()
| 3,473 | 0 | 23 |
4a57f1a0ebb098b2a3935364cf2712e889e042e0 | 3,912 | py | Python | CLI_tool/cloudwatch.py | Juniper/vSRX-AWS | a9179d8b115229b1f26aec166a85261fd2352b14 | [
"Apache-2.0"
] | 18 | 2017-07-03T22:45:19.000Z | 2021-04-04T16:51:28.000Z | CLI_tool/cloudwatch.py | Juniper/vSRX-AWS | a9179d8b115229b1f26aec166a85261fd2352b14 | [
"Apache-2.0"
] | 13 | 2017-10-25T18:22:22.000Z | 2021-09-15T00:09:54.000Z | CLI_tool/cloudwatch.py | Juniper/vSRX-AWS | a9179d8b115229b1f26aec166a85261fd2352b14 | [
"Apache-2.0"
] | 23 | 2017-07-20T03:29:25.000Z | 2021-02-24T20:27:05.000Z | import sys
import os
import boto3
from botocore.exceptions import ClientError
import click
import json
import datetime
import time
import re
from util import *
line_widget_x = 0
line_widget_y = 0
| 39.918367 | 90 | 0.695297 | import sys
import os
import boto3
from botocore.exceptions import ClientError
import click
import json
import datetime
import time
import re
from util import *
line_widget_x = 0
line_widget_y = 0
def get_line_widget(stacked, metrics, region):
    """Build one CloudWatch dashboard line-chart widget for *metrics*.

    Widgets are laid out left-to-right on a grid of 6x6 cells, four per
    row; the module-level cursor (line_widget_x, line_widget_y) is
    advanced to the next free slot as a side effect.
    """
    global line_widget_x
    global line_widget_y
    widget = {
        'type': 'metric',
        'x': line_widget_x,
        'y': line_widget_y,
        'width': 6,
        'height': 6,
        'properties': {
            'view': "timeSeries",
            'stacked': stacked,
            'metrics': metrics,
            'region': region,
        },
    }
    # Advance the layout cursor; wrap to a new row after four widgets.
    line_widget_x += 6
    if line_widget_x > 18:
        line_widget_x = 0
        line_widget_y += 6
    # Each metric entry's second element is the metric name.
    metric_name_str = ','.join(metric[1] for metric in metrics)
    info_msg('Adding line widget for metrics - ' + metric_name_str)
    return widget
def create_or_update_dashboard(session, region, dashboard_name, widgets):
    """Create (or overwrite) a CloudWatch dashboard containing *widgets*."""
    body_json = json.dumps({'widgets': widgets})
    client = session.client("cloudwatch", region_name=region)
    response = client.put_dashboard(DashboardName=dashboard_name,
                                    DashboardBody=body_json)
    debug_msg('put_dashboard response:' + json.dumps(response))
def configure_cloudwatch_dashbord(session, region, instance, metric_ns):
    """Create a CloudWatch dashboard ('vsrx_<instance-id>') for a vSRX instance.

    Adds line widgets for RE CPU/memory, per-vCPU dataplane CPU, dataplane
    heap, disk, flow-session utilisation, and per-GE-interface PPS/KBPS.

    NOTE(review): the function name contains a typo ("dashbord") but is kept
    because callers reference it by this name.
    """
    widgets = []
    ge_interface_num = 0
    instance_id = instance.instance_id
    info_msg('Creating CloudWatch dashboard for instance %s, namespace %s'\
        % (instance_id,metric_ns))
    # First network interface presumably is the management interface, so the
    # number of ge- interfaces is one less -- TODO confirm.
    if len(instance.network_interfaces) > 0:
        ge_interface_num = len(instance.network_interfaces) - 1
    # One vCPU appears reserved (presumably for the RE); the rest are
    # dataplane CPUs -- TODO confirm against vSRX documentation.
    dataplane_cpu_num = instance.cpu_options['CoreCount'] * \
        instance.cpu_options['ThreadsPerCore'] - 1
    # Routing-engine CPU utilisation.
    recpu_metrics = [[ metric_ns, "RECPUUtil", "Instance ID", instance_id]]
    widgets.append(get_line_widget(False, recpu_metrics, region))
    # Stacked chart of every dataplane vCPU (numbered from 1).
    dpcpu_metrics = []
    for cpuno in range(dataplane_cpu_num):
        dp_cpuno = cpuno + 1
        metric_name = "DataPlaneCPU%dUtil" % dp_cpuno
        metric = [metric_ns, metric_name, "Instance ID", instance_id]
        dpcpu_metrics.append(metric)
    widgets.append(get_line_widget(True, dpcpu_metrics, region))
    # Memory, disk and flow-session utilisation widgets.
    remem_metrics = [[ metric_ns, "REMemoryUtil", "Instance ID", instance_id ]]
    widgets.append(get_line_widget(False, remem_metrics, region))
    dpmem_metrics = [[ metric_ns, "DataplaneHeapMemoryUtil", "Instance ID", instance_id ]]
    widgets.append(get_line_widget(False, dpmem_metrics, region))
    diskutil_metrics = [[ metric_ns, "DiskUtil", "Instance ID", instance_id ]]
    widgets.append(get_line_widget(False, diskutil_metrics, region))
    sessutil_metric = [[ metric_ns, "FlowSessionUtil", "Instance ID", instance_id ]]
    widgets.append(get_line_widget(False, sessutil_metric, region))
    # Per-interface traffic widgets: packets/s and kilobytes/s, in and out.
    for ge_id in range(ge_interface_num):
        gepps_metrics = []
        metric_name = "Ge00%dInputPPS" % ge_id
        gepps_metrics.append([metric_ns, metric_name, "Instance ID", instance_id])
        metric_name = "Ge00%dOutputPPS" % ge_id
        gepps_metrics.append([metric_ns, metric_name, "Instance ID", instance_id])
        widgets.append(get_line_widget(True, gepps_metrics, region))
        gekbps_metrics = []
        metric_name = "Ge00%dInputKBPS" % ge_id
        gekbps_metrics.append([metric_ns, metric_name, "Instance ID", instance_id])
        metric_name = "Ge00%dOutputKBPS" % ge_id
        gekbps_metrics.append([metric_ns, metric_name, "Instance ID", instance_id])
        widgets.append(get_line_widget(True, gekbps_metrics, region))
    dashboard_name = 'vsrx_%s' % instance_id
    create_or_update_dashboard(session, region, dashboard_name, widgets)
    info_msg("Created CloudWatch dashboard %s" % dashboard_name)
a516b4bd6ffcc75cae9128c7509055239e67111c | 643 | py | Python | pyfiles/11_5808.py | StevenPZChan/pythonchallenge | 84c0e7458189f6d74e2cfbd169d854dae11d07a9 | [
"MIT"
] | null | null | null | pyfiles/11_5808.py | StevenPZChan/pythonchallenge | 84c0e7458189f6d74e2cfbd169d854dae11d07a9 | [
"MIT"
] | null | null | null | pyfiles/11_5808.py | StevenPZChan/pythonchallenge | 84c0e7458189f6d74e2cfbd169d854dae11d07a9 | [
"MIT"
] | null | null | null | from io import BytesIO
import requests
from PIL import Image

# HTTP Basic auth header; "aHVnZTpmaWxl" is base64 for "huge:file".
header = {'Authorization': 'Basic aHVnZTpmaWxl', }
response = requests.get('http://www.pythonchallenge.com/pc/return/cave.jpg', headers=header)
img = Image.open(BytesIO(response.content))
width, height = img.size
# The picture interleaves two images pixel-by-pixel; keeping one parity
# extracts a half-resolution hidden picture.
img_new = Image.new('RGB', (width // 2, height // 2))
odd = 1
even = 0  # the other parity would extract the complementary sub-image
for x in range(width):
    for y in range(height):
        if x % 2 == odd and y % 2 == odd: # x % 2 == even and y % 2 == even
            # multiplied by 5 to increase contrast
            img_new.putpixel((x // 2, y // 2), tuple(5 * p for p in img.getpixel((x, y))))
img_new.show() # evil
| 29.227273 | 92 | 0.62986 | from io import BytesIO
import requests
from PIL import Image

# HTTP Basic auth header; "aHVnZTpmaWxl" is base64 for "huge:file".
header = {'Authorization': 'Basic aHVnZTpmaWxl', }
response = requests.get('http://www.pythonchallenge.com/pc/return/cave.jpg', headers=header)
img = Image.open(BytesIO(response.content))
width, height = img.size
# The picture interleaves two images pixel-by-pixel; keeping one parity
# extracts a half-resolution hidden picture.
img_new = Image.new('RGB', (width // 2, height // 2))
odd = 1
even = 0  # the other parity would extract the complementary sub-image
for x in range(width):
    for y in range(height):
        if x % 2 == odd and y % 2 == odd: # x % 2 == even and y % 2 == even
            # multiplied by 5 to increase contrast
            img_new.putpixel((x // 2, y // 2), tuple(5 * p for p in img.getpixel((x, y))))
img_new.show() # evil
| 0 | 0 | 0 |
ce348970688385f43caa2da6afef7a2e7ea9bf77 | 31,556 | py | Python | AmapFunctions/AdministrativeDistrictEnquiry.py | Gaoyifei1011/AmapProgram | d45a27abf9f508d922f37abc34f00da6d0aab4a0 | [
"MIT"
] | 1 | 2021-05-19T02:48:49.000Z | 2021-05-19T02:48:49.000Z | AmapFunctions/AdministrativeDistrictEnquiry.py | Gaoyifei1011/AmapProgram | d45a27abf9f508d922f37abc34f00da6d0aab4a0 | [
"MIT"
] | 1 | 2021-05-18T16:01:56.000Z | 2021-05-20T02:14:52.000Z | AmapFunctions/AdministrativeDistrictEnquiry.py | Gaoyifei1011/AmapProgram | d45a27abf9f508d922f37abc34f00da6d0aab4a0 | [
"MIT"
] | 1 | 2021-06-04T06:39:57.000Z | 2021-06-04T06:39:57.000Z | # -*- coding:utf-8 -*-
# 导入的库
import inspect
import json
import time
from typing import Any
import requests
from SelfExpection.CustomExpection import CustomExpection
from SelfExpection.OfficialException import OfficialException
from logrecord.WriteLog import WriteLog
class AdministrativeDistrictEnquiry:
"""
Class:行政区域查询
行政区域查询是一类简单的HTTP接口,根据用户输入的搜索条件可以帮助用户快速的查找特定的行政区域信息。
"""
# 获取高德地图数据API的钥匙
APIkey = '<请自己输入自己申请的API Key>'
def get_administrative_district(self, keywords: str,
                                sub_district: int,
                                **kwargs: dict[str, Any]
                                ) -> dict:
    """
    Query administrative-district data from the AMap web service.

    Args:
        keywords: Search keyword (single term only); supports a district
            name, citycode or adcode.
        sub_district: How many levels of child districts to return
            (0 = none, 1 = one level, 2 = two, 3 = three).
        kwargs:
            page: Which page of results to fetch (default 1).
            offset: Maximum number of districts in the outermost result.
            extensions: 'base' (no boundary polygon) or 'all' (boundary of
                the queried district only; street level has no boundary).
            filter: adcode restricting results to one province/municipality;
                strongly recommended for correct results.
            output: Response format, 'JSON' or 'XML'.

    Returns:
        The decoded JSON response on success, or a dict with
        ``status == '2'`` describing the request failure.
    """
    self.keywords = keywords
    self.sub_district = sub_district
    # Bug fix: default every optional argument to None instead of assigning
    # only when present in kwargs.  The original code raised AttributeError
    # in the "is not None" checks below whenever an option was omitted, and
    # could silently reuse a stale value from a previous call.
    self.extensions = kwargs.get('extensions')
    self.filter = kwargs.get('filter')
    self.output = kwargs.get('output')
    self.offset = kwargs.get('offset')
    self.page = kwargs.get('page')

    # Logging setup.
    writeLog = WriteLog()
    class_name = self.__class__.__name__
    function_name = inspect.stack()[0][3]
    log_filename = writeLog.create_filename(class_name=class_name)

    # Mandatory request parameters.
    parameters = {'key': self.APIkey,
                  'keywords': self.keywords,
                  'subdistrict': self.sub_district,
                  }
    # Optional request parameters (only sent when supplied).
    if self.extensions is not None:
        parameters.update(extensions=self.extensions)
    if self.filter is not None:
        parameters.update(filter=self.filter)
    if self.output is not None:
        parameters.update(output=self.output)
    if self.offset is not None:
        parameters.update(offset=self.offset)
    if self.page is not None:
        parameters.update(page=self.page)

    # Fetch the data.
    try:
        request_information = requests.get("https://restapi.amap.com/v3/config/district?parameters",
                                           params=parameters)
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - request_information:{1}'.format(function_name,
                                                                                          request_information))
        request_information.close()  # close the connection
        request_information.raise_for_status()  # raise if the status code is not 200
        # Return the decoded JSON payload.
        json_decode = json.loads(request_information.text)
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=6,
                              context='Function name:{0} - Administrative district data successful get.'.format(
                                  function_name))
        return json_decode
    except requests.exceptions.ConnectionError as e:
        time.sleep(1)
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=5,
                              context='Function name:{0} - {1} has occured.'.format(function_name,
                                                                                   e.__class__.__name__))
        error_connection = 'ConnectionError -- please wait 3 seconds'
        error_connection_dict = {'status': '2',
                                 'info': 'requests.exceptions.ConnectionError',
                                 'detail_information': requests.exceptions.ConnectionError,
                                 'error_prompt': error_connection
                                 }
        return error_connection_dict
    except requests.exceptions.ChunkedEncodingError as e:
        time.sleep(1)
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=5,
                              context='Function name:{0} - {1} has occured.'.format(function_name,
                                                                                   e.__class__.__name__))
        error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'
        error_chuck_encoding_dict = {'status': '2',
                                     'info': 'HTTPError',
                                     'detail_information': requests.exceptions.ChunkedEncodingError,
                                     'error_prompt': error_chuck_encoding
                                     }
        return error_chuck_encoding_dict
    except Exception as e:
        time.sleep(1)
        error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=5,
                              context='Function name:{0} - {1} has occured.'.format(function_name,
                                                                                   e.__class__.__name__))
        # Fix: record the exception that actually occurred instead of the
        # (unrelated) ChunkedEncodingError class used by the original code.
        error_information_dict = {'status': '2',
                                  'info': 'HTTPError',
                                  'detail_information': e,
                                  'error_prompt': error_information
                                  }
        return error_information_dict
def parse_administrative_district(self, json_decode: dict,
                                  sub_district: int
                                  ) -> list:
    """
    Parse the administrative-district response and build user-facing text.

    Args:
        json_decode: Data returned by :meth:`get_administrative_district`.
        sub_district: How many levels of child districts to render.

    Returns:
        A list of human-readable result strings (Chinese UI text), or an
        explanatory failure message on error.

    NOTE(review): when status == '1' but infocode != "10000" the function
    falls through and implicitly returns None -- presumably unreachable in
    practice, but worth confirming.
    """
    # TODO: upgrade the return value from list to dict in a future release.
    self.json_decode = json_decode
    self.sub_district = sub_district
    # Accumulated output lines.
    resultContext = []
    # Logging setup.
    writeLog = WriteLog()
    class_name = self.__class__.__name__
    function_name = inspect.stack()[0][3]
    log_filename = writeLog.create_filename(class_name=class_name)
    try:
        if self.json_decode['status'] == '0':
            # Error reported by the official AMap API.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - status:{1}'.format(function_name,
                                                                                 self.json_decode['status']))
            raise OfficialException
        elif self.json_decode['status'] == '2':
            # Custom (request-level) error produced by this application.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - status:{1}'.format(function_name,
                                                                                 self.json_decode['status']))
            raise CustomExpection
        elif self.json_decode['status'] == '1':
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=6,
                                  context='Function name:{0} - status:{1}'.format(function_name,
                                                                                 self.json_decode['status']))
            if self.json_decode['infocode'] == "10000":  # success info code
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - infocode:{1}'.format(function_name,
                                                                                       self.json_decode['infocode']))
                # Map of API level keys to Chinese display labels.
                district_level = {'country': '国',
                                  'province': '省',
                                  'city': '市',
                                  'district': '区/县级市/县',
                                  'street': '街道/镇/乡'
                                  }
                # Number of matches for the keyword.
                keywords_count = self.json_decode['count']
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=1,
                                      context='Function name:{0} - keywords count:{1}'.format(function_name,
                                                                                             keywords_count))
                resultContext.append("根据您提供的关键字已为您查找到{0}个结果".format(keywords_count))
                # Matched districts.
                districts = self.json_decode['districts']
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - districts acquired successfully'.format(
                                          function_name))
                # Remaining depth of sub-districts to print.
                sub_district_value = self.sub_district
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=1,
                                      context='Function name:{0} - sub_district_value:{1}'.format(function_name,
                                                                                                 sub_district_value))
                # Original requested depth, used for level arithmetic.
                global_sub = self.sub_district
                # only for debugging
                writeLog.write_to_log(file_name=log_filename, log_level=1,
                                      context='Function name:{0} - global_sub:{1}'.format(function_name,
                                                                                         global_sub))
                if districts and sub_district_value >= 0:  # result is non-empty
                    for district in districts:
                        # only for debugging
                        writeLog.write_to_log(file_name=log_filename,
                                              log_level=1,
                                              context='Function name:{0} - {1}'.format(function_name,
                                                                                      self.print_subdistrict.__name__))
                        # Recursively render this district and its children.
                        context = self.print_subdistrict(district, sub_district_value - 1, district_level,
                                                         global_sub)
                        resultContext.extend(context)
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - print district successful run.'.format(function_name))
                return resultContext
    except OfficialException as officialException:
        # Extract the official error details.
        errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=3,
                              context='Function name:{0} - errcode:{1}'.format(function_name,
                                                                              errcode))
        writeLog.write_to_log(file_name=log_filename,
                              log_level=3,
                              context='Function name:{0} - errorInfo:{1}'.format(function_name,
                                                                                errorInfo))
        writeLog.write_to_log(file_name=log_filename,
                              log_level=3,
                              context='Function name:{0} - solution:{1}'.format(function_name,
                                                                               solution))
        resultContext.append(errorInfo)
        context = "行政区域信息查询失败,换个词进行搜索吧"
        resultContext.append(context)
        return resultContext
    except CustomExpection as customException:
        # Extract the custom (request-level) error details.
        info, detail_information, error_prompt = customException.get_error_info(self.json_decode)
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=3,
                              context='Function name:{0} - info:{1}'.format(function_name,
                                                                           info))
        writeLog.write_to_log(file_name=log_filename,
                              log_level=3,
                              context='Function name:{0} - detail_information:{1}'.format(function_name,
                                                                                         detail_information))
        writeLog.write_to_log(file_name=log_filename,
                              log_level=3,
                              context='error_prompt:{0}'.format(error_prompt))
        context = "行政区域信息查询失败,换个词进行搜索吧"
        resultContext.append(context)
        return resultContext
def print_subdistrict(self, district: dict,
sub_district_value: int,
district_level: dict,
global_sub_district_value: int
) -> list:
"""
函数:打印查询的行政区域
Args:
district: 传入的关键字查询对应的行政区域
sub_district_value:代表当前下一级行政区域的位置
district_level:行政区域级别
global_sub_district_value:传入全局查询的行政区域
"""
# TODO:未来版本由于数据量巨大,将其放入子线程中进行,防止卡父GUI进程
# TODO:未来版本将返回数据从list升级为dict
self.district = district
self.district_level = district_level
self.global_sub_district_value = global_sub_district_value
self.sub_district_value = sub_district_value
# 输出结果
resultContext = []
# 写入日志
writeLog = WriteLog()
class_name = self.__class__.__name__
function_name = inspect.stack()[0][3]
log_filename = writeLog.create_filename(class_name=class_name)
name = self.district['name']
level = self.district_level[self.district['level']]
# 当前行政区域
subtraction = global_sub_district_value - sub_district_value - 1
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - global:{1}'.format(function_name,
str(self.global_sub_district_value))
)
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - sub_district_value:{1}'.format(function_name,
sub_district_value)
)
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - subtraction:{1}'.format(function_name,
str(subtraction))
)
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - district search successfully'.format(function_name)
)
# 同级行政区域
if subtraction == 0:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction, name,
level)
)
resultContext.append("您提供的关键字查询名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下一级行政区域
elif subtraction == 1:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level)
)
resultContext.append("您查询的关键字的下一级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下二级行政区域
elif subtraction == 2:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level)
)
resultContext.append("您查询的关键字的下二级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下三级行政区域
elif subtraction == 3:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level
)
)
resultContext.append("您查询的关键字的下三级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
else:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - Query Failed'.format(function_name)
)
resultContext.append("查询错误")
# 条件成立,继续搜索下一级行政区
sub_districts = self.district['districts']
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - search sub districts'.format(function_name)
)
# 行政区域结果数目
len_sub_districts = len(self.district['districts'])
if len_sub_districts > 0:
resultContext.append("该行政区域包括{0}个结果".format(len_sub_districts))
if sub_districts and self.sub_district_value >= 0:
for sub_district in sub_districts:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - {1}'.format(function_name,
self.print_subdistrict.__name__)
)
context = self.print_subdistrict(sub_district, self.sub_district_value - 1, self.district_level,
self.global_sub_district_value)
resultContext.extend(context)
return resultContext
    def get_sub_administrative_district(self, json_decode
                                        ) -> list:
        """Parse the next (child) administrative level out of an AMap reply.

        Args:
            json_decode: decoded JSON obtained from the network by
                get_administrative_district().

        Returns:
            A list of child-district names (sorted by adcode), or error
            text when the reply reports a failure.
        """
        # TODO: future versions should return a dict instead of a list.
        self.json_decode = json_decode
        # Collected output lines.
        resultContext = []
        # Logging setup.
        writeLog = WriteLog()
        class_name = self.__class__.__name__
        function_name = inspect.stack()[0][3]
        log_filename = writeLog.create_filename(class_name=class_name)
        try:
            if self.json_decode['status'] == '0':
                # status '0': failure reported by the official API.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise OfficialException
            elif self.json_decode['status'] == '2':
                # status '2': failure injected by our own request wrapper.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise CustomExpection
            elif self.json_decode['status'] == '1':
                # status '1': the request succeeded.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                if self.json_decode['infocode'] == "10000":  # infocode for a successful request
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - infocode:{1}'.format(function_name,
                                                                                            self.json_decode[
                                                                                                'infocode'])
                                          )
                    # Number of matches for the keyword.
                    keywords_count = self.json_decode['count']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=1,
                                          context='Function name:{0} - keywords count:{1}'.format(function_name,
                                                                                                  keywords_count)
                                          )
                    # Matched administrative districts.
                    districts = self.json_decode['districts']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - districts acquired successfully'.format(
                                              function_name)
                                          )
                    # Emit the name of every child district, sorted by adcode.
                    if districts:  # the reply actually contains districts
                        for district in districts:
                            # Children of the current district.
                            sub_districts = district['districts']
                            sub_districts.sort(key=lambda x: x['adcode'])
                            for subdistrict in sub_districts:
                                resultContext.append(subdistrict['name'])
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=6,
                                  context='Function name:{0} - print district successful run.'.format(function_name)
                                  )
            return resultContext
        except OfficialException as officialException:
            # Error details reported by the official API.
            errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)
            # Record them in the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errcode:{1}'.format(function_name,
                                                                                   errcode)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errorInfo:{1}'.format(function_name,
                                                                                     errorInfo)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - solution:{1}'.format(function_name,
                                                                                    solution)
                                  )
            resultContext.append(errorInfo)
            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext
        except CustomExpection as customException:
            info, detail_information, error_prompt = customException.get_error_info(self.json_decode)
            # Record them in the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - info:{1}'.format(function_name,
                                                                                info)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - detail_information:{1}'.format(function_name,
                                                                                              detail_information)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='error_prompt:{0}'.format(error_prompt)
                                  )
            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext
| 49.93038 | 141 | 0.414881 | # -*- coding:utf-8 -*-
# 导入的库
import inspect
import json
import time
from typing import Any
import requests
from SelfExpection.CustomExpection import CustomExpection
from SelfExpection.OfficialException import OfficialException
from logrecord.WriteLog import WriteLog
class AdministrativeDistrictEnquiry:
"""
Class:行政区域查询
行政区域查询是一类简单的HTTP接口,根据用户输入的搜索条件可以帮助用户快速的查找特定的行政区域信息。
"""
def __init__(self) -> None:
self.district = None
self.district_level = None
self.extensions = None
self.filter = None
self.global_sub_district_value = None
self.json_decode = None
self.keywords = None
self.offset = None
self.output = None
self.page = None
self.sub_district = None
self.sub_district_value = None
self.global_sub_district_value = None
# 写入日志
writeLog = WriteLog()
class_name = self.__class__.__name__
log_filename = writeLog.create_filename(class_name=class_name)
writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))
# 获取高德地图数据API的钥匙
APIkey = '<请自己输入自己申请的API Key>'
    def get_administrative_district(self, keywords: str,
                                    sub_district: int,
                                    **kwargs: dict[str, Any]
                                    ) -> dict:
        """Fetch administrative-district data from the AMap web service.

        Args:
            keywords: single search keyword; a district name, citycode or
                adcode (e.g. a province name such as "山东").
            sub_district: how many child levels to return (0 = none,
                1 = one level down, 2 = two, 3 = three).
        Keyword Args:
            page: which page of results to request (at most 20 top-level
                districts per reply; server default is page 1).
            offset: maximum number of top-level districts per reply.
            extensions: "base" (no boundary points) or "all" (boundary of
                the queried district only; no street-level boundaries).
            filter: adcode used to restrict the reply to one
                province/municipality; strongly recommended.
            output: reply format, "JSON" or "XML".

        Returns:
            The decoded JSON reply on success; on a network error, a dict
            shaped like an AMap reply with status '2' plus error details.
        """
        self.keywords = keywords
        self.sub_district = sub_district
        # Optional query refinements; only remembered when supplied.
        if 'extensions' in kwargs:
            self.extensions = kwargs['extensions']
        if 'filter' in kwargs:
            self.filter = kwargs['filter']
        if 'output' in kwargs:
            self.output = kwargs['output']
        if 'offset' in kwargs:
            self.offset = kwargs['offset']
        if 'page' in kwargs:
            self.page = kwargs['page']
        # Logging setup.
        writeLog = WriteLog()
        class_name = self.__class__.__name__
        function_name = inspect.stack()[0][3]
        log_filename = writeLog.create_filename(class_name=class_name)
        # Query-string parameters for the request.
        parameters = {'key': self.APIkey,
                      'keywords': self.keywords,
                      'subdistrict': self.sub_district,
                      }
        if self.extensions is not None:
            parameters.update(extensions=self.extensions)
        if self.filter is not None:
            parameters.update(filter=self.filter)
        if self.output is not None:
            parameters.update(output=self.output)
        if self.offset is not None:
            parameters.update(offset=self.offset)
        if self.page is not None:
            parameters.update(page=self.page)
        # Fetch the data.
        try:
            request_information = requests.get("https://restapi.amap.com/v3/config/district?parameters",
                                               params=parameters)
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=1,
                                  context='Function name:{0} - request_information:{1}'.format(function_name,
                                                                                               request_information)
                                  )
            request_information.close()  # close the connection
            request_information.raise_for_status()  # raise when the response status is not 200
            # Decode the JSON reply.
            json_decode = json.loads(request_information.text)
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=6,
                                  context='Function name:{0} - Administrative district data successful get.'.format(
                                      function_name)
                                  )
            return json_decode
        except requests.exceptions.ConnectionError as e:
            time.sleep(1)
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=5,
                                  context='Function name:{0} - {1} has occured.'.format(function_name,
                                                                                        e.__class__.__name__)
                                  )
            # Error details, shaped like an AMap reply (status '2').
            error_connection = 'ConnectionError -- please wait 3 seconds'
            error_connection_dict = {'status': '2',
                                     'info': 'requests.exceptions.ConnectionError',
                                     'detail_information': requests.exceptions.ConnectionError,
                                     'error_prompt': error_connection
                                     }
            return error_connection_dict
        except requests.exceptions.ChunkedEncodingError as e:
            time.sleep(1)
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=5,
                                  context='Function name:{0} - {1} has occured.'.format(function_name,
                                                                                        e.__class__.__name__
                                                                                        )
                                  )
            # Error details, shaped like an AMap reply (status '2').
            error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'
            error_chuck_encoding_dict = {'status': '2',
                                         'info': 'HTTPError',
                                         'detail_information': requests.exceptions.ChunkedEncodingError,
                                         'error_prompt': error_chuck_encoding
                                         }
            return error_chuck_encoding_dict
        except Exception as e:
            time.sleep(1)
            error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=5,
                                  context='Function name:{0} - {1} has occured.'.format(function_name,
                                                                                        e.__class__.__name__)
                                  )
            # NOTE(review): 'detail_information' still names
            # ChunkedEncodingError here even though any Exception lands in
            # this handler -- confirm whether callers rely on that value
            # before changing it.
            error_information_dict = {'status': '2',
                                      'info': 'HTTPError',
                                      'detail_information': requests.exceptions.ChunkedEncodingError,
                                      'error_prompt': error_information
                                      }
            return error_information_dict
    def parse_administrative_district(self, json_decode: dict,
                                      sub_district: int
                                      ) -> list:
        """Parse an administrative-district reply into printable lines.

        Args:
            json_decode: decoded JSON obtained from the network by
                get_administrative_district().
            sub_district: how many child levels to print below each match.

        Returns:
            A list of human-readable description lines, or error text when
            the reply reports a failure.
        """
        # TODO: future versions should return a dict instead of a list.
        self.json_decode = json_decode
        self.sub_district = sub_district
        # Collected output lines.
        resultContext = []
        # Logging setup.
        writeLog = WriteLog()
        class_name = self.__class__.__name__
        function_name = inspect.stack()[0][3]
        log_filename = writeLog.create_filename(class_name=class_name)
        try:
            if self.json_decode['status'] == '0':
                # status '0': failure reported by the official API.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise OfficialException
            elif self.json_decode['status'] == '2':
                # status '2': failure injected by our own request wrapper.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise CustomExpection
            elif self.json_decode['status'] == '1':
                # status '1': the request succeeded.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                if self.json_decode['infocode'] == "10000":  # infocode for a successful request
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - infocode:{1}'.format(function_name,
                                                                                            self.json_decode[
                                                                                                'infocode'])
                                          )
                    # AMap level keyword -> human-readable label.
                    district_level = {'country': '国',
                                      'province': '省',
                                      'city': '市',
                                      'district': '区/县级市/县',
                                      'street': '街道/镇/乡'
                                      }
                    # Number of matches for the keyword.
                    keywords_count = self.json_decode['count']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=1,
                                          context='Function name:{0} - keywords count:{1}'.format(function_name,
                                                                                                  keywords_count)
                                          )
                    resultContext.append("根据您提供的关键字已为您查找到{0}个结果".format(keywords_count))
                    # Matched administrative districts.
                    districts = self.json_decode['districts']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - districts acquired successfully'.format(
                                              function_name)
                                          )
                    # Print each district, recursing sub_district levels down.
                    sub_district_value = self.sub_district
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=1,
                                          context='Function name:{0} - sub_district_value:{1}'.format(function_name,
                                                                                                      sub_district_value)
                                          )
                    global_sub = self.sub_district
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename, log_level=1,
                                          context='Function name:{0} - global_sub:{1}'.format(function_name,
                                                                                              global_sub)
                                          )
                    if districts and sub_district_value >= 0:  # the reply actually contains districts
                        for district in districts:
                            # only for debugging
                            writeLog.write_to_log(file_name=log_filename,
                                                  log_level=1,
                                                  context='Function name:{0} - {1}'.format(function_name,
                                                                                           self.print_subdistrict.__name__
                                                                                           )
                                                  )
                            context = self.print_subdistrict(district, sub_district_value - 1, district_level,
                                                             global_sub)
                            resultContext.extend(context)
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=6,
                                  context='Function name:{0} - print district successful run.'.format(function_name)
                                  )
            return resultContext
        except OfficialException as officialException:
            # Error details reported by the official API.
            errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)
            # Record them in the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errcode:{1}'.format(function_name,
                                                                                   errcode)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errorInfo:{1}'.format(function_name,
                                                                                     errorInfo)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - solution:{1}'.format(function_name,
                                                                                    solution)
                                  )
            resultContext.append(errorInfo)
            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext
        except CustomExpection as customException:
            info, detail_information, error_prompt = customException.get_error_info(self.json_decode)
            # Record them in the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - info:{1}'.format(function_name,
                                                                                info)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - detail_information:{1}'.format(function_name,
                                                                                              detail_information)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='error_prompt:{0}'.format(error_prompt)
                                  )
            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext
def print_subdistrict(self, district: dict,
sub_district_value: int,
district_level: dict,
global_sub_district_value: int
) -> list:
"""
函数:打印查询的行政区域
Args:
district: 传入的关键字查询对应的行政区域
sub_district_value:代表当前下一级行政区域的位置
district_level:行政区域级别
global_sub_district_value:传入全局查询的行政区域
"""
# TODO:未来版本由于数据量巨大,将其放入子线程中进行,防止卡父GUI进程
# TODO:未来版本将返回数据从list升级为dict
self.district = district
self.district_level = district_level
self.global_sub_district_value = global_sub_district_value
self.sub_district_value = sub_district_value
# 输出结果
resultContext = []
# 写入日志
writeLog = WriteLog()
class_name = self.__class__.__name__
function_name = inspect.stack()[0][3]
log_filename = writeLog.create_filename(class_name=class_name)
name = self.district['name']
level = self.district_level[self.district['level']]
# 当前行政区域
subtraction = global_sub_district_value - sub_district_value - 1
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - global:{1}'.format(function_name,
str(self.global_sub_district_value))
)
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - sub_district_value:{1}'.format(function_name,
sub_district_value)
)
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - subtraction:{1}'.format(function_name,
str(subtraction))
)
writeLog.write_to_log(file_name=log_filename,
log_level=6,
context='Function name:{0} - district search successfully'.format(function_name)
)
# 同级行政区域
if subtraction == 0:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction, name,
level)
)
resultContext.append("您提供的关键字查询名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下一级行政区域
elif subtraction == 1:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level)
)
resultContext.append("您查询的关键字的下一级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下二级行政区域
elif subtraction == 2:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level)
)
resultContext.append("您查询的关键字的下二级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
# 下三级行政区域
elif subtraction == 3:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,
subtraction,
name,
level
)
)
resultContext.append("您查询的关键字的下三级行政区名为“{0}”的行政区级别为“{1}”".format(name, level))
else:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=3,
context='Function name:{0} - Query Failed'.format(function_name)
)
resultContext.append("查询错误")
# 条件成立,继续搜索下一级行政区
sub_districts = self.district['districts']
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - search sub districts'.format(function_name)
)
# 行政区域结果数目
len_sub_districts = len(self.district['districts'])
if len_sub_districts > 0:
resultContext.append("该行政区域包括{0}个结果".format(len_sub_districts))
if sub_districts and self.sub_district_value >= 0:
for sub_district in sub_districts:
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - {1}'.format(function_name,
self.print_subdistrict.__name__)
)
context = self.print_subdistrict(sub_district, self.sub_district_value - 1, self.district_level,
self.global_sub_district_value)
resultContext.extend(context)
return resultContext
    def get_sub_administrative_district(self, json_decode
                                        ) -> list:
        """Parse the next (child) administrative level out of an AMap reply.

        Args:
            json_decode: decoded JSON obtained from the network by
                get_administrative_district().

        Returns:
            A list of child-district names (sorted by adcode), or error
            text when the reply reports a failure.
        """
        # TODO: future versions should return a dict instead of a list.
        self.json_decode = json_decode
        # Collected output lines.
        resultContext = []
        # Logging setup.
        writeLog = WriteLog()
        class_name = self.__class__.__name__
        function_name = inspect.stack()[0][3]
        log_filename = writeLog.create_filename(class_name=class_name)
        try:
            if self.json_decode['status'] == '0':
                # status '0': failure reported by the official API.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise OfficialException
            elif self.json_decode['status'] == '2':
                # status '2': failure injected by our own request wrapper.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=3,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                raise CustomExpection
            elif self.json_decode['status'] == '1':
                # status '1': the request succeeded.
                # only for debugging
                writeLog.write_to_log(file_name=log_filename,
                                      log_level=6,
                                      context='Function name:{0} - status:{1}'.format(function_name,
                                                                                      self.json_decode['status'])
                                      )
                if self.json_decode['infocode'] == "10000":  # infocode for a successful request
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - infocode:{1}'.format(function_name,
                                                                                            self.json_decode[
                                                                                                'infocode'])
                                          )
                    # Number of matches for the keyword.
                    keywords_count = self.json_decode['count']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=1,
                                          context='Function name:{0} - keywords count:{1}'.format(function_name,
                                                                                                  keywords_count)
                                          )
                    # Matched administrative districts.
                    districts = self.json_decode['districts']
                    # only for debugging
                    writeLog.write_to_log(file_name=log_filename,
                                          log_level=6,
                                          context='Function name:{0} - districts acquired successfully'.format(
                                              function_name)
                                          )
                    # Emit the name of every child district, sorted by adcode.
                    if districts:  # the reply actually contains districts
                        for district in districts:
                            # Children of the current district.
                            sub_districts = district['districts']
                            sub_districts.sort(key=lambda x: x['adcode'])
                            for subdistrict in sub_districts:
                                resultContext.append(subdistrict['name'])
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=6,
                                  context='Function name:{0} - print district successful run.'.format(function_name)
                                  )
            return resultContext
        except OfficialException as officialException:
            # Error details reported by the official API.
            errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)
            # Record them in the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errcode:{1}'.format(function_name,
                                                                                   errcode)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - errorInfo:{1}'.format(function_name,
                                                                                     errorInfo)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - solution:{1}'.format(function_name,
                                                                                    solution)
                                  )
            resultContext.append(errorInfo)
            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext
        except CustomExpection as customException:
            info, detail_information, error_prompt = customException.get_error_info(self.json_decode)
            # Record them in the log file.
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - info:{1}'.format(function_name,
                                                                                info)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='Function name:{0} - detail_information:{1}'.format(function_name,
                                                                                              detail_information)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='error_prompt:{0}'.format(error_prompt)
                                  )
            context = "行政区域信息查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext
| 739 | 0 | 29 |
a6172a279a003d0b8205d4cd3bbc4af0f3ab9a0e | 1,089 | py | Python | sysdescrparser/cisco_ios.py | datadope-io/sysdescrparser | c57d595707fec902f7a5bbf2a52089f6448610c4 | [
"MIT"
] | 12 | 2015-05-12T09:07:04.000Z | 2021-11-19T13:22:54.000Z | sysdescrparser/cisco_ios.py | datadope-io/sysdescrparser | c57d595707fec902f7a5bbf2a52089f6448610c4 | [
"MIT"
] | 3 | 2020-05-10T09:59:08.000Z | 2021-02-22T17:48:26.000Z | sysdescrparser/cisco_ios.py | datadope-io/sysdescrparser | c57d595707fec902f7a5bbf2a52089f6448610c4 | [
"MIT"
] | 6 | 2018-03-04T21:51:15.000Z | 2022-02-21T16:26:54.000Z | # -*- coding: utf-8 -*-
"""sysdescrparser.cisco_ios."""
import re
from cisco import Cisco
# pylint: disable=no-member
class CiscoIOS(Cisco):
    """Class CiscoIOS.

    SNMP sysDescr for CiscoIOS.  Extracts the software image (model) and
    version strings from a sysDescr value.
    """

    # The two sysDescr banners seen on IOS: the classic "Cisco Internetwork
    # Operating System Software" form and the newer "Cisco IOS Software,"
    # form.  Both capture (image, version).  Compiled once, tried in order.
    _PATTERNS = (
        re.compile(r'Cisco Internetwork Operating System Software ..IOS'
                   r' .* Software \((.*)\), Version (.*), .*RELEASE'),
        re.compile(r'Cisco IOS Software,'
                   r'.* Software \((.*)\), Version (.*), .*RELEASE'),
    )

    def __init__(self, raw):
        """Constructor."""
        super(CiscoIOS, self).__init__(raw)
        self.os = 'IOS'
        self.model = self.UNKNOWN
        self.version = self.UNKNOWN

    def parse(self):
        """Parse sysDescr.

        Returns:
            self when one of the known banners matches (model and version
            populated), False otherwise.
        """
        for pat in self._PATTERNS:
            res = pat.search(self.raw)
            if res:
                self.model = res.group(1)
                self.version = res.group(2)
                return self
        return False
| 23.170213 | 70 | 0.520661 | # -*- coding: utf-8 -*-
"""sysdescrparser.cisco_ios."""
import re
from cisco import Cisco
# pylint: disable=no-member
class CiscoIOS(Cisco):
    """Class CiscoIOS.

    SNMP sysDescr for CiscoIOS.  Extracts the software image (model) and
    version strings from a sysDescr value.
    """

    # The two sysDescr banners seen on IOS: the classic "Cisco Internetwork
    # Operating System Software" form and the newer "Cisco IOS Software,"
    # form.  Both capture (image, version).  Compiled once, tried in order.
    _PATTERNS = (
        re.compile(r'Cisco Internetwork Operating System Software ..IOS'
                   r' .* Software \((.*)\), Version (.*), .*RELEASE'),
        re.compile(r'Cisco IOS Software,'
                   r'.* Software \((.*)\), Version (.*), .*RELEASE'),
    )

    def __init__(self, raw):
        """Constructor."""
        super(CiscoIOS, self).__init__(raw)
        self.os = 'IOS'
        self.model = self.UNKNOWN
        self.version = self.UNKNOWN

    def parse(self):
        """Parse sysDescr.

        Returns:
            self when one of the known banners matches (model and version
            populated), False otherwise.
        """
        for pat in self._PATTERNS:
            res = pat.search(self.raw)
            if res:
                self.model = res.group(1)
                self.version = res.group(2)
                return self
        return False
| 0 | 0 | 0 |
542263394693276337544bc6c2cf5359c6011319 | 1,736 | py | Python | auto_generated_scripts/discrete_prob_dist_plot.py | patel-zeel/pyprobml-fork | 4b1757c41223c36a8e7bcef729a220a23b7c5217 | [
"MIT"
] | null | null | null | auto_generated_scripts/discrete_prob_dist_plot.py | patel-zeel/pyprobml-fork | 4b1757c41223c36a8e7bcef729a220a23b7c5217 | [
"MIT"
] | 5 | 2022-03-23T12:03:49.000Z | 2022-03-24T12:18:17.000Z | auto_generated_scripts/discrete_prob_dist_plot.py | patel-zeel/pyprobml-fork | 4b1757c41223c36a8e7bcef729a220a23b7c5217 | [
"MIT"
] | 1 | 2022-03-23T06:02:01.000Z | 2022-03-23T06:02:01.000Z | #!/usr/bin/env python
# coding: utf-8
# # Discrete Probability Distribution Plot
# In[1]:
import os
try:
import jax
except:
get_ipython().run_line_magic('pip', 'install jax jaxlib')
import jax
import jax.numpy as jnp
try:
import matplotlib.pyplot as plt
except:
get_ipython().run_line_magic('pip', 'install matplotlib')
import matplotlib.pyplot as plt
try:
import seaborn as sns
except:
get_ipython().run_line_magic('pip', 'install seaborn')
import seaborn as sns
# In[2]:
dev_mode = "DEV_MODE" in os.environ
if dev_mode:
import sys
sys.path.append("scripts")
import pyprobml_utils as pml
from latexify import latexify
latexify(width_scale_factor=2, fig_height=1.5)
# In[3]:
# Bar graphs showing a uniform discrete distribution and another with full prob on one value.
x = jnp.arange(1,5)
uniform_probs = jnp.repeat(1.0 / len(x), len(x))
make_graph(uniform_probs, "uniform_histogram_latexified.pdf")
make_graph([1, 0, 0, 0], "delta_histogram_latexified.pdf");
# ## Demo
#
# You can see different examples of discrete distributions by changing the seed in the following demo.
# In[4]:
from ipywidgets import interact
@interact(seed=(0, 10))
| 19.505618 | 102 | 0.678571 | #!/usr/bin/env python
# coding: utf-8
# # Discrete Probability Distribution Plot
# In[1]:
import os
try:
import jax
except:
get_ipython().run_line_magic('pip', 'install jax jaxlib')
import jax
import jax.numpy as jnp
try:
import matplotlib.pyplot as plt
except:
get_ipython().run_line_magic('pip', 'install matplotlib')
import matplotlib.pyplot as plt
try:
import seaborn as sns
except:
get_ipython().run_line_magic('pip', 'install seaborn')
import seaborn as sns
# In[2]:
dev_mode = "DEV_MODE" in os.environ
if dev_mode:
import sys
sys.path.append("scripts")
import pyprobml_utils as pml
from latexify import latexify
latexify(width_scale_factor=2, fig_height=1.5)
# In[3]:
# Bar graphs showing a uniform discrete distribution and another with full prob on one value.
x = jnp.arange(1,5)
uniform_probs = jnp.repeat(1.0 / len(x), len(x))
def make_graph(probs, save_name):
    """Bar-plot the discrete distribution `probs` over the module-level
    support `x`; save via pyprobml when `save_name` is non-empty and the
    notebook runs in dev mode."""
    plt.figure()
    plt.bar(x, probs, align="center")
    lo, hi = min(x), max(x)
    plt.xlim([lo - 0.5, hi + 0.5])
    plt.xticks(x)
    plt.yticks(jnp.linspace(0, 1, 5))
    plt.xlabel("$x$")
    plt.ylabel("$Pr(X=x)$")
    sns.despine()
    should_save = dev_mode and len(save_name) > 0
    if should_save:
        pml.savefig(save_name)
make_graph(uniform_probs, "uniform_histogram_latexified.pdf")
make_graph([1, 0, 0, 0], "delta_histogram_latexified.pdf");
# ## Demo
#
# You can see different examples of discrete distributions by changing the seed in the following demo.
# In[4]:
from ipywidgets import interact
@interact(seed=(0, 10))
def generate_random(seed):
    """Sample a random 4-outcome distribution (normalized uniforms) for the
    given PRNG seed and plot it; driven interactively via ipywidgets."""
    key = jax.random.PRNGKey(seed)
    probs = jax.random.uniform(key, shape=(4,))
    # Normalize so the bar heights sum to one.
    probs = probs / jnp.sum(probs)
    make_graph(probs, save_name="")
| 464 | 0 | 45 |
ca647cb6e242d99f50c820962c61ad7177ab3ebb | 3,241 | py | Python | bin/pwd_.py | kis87988/pybashish | c75587dc16a412e0cfaf9586da20252225ff5a08 | [
"CNRI-Python"
] | null | null | null | bin/pwd_.py | kis87988/pybashish | c75587dc16a412e0cfaf9586da20252225ff5a08 | [
"CNRI-Python"
] | null | null | null | bin/pwd_.py | kis87988/pybashish | c75587dc16a412e0cfaf9586da20252225ff5a08 | [
"CNRI-Python"
] | null | null | null | #!/usr/bin/env python3
"""
usage: pwd_.py [-h] [-P] [--brief] [--home]
show the os.environ["PWD"], by default just its "os.path.abspath"
optional arguments:
-h, --help show this help message and exit
-P, --physical show the "realpath"s, not "abspath"s, of sym links
--brief show the briefest abspath/ homepath/ realpath
--home show the ~/... relpath in place of abspath or realpath
quirks:
defaults to "--home", in the spirit of Bash "dirs +0" and Zsh "dirs -p", unlike their "pwd"s
offers "--brief" and "--home", unlike Bash anywhere
offers "--physical" like Linux, not just "-P" like Mac
doesn't offer the explicit "--logical" of Linux, nor the "-L" of Mac and Linux
examples:
pwd
pwd -P
pwd_.py --brief
pwd_.py --home
"""
# FIXME: add "--verbose" a la "hostname"
# FIXME: somehow remember we don't want to abbreviate down to colliding "-" the unconventional "--"
from __future__ import print_function
import os
import sys
import argdoc
#
# Git-track some Python idioms here
#
# deffed in many files # missing from docs.python.org
def os_path_homepath(path):
    """Abbreviate a path inside $HOME to its "~/..." form, else return it unchanged"""

    home = os.path.realpath(os.environ["HOME"])
    sep = os.path.sep

    if path == home:
        return "~"
    if path.startswith(home + sep):
        return "~" + sep + os.path.relpath(path, start=home)
    return path
# deffed in many files # missing from docs.python.org
def min_path_formatter_not_relpath(exemplar):
    """Pick whichever of abspath/realpath/homepath abbreviates this path most sharply"""

    candidates = (
        os.path.abspath,
        os.path.realpath,
        os_path_homepath,
    )

    # "min" is stable: on a tie the earliest candidate wins, preserving the
    # original preference order (abspath first).
    return min(candidates, key=lambda formatter: len(formatter(exemplar)))
# deffed in many files # missing from docs.python.org
if __name__ == "__main__":
main(sys.argv)
# copied from: git clone https://github.com/pelavarre/pybashish.git
| 26.785124 | 99 | 0.653502 | #!/usr/bin/env python3
"""
usage: pwd_.py [-h] [-P] [--brief] [--home]
show the os.environ["PWD"], by default just its "os.path.abspath"
optional arguments:
-h, --help show this help message and exit
-P, --physical show the "realpath"s, not "abspath"s, of sym links
--brief show the briefest abspath/ homepath/ realpath
--home show the ~/... relpath in place of abspath or realpath
quirks:
defaults to "--home", in the spirit of Bash "dirs +0" and Zsh "dirs -p", unlike their "pwd"s
offers "--brief" and "--home", unlike Bash anywhere
offers "--physical" like Linux, not just "-P" like Mac
doesn't offer the explicit "--logical" of Linux, nor the "-L" of Mac and Linux
examples:
pwd
pwd -P
pwd_.py --brief
pwd_.py --home
"""
# FIXME: add "--verbose" a la "hostname"
# FIXME: somehow remember we don't want to abbreviate down to colliding "-" the unconventional "--"
from __future__ import print_function
import os
import sys
import argdoc
def main(argv):
    """Print the working directory in the form selected by the CLI flags.

    Reads os.environ["PWD"]; "--home" (the default when no args are given)
    prints the ~/... form, "--brief" the shortest abbreviation, and
    "-P/--physical" resolves sym links first.  Exits 1 if the current
    working directory no longer exists.
    """
    # Default to "--home" when invoked with no arguments
    pwd_argv_tail = argv[1:] if argv[1:] else ["--home"] # FIXME: more robust default
    args = argdoc.parse_args(pwd_argv_tail)
    pwd = os.environ["PWD"]
    abspath = os.path.abspath(pwd)
    realpath = os.path.realpath(pwd)
    try:
        gotcwd = os.getcwd()
    except FileNotFoundError as exc:
        # Still print the stale $PWD before bailing out
        print(pwd)
        stderr_print("pwd.py: error: {}: {}".format(type(exc).__name__, exc))
        sys.exit(1) # FIXME: more robust "pwd" vs the current working dir deleted
    assert gotcwd == realpath
    path = realpath if args.physical else abspath # FIXME: count -L -P contradictions
    formatter = min_path_formatter_not_relpath(path)
    briefpath = formatter(path)
    homepath = os_path_homepath(path)
    # "--home" takes precedence over "--brief"; otherwise print the raw path
    printable = path
    if args.home:
        printable = homepath
    elif args.brief: # FIXME: count -H -B contradictions
        printable = briefpath
    print(printable)
#
# Git-track some Python idioms here
#
# deffed in many files # missing from docs.python.org
def os_path_homepath(path):
    """Abbreviate *path* as "~" or "~/..." when it lies inside the Home dir.

    The Home directory is resolved via os.path.realpath(os.environ["HOME"]).
    Paths outside the Home directory are returned unchanged.
    """
    home_dir = os.path.realpath(os.environ["HOME"])
    if path == home_dir:
        return "~"
    if path.startswith(home_dir + os.path.sep):
        return "~" + os.path.sep + os.path.relpath(path, start=home_dir)
    return path
# deffed in many files # missing from docs.python.org
def min_path_formatter_not_relpath(exemplar):
    """Pick whichever path formatter abbreviates *exemplar* most sharply.

    Candidates are os.path.abspath, os.path.realpath and os_path_homepath
    (os.path.relpath is deliberately excluded); on equal lengths the
    earliest candidate wins.
    """
    candidates = (
        os.path.abspath,
        os.path.realpath,
        # os.path.relpath,
        os_path_homepath,
    )
    # min() keeps the first candidate on ties, matching strict "<" selection
    return min(candidates, key=lambda formatter: len(formatter(exemplar)))
# deffed in many files # missing from docs.python.org
def stderr_print(*args, **kwargs):
    """print(...) to stderr, flushing stdout first so the two streams interleave in order."""
    sys.stdout.flush()
    print(*args, **kwargs, file=sys.stderr)
    sys.stderr.flush() # esp. when kwargs["end"] != "\n"
if __name__ == "__main__":
main(sys.argv)
# copied from: git clone https://github.com/pelavarre/pybashish.git
| 1,042 | 0 | 45 |
6c0bf1ebef607bf5bf28dbf570a0813f5cc4cdcb | 279 | py | Python | snippets/popular_libraries/click-lib/intermediate-features/app_dirs.py | melvio/python3-examples | 5340fe17e0a5001a81cf195e63f825b77dc16fca | [
"Apache-2.0"
] | null | null | null | snippets/popular_libraries/click-lib/intermediate-features/app_dirs.py | melvio/python3-examples | 5340fe17e0a5001a81cf195e63f825b77dc16fca | [
"Apache-2.0"
] | null | null | null | snippets/popular_libraries/click-lib/intermediate-features/app_dirs.py | melvio/python3-examples | 5340fe17e0a5001a81cf195e63f825b77dc16fca | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import click
app_name = "myappname"
config_file_name = "myconf.ini"
app_dir_name = click.get_app_dir(app_name)
click.echo(app_dir_name)
# ./app_dirs.py
# /home/melv/.config/myappname
# XDG_CONFIG_HOME=/home/melv/ ./app_dirs.py
# /home/melv/myappname
| 16.411765 | 43 | 0.749104 | #!/usr/bin/env python3
import click
app_name = "myappname"
config_file_name = "myconf.ini"
app_dir_name = click.get_app_dir(app_name)
click.echo(app_dir_name)
# ./app_dirs.py
# /home/melv/.config/myappname
# XDG_CONFIG_HOME=/home/melv/ ./app_dirs.py
# /home/melv/myappname
| 0 | 0 | 0 |
9b7929b6eca5ceecd90a6ca47f628f3cb1a32ca7 | 249 | py | Python | agpy/test_doc.py | keflavich/agpy | fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1 | [
"MIT"
] | 16 | 2015-05-08T11:14:26.000Z | 2021-11-19T19:05:16.000Z | agpy/test_doc.py | keflavich/agpy | fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1 | [
"MIT"
] | 3 | 2016-05-12T16:27:14.000Z | 2020-12-27T01:14:24.000Z | agpy/test_doc.py | keflavich/agpy | fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1 | [
"MIT"
] | 19 | 2015-03-30T22:34:14.000Z | 2020-11-25T23:29:53.000Z | from pylab import *
import pylab
import agpy.test_doc
print "beta.__module__:",beta.__module__
for k,v in pylab.__dict__.iteritems():
if hasattr(v,'__module__'):
if v.__module__ is None:
locals()[k].__module__ = 'pylab'
| 20.75 | 44 | 0.670683 | from pylab import *
import pylab
import agpy.test_doc
print "beta.__module__:",beta.__module__
for k,v in pylab.__dict__.iteritems():
if hasattr(v,'__module__'):
if v.__module__ is None:
locals()[k].__module__ = 'pylab'
| 0 | 0 | 0 |
84f0b9b86abb65e10f3d5faa2d8140d0f4d0412a | 2,321 | py | Python | fix_voc_label.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | 1 | 2020-11-16T17:11:43.000Z | 2020-11-16T17:11:43.000Z | fix_voc_label.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | null | null | null | fix_voc_label.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
import os
from os import listdir, getcwd
from os.path import join
import argparse
# classes = [
# "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
# "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
# "pottedplant", "sheep", "sofa", "train", "tvmonitor"
# ]
classes = []
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--anno_dir",
help="Directory for VOC annotation xml files")
parser.add_argument("--src_label",
help="Label need to be fixed")
parser.add_argument("--dst_label",
help="New label name")
args = parser.parse_args()
anno_files = listdir(args.anno_dir)
for anno in anno_files:
res = convert_annotation(anno, args)
| 26.678161 | 76 | 0.557087 | import xml.etree.ElementTree as ET
import os
from os import listdir, getcwd
from os.path import join
import argparse
# classes = [
# "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
# "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
# "pottedplant", "sheep", "sofa", "train", "tvmonitor"
# ]
classes = []
def convert(size, box):
    """Convert a VOC box (xmin, xmax, ymin, ymax) to normalized YOLO form.

    *size* is the image (width, height); the result is
    (x_center, y_center, width, height), each scaled to [0, 1].
    The "- 1" shifts VOC's 1-based pixel coordinates before normalizing.
    """
    inv_w = 1. / (size[0])
    inv_h = 1. / (size[1])
    x_center = ((box[0] + box[1]) / 2.0 - 1) * inv_w
    y_center = ((box[2] + box[3]) / 2.0 - 1) * inv_h
    box_w = (box[1] - box[0]) * inv_w
    box_h = (box[3] - box[2]) * inv_h
    return (x_center, y_center, box_w, box_h)
def parse_cls(anno_files, args):
    """Collect every distinct <object><name> found in the VOC xml files.

    Scans *anno_files* under args.anno_dir, skipping non-.xml names and
    missing files, and appends each previously unseen class name to the
    module-level ``classes`` list (mutated in place, nothing returned).
    """
    for anno in anno_files:
        if not '.xml' in anno:
            continue
        anno_file = join(args.anno_dir, anno)
        if not os.path.isfile(anno_file):
            continue
        # NOTE(review): the file handle is never closed explicitly
        in_file = open(anno_file)
        tree = ET.parse(in_file)
        root = tree.getroot()
        for obj in root.iter('object'):
            cls = obj.find('name').text
            if cls not in classes:
                classes.append(cls)
def convert_annotation(image, args):
    """Rename matching object labels in one VOC annotation file, in place.

    Looks up ``<image stem>.xml`` under args.anno_dir, replaces every
    <object><name> equal to args.src_label with args.dst_label, writes the
    tree back to the same file, and returns True if anything was renamed
    (False if the xml file does not exist).
    """
    if args.anno_dir:
        anno_file = join(args.anno_dir, image.split('.')[0]) + '.xml'
        if not os.path.isfile(anno_file):
            return False
    # NOTE(review): if args.anno_dir is falsy, anno_file is never assigned
    # and the next line raises NameError -- confirm anno_dir is mandatory
    in_file = open(anno_file)
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    have_obj = False
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name')
        if cls.text == args.src_label:
            print('Fixed '+cls.text)
            cls.text = args.dst_label
            have_obj = True
    # Rewrites the annotation file even when nothing changed
    tree.write(anno_file)
    return have_obj
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--anno_dir",
help="Directory for VOC annotation xml files")
parser.add_argument("--src_label",
help="Label need to be fixed")
parser.add_argument("--dst_label",
help="New label name")
args = parser.parse_args()
anno_files = listdir(args.anno_dir)
for anno in anno_files:
res = convert_annotation(anno, args)
| 1,386 | 0 | 69 |
4871c7891c9c79ec3d3449ce676340ebbba4c089 | 2,716 | py | Python | WearChristmasHat/wear_hat_cv.py | TitusWongCN/WeChatSubscriptionArticles | 7076ec764269a6d5df2075e860940ac39128a052 | [
"MIT"
] | 9 | 2020-02-26T14:06:55.000Z | 2022-01-13T13:59:58.000Z | WearChristmasHat/wear_hat_cv.py | Tomcqueen/WeChatSubscriptionArticles | b2b45f3d0a204209b30944cf28945b794515c434 | [
"MIT"
] | null | null | null | WearChristmasHat/wear_hat_cv.py | Tomcqueen/WeChatSubscriptionArticles | b2b45f3d0a204209b30944cf28945b794515c434 | [
"MIT"
] | 6 | 2020-02-26T14:09:19.000Z | 2022-01-13T14:00:00.000Z | # -*- coding=utf-8 -*-
import cv2
from PIL import Image
import math
image_path = './face1.png'
human_img = Image.open(image_path)
human_img = human_img.convert('RGBA')
# 圣诞帽相关参数
hat_img = Image.open("./hat.png")
hat_brim_length = 175.0
hat_height_buffer = 25.0
hat_img = hat_img.convert('RGBA')
# load image:
image = cv2.imread(image_path, 0)
# find faces:
cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
faces = cascade.detectMultiScale(image, 1.3, 5)
# create landmark detector and load lbf model:
facemark = cv2.face.createFacemarkLBF()
facemark.loadModel('lbfmodel.yaml')
# run landmark detector:
# landmarks---[0, 16]----Jaw line
# landmarks---[17, 21]---Left eyebrow
# landmarks---[22, 26]---Right eyebrow
# landmarks---[27, 30]---Nose bridge
# landmarks---[30, 35]---Lower nose
# landmarks---[36, 41]---Left eye
# landmarks---[42, 47]---Right Eye
# landmarks---[48, 59]---Outer lip
# landmarks---[60, 67]---Inner lip
ok, landmarks = facemark.fit(image, faces)
print(ok)
chin = landmarks[0][0][:17]
nose_bridge = landmarks[0][0][27:31]
face_width = get_distance(chin[0], chin[-1])
hair_brim = get_distance(nose_bridge[-1], chin[int(len(chin)/2)])
resize_ratio = face_width / hat_brim_length
hat_width = int(hat_img.width * resize_ratio)
hat_height = int(hat_img.height * resize_ratio)
hat_buffer = int(hat_height_buffer * resize_ratio)
hat_img = hat_img.resize((hat_width, hat_height)) # convert size of hat
hat_bottom = int(nose_bridge[0][1]) - hair_brim
hat_top = hat_bottom - hat_height
hat_left = int(chin[0][0])
hat_right = hat_left + hat_width
# hat_img = hat_img.rotate(45)
hat_region = hat_img
human_region = (hat_left, hat_top + hat_buffer, hat_right, hat_bottom + hat_buffer)
human_img.paste(hat_region, human_region, mask=hat_img)
# human_img.show()
# print('hat done')
# 口罩相关参数
mask_img = Image.open("./mask.png")
mask_height = 330.0
mask_img = mask_img.convert('RGBA')
mask_actual_height = get_distance(nose_bridge[0], chin[int(len(chin)/2)])
mask_resize_ratio = mask_actual_height / mask_height
mask_width = int(mask_img.width * mask_resize_ratio)
mask_height = int(mask_img.height * mask_resize_ratio)
mask_top = int(nose_bridge[0][1])
mask_bottom = mask_top + mask_height
mask_left = int((nose_bridge[0][0] + chin[int(len(chin)/2)][0] - mask_width)/2)
mask_right = mask_left + mask_width
mask_img = mask_img.resize((mask_width, mask_height)) # convert size of mask
mask_region = mask_img
human_region = (mask_left, mask_top, mask_right, mask_bottom)
human_img.paste(mask_region, human_region, mask=mask_img)
human_img.show()
print('Done')
| 30.863636 | 83 | 0.732327 | # -*- coding=utf-8 -*-
import cv2
from PIL import Image
import math
def get_distance(point1, point2):
    """Return the integer (truncated) Euclidean distance between two 2-D points.

    Each point is an (x, y) pair.  math.hypot is used instead of a manual
    sqrt-of-squares for numerical robustness; the result is truncated to
    int exactly as before.
    """
    return int(math.hypot(point1[0] - point2[0], point1[1] - point2[1]))
image_path = './face1.png'
human_img = Image.open(image_path)
human_img = human_img.convert('RGBA')
# 圣诞帽相关参数
hat_img = Image.open("./hat.png")
hat_brim_length = 175.0
hat_height_buffer = 25.0
hat_img = hat_img.convert('RGBA')
# load image:
image = cv2.imread(image_path, 0)
# find faces:
cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
faces = cascade.detectMultiScale(image, 1.3, 5)
# create landmark detector and load lbf model:
facemark = cv2.face.createFacemarkLBF()
facemark.loadModel('lbfmodel.yaml')
# run landmark detector:
# landmarks---[0, 16]----Jaw line
# landmarks---[17, 21]---Left eyebrow
# landmarks---[22, 26]---Right eyebrow
# landmarks---[27, 30]---Nose bridge
# landmarks---[30, 35]---Lower nose
# landmarks---[36, 41]---Left eye
# landmarks---[42, 47]---Right Eye
# landmarks---[48, 59]---Outer lip
# landmarks---[60, 67]---Inner lip
ok, landmarks = facemark.fit(image, faces)
print(ok)
chin = landmarks[0][0][:17]
nose_bridge = landmarks[0][0][27:31]
face_width = get_distance(chin[0], chin[-1])
hair_brim = get_distance(nose_bridge[-1], chin[int(len(chin)/2)])
resize_ratio = face_width / hat_brim_length
hat_width = int(hat_img.width * resize_ratio)
hat_height = int(hat_img.height * resize_ratio)
hat_buffer = int(hat_height_buffer * resize_ratio)
hat_img = hat_img.resize((hat_width, hat_height)) # convert size of hat
hat_bottom = int(nose_bridge[0][1]) - hair_brim
hat_top = hat_bottom - hat_height
hat_left = int(chin[0][0])
hat_right = hat_left + hat_width
# hat_img = hat_img.rotate(45)
hat_region = hat_img
human_region = (hat_left, hat_top + hat_buffer, hat_right, hat_bottom + hat_buffer)
human_img.paste(hat_region, human_region, mask=hat_img)
# human_img.show()
# print('hat done')
# 口罩相关参数
mask_img = Image.open("./mask.png")
mask_height = 330.0
mask_img = mask_img.convert('RGBA')
mask_actual_height = get_distance(nose_bridge[0], chin[int(len(chin)/2)])
mask_resize_ratio = mask_actual_height / mask_height
mask_width = int(mask_img.width * mask_resize_ratio)
mask_height = int(mask_img.height * mask_resize_ratio)
mask_top = int(nose_bridge[0][1])
mask_bottom = mask_top + mask_height
mask_left = int((nose_bridge[0][0] + chin[int(len(chin)/2)][0] - mask_width)/2)
mask_right = mask_left + mask_width
mask_img = mask_img.resize((mask_width, mask_height)) # convert size of mask
mask_region = mask_img
human_region = (mask_left, mask_top, mask_right, mask_bottom)
human_img.paste(mask_region, human_region, mask=mask_img)
human_img.show()
print('Done')
| 95 | 0 | 23 |
3cb54cc59f97c8f3f99e4a6438331cafd5f35199 | 5,464 | py | Python | python/itypes/struct/data_struct.py | eddy-ilg/itypes | eaf1c4a86576c77caa34148c0fdc6b2e012119ff | [
"MIT"
] | null | null | null | python/itypes/struct/data_struct.py | eddy-ilg/itypes | eaf1c4a86576c77caa34148c0fdc6b2e012119ff | [
"MIT"
] | null | null | null | python/itypes/struct/data_struct.py | eddy-ilg/itypes | eaf1c4a86576c77caa34148c0fdc6b2e012119ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from ..type import is_list, is_dict
from .struct import Struct
from .helper import _common_keys
| 33.115152 | 119 | 0.517387 | #!/usr/bin/env python3
from ..type import is_list, is_dict
from .struct import Struct
from .helper import _common_keys
class DataStruct(Struct):
    """Nested structure of data leaves (tensors/arrays) plus layout metadata.

    Built on Struct's generic traversal (translate/apply); concrete
    subclasses provide the backend hooks at the bottom of the class
    (_is_data, _data_nan_to_num, _concat_data, _data_expand_dims,
    _data_permute_dims).  ``dims`` names the current tensor layout,
    e.g. "hwc", "chw", "bhwc" or "bchw".
    """

    def __init__(self, *args, dims=None, **kwargs):
        """Forward to Struct; optionally record the tensor layout *dims*."""
        super().__init__(*args, **kwargs)
        if dims is not None:
            self.dims = dims

    def clone_type(self):
        """Return a new, empty DataStruct carrying over this struct's dims."""
        return DataStruct(dims=self.dims)

    def translate_data(self, func, *args, **kwargs):
        """Return a translated struct where *func* was applied to data leaves only."""
        def _translate(x, *args, **kwargs):
            if self._is_data(x):
                x = func(x, *args, **kwargs)
            return x
        return self.translate(_translate, *args, **kwargs)

    def apply_data(self, func, *args, **kwargs):
        """Call *func* (for its side effects) on every data leaf."""
        def _apply(x):
            if self._is_data(x):
                func(x, *args, **kwargs)
        self.apply(_apply, *args, **kwargs)

    def nan_to_num(self):
        """Return a copy with NaNs in each data leaf converted via the backend hook."""
        def _convert(x):
            return self._data_nan_to_num(x)
        return self.translate_data(_convert)

    def to_hwc(self):
        """Convert every leaf to unbatched height/width/channel layout ("hwc")."""
        old_dims = self.dims
        def _to_hwc(x, dims):
            if len(x.shape) == 2:
                x = self._data_expand_dims(x)
            if len(x.shape) == 4:
                # Only a singleton batch can be squeezed away
                if x.shape[0] != 1:
                    raise Exception("to_hwc() error: batch dimension is greater than 1")
                x = x[0, ...]
            if len(x.shape) == 3 and "hwc" not in old_dims:
                x = self._data_permute_dims(x, (1, 2, 0))
            return x
        return self.translate_data(_to_hwc, dims="hwc")

    def to_chw(self):
        """Convert every leaf to unbatched channel/height/width layout ("chw")."""
        old_dims = self.dims
        def _to_chw(x, dims):
            if len(x.shape) == 2:
                x = self._data_expand_dims(x)
            if len(x.shape) == 4:
                # Only a singleton batch can be squeezed away
                if x.shape[0] != 1:
                    raise Exception("to_chw() error: batch dimension is greater than 1")
                x = x[0, ...]
            if len(x.shape) == 3 and "chw" not in old_dims:
                x = self._data_permute_dims(x, (2, 0, 1))
            return x
        return self.translate_data(_to_chw, dims="chw")

    def to_bhwc(self):
        """Convert every leaf to batched height/width/channel layout ("bhwc")."""
        old_dims = self.dims
        def _to_bhwc(x, dims):
            if len(x.shape) == 2:
                x = self._data_expand_dims(x)
            if len(x.shape) == 3:
                x = self._data_expand_dims(x)
            if len(x.shape) == 4 and "hwc" not in old_dims:
                x = self._data_permute_dims(x, (0, 2, 3, 1))
            return x
        return self.translate_data(_to_bhwc, dims="bhwc")

    def to_bchw(self):
        """Convert every leaf to batched channel/height/width layout ("bchw")."""
        old_dims = self.dims
        def _to_bchw(x, dims):
            if len(x.shape) == 2:
                x = self._data_expand_dims(x)
            if len(x.shape) == 3:
                x = self._data_expand_dims(x)
            if len(x.shape) == 4 and "chw" not in old_dims:
                x = self._data_permute_dims(x, (0, 3, 1, 2))
            return x
        return self.translate_data(_to_bchw, dims="bchw")

    def concat_batch(self, inputs):
        """Concatenate a list of structurally-matching structs along the batch axis.

        Data leaves are joined with the backend _concat_data hook; the
        'dims' entries must agree across all inputs; anything else is
        collected into plain lists.
        """
        flat_keys = _common_keys(inputs)
        result = inputs[0].create_empty()
        for key in flat_keys:
            members = []
            for item in inputs:  # renamed from `input` to stop shadowing the builtin
                members.append(item[key])
            if str(key.last()) == 'dims':
                # dims metadata must be identical for every input
                dims = members[0]
                for member in members:
                    if member != dims:
                        raise Exception("concat_batch() tensor dims don't agree")
                result.dims = dims
            elif self._is_data(members[0]):
                result[key] = self._concat_data(members)
            else:
                result[key] = members
        return result

    def split_batch(self):
        """Split a batched struct into a list of per-entry structs.

        The batch size is taken from shape[0] of the data leaves (all
        leaves must agree); lists are indexed per entry, other values are
        shared unchanged.
        """
        # Determine batch size
        batch_size = None
        def _test_bs(x):
            nonlocal batch_size
            new_batch_size = x.shape[0]
            if batch_size is not None and batch_size != new_batch_size:
                raise Exception(f"split_batch() found inconsistent batch dimensions {batch_size} and {new_batch_size}")
            batch_size = new_batch_size
        self.apply_data(_test_bs)
        if batch_size is None:
            raise Exception('split_batch() could not find any batch dimension')
        # Extract structures for each batch entry
        result = []
        for idx in range(0, batch_size):
            def _extract_batch(x):
                nonlocal idx
                if self._is_data(x):
                    # NOTE(review): non-4D data leaves fall through and yield
                    # None here -- confirm all leaves are rank 4 at this point
                    if len(x.shape) == 4:
                        exp = self._data_expand_dims(x[idx, ...])
                        return exp
                elif is_list(x):
                    return x[idx]
                else:
                    return x
            def _translate(x):
                if is_dict(x):
                    if hasattr(x, 'clone_type'): y = x.clone_type()
                    else: y = type(x)()
                    for key, value in x.items():
                        y[key] = _translate(value)
                else:
                    y = _extract_batch(x)
                return y
            entry = _translate(self)
            result.append(entry)
        return result

    # --- backend hooks, implemented by concrete subclasses ---
    def _is_data(self, x): raise NotImplementedError
    def _data_nan_to_num(self, x): raise NotImplementedError
    def _concat_data(self, x): raise NotImplementedError
    def _data_expand_dims(self, x): raise NotImplementedError
    # Bug fix: previously `return NotImplementedError`, which silently handed
    # the exception *class* back to callers instead of raising it.
    def _data_permute_dims(self, x, dims): raise NotImplementedError
| 4,889 | 4 | 450 |
48a0794b51a6c5c4453618ae132fe4019225282a | 2,114 | py | Python | Python/getdata.py | SanjaykrishnaanH/Beacon_RD | c2c6084499520b2d7b545562e87f05ab1ad8a426 | [
"Apache-2.0"
] | null | null | null | Python/getdata.py | SanjaykrishnaanH/Beacon_RD | c2c6084499520b2d7b545562e87f05ab1ad8a426 | [
"Apache-2.0"
] | null | null | null | Python/getdata.py | SanjaykrishnaanH/Beacon_RD | c2c6084499520b2d7b545562e87f05ab1ad8a426 | [
"Apache-2.0"
] | null | null | null | import firebase_admin
import csv
import time
from firebase_admin import credentials
from firebase_admin import firestore
cred = credentials.Certificate('beacon-dddae-firebase-adminsdk-2qj4p-4ab2ce47dd.json')
firebase_admin.initialize_app(cred)
flag=0
# print(u'{} => {}'.format(doc.id, doc.to_dict()))
while(flag==0):
db = firestore.client()
docs = db.collection(u'data').get()
varlist=["Humid","MQ7","MQ2"]
with open('data3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["experience","salary"])
for doc in docs:
docid=doc.id
data=(doc.to_dict())
for key,value in data.items():
# print(key)
if(key=="Temp"):
for i in range(3):
for keys,values in data.items():
if(keys=="Humid"):
writer.writerow([value, values])
csv_file.close()
with open('data2.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["experience","salary"])
for doc in docs:
docid=doc.id
data=(doc.to_dict())
for key,value in data.items():
# print(key)
if(key=="Temp"):
for i in range(3):
for keys,values in data.items():
if(keys=="MQ2"):
writer.writerow([value, values])
csv_file.close()
with open('data1.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["experience","salary"])
for doc in docs:
docid=doc.id
data=(doc.to_dict())
for key,value in data.items():
# print(key)
if(key=="Temp"):
for i in range(3):
for keys,values in data.items():
if(keys=="MQ7"):
writer.writerow([value, values])
csv_file.close()
print("Waiting for 5 minutes")
time.sleep(300) | 36.448276 | 98 | 0.502838 | import firebase_admin
import csv
import time
from firebase_admin import credentials
from firebase_admin import firestore
cred = credentials.Certificate('beacon-dddae-firebase-adminsdk-2qj4p-4ab2ce47dd.json')
firebase_admin.initialize_app(cred)
flag=0
# print(u'{} => {}'.format(doc.id, doc.to_dict()))
while(flag==0):
db = firestore.client()
docs = db.collection(u'data').get()
varlist=["Humid","MQ7","MQ2"]
with open('data3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["experience","salary"])
for doc in docs:
docid=doc.id
data=(doc.to_dict())
for key,value in data.items():
# print(key)
if(key=="Temp"):
for i in range(3):
for keys,values in data.items():
if(keys=="Humid"):
writer.writerow([value, values])
csv_file.close()
with open('data2.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["experience","salary"])
for doc in docs:
docid=doc.id
data=(doc.to_dict())
for key,value in data.items():
# print(key)
if(key=="Temp"):
for i in range(3):
for keys,values in data.items():
if(keys=="MQ2"):
writer.writerow([value, values])
csv_file.close()
with open('data1.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["experience","salary"])
for doc in docs:
docid=doc.id
data=(doc.to_dict())
for key,value in data.items():
# print(key)
if(key=="Temp"):
for i in range(3):
for keys,values in data.items():
if(keys=="MQ7"):
writer.writerow([value, values])
csv_file.close()
print("Waiting for 5 minutes")
time.sleep(300) | 0 | 0 | 0 |
00cbc558f577adcc477d1fa77c2ebd1010c4b13d | 34,744 | py | Python | JJ_measurements/GV_Vbias.py | QNLSydney/JJ_measurements | cad43a2dc1231ed856ede39bedb726232c772b0f | [
"MIT"
] | null | null | null | JJ_measurements/GV_Vbias.py | QNLSydney/JJ_measurements | cad43a2dc1231ed856ede39bedb726232c772b0f | [
"MIT"
] | null | null | null | JJ_measurements/GV_Vbias.py | QNLSydney/JJ_measurements | cad43a2dc1231ed856ede39bedb726232c772b0f | [
"MIT"
] | null | null | null | # GV Vbias
# Alexis Jouan 13/10/2019
# Reading AC current and AC voltage with two lockins
import numpy as np
import time
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.plotting import plot_by_id
from datetime import datetime
import qcodes_measurements as qcm
from qcodes_measurements.tools.measure import _run_functions, _get_window
| 34.129666 | 113 | 0.622093 | # GV Vbias
# Alexis Jouan 13/10/2019
# Reading AC current and AC voltage with two lockins
import numpy as np
import time
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.plotting import plot_by_id
from datetime import datetime
import qcodes_measurements as qcm
from qcodes_measurements.tools.measure import _run_functions, _get_window
def GV_yoko_up(station, voltages, amplitude, stanford_gain_V_ac, stanford_gain_I_ac):
    """Measure AC conductance vs DC bias by sweeping the Yokogawa source.

    Ramps the yoko from 0 V to voltages[0], steps through *voltages*
    recording lock-in X/Y readings (lockin_1 = AC voltage, divided by
    stanford_gain_V_ac; lockin_2 = AC current, divided by
    stanford_gain_I_ac) plus G_ac = X_current / X_voltage, then ramps
    back to 0 V and plots the run with plot_by_id.
    """
    # NOTE(review): R_I is defined but never used in this function
    R_I = 1e4 #value of the resistor used to measure the current
    now = datetime.now()
    # dd/mm/YY H:M:S
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print(dt_string)
    print(f'Stanford Gain V_AC ={stanford_gain_V_ac}')
    print(f'V_max = {voltages[-1]} V')
    time_constant = station.lockin_2.time_constant()
    print(f'Integration time lockins {time_constant} s')
    # Configure the Yokogawa as a slow-ramping voltage source
    station.yoko.output('off')
    station.yoko.source_mode("VOLT")
    station.yoko.output('on')
    station.yoko.voltage.step = 1e-3
    station.yoko.voltage.inter_delay = 10e-3
    station.lockin_2.amplitude(amplitude)
    meas = Measurement()
    meas.register_parameter(station.yoko.voltage)
    meas.register_parameter(station.lockin_2.amplitude)
    meas.register_parameter(station.lockin_1.Y, setpoints=(station.yoko.voltage,))
    meas.register_parameter(station.lockin_2.Y, setpoints=(station.yoko.voltage,))
    meas.register_parameter(station.lockin_1.X, setpoints=(station.yoko.voltage,))
    meas.register_parameter(station.lockin_2.X, setpoints=(station.yoko.voltage,))
    meas.register_custom_parameter("G_ac", unit="S", setpoints=(station.yoko.voltage,))
    # NOTE(review): "I_dc" is registered here but never passed to add_result below -- verify
    meas.register_custom_parameter("I_dc", unit="A", setpoints=(station.yoko.voltage,))
    print(f'Frequency Lockin : {station.lockin_1.frequency()} Hz')
    station.lockin_2.amplitude(amplitude)
    print(f'V_ac polarization : {amplitude*1e3} mV')
    print(f'Filter lockin 1 : {station.lockin_1.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 1 : {station.lockin_1.sensitivity()} V')
    print(f'Filter lockin 2 : {station.lockin_2.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 2 : {station.lockin_2.sensitivity()} A')
    v_init = voltages[0]
    v_final = voltages[-1]
    # Pre/post ramps use half as many points as the main sweep
    L = int(len(voltages)/2)
    volt_sweep_init = np.linspace(0.0, v_init, L)
    volt_sweep_final = np.linspace(v_final, 0.0, L)
    with meas.run() as datasaver:
        # Ramp gently from 0 up to the first sweep point
        for v in volt_sweep_init:
            station.yoko.voltage(v)
        time.sleep(1)
        for v in voltages:
            station.yoko.voltage(v)
            # Wait ~9 time constants for the lock-in outputs to settle
            time.sleep(9*time_constant)
            current_X_AC = station.lockin_2.X()/stanford_gain_I_ac
            voltage_X_AC = station.lockin_1.X()/stanford_gain_V_ac
            current_Y_AC = station.lockin_2.Y()/stanford_gain_I_ac
            voltage_Y_AC = station.lockin_1.Y()/stanford_gain_V_ac
            G_ac = current_X_AC/voltage_X_AC
            # NOTE(review): lockin_1 parameters are paired with lockin_2-derived
            # current values (and vice versa) -- confirm the swap is intentional
            datasaver.add_result(("G_ac",G_ac),
                                (station.yoko.voltage, v),
                                (station.lockin_1.Y,current_Y_AC),
                                (station.lockin_2.Y,voltage_Y_AC),
                                (station.lockin_1.X,current_X_AC),
                                (station.lockin_2.X,voltage_X_AC))
        # Ramp back down towards 0 before closing the run
        for v in volt_sweep_final:
            station.yoko.voltage(v)
        ID_exp = datasaver.run_id
    station.yoko.voltage(0)
    plot_by_id(ID_exp)
def GV_IV_up(station, voltages, amplitude, stanford_gain_V_ac, stanford_gain_I, stanford_gain_V):
    """Sweep the lock-in DC offset (sine_outdc) recording DC IV and AC conductance.

    At each bias point: DC voltage from dmm1 (/stanford_gain_V), DC current
    from dmm2 (/stanford_gain_I, then /R_I sense resistor), and lock-in X/Y
    readings with G_ac = X_current / X_voltage.  Ramps the offset up from 0,
    sweeps, ramps back to 0 and plots the run.
    """
    R_I = 1e4 #value of the resistor used to measure the current
    now = datetime.now()
    # dd/mm/YY H:M:S
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print(dt_string)
    print(f'Stanford Gain V_AC ={stanford_gain_V_ac}')
    print(f'Stanford Gain I_DC ={stanford_gain_I}')
    print(f'Stanford Gain V_DC ={stanford_gain_V}')
    print(f'V_max = {voltages[-1]} V')
    int_time = 1 #Integration time of the dmm's
    station.dmm1.volt()
    station.dmm1.NPLC(int_time)
    station.dmm2.volt()
    station.dmm2.NPLC(int_time)
    #station.dmm2.volt()
    #station.dmm2.NPLC(int_time)
    print(f'Integration time DC = {int_time*0.02} s')
    time_constant = station.lockin_2.time_constant()
    print(f'Integration time lockins {time_constant} s')
    #print(f'Stanford Gain V ={stanford_gain_V}') TO DO
    #print(f'Stanford Gain I ={stanford_gain_I}')
    print(f'Voltage Max V_max = {voltages[-1]}')
    #station.yoko.output('off')
    #station.yoko.source_mode("VOLT")
    #station.yoko.output('on')
    #station.yoko.voltage.step = 5e-3
    #station.yoko.voltage.inter_delay = 10e-3
    #
    station.lockin_2.amplitude(amplitude)
    meas = Measurement()
    meas.register_parameter(station.lockin_2.sine_outdc)
    meas.register_parameter(station.lockin_2.amplitude)
    meas.register_parameter(station.dmm1.volt)
    meas.register_parameter(station.dmm2.volt, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_1.Y, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_2.Y, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_1.X, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_2.X, setpoints=(station.dmm1.volt,))
    meas.register_custom_parameter("G_ac", unit="S", setpoints=(station.dmm1.volt,))
    meas.register_custom_parameter("I_dc", unit="A", setpoints=(station.dmm1.volt,))
    print(f'Frequency Lockin : {station.lockin_1.frequency()} Hz')
    station.lockin_2.amplitude(amplitude)
    print(f'V_ac polarization : {amplitude*1e3} mV')
    print(f'Filter lockin 1 : {station.lockin_1.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 1 : {station.lockin_1.sensitivity()} V')
    print(f'Filter lockin 2 : {station.lockin_2.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 2 : {station.lockin_2.sensitivity()} A')
    v_init = voltages[0]
    v_final = voltages[-1]
    # Pre/post ramps use half as many points as the main sweep
    L = int(len(voltages)/2)
    volt_sweep_init = np.linspace(0.0, v_init, L)
    volt_sweep_final = np.linspace(v_final, 0.0, L)
    with meas.run() as datasaver:
        # Ramp the DC offset gently from 0 up to the first sweep point
        for v in volt_sweep_init:
            station.lockin_2.sine_outdc(v)
            time.sleep(500e-3)
        time.sleep(10)
        for v in voltages:
            station.lockin_2.sine_outdc(v)
            v_dc = station.dmm1.volt()/stanford_gain_V
            v_i_dc = station.dmm2.volt()/stanford_gain_I
            i_dc = v_i_dc/R_I
            # Wait ~9 time constants for the lock-in outputs to settle
            time.sleep(9*time_constant)
            # NOTE(review): unlike GV_yoko_up, the AC current readings are not
            # divided by any gain here -- confirm this is intentional
            current_X_AC = station.lockin_2.X()
            voltage_X_AC = station.lockin_1.X()/stanford_gain_V_ac
            current_Y_AC = station.lockin_2.Y()
            voltage_Y_AC = station.lockin_1.Y()/stanford_gain_V_ac
            G_ac = current_X_AC/voltage_X_AC
            # NOTE(review): lockin_1 parameters are paired with current values
            # (and vice versa) -- confirm the swap is intentional
            datasaver.add_result(("G_ac",G_ac),
                                (station.dmm1.volt, v_dc),
                                (station.dmm2.volt, v_i_dc),
                                ("I_dc",i_dc),
                                (station.lockin_2.sine_outdc, v),
                                (station.lockin_1.Y,current_Y_AC),
                                (station.lockin_2.Y,voltage_Y_AC),
                                (station.lockin_1.X,current_X_AC),
                                (station.lockin_2.X,voltage_X_AC))
        # Ramp the DC offset back down towards 0 before closing the run
        for v in volt_sweep_final:
            station.lockin_2.sine_outdc(v)
            time.sleep(100e-3)
        ID_exp = datasaver.run_id
    station.lockin_2.sine_outdc(0)
    plot_by_id(ID_exp)
def GV_IV_yoko_up(station, voltages, amplitude, stanford_gain_V_ac, stanford_gain_I, stanford_gain_V):
    """Sweep the Yokogawa bias recording DC IV (via dmms) and AC conductance.

    At each bias point: DC voltage from dmm1 (/stanford_gain_V), DC current
    from dmm2 with an Ithaco current preamp (sign-inverted, /stanford_gain_I),
    and lock-in X/Y readings with G_ac = X_current / X_voltage.  Ramps up
    from 0, sweeps, ramps back to 0 and plots the run.
    """
    now = datetime.now()
    # dd/mm/YY H:M:S
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    print(dt_string)
    print(f'Stanford Gain V_AC ={stanford_gain_V_ac}')
    print(f'Stanford Gain V_DC ={stanford_gain_V}')
    print(f'Stanford Gain I ={stanford_gain_I}')
    print(f'V_max = {voltages[-1]} V')
    int_time = 1 #Integration time of the dmm's
    station.dmm1.volt()
    station.dmm1.NPLC(int_time)
    station.dmm2.volt()
    station.dmm2.NPLC(int_time)
    #station.dmm2.volt()
    #station.dmm2.NPLC(int_time)
    print(f'Integration time DC = {int_time*0.02} s')
    time_constant = station.lockin_2.time_constant()
    print(f'Integration time lockins {time_constant} s')
    #print(f'Stanford Gain V ={stanford_gain_V}') TO DO
    #print(f'Stanford Gain I ={stanford_gain_I}')
    print(f'Voltage Max V_max = {voltages[-1]}')
    #station.yoko.output('off')
    #station.yoko.source_mode("VOLT")
    #station.yoko.output('on')
    # Very fine yoko ramping (0.1 uV steps every 0.5 ms)
    station.yoko.voltage.step = 0.1e-6
    station.yoko.voltage.inter_delay = 5e-4
    #
    station.lockin_2.amplitude(amplitude)
    meas = Measurement()
    meas.register_parameter(station.yoko.voltage)
    meas.register_parameter(station.lockin_2.amplitude)
    meas.register_parameter(station.dmm1.volt)
    meas.register_parameter(station.dmm2.volt, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_1.Y, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_2.Y, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_1.X, setpoints=(station.dmm1.volt,))
    meas.register_parameter(station.lockin_2.X, setpoints=(station.dmm1.volt,))
    meas.register_custom_parameter("G_ac", unit="S", setpoints=(station.dmm1.volt,))
    meas.register_custom_parameter("I_dc", unit="A", setpoints=(station.dmm1.volt,))
    print(f'Frequency Lockin : {station.lockin_1.frequency()} Hz')
    station.lockin_2.amplitude(amplitude)
    print(f'V_ac polarization : {amplitude*1e3} mV')
    print(f'Filter lockin 1 : {station.lockin_1.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 1 : {station.lockin_1.sensitivity()} V')
    print(f'Filter lockin 2 : {station.lockin_2.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 2 : {station.lockin_2.sensitivity()} A')
    v_init = voltages[0]
    v_final = voltages[-1]
    # Pre/post ramps use half as many points as the main sweep
    L = int(len(voltages)/2)
    volt_sweep_init = np.linspace(0.0, v_init, L)
    volt_sweep_final = np.linspace(v_final, 0.0, L)
    with meas.run() as datasaver:
        # Ramp gently from 0 up to the first sweep point
        for v in volt_sweep_init:
            station.yoko.voltage(v)
            time.sleep(100e-3)
        time.sleep(1)
        for v in voltages:
            station.yoko.voltage(v)
            v_dc = station.dmm1.volt()/stanford_gain_V
            i_dc = -station.dmm2.volt()/stanford_gain_I #Using ithaco
            #i_dc = v_i_dc/R_I
            # Wait ~6 time constants for the lock-in outputs to settle
            time.sleep(6*time_constant)
            current_X_AC = station.lockin_2.X()/stanford_gain_I
            voltage_X_AC = station.lockin_1.X()/stanford_gain_V_ac
            current_Y_AC = station.lockin_2.Y()/stanford_gain_I
            voltage_Y_AC = station.lockin_1.Y()/stanford_gain_V_ac
            G_ac = current_X_AC/voltage_X_AC
            # NOTE(review): lockin_1 parameters are paired with lockin_2-derived
            # current values (and vice versa) -- confirm the swap is intentional
            datasaver.add_result(("G_ac",G_ac),
                                (station.dmm1.volt, v_dc),
                                (station.dmm2.volt, i_dc),
                                ("I_dc",i_dc),
                                (station.yoko.voltage, v),
                                (station.lockin_1.Y,current_Y_AC),
                                (station.lockin_2.Y,voltage_Y_AC),
                                (station.lockin_1.X,current_X_AC),
                                (station.lockin_2.X,voltage_X_AC))
        # Ramp back down towards 0 before closing the run
        for v in volt_sweep_final:
            station.yoko.voltage(v)
            time.sleep(100e-3)
        ID_exp = datasaver.run_id
    station.yoko.voltage(0)
    plot_by_id(ID_exp)
def GV_2D(station, voltages, v_gates, amplitude, stanford_gain_V_ac, stanford_gain_V, stanford_gain_I):
    """Map the AC differential conductance vs DC bias and gate voltage.

    For every gate voltage in ``v_gates`` (applied on mdac_8 channels 1-3
    simultaneously), the DC bias ``voltages`` is swept with the DC offset
    output of lock-in 2 while recording the DC voltage/current (dmm1/dmm2),
    the AC conductance G_ac = lockin_2.X / (lockin_1.X / gain) and the DC
    resistance.  A live 2-D image of G_ac is updated point by point and
    exported to figures/ at the end.

    Parameters
    ----------
    station : qcodes station exposing dmm1, dmm2, lockin_1 (AC voltage),
        lockin_2 (AC current + DC bias output) and mdac_8 (gates)
    voltages : numpy array of DC bias setpoints (V); voltages[0] and
        voltages[-1] also define the gentle pre-sweep from 0 V and the
        return sweep at the end
    v_gates : gate voltage setpoints (V)
    amplitude : AC excitation amplitude (V), set on lockin_2
    stanford_gain_V_ac, stanford_gain_V, stanford_gain_I :
        pre-amplifier gains used to rescale the raw readings
    """
    #Before using this code change these values according to your own setup :
    R_I = 1e4 #value of the resistor used to measure the current
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S") # dd/mm/YY H:M:S
    print(dt_string)
    print(f'Stanford Gain V_AC ={stanford_gain_V_ac}')
    print(f'Stanford Gain I_DC ={stanford_gain_I}')
    print(f'Stanford Gain V_DC ={stanford_gain_V}')
    print(f'Voltage Max V_max = {voltages[-1]} V')
    int_time = 10 #Integration time of the dmm's
    station.dmm1.volt()
    station.dmm1.NPLC(int_time)
    station.dmm2.volt()
    station.dmm2.NPLC(int_time)
    print(f'Integration time DC = {int_time*0.02} s')
    time_constant = station.lockin_2.time_constant()
    print(f'Integration time lockins {time_constant} s')
    #station.yoko.output('off')
    #station.yoko.source_mode("VOLT")
    #station.yoko.output('on')
    #station.yoko.voltage.step = 5e-3
    #station.yoko.voltage.inter_delay = 10e-3
    # Register everything that will be saved; measured quantities are
    # parameterized by the (DC voltage, gate voltage) setpoints.
    meas = Measurement()
    meas.register_parameter(station.lockin_2.amplitude)
    meas.register_parameter(station.lockin_2.sine_outdc)
    meas.register_custom_parameter("V_dc_polar", unit="V")
    meas.register_parameter(station.mdac_8.ch01.voltage)
    meas.register_parameter(station.dmm1.volt)
    meas.register_parameter(station.dmm2.volt, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_1.Y, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_2.Y, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_1.X, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_2.X, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("I_dc", unit="A",setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("G_ac", unit="S",setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("R_dc", unit="Ohm",setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    print(f'Frequency Lockin : {station.lockin_1.frequency()} Hz')
    station.lockin_2.amplitude(amplitude)
    print(f'V_ac polarization : {amplitude*1e3} mV')
    print(f'Filter lockin 1 : {station.lockin_1.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 1 : {station.lockin_1.sensitivity()} V')
    print(f'Filter lockin 2 : {station.lockin_2.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 2 : {station.lockin_2.sensitivity()} A')
    #Preparing the measurement :
    v_init = voltages[0]
    v_final = voltages[-1]
    L = int(len(voltages)/2)
    # Gentle approach from 0 V to the first bias point; at the end, sweep
    # from the last point back to the first before zeroing the output.
    volt_sweep_init = np.linspace(0.0, v_init, L)
    volt_sweep_back = np.linspace(v_final, v_init, 2*L)
    M = len(voltages)
    N = len(v_gates)
    G_ac_plot = np.full((M,N), 0.0)  # live image buffer, one pixel per (bias, gate)
    win = qcm.pyplot.PlotWindow(title="JJ dev. A")
    win.resize(500,750)
    voltages_live = voltages*1e6  # bias axis displayed in uV
    plot1 = win.addPlot(title = "G_ac(V_dc, V_g)")
    plot1.plot(setpoint_x = voltages_live, setpoint_y = v_gates)
    plot1.left_axis.label = "V_g"
    plot1.left_axis.units = "V"
    plot1.bot_axis.label = "V_dc_polar"
    plot1.bot_axis.units = "uV"
    with meas.run() as datasaver:
        for v in volt_sweep_init:
            station.lockin_2.sine_outdc(v)
            time.sleep(200e-3)
        for i, v_g in enumerate(v_gates):
            # Move all three gate channels to v_g and block until the
            # ramps have finished.
            station.mdac_8.ch01.ramp(v_g, 0.01)
            station.mdac_8.ch02.ramp(v_g, 0.01)
            station.mdac_8.ch03.ramp(v_g, 0.01)
            station.mdac_8.ch01.block()
            station.mdac_8.ch02.block()
            station.mdac_8.ch03.block()
            print(f'V_g = {v_g} V')
            for j, v in enumerate(voltages):
                station.lockin_2.sine_outdc(v)
                v_dc_polar = v
                # NOTE(review): the DC readings below are taken *before* the
                # settling wait; verify this ordering is intentional.
                v_dc = station.dmm1.volt()/stanford_gain_V
                v_i_dc = station.dmm2.volt()/stanford_gain_I
                I_dc = v_i_dc/R_I
                R_dc = v_dc/I_dc
                time.sleep(9*time_constant)  # let the lock-ins settle
                voltage_X_AC = station.lockin_1.X()/stanford_gain_V_ac
                current_X_AC = station.lockin_2.X()
                voltage_Y_AC = station.lockin_1.Y()/stanford_gain_V_ac
                current_Y_AC = station.lockin_2.Y()
                G_ac = current_X_AC/voltage_X_AC
                G_ac_plot[j, i] = G_ac
                plot1.traces[0].update(G_ac_plot)
                datasaver.add_result(("V_dc_polar", v_dc_polar),
                                    (station.mdac_8.ch01.voltage, v_g),
                                    ("I_dc", I_dc),
                                    ("G_ac",G_ac),
                                    ("R_dc", R_dc),
                                    (station.dmm1.volt, v_dc),
                                    (station.dmm2.volt, v_i_dc),
                                    (station.lockin_2.amplitude, amplitude),
                                    (station.lockin_2.sine_outdc, v),
                                    (station.lockin_2.Y,current_Y_AC),
                                    (station.lockin_1.Y,voltage_Y_AC),
                                    (station.lockin_2.X,current_X_AC),
                                    (station.lockin_1.X,voltage_X_AC))
        for v in volt_sweep_back:
            station.lockin_2.sine_outdc(v)
            time.sleep(100e-3)
        time.sleep(3)
        ID_exp = datasaver.run_id
    station.lockin_2.sine_outdc(0)
    plot_by_id(ID_exp)
    win.export('figures/Gac_2D_plot_ID_exp_'+str(ID_exp)+'.png')
def GV_2D_yoko(station, voltages, v_gates, amplitude, stanford_gain_V_ac, stanford_gain_V, stanford_gain_I):
    """Map the AC differential conductance vs DC bias and gate voltage,
    with the DC bias supplied by the Yokogawa voltage source.

    Same measurement as ``GV_2D`` except that the DC polarization is
    driven through ``station.yoko.voltage`` instead of the lock-in 2 DC
    offset.  For every gate voltage in ``v_gates`` (applied on mdac_8
    channels 1-3), the DC bias is swept over ``voltages`` while recording
    DC voltage/current (dmm1/dmm2), the AC conductance
    G_ac = lockin_2.X / (lockin_1.X / gain) and the DC resistance,
    updating a live 2-D plot point by point.

    Parameters
    ----------
    station : qcodes station exposing dmm1, dmm2, lockin_1, lockin_2,
        mdac_8 and yoko
    voltages : numpy array of DC bias setpoints (V); voltages[0] and
        voltages[-1] also define the gentle pre-sweep from 0 V and the
        return sweep at the end
    v_gates : gate voltage setpoints (V)
    amplitude : AC excitation amplitude (V), set on lockin_2
    stanford_gain_V_ac, stanford_gain_V, stanford_gain_I :
        pre-amplifier gains used to rescale the raw readings
    """
    R_I = 1e4  # value (Ohm) of the series resistor used to measure the DC current
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S") # dd/mm/YY H:M:S
    print(dt_string)
    print(f'Stanford Gain V_AC ={stanford_gain_V_ac}')
    print(f'Stanford Gain I_DC ={stanford_gain_I}')
    print(f'Stanford Gain V_DC ={stanford_gain_V}')
    print(f'Voltage Max V_max = {voltages[-1]} V')
    int_time = 10  # integration time of the dmm's (power-line cycles)
    station.dmm1.volt()
    station.dmm1.NPLC(int_time)
    station.dmm2.volt()
    station.dmm2.NPLC(int_time)
    print(f'Integration time DC = {int_time*0.02} s')
    time_constant = station.lockin_2.time_constant()
    print(f'Integration time lockins {time_constant} s')
    # Register everything that will be saved; measured quantities are
    # parameterized by the (DC voltage, gate voltage) setpoints.
    meas = Measurement()
    meas.register_parameter(station.yoko.voltage)
    meas.register_parameter(station.lockin_2.amplitude)
    meas.register_custom_parameter("V_dc_polar", unit="V")
    meas.register_parameter(station.mdac_8.ch01.voltage)
    meas.register_parameter(station.dmm1.volt)
    meas.register_parameter(station.dmm2.volt, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_1.Y, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_2.Y, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_1.X, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_parameter(station.lockin_2.X, setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("I_dc", unit="A",setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("G_ac", unit="S",setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    meas.register_custom_parameter("R_dc", unit="Ohm",setpoints=(station.dmm1.volt, station.mdac_8.ch01.voltage))
    print(f'Frequency Lockin : {station.lockin_1.frequency()} Hz')
    station.lockin_2.amplitude(amplitude)
    print(f'V_ac polarization : {amplitude*1e3} mV')
    print(f'Filter lockin 1 : {station.lockin_1.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 1 : {station.lockin_1.sensitivity()} V')
    print(f'Filter lockin 2 : {station.lockin_2.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 2 : {station.lockin_2.sensitivity()} A')
    # Preparing the measurement: gentle approach from 0 V to the first
    # bias point, and a return sweep back to it at the end of the run.
    v_init = voltages[0]
    v_final = voltages[-1]
    L = int(len(voltages)/2)
    volt_sweep_init = np.linspace(0.0, v_init, L)
    volt_sweep_back = np.linspace(v_final, v_init, 2*L)
    M = len(voltages)
    N = len(v_gates)
    G_ac_plot = np.full((M,N), 0.0)  # live image buffer, one pixel per (bias, gate)
    win = qcm.pyplot.PlotWindow(title="JJ dev. A")
    win.resize(500,750)
    voltages_live = voltages*1e6  # bias axis displayed in uV
    plot1 = win.addPlot(title = "G_ac(V_dc, V_g)")
    plot1.plot(setpoint_x = voltages_live, setpoint_y = v_gates)
    plot1.left_axis.label = "V_g"
    plot1.left_axis.units = "V"
    plot1.bot_axis.label = "V_dc_polar"
    plot1.bot_axis.units = "uV"
    with meas.run() as datasaver:
        for v in volt_sweep_init:
            station.yoko.voltage(v)
            time.sleep(200e-3)
        for i, v_g in enumerate(v_gates):
            # Move all three gate channels to v_g and block until the
            # ramps have finished.
            station.mdac_8.ch01.ramp(v_g, 0.01)
            station.mdac_8.ch02.ramp(v_g, 0.01)
            station.mdac_8.ch03.ramp(v_g, 0.01)
            station.mdac_8.ch01.block()
            station.mdac_8.ch02.block()
            station.mdac_8.ch03.block()
            print(f'V_g = {v_g} V')
            for j, v in enumerate(voltages):
                # BUGFIX: drive the DC bias with the yoko source, like every
                # other sweep in this function.  It was previously left on
                # lockin_2.sine_outdc (copy-paste from GV_2D), which parked
                # the yoko at v_init for the whole inner sweep while the
                # datasaver recorded station.yoko.voltage = v anyway.
                station.yoko.voltage(v)
                v_dc_polar = v
                v_dc = station.dmm1.volt()/stanford_gain_V
                v_i_dc = station.dmm2.volt()/stanford_gain_I
                I_dc = v_i_dc/R_I
                R_dc = v_dc/I_dc
                time.sleep(9*time_constant)  # let the lock-ins settle
                voltage_X_AC = station.lockin_1.X()/stanford_gain_V_ac
                current_X_AC = station.lockin_2.X()
                voltage_Y_AC = station.lockin_1.Y()/stanford_gain_V_ac
                current_Y_AC = station.lockin_2.Y()
                G_ac = current_X_AC/voltage_X_AC
                G_ac_plot[j, i] = G_ac
                plot1.traces[0].update(G_ac_plot)
                datasaver.add_result(("V_dc_polar", v_dc_polar),
                                     (station.mdac_8.ch01.voltage, v_g),
                                     ("I_dc", I_dc),
                                     ("G_ac",G_ac),
                                     ("R_dc", R_dc),
                                     (station.dmm1.volt, v_dc),
                                     (station.dmm2.volt, v_i_dc),
                                     (station.lockin_2.amplitude, amplitude),
                                     (station.yoko.voltage, v),
                                     (station.lockin_2.Y,current_Y_AC),
                                     (station.lockin_1.Y,voltage_Y_AC),
                                     (station.lockin_2.X,current_X_AC),
                                     (station.lockin_1.X,voltage_X_AC))
        for v in volt_sweep_back:
            station.yoko.voltage(v)
            time.sleep(100e-3)
        time.sleep(3)
        ID_exp = datasaver.run_id
    station.yoko.voltage(0)
    plot_by_id(ID_exp)
    win.export('figures/Gac_2D_plot_ID_exp_'+str(ID_exp)+'.png')
def GV_B(station, voltages, field_rang_Y, amplitude, stanford_gain_V_ac, stanford_gain_V, stanford_gain_I):
    """Map the AC differential conductance vs DC bias and magnetic field.

    For every y-axis field target in ``field_rang_Y`` (ramped with the
    vector magnet), the DC bias ``voltages`` is swept with the DC offset
    output of lock-in 2 while recording DC voltage/current (dmm1/dmm2),
    the AC conductance G_ac = lockin_2.X / (lockin_1.X / gain) and the
    DC resistance.  A live 2-D image of G_ac is updated point by point
    and exported to figures/ at the end.

    Parameters
    ----------
    station : qcodes station exposing dmm1, dmm2, lockin_1, lockin_2 and
        mag (vector magnet with y_target/y_measured/ramp)
    voltages : numpy array of DC bias setpoints (V); voltages[0] and
        voltages[-1] also define the gentle pre-sweep from 0 V and the
        return sweep at the end
    field_rang_Y : y-axis field setpoints (magnet units)
    amplitude : AC excitation amplitude (V), set on lockin_2
    stanford_gain_V_ac, stanford_gain_V, stanford_gain_I :
        pre-amplifier gains used to rescale the raw readings
    """
    #Before using this code change these values according to your own setup :
    R_I = 1e4 #value of the resistor used to measure the current
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S") # dd/mm/YY H:M:S
    print(dt_string)
    print(f'Stanford Gain V_AC ={stanford_gain_V_ac}')
    print(f'Stanford Gain I_DC ={stanford_gain_I}')
    print(f'Stanford Gain V_DC ={stanford_gain_V}')
    print(f'Voltage Max V_max = {voltages[-1]} V')
    int_time = 10 #Integration time of the dmm's
    station.dmm1.volt()
    station.dmm1.NPLC(int_time)
    station.dmm2.volt()
    station.dmm2.NPLC(int_time)
    print(f'Integration time DC = {int_time*0.02} s')
    time_constant = station.lockin_2.time_constant()
    print(f'Integration time lockins {time_constant} s')
    #station.yoko.output('off')
    #station.yoko.source_mode("VOLT")
    #station.yoko.output('on')
    #station.yoko.voltage.step = 5e-3
    #station.yoko.voltage.inter_delay = 10e-3
    # Register everything that will be saved; measured quantities are
    # parameterized by the (DC voltage, measured y field) setpoints.
    meas = Measurement()
    meas.register_parameter(station.lockin_2.amplitude)
    meas.register_parameter(station.lockin_2.sine_outdc)
    meas.register_custom_parameter("V_dc_polar", unit="V")
    meas.register_parameter(station.mag.y_measured)
    meas.register_parameter(station.dmm1.volt)
    meas.register_parameter(station.dmm2.volt, setpoints=(station.dmm1.volt, station.mag.y_measured))
    meas.register_parameter(station.lockin_1.Y, setpoints=(station.dmm1.volt, station.mag.y_measured))
    meas.register_parameter(station.lockin_2.Y, setpoints=(station.dmm1.volt, station.mag.y_measured))
    meas.register_parameter(station.lockin_1.X, setpoints=(station.dmm1.volt, station.mag.y_measured))
    meas.register_parameter(station.lockin_2.X, setpoints=(station.dmm1.volt, station.mag.y_measured))
    meas.register_custom_parameter("I_dc", unit="A",setpoints=(station.dmm1.volt, station.mag.y_measured))
    meas.register_custom_parameter("G_ac", unit="S",setpoints=(station.dmm1.volt, station.mag.y_measured))
    meas.register_custom_parameter("R_dc", unit="Ohm",setpoints=(station.dmm1.volt, station.mag.y_measured))
    print(f'Frequency Lockin : {station.lockin_1.frequency()} Hz')
    station.lockin_2.amplitude(amplitude)
    print(f'V_ac polarization : {amplitude*1e3} mV')
    print(f'Filter lockin 1 : {station.lockin_1.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 1 : {station.lockin_1.sensitivity()} V')
    print(f'Filter lockin 2 : {station.lockin_2.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 2 : {station.lockin_2.sensitivity()} A')
    #Preparing the measurement :
    v_init = voltages[0]
    v_final = voltages[-1]
    L = int(len(voltages)/2)
    # Gentle approach from 0 V to the first bias point; at the end, sweep
    # from the last point back to the first before zeroing the output.
    volt_sweep_init = np.linspace(0.0, v_init, L)
    volt_sweep_back = np.linspace(v_final, v_init, 2*L)
    M = len(voltages)
    N = len(field_rang_Y)
    G_ac_plot = np.full((M,N), 0.0)  # live image buffer, one pixel per (bias, field)
    win = qcm.pyplot.PlotWindow(title="JJ dev. A")
    win.resize(500,750)
    voltages_live = voltages*1e6  # bias axis displayed in uV
    plot1 = win.addPlot(title = "G_ac(V_dc, V_g)")
    plot1.plot(setpoint_x = voltages_live, setpoint_y = field_rang_Y)
    plot1.left_axis.label = "V_g"
    plot1.left_axis.units = "V"
    plot1.bot_axis.label = "V_dc_polar"
    plot1.bot_axis.units = "uV"
    with meas.run() as datasaver:
        for v in volt_sweep_init:
            station.lockin_2.sine_outdc(v)
            time.sleep(200e-3)
        for i, b in enumerate(field_rang_Y):
            # Set the y-axis target and ramp, then poll until the measured
            # field is within 0.001 (field units) of the target.
            station.mag.y_target(b)
            station.mag.ramp('simul')
            while abs(station.mag.y_measured()-b)>0.001:
                time.sleep(2)
            time.sleep(5)
            # Record the actually-measured field for this field line.
            l_y = station.mag.y_measured()
            print(l_y)
            for j, v in enumerate(voltages):
                station.lockin_2.sine_outdc(v)
                v_dc_polar = v
                # NOTE(review): the DC readings below are taken *before* the
                # settling wait; verify this ordering is intentional.
                v_dc = station.dmm1.volt()/stanford_gain_V
                v_i_dc = station.dmm2.volt()/stanford_gain_I
                I_dc = v_i_dc/R_I
                R_dc = v_dc/I_dc
                time.sleep(9*time_constant)  # let the lock-ins settle
                voltage_X_AC = station.lockin_1.X()/stanford_gain_V_ac
                current_X_AC = station.lockin_2.X()
                voltage_Y_AC = station.lockin_1.Y()/stanford_gain_V_ac
                current_Y_AC = station.lockin_2.Y()
                G_ac = current_X_AC/voltage_X_AC
                G_ac_plot[j, i] = G_ac
                plot1.traces[0].update(G_ac_plot)
                datasaver.add_result(("V_dc_polar", v_dc_polar),
                                    (station.mag.y_measured, l_y),
                                    ("I_dc", I_dc),
                                    ("G_ac",G_ac),
                                    ("R_dc", R_dc),
                                    (station.dmm1.volt, v_dc),
                                    (station.dmm2.volt, v_i_dc),
                                    (station.lockin_2.amplitude, amplitude),
                                    (station.lockin_2.sine_outdc, v),
                                    (station.lockin_2.Y,current_Y_AC),
                                    (station.lockin_1.Y,voltage_Y_AC),
                                    (station.lockin_2.X,current_X_AC),
                                    (station.lockin_1.X,voltage_X_AC))
        for v in volt_sweep_back:
            station.lockin_2.sine_outdc(v)
            time.sleep(100e-3)
        time.sleep(3)
        ID_exp = datasaver.run_id
    station.lockin_2.sine_outdc(0)
    plot_by_id(ID_exp)
    win.export('figures/Gac_B_plot_ID_exp_'+str(ID_exp)+'.png')
def GV_B_yoko(station, voltages, currents, amplitude, stanford_gain_V_ac, stanford_gain_V, stanford_gain_I):
    """Map the AC differential conductance vs DC bias and sourced current.

    For every current in ``currents`` (sourced by the Yokogawa, stepped
    gently via ``yoko.current.step``/``inter_delay``), the DC bias
    ``voltages`` is swept with the DC offset output of lock-in 2 while
    recording DC voltage/current (dmm1/dmm2), the AC conductance
    G_ac = lockin_2.X / (lockin_1.X / gain) and the DC resistance.
    A live 2-D image of G_ac is updated point by point and exported to
    figures/ at the end.

    Parameters
    ----------
    station : qcodes station exposing dmm1, dmm2, lockin_1, lockin_2 and
        yoko (current source)
    voltages : numpy array of DC bias setpoints (V); voltages[0] and
        voltages[-1] also define the gentle pre-sweep from 0 V and the
        return sweep at the end
    currents : current setpoints (A) for the yoko source
    amplitude : AC excitation amplitude (V), set on lockin_2
    stanford_gain_V_ac, stanford_gain_V, stanford_gain_I :
        pre-amplifier gains used to rescale the raw readings
    """
    R_I = 1e4  # value (Ohm) of the series resistor used to measure the DC current
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S") # dd/mm/YY H:M:S
    print(dt_string)
    print(f'Stanford Gain V_AC ={stanford_gain_V_ac}')
    print(f'Stanford Gain I_DC ={stanford_gain_I}')
    print(f'Stanford Gain V_DC ={stanford_gain_V}')
    print(f'Voltage Max V_max = {voltages[-1]} V')
    int_time = 10  # integration time of the dmm's (power-line cycles)
    station.dmm1.volt()
    station.dmm1.NPLC(int_time)
    station.dmm2.volt()
    station.dmm2.NPLC(int_time)
    print(f'Integration time DC = {int_time*0.02} s')
    time_constant = station.lockin_2.time_constant()
    print(f'Integration time lockins {time_constant} s')
    # Register everything that will be saved; measured quantities are
    # parameterized by the (DC voltage, yoko current) setpoints.
    meas = Measurement()
    meas.register_parameter(station.lockin_2.amplitude)
    meas.register_parameter(station.lockin_2.sine_outdc)
    meas.register_custom_parameter("V_dc_polar", unit="V")
    meas.register_parameter(station.yoko.current)
    meas.register_parameter(station.dmm1.volt)
    meas.register_parameter(station.dmm2.volt, setpoints=(station.dmm1.volt, station.yoko.current))
    meas.register_parameter(station.lockin_1.Y, setpoints=(station.dmm1.volt, station.yoko.current))
    meas.register_parameter(station.lockin_2.Y, setpoints=(station.dmm1.volt, station.yoko.current))
    meas.register_parameter(station.lockin_1.X, setpoints=(station.dmm1.volt, station.yoko.current))
    meas.register_parameter(station.lockin_2.X, setpoints=(station.dmm1.volt, station.yoko.current))
    meas.register_custom_parameter("I_dc", unit="A",setpoints=(station.dmm1.volt, station.yoko.current))
    meas.register_custom_parameter("G_ac", unit="S",setpoints=(station.dmm1.volt, station.yoko.current))
    meas.register_custom_parameter("R_dc", unit="Ohm",setpoints=(station.dmm1.volt, station.yoko.current))
    print(f'Frequency Lockin : {station.lockin_1.frequency()} Hz')
    station.lockin_2.amplitude(amplitude)
    print(f'V_ac polarization : {amplitude*1e3} mV')
    print(f'Filter lockin 1 : {station.lockin_1.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 1 : {station.lockin_1.sensitivity()} V')
    print(f'Filter lockin 2 : {station.lockin_2.filter_slope()} dB roll off')
    print(f'Sensitivity lockin 2 : {station.lockin_2.sensitivity()} A')
    # Step the yoko current smoothly between setpoints.
    station.yoko.current.step = 1e-6
    station.yoko.current.inter_delay = 1e-3
    # Preparing the measurement: gentle approach from 0 V to the first
    # bias point, and a return sweep back to it at the end of the run.
    v_init = voltages[0]
    v_final = voltages[-1]
    L = int(len(voltages)/2)
    volt_sweep_init = np.linspace(0.0, v_init, L)
    volt_sweep_back = np.linspace(v_final, v_init, 2*L)
    M = len(voltages)
    N = len(currents)
    G_ac_plot = np.full((M,N), 0.0)  # live image buffer, one pixel per (bias, current)
    win = qcm.pyplot.PlotWindow(title="JJ dev. A")
    win.resize(500,750)
    voltages_live = voltages*1e6  # bias axis displayed in uV
    plot1 = win.addPlot(title = "G_ac(V_dc, V_g)")
    # BUGFIX: the y setpoints of the live plot are the sourced currents;
    # the original referenced the undefined name `field_rang_Y` (left over
    # from a copy-paste of GV_B) and raised NameError here.
    plot1.plot(setpoint_x = voltages_live, setpoint_y = currents)
    # NOTE(review): axis labels still read "V_g"/"V" although the y axis
    # is a current — kept as-is to preserve the plot output.
    plot1.left_axis.label = "V_g"
    plot1.left_axis.units = "V"
    plot1.bot_axis.label = "V_dc_polar"
    plot1.bot_axis.units = "uV"
    with meas.run() as datasaver:
        for v in volt_sweep_init:
            station.lockin_2.sine_outdc(v)
            time.sleep(200e-3)
        for i, I in enumerate(currents):
            station.yoko.current(I)
            print(I)
            for j, v in enumerate(voltages):
                station.lockin_2.sine_outdc(v)
                v_dc_polar = v
                v_dc = station.dmm1.volt()/stanford_gain_V
                v_i_dc = station.dmm2.volt()/stanford_gain_I
                I_dc = v_i_dc/R_I
                R_dc = v_dc/I_dc
                time.sleep(9*time_constant)  # let the lock-ins settle
                voltage_X_AC = station.lockin_1.X()/stanford_gain_V_ac
                current_X_AC = station.lockin_2.X()
                voltage_Y_AC = station.lockin_1.Y()/stanford_gain_V_ac
                current_Y_AC = station.lockin_2.Y()
                G_ac = current_X_AC/voltage_X_AC
                G_ac_plot[j, i] = G_ac
                plot1.traces[0].update(G_ac_plot)
                datasaver.add_result(("V_dc_polar", v_dc_polar),
                                     (station.yoko.current, I),
                                     ("I_dc", I_dc),
                                     ("G_ac",G_ac),
                                     ("R_dc", R_dc),
                                     (station.dmm1.volt, v_dc),
                                     (station.dmm2.volt, v_i_dc),
                                     (station.lockin_2.amplitude, amplitude),
                                     (station.lockin_2.sine_outdc, v),
                                     (station.lockin_2.Y,current_Y_AC),
                                     (station.lockin_1.Y,voltage_Y_AC),
                                     (station.lockin_2.X,current_X_AC),
                                     (station.lockin_1.X,voltage_X_AC))
        for v in volt_sweep_back:
            station.lockin_2.sine_outdc(v)
            time.sleep(100e-3)
        time.sleep(3)
        ID_exp = datasaver.run_id
    station.lockin_2.sine_outdc(0)
    plot_by_id(ID_exp)
    win.export('figures/Gac_B_plot_ID_exp_'+str(ID_exp)+'.png')
8734cc92e74b00a6655cd99c87ba153df9215772 | 937 | py | Python | app_backend/algorithms.py | Unicorn-Dev/ProGraph | 4ec7a2c09b243562d5eb5f7cfeace0887fd162af | [
"MIT"
] | null | null | null | app_backend/algorithms.py | Unicorn-Dev/ProGraph | 4ec7a2c09b243562d5eb5f7cfeace0887fd162af | [
"MIT"
] | null | null | null | app_backend/algorithms.py | Unicorn-Dev/ProGraph | 4ec7a2c09b243562d5eb5f7cfeace0887fd162af | [
"MIT"
] | null | null | null | from .pillowgraph import PillowGraph
from graph.models import Graph
from app_backend import proxy_convert_functions as pc_funcs
from queue import Queue
| 29.28125 | 65 | 0.699039 | from .pillowgraph import PillowGraph
from graph.models import Graph
from app_backend import proxy_convert_functions as pc_funcs
from queue import Queue
def dfs(graph, vertex):
    """Run a depth-first search on ``graph`` starting from ``vertex``.

    Builds a PillowGraph from the model's adjacency-list string, delegates
    the traversal to ``PillowGraph.dfs``, then stores the result as the
    string form of ``('dfs', start_vertex, visit_order)`` in the model's
    ``AlgoSequense`` field and saves the model.
    """
    assert isinstance(vertex, str)
    pillow = PillowGraph(pc_funcs.StringToAdjListDict(graph.AdjList))
    # Clear any previously stored sequence before running.
    graph.AlgoSequense = ''
    # Every node starts out white (unvisited).
    node_colors = {node: 'W' for node in pillow.vertices}
    visit_order = []
    pillow.dfs(vertex, node_colors, visit_order)
    graph.AlgoSequense = str(('dfs', vertex, visit_order))
    graph.save()
def bfs(graph, vertex):
    """Run a breadth-first search on ``graph`` starting from ``vertex``.

    Builds a PillowGraph from the model's adjacency-list string, seeds a
    FIFO queue with the start vertex, delegates the traversal to
    ``PillowGraph.bfs``, then stores the result as the string form of
    ``('bfs', start_vertex, visit_order)`` in the model's ``AlgoSequense``
    field and saves the model.
    """
    assert isinstance(vertex, str)
    pillow = PillowGraph(pc_funcs.StringToAdjListDict(graph.AdjList))
    # Clear any previously stored sequence before running.
    graph.AlgoSequense = ''
    # Only the start vertex is marked visited before the traversal.
    visited = {node: False for node in pillow.vertices}
    visited[vertex] = True
    pending = Queue()
    pending.put(vertex)
    visit_order = []
    pillow.bfs(pending, visited, visit_order)
    graph.AlgoSequense = str(('bfs', vertex, visit_order))
    graph.save()
| 737 | 0 | 46 |
2d25dff6c968fa1e168d218e9f8863856d19bfa5 | 277 | py | Python | spiral.py | itspuneet/itspuneet | d44f78afcff275aa56f03bba738ac3e4f2c30843 | [
"bzip2-1.0.6"
] | null | null | null | spiral.py | itspuneet/itspuneet | d44f78afcff275aa56f03bba738ac3e4f2c30843 | [
"bzip2-1.0.6"
] | null | null | null | spiral.py | itspuneet/itspuneet | d44f78afcff275aa56f03bba738ac3e4f2c30843 | [
"bzip2-1.0.6"
# Print a 5 x 5 grid of "( row , col )" coordinate pairs, one grid row per line.
for row in range(1, 6):
    print(''.join(f"( {row} , {col} )" for col in range(1, 6)))
a = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]]
print(a)
n = 1
# Emit 25 blank lines (same output as the original 5 x 5 loop of bare print()).
for _ in range(25):
    print()
# Print a 5 x 5 grid of "( row , col )" coordinate pairs, one grid row per line.
for row in range(1, 6):
    print(''.join(f"( {row} , {col} )" for col in range(1, 6)))
a = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]]
print(a)
n = 1
# Emit 25 blank lines (same output as the original 5 x 5 loop of bare print()).
for _ in range(25):
    print()
| 0 | 0 | 0 |
c2b9fe0498043d576c71d4299795d9efbbc5aaa3 | 2,278 | py | Python | SublimeBot.py | gearbolt/SublimeBot-ShopifyScraper | e88336b24cfec03818332da39b6803ac3536e80b | [
"MIT"
] | 10 | 2017-07-18T23:36:20.000Z | 2021-06-05T14:20:58.000Z | SublimeBot.py | gearbolt/SublimeBot-ShopifyScraper | e88336b24cfec03818332da39b6803ac3536e80b | [
"MIT"
] | null | null | null | SublimeBot.py | gearbolt/SublimeBot-ShopifyScraper | e88336b24cfec03818332da39b6803ac3536e80b | [
"MIT"
] | 1 | 2017-08-21T05:43:00.000Z | 2017-08-21T05:43:00.000Z | # Import dependencies
# Shopify product-update watcher that tweets newly updated products.
import time
import json
import tweepy
import urllib2
import feedparser
from fake_useragent import UserAgent
from datetime import datetime, timedelta
from credentials import *
print("Loading Configuration Files...")
# config.json supplies the Twitter keys and the Shopify base link.
with open('config.json') as json_data_file:
    data = json.load(json_data_file)
# Access and authorize Twitter credentials
consumer_key = data["twitterAccessKeys"][0]["consumer_key"]
consumer_secret = data["twitterAccessKeys"][1]["consumer_secret"]
access_token = data["twitterAccessKeys"][2]["access_token"]
access_token_secret = data["twitterAccessKeys"][3]["access_token_secret"]
# NOTE(review): these prints echo the API secrets to the console — a
# security concern if logs are shared.
print("\033[33m[INFO]\033[0m Your Consumer Key is " + consumer_key)
print("\033[33m[INFO]\033[0m Your Consumer Secret Key is " + consumer_secret)
print("\033[33m[INFO]\033[0m Your Access Token is " + access_token)
print("\033[33m[INFO]\033[0m Your Access Token Secret is " + access_token_secret)
time.sleep(3)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Date parsing function
# NOTE(review): dt_parse is called below but is not defined in this
# excerpt — its definition appears to be missing here.
# Establish user agent
ua = UserAgent()
# Set initial time
testTime = dt_parse(datetime.utcnow().isoformat())
link = data['link'][0]['baseLink']
# Run Shopify website scrubber
response = urllib2.urlopen(link + 'products.json')
data2 = json.load(response)
# NOTE(review): products.json is fetched once, outside the loop — the
# loop re-checks the same snapshot forever; verify this is intended.
while True:
    print("\033[33m" + str(testTime) + "! \033[0m")
    for item in data2['products']: # Python's for loops are a "for each" loop
        # Tweet any product updated since the last poll timestamp.
        if (str(dt_parse(item['updated_at'])) > str(testTime)):
            print('\033[1;36m[LOG]\033[0m ' + item['title'] + ' ' + link + item['handle'] + ' ' + item['updated_at'])
            api.update_status(item['title'] + ' ' + link + 'products/' + item['handle'] + ' ' + str(dt_parse(item['updated_at'])))
    print("\033[1;36m[LOG]\033[0m Checking Site! " + link)
    print("\033[1;36m[LOG]\033[0m Site Checked! Status Code: " + str(response.code) + "!")
    testTime = dt_parse(datetime.utcnow().isoformat())
    time.sleep(5)
| 34 | 132 | 0.694469 | # Import dependencies
# Shopify product-update watcher that tweets newly updated products.
import time
import json
import tweepy
import urllib2
import feedparser
from fake_useragent import UserAgent
from datetime import datetime, timedelta
from credentials import *
print("Loading Configuration Files...")
# config.json supplies the Twitter keys and the Shopify base link.
with open('config.json') as json_data_file:
    data = json.load(json_data_file)
# Access and authorize Twitter credentials
consumer_key = data["twitterAccessKeys"][0]["consumer_key"]
consumer_secret = data["twitterAccessKeys"][1]["consumer_secret"]
access_token = data["twitterAccessKeys"][2]["access_token"]
access_token_secret = data["twitterAccessKeys"][3]["access_token_secret"]
# NOTE(review): these prints echo the API secrets to the console — a
# security concern if logs are shared.
print("\033[33m[INFO]\033[0m Your Consumer Key is " + consumer_key)
print("\033[33m[INFO]\033[0m Your Consumer Secret Key is " + consumer_secret)
print("\033[33m[INFO]\033[0m Your Access Token is " + access_token)
print("\033[33m[INFO]\033[0m Your Access Token Secret is " + access_token_secret)
time.sleep(3)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Date parsing function
def dt_parse(t):
    """Parse an ISO-8601 timestamp string into a naive UTC datetime.

    Only the date and hours:minutes (``t[0:16]``) are kept; seconds and
    fractional seconds are discarded.  If the string carries a numeric
    UTC offset ('+HH:MM' or '-HH:MM' starting at index 19, i.e. right
    after the seconds field), the offset is removed so the result is
    expressed in UTC.  Strings without such a suffix (e.g. the output of
    ``datetime.utcnow().isoformat()``) are returned unadjusted.
    """
    ret = datetime.strptime(t[0:16], '%Y-%m-%dT%H:%M')
    # Too short to carry a '+HH:MM'/'-HH:MM' suffix after the seconds
    # field -> nothing to normalise (the old code raised IndexError on
    # 19-character timestamps like '2021-01-01T12:34:56').
    if len(t) < 20:
        return ret
    sign = t[19]
    if sign in '+-':
        offset = timedelta(hours=int(t[20:22]), minutes=int(t[23:25]))
        # Local time = UTC + offset, so subtract a '+' offset and add a
        # '-' offset to recover UTC.  BUGFIX: the previous version *added*
        # '+' offsets (turning 12:34+02:00 into 14:34 instead of 10:34)
        # and applied the minutes with the wrong sign for '-' offsets.
        if sign == '+':
            ret -= offset
        else:
            ret += offset
    return ret
# Establish user agent
ua = UserAgent()
# Set initial time
testTime = dt_parse(datetime.utcnow().isoformat())
link = data['link'][0]['baseLink']
# Run Shopify website scrubber
response = urllib2.urlopen(link + 'products.json')
data2 = json.load(response)
# NOTE(review): products.json is fetched once, outside the loop — the
# loop re-checks the same snapshot forever; verify this is intended.
while True:
    print("\033[33m" + str(testTime) + "! \033[0m")
    for item in data2['products']: # Python's for loops are a "for each" loop
        # Tweet any product updated since the last poll timestamp.
        if (str(dt_parse(item['updated_at'])) > str(testTime)):
            print('\033[1;36m[LOG]\033[0m ' + item['title'] + ' ' + link + item['handle'] + ' ' + item['updated_at'])
            api.update_status(item['title'] + ' ' + link + 'products/' + item['handle'] + ' ' + str(dt_parse(item['updated_at'])))
    print("\033[1;36m[LOG]\033[0m Checking Site! " + link)
    print("\033[1;36m[LOG]\033[0m Site Checked! Status Code: " + str(response.code) + "!")
    testTime = dt_parse(datetime.utcnow().isoformat())
    time.sleep(5)
| 208 | 0 | 22 |
21e05f8fbfff2249da74d6dc3be82334e7a0ea49 | 3,574 | py | Python | fastscape/processes/marine.py | rlange2/fastscape | 607747570ff6a1406ae1631217503ec6e6bec95d | [
"BSD-3-Clause"
] | 36 | 2019-11-01T12:24:15.000Z | 2022-03-15T06:11:25.000Z | fastscape/processes/marine.py | rlange2/fastscape | 607747570ff6a1406ae1631217503ec6e6bec95d | [
"BSD-3-Clause"
] | 12 | 2019-10-03T16:28:47.000Z | 2022-01-14T11:11:46.000Z | fastscape/processes/marine.py | rlange2/fastscape | 607747570ff6a1406ae1631217503ec6e6bec95d | [
"BSD-3-Clause"
] | 9 | 2020-03-31T07:40:25.000Z | 2022-03-09T14:14:41.000Z | import fastscapelib_fortran as fs
import xsimlab as xs
from .channel import ChannelErosion
from .context import FastscapelibContext
from .grid import UniformRectilinearGrid2D
from .main import SurfaceToErode
@xs.process
class Sea:
    """Sea level."""
    # TODO: add diagnostics like shoreline extraction or
    # continental area vs. marine masks.
    # Absolute elevation of the sea surface; other processes read it via
    # xs.foreign(Sea, 'level').
    level = xs.variable(
        default=0.,
        description='sea level (elevation)'
    )
@xs.process
class MarineSedimentTransport:
    """Marine sediment transport, deposition and compaction.
    The source of sediment used for marine transport originates from
    channel erosion and/or transport, which, integrated over the whole
    continental area, provides a volume of sediment yielded through
    the shoreline.
    A uniform, user-defined ratio of sand/silt is considered for this
    sediment yield. Each of these grain size category has its own
    properties like porosity, the exponential decreasing of porosity
    with depth and the transport coefficient (diffusivity).
    """
    # NOTE(review): this excerpt declares only the process variables; the
    # runtime methods are not shown here.
    # --- user inputs and outputs ---
    ss_ratio_land = xs.variable(
        description='sand/silt ratio of continental sediment source'
    )
    ss_ratio_sea = xs.variable(
        dims=('y', 'x'),
        intent='out',
        description='sand/silt ratio of marine sediment layer'
    )
    porosity_sand = xs.variable(
        description='surface (reference) porosity of sand'
    )
    porosity_silt = xs.variable(
        description='surface (reference) porosity of silt'
    )
    e_depth_sand = xs.variable(
        description='e-folding depth of exp. porosity curve for sand'
    )
    e_depth_silt = xs.variable(
        description='e-folding depth of exp. porosity curve for silt'
    )
    diffusivity_sand = xs.variable(
        description='diffusivity (transport coefficient) for sand'
    )
    diffusivity_silt = xs.variable(
        description='diffusivity (transport coefficient) for silt'
    )
    layer_depth = xs.variable(
        description='mean depth (thickness) of marine active layer'
    )
    # --- references to variables owned by other processes in the model ---
    shape = xs.foreign(UniformRectilinearGrid2D, 'shape')
    fs_context = xs.foreign(FastscapelibContext, 'context')
    elevation = xs.foreign(SurfaceToErode, 'elevation')
    sediment_source = xs.foreign(ChannelErosion, 'erosion')
    sea_level = xs.foreign(Sea, 'level')
    erosion = xs.variable(
        dims=('y', 'x'),
        intent='out',
        groups='erosion',
        description='marine erosion or deposition of sand/silt'
    )
| 30.547009 | 78 | 0.681309 | import fastscapelib_fortran as fs
import xsimlab as xs
from .channel import ChannelErosion
from .context import FastscapelibContext
from .grid import UniformRectilinearGrid2D
from .main import SurfaceToErode
@xs.process
class Sea:
    """Sea level."""
    # TODO: add diagnostics like shoreline extraction or
    # continental area vs. marine masks.
    # Absolute elevation of the sea surface; other processes read it via
    # xs.foreign(Sea, 'level').
    level = xs.variable(
        default=0.,
        description='sea level (elevation)'
    )
@xs.process
class MarineSedimentTransport:
    """Marine sediment transport, deposition and compaction.
    The source of sediment used for marine transport originates from
    channel erosion and/or transport, which, integrated over the whole
    continental area, provides a volume of sediment yielded through
    the shoreline.
    A uniform, user-defined ratio of sand/silt is considered for this
    sediment yield. Each of these grain size category has its own
    properties like porosity, the exponential decreasing of porosity
    with depth and the transport coefficient (diffusivity).
    """
    # --- user inputs and outputs ---
    ss_ratio_land = xs.variable(
        description='sand/silt ratio of continental sediment source'
    )
    ss_ratio_sea = xs.variable(
        dims=('y', 'x'),
        intent='out',
        description='sand/silt ratio of marine sediment layer'
    )
    porosity_sand = xs.variable(
        description='surface (reference) porosity of sand'
    )
    porosity_silt = xs.variable(
        description='surface (reference) porosity of silt'
    )
    e_depth_sand = xs.variable(
        description='e-folding depth of exp. porosity curve for sand'
    )
    e_depth_silt = xs.variable(
        description='e-folding depth of exp. porosity curve for silt'
    )
    diffusivity_sand = xs.variable(
        description='diffusivity (transport coefficient) for sand'
    )
    diffusivity_silt = xs.variable(
        description='diffusivity (transport coefficient) for silt'
    )
    layer_depth = xs.variable(
        description='mean depth (thickness) of marine active layer'
    )
    # --- references to variables owned by other processes in the model ---
    shape = xs.foreign(UniformRectilinearGrid2D, 'shape')
    fs_context = xs.foreign(FastscapelibContext, 'context')
    elevation = xs.foreign(SurfaceToErode, 'elevation')
    sediment_source = xs.foreign(ChannelErosion, 'erosion')
    sea_level = xs.foreign(Sea, 'level')
    erosion = xs.variable(
        dims=('y', 'x'),
        intent='out',
        groups='erosion',
        description='marine erosion or deposition of sand/silt'
    )
    def initialize(self):
        # Enable the marine regime flag once, before the first step.
        # needed so that channel erosion/transport is disabled below sealevel
        self.fs_context["runmarine"] = True
    def run_step(self):
        # Push the python-side parameters into the fastscapelib-fortran
        # global context under their Fortran variable names.
        self.fs_context["ratio"] = self.ss_ratio_land
        self.fs_context["poro1"] = self.porosity_sand
        self.fs_context["poro2"] = self.porosity_silt
        self.fs_context["zporo1"] = self.e_depth_sand
        self.fs_context["zporo2"] = self.e_depth_silt
        self.fs_context["kdsea1"] = self.diffusivity_sand
        self.fs_context["kdsea2"] = self.diffusivity_silt
        self.fs_context["layer"] = self.layer_depth
        self.fs_context["sealevel"] = self.sea_level
        self.fs_context["Sedflux"] = self.sediment_source.ravel()
        # bypass fastscapelib-fortran global state
        self.fs_context["h"] = self.elevation.flatten()
        # Run one marine transport step in the Fortran routine, which
        # updates the elevation array "h" in place.
        fs.marine()
        # Erosion is the elevation drop produced by fs.marine (negative
        # values correspond to deposition).
        erosion_flat = self.elevation.ravel() - self.fs_context["h"]
        self.erosion = erosion_flat.reshape(self.shape)
        self.ss_ratio_sea = self.fs_context["fmix"].copy().reshape(self.shape)
439820f94d41a3f831e906b693f5e2ab2143b422 | 1,119 | py | Python | UpdateFunction/Ggnn.py | nadeemkajla/siamese_ged | 134808dea802b57ad41c5cfb38984e7bb22ddfb8 | [
"MIT"
] | 8 | 2018-07-06T17:00:01.000Z | 2021-09-06T15:15:42.000Z | UpdateFunction/Ggnn.py | nadeemkajla/siamese_ged | 134808dea802b57ad41c5cfb38984e7bb22ddfb8 | [
"MIT"
] | 3 | 2019-03-06T15:19:05.000Z | 2021-04-12T10:33:12.000Z | UpdateFunction/Ggnn.py | nadeemkajla/siamese_ged | 134808dea802b57ad41c5cfb38984e7bb22ddfb8 | [
"MIT"
] | 10 | 2018-07-06T17:00:03.000Z | 2021-12-02T02:51:22.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ggnn.py: Update function following the Gated Graph Neural Network (GGNN) framework.
* Bibliography: Li et al. (2016), Gated Graph Neural Networks (GG-NN)
Usage:
"""
from __future__ import print_function
import torch.nn as nn
# Own modules
__author__ = "Pau Riba"
__email__ = "priba@cvc.uab.cat"
# Constructor
# Update function
# Get the name of the used message function
# Get the message function arguments
# Get Output size
| 22.38 | 87 | 0.643432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ggnn.py: Update function following the Gated Graph Neural Network (GGNN) framework.
* Bibliography: Li et al. (2016), Gated Graph Neural Networks (GG-NN)
Usage:
"""
from __future__ import print_function
import torch.nn as nn
# Own modules
__author__ = "Pau Riba"
__email__ = "priba@cvc.uab.cat"
class Ggnn(nn.Module):
# Constructor
def __init__(self, args={}):
super(Ggnn, self).__init__()
self.args = args
self.message_size = args['in_m']
self.hidden_state_size = args['out']
self.gru = nn.GRUCell(self.message_size, self.hidden_state_size)
# Update function
def forward(self, h_v, m_v, args=None):
h_new = self.gru(m_v, h_v)
return h_new
# Get the name of the used message function
def get_definition(self):
return 'GGNN (Li et al. 2016)'
# Get the message function arguments
def get_args(self):
return self.args
# Get Output size
def get_out_size(self, size_h, size_e, args=None):
return self.m_size(size_h, size_e, args)
| 447 | 1 | 153 |
800a7eec9b2d6782788277f524db64aa275a938d | 633 | py | Python | hoomd/test-py/test_run_walltime.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | 2 | 2020-03-30T14:38:50.000Z | 2020-06-02T05:53:41.000Z | hoomd/test-py/test_run_walltime.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/test-py/test_run_walltime.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | 1 | 2020-05-20T07:00:08.000Z | 2020-05-20T07:00:08.000Z | # -*- coding: iso-8859-1 -*-
# Maintainer: joaander
import hoomd
hoomd.context.initialize()
import unittest
import os
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 25.32 | 76 | 0.620853 | # -*- coding: iso-8859-1 -*-
# Maintainer: joaander
import hoomd
hoomd.context.initialize()
import unittest
import os
class analyze_callback_tests(unittest.TestCase):
def setUp(self):
sysdef = hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=2.0),
n=[1,2]);
self.a = -1;
def test_walltime_exception(self):
os.environ['HOOMD_WALLTIME_STOP'] = "0"
self.assertRaises(hoomd.WalltimeLimitReached, hoomd.run, 10);
def tearDown(self):
hoomd.context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 311 | 27 | 104 |
a41f600ea41bd2f2907b4c63ee97d0a4b016604c | 282 | py | Python | supervisr/provider/debug/__init__.py | BeryJu/supervisr | 2305b1e27abb0bfe9fcee93b79e012c62cba712e | [
"MIT"
] | 1 | 2020-01-12T21:33:03.000Z | 2020-01-12T21:33:03.000Z | supervisr/provider/debug/__init__.py | BeryJu/supervisr | 2305b1e27abb0bfe9fcee93b79e012c62cba712e | [
"MIT"
] | null | null | null | supervisr/provider/debug/__init__.py | BeryJu/supervisr | 2305b1e27abb0bfe9fcee93b79e012c62cba712e | [
"MIT"
] | null | null | null | """Supervisr Mod mod/provider/onlinenet Header"""
__ui_name__ = 'Supervisr mod/mod/provider/onlinenet'
__author__ = 'Supervisr Team'
__email__ = 'supervisr@beryju.org'
__version__ = '0.3.14-alpha'
default_app_config = 'supervisr.provider.debug.apps.SupervisrModProviderDebugConfig'
| 40.285714 | 84 | 0.801418 | """Supervisr Mod mod/provider/onlinenet Header"""
__ui_name__ = 'Supervisr mod/mod/provider/onlinenet'
__author__ = 'Supervisr Team'
__email__ = 'supervisr@beryju.org'
__version__ = '0.3.14-alpha'
default_app_config = 'supervisr.provider.debug.apps.SupervisrModProviderDebugConfig'
| 0 | 0 | 0 |
b67a3211141343017694589fa6422b8ca9cea73c | 887 | py | Python | infrastructure/framework/falcon/app.py | pachecobruno/python-ddd | 81812848a567d4605df346ef3630718d320706cc | [
"MIT"
] | null | null | null | infrastructure/framework/falcon/app.py | pachecobruno/python-ddd | 81812848a567d4605df346ef3630718d320706cc | [
"MIT"
] | null | null | null | infrastructure/framework/falcon/app.py | pachecobruno/python-ddd | 81812848a567d4605df346ef3630718d320706cc | [
"MIT"
] | null | null | null | import falcon
import yaml
from composition_root import FalconContainer
from infrastructure.framework.falcon.controllers import InfoController
app = falcon.API()
app.set_error_serializer(error_serializer)
app.add_route('/', FalconContainer.info_controller_factory())
app.add_route('/items', FalconContainer.items_controller_factory()) | 35.48 | 70 | 0.674183 | import falcon
import yaml
from composition_root import FalconContainer
from infrastructure.framework.falcon.controllers import InfoController
def error_serializer(req, resp, exception):
representation = None
preferred = req.client_prefers(('application/x-yaml',
'application/json'))
if preferred is not None:
if preferred == 'application/json':
representation = exception.to_json()
else:
representation = yaml.dump(exception.to_dict(),
encoding=None)
resp.body = representation
resp.content_type = preferred
resp.append_header('Vary', 'Accept')
app = falcon.API()
app.set_error_serializer(error_serializer)
app.add_route('/', FalconContainer.info_controller_factory())
app.add_route('/items', FalconContainer.items_controller_factory()) | 528 | 0 | 23 |
4485ab8fcc90cd7f77aa5f622c046a6da5ada355 | 2,147 | py | Python | essaysense/__init__.py | zlliang/essaysense | c9bf72b2f143417e6d5be91fd000ee70e48c3923 | [
"MIT"
] | 24 | 2018-05-24T06:56:25.000Z | 2022-03-05T13:09:51.000Z | essaysense/__init__.py | deltaquincy/essaysense | c9bf72b2f143417e6d5be91fd000ee70e48c3923 | [
"MIT"
] | 3 | 2019-12-22T04:26:44.000Z | 2019-12-22T04:27:03.000Z | essaysense/__init__.py | deltaquincy/essaysense | c9bf72b2f143417e6d5be91fd000ee70e48c3923 | [
"MIT"
] | 12 | 2018-05-24T06:57:34.000Z | 2020-05-01T11:40:32.000Z | """An NLP project on Automated Essay Scoring.
Project EssaySense
==================
EssaySense is an NLP project on Automated Essay Scoring, based on neural network
technologies.
Several neural network models are included to modelling the scoring task, imple-
mented using TensorFlow (see https://tensorflow.org). Pre-trainedmodels are also
included based on ASAP-AES (see https://www.kaggle.com/c/asap-aes/) dataset. You
can use this application to score English essays, or train new models by feeding
your own datasets.
Use this documentation
----------------------
For any submodule, class or function, you can use built-in 'help' method to
check the documentation.
>>> help(essaysense.datasets)
Requirements
------------
Note that this project is only compatible with Python 3. Also, TensorFlow 1.4.1+
and NLTK 3.2+ are required to make this project alive.
Subpackages
-----------
- datasets: datasets used in this project.
- models: models implemented in this project.
- metrics:
Run this project
----------------
Temporarily in this preview version, we deliver a command line interfate
'essaysense-cli' alongwith the project to run the models. For more information,
please see README.md.
Copyright and license
---------------------
Copyright (c) 2017 Quincy Liang & Jiancong Gao
Under MIT license
"""
# This project follows SemVer 2.0 (see https://semver.org)
__version__ = "0.0.4"
# Make datasets avaliable
from essaysense import datasets
# Configurations.
from essaysense import configs
# Models implemented in this version.
from essaysense import models
# Package metadata
avaliable_models = {
"lstm": {
"model": models.DocumentLevelLstmWithMotPooling,
"train": datasets.DocumentLevelTrainSet,
"test": datasets.DocumentLevelTestSet
},
"cnn-cnn": {
"model": models.SentenceLevelCnn,
"train": datasets.SentenceLevelTrainSet,
"test": datasets.SentenceLevelTestSet
},
"cnn-lstm": {
"model": models.SentenceLevelCnnLstmWithAttention,
"train": datasets.SentenceLevelTrainSet,
"test": datasets.SentenceLevelTestSet
}
}
| 28.626667 | 80 | 0.706567 | """An NLP project on Automated Essay Scoring.
Project EssaySense
==================
EssaySense is an NLP project on Automated Essay Scoring, based on neural network
technologies.
Several neural network models are included to modelling the scoring task, imple-
mented using TensorFlow (see https://tensorflow.org). Pre-trainedmodels are also
included based on ASAP-AES (see https://www.kaggle.com/c/asap-aes/) dataset. You
can use this application to score English essays, or train new models by feeding
your own datasets.
Use this documentation
----------------------
For any submodule, class or function, you can use built-in 'help' method to
check the documentation.
>>> help(essaysense.datasets)
Requirements
------------
Note that this project is only compatible with Python 3. Also, TensorFlow 1.4.1+
and NLTK 3.2+ are required to make this project alive.
Subpackages
-----------
- datasets: datasets used in this project.
- models: models implemented in this project.
- metrics:
Run this project
----------------
Temporarily in this preview version, we deliver a command line interfate
'essaysense-cli' alongwith the project to run the models. For more information,
please see README.md.
Copyright and license
---------------------
Copyright (c) 2017 Quincy Liang & Jiancong Gao
Under MIT license
"""
# This project follows SemVer 2.0 (see https://semver.org)
__version__ = "0.0.4"
# Make datasets avaliable
from essaysense import datasets
# Configurations.
from essaysense import configs
# Models implemented in this version.
from essaysense import models
# Package metadata
avaliable_models = {
"lstm": {
"model": models.DocumentLevelLstmWithMotPooling,
"train": datasets.DocumentLevelTrainSet,
"test": datasets.DocumentLevelTestSet
},
"cnn-cnn": {
"model": models.SentenceLevelCnn,
"train": datasets.SentenceLevelTrainSet,
"test": datasets.SentenceLevelTestSet
},
"cnn-lstm": {
"model": models.SentenceLevelCnnLstmWithAttention,
"train": datasets.SentenceLevelTrainSet,
"test": datasets.SentenceLevelTestSet
}
}
| 0 | 0 | 0 |
be11e51b64c711f834138060343f0be3b5398a65 | 16,066 | py | Python | telethon/tl/custom/conversation.py | megansquire/Telethon | e5f1b2afa3482fe77aaee514bc3ec1574ada9262 | [
"MIT"
] | 2 | 2021-04-29T14:19:25.000Z | 2021-09-17T07:13:49.000Z | telethon/tl/custom/conversation.py | josh8tg/Arabic-Telethon | 10467cff74e15aa7d664e976972e1f1c4af5ae57 | [
"MIT"
] | null | null | null | telethon/tl/custom/conversation.py | josh8tg/Arabic-Telethon | 10467cff74e15aa7d664e976972e1f1c4af5ae57 | [
"MIT"
] | 1 | 2020-04-16T22:02:26.000Z | 2020-04-16T22:02:26.000Z | import asyncio
import itertools
import time
from .chatgetter import ChatGetter
from ... import helpers, utils, errors
from ...events.common import EventCommon
# Sometimes the edits arrive very fast (within the same second).
# In that case we add a small delta so that the age is older, for
# comparision purposes. This value is enough for up to 1000 messages.
_EDIT_COLLISION_DELTA = 0.001
class Conversation(ChatGetter):
"""
Represents a conversation inside an specific chat.
A conversation keeps track of new messages since it was
created until its exit and easily lets you query the
current state.
If you need a conversation across two or more chats,
you should use two conversations and synchronize them
as you better see fit.
"""
_id_counter = 0
_custom_counter = 0
async def send_message(self, *args, **kwargs):
"""
Sends a message in the context of this conversation. Shorthand
for `telethon.client.messages.MessageMethods.send_message` with
``entity`` already set.
"""
message = await self._client.send_message(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
async def send_file(self, *args, **kwargs):
"""
Sends a file in the context of this conversation. Shorthand
for `telethon.client.uploads.UploadMethods.send_file` with
``entity`` already set.
"""
message = await self._client.send_file(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
def mark_read(self, message=None):
"""
Marks as read the latest received message if ``message is None``.
Otherwise, marks as read until the given message (or message ID).
This is equivalent to calling `client.send_read_acknowledge
<telethon.client.messages.MessageMethods.send_read_acknowledge>`.
"""
if message is None:
if self._incoming:
message = self._incoming[-1].id
else:
message = 0
elif not isinstance(message, int):
message = message.id
return self._client.send_read_acknowledge(
self._input_chat, max_id=message)
async def get_response(self, message=None, *, timeout=None):
"""
Gets the next message that responds to a previous one.
Args:
message (`Message <telethon.tl.custom.message.Message>` | `int`, optional):
The message (or the message ID) for which a response
is expected. By default this is the last sent message.
timeout (`int` | `float`, optional):
If present, this `timeout` (in seconds) will override the
per-action timeout defined for the conversation.
"""
return await self._get_message(
message, self._response_indices, self._pending_responses, timeout,
lambda x, y: True
)
async def get_reply(self, message=None, *, timeout=None):
"""
Gets the next message that explicitly replies to a previous one.
"""
return await self._get_message(
message, self._reply_indices, self._pending_replies, timeout,
lambda x, y: x.reply_to_msg_id == y
)
def _get_message(
self, target_message, indices, pending, timeout, condition):
"""
Gets the next desired message under the desired condition.
Args:
target_message (`object`):
The target message for which we want to find another
response that applies based on `condition`.
indices (`dict`):
This dictionary remembers the last ID chosen for the
input `target_message`.
pending (`dict`):
This dictionary remembers {msg_id: Future} to be set
once `condition` is met.
timeout (`int`):
The timeout (in seconds) override to use for this operation.
condition (`callable`):
The condition callable that checks if an incoming
message is a valid response.
"""
start_time = time.time()
target_id = self._get_message_id(target_message)
# If there is no last-chosen ID, make sure to pick one *after*
# the input message, since we don't want responses back in time
if target_id not in indices:
for i, incoming in enumerate(self._incoming):
if incoming.id > target_id:
indices[target_id] = i
break
else:
indices[target_id] = len(self._incoming)
# We will always return a future from here, even if the result
# can be set immediately. Otherwise, needing to await only
# sometimes is an annoying edge case (i.e. we would return
# a `Message` but `get_response()` always `await`'s).
future = self._client.loop.create_future()
# If there are enough responses saved return the next one
last_idx = indices[target_id]
if last_idx < len(self._incoming):
incoming = self._incoming[last_idx]
if condition(incoming, target_id):
indices[target_id] += 1
future.set_result(incoming)
return future
# Otherwise the next incoming response will be the one to use
pending[target_id] = future
return self._get_result(future, start_time, timeout)
async def get_edit(self, message=None, *, timeout=None):
"""
Awaits for an edit after the last message to arrive.
The arguments are the same as those for `get_response`.
"""
start_time = time.time()
target_id = self._get_message_id(message)
target_date = self._edit_dates.get(target_id, 0)
earliest_edit = min(
(x for x in self._incoming
if x.edit_date
and x.id > target_id
and x.edit_date.timestamp() > target_date
),
key=lambda x: x.edit_date.timestamp(),
default=None
)
if earliest_edit and earliest_edit.edit_date.timestamp() > target_date:
self._edit_dates[target_id] = earliest_edit.edit_date.timestamp()
return earliest_edit
# Otherwise the next incoming response will be the one to use
future = asyncio.Future(loop=self._client.loop)
self._pending_edits[target_id] = future
return await self._get_result(future, start_time, timeout)
async def wait_read(self, message=None, *, timeout=None):
"""
Awaits for the sent message to be marked as read. Note that
receiving a response doesn't imply the message was read, and
this action will also trigger even without a response.
"""
start_time = time.time()
future = self._client.loop.create_future()
target_id = self._get_message_id(message)
if self._last_read is None:
self._last_read = target_id - 1
if self._last_read >= target_id:
return
self._pending_reads[target_id] = future
return await self._get_result(future, start_time, timeout)
async def wait_event(self, event, *, timeout=None):
"""
Waits for a custom event to occur. Timeouts still apply.
Unless you're certain that your code will run fast enough,
generally you should get a "handle" of this special coroutine
before acting. Generally, you should do this:
>>> from telethon import TelegramClient, events
>>>
>>> client = TelegramClient(...)
>>>
>>> async def main():
>>> async with client.conversation(...) as conv:
>>> response = conv.wait_event(events.NewMessage(incoming=True))
>>> await conv.send_message('Hi')
>>> response = await response
This way your event can be registered before acting,
since the response may arrive before your event was
registered. It depends on your use case since this
also means the event can arrive before you send
a previous action.
"""
start_time = time.time()
if isinstance(event, type):
event = event()
await event.resolve(self._client)
counter = Conversation._custom_counter
Conversation._custom_counter += 1
future = asyncio.Future(loop=self._client.loop)
# We need the `async def` here because we want to block on the future
# from `_get_result` by using `await` on it. If we returned the future
# immediately we would `del` from `_custom` too early.
self._custom[counter] = (event, future)
return await result()
def cancel(self):
"""Cancels the current conversation and exits the context manager."""
raise _ConversationCancelled()
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
| 35.30989 | 87 | 0.608614 | import asyncio
import itertools
import time
from .chatgetter import ChatGetter
from ... import helpers, utils, errors
from ...events.common import EventCommon
# Sometimes the edits arrive very fast (within the same second).
# In that case we add a small delta so that the age is older, for
# comparision purposes. This value is enough for up to 1000 messages.
_EDIT_COLLISION_DELTA = 0.001
class Conversation(ChatGetter):
"""
Represents a conversation inside an specific chat.
A conversation keeps track of new messages since it was
created until its exit and easily lets you query the
current state.
If you need a conversation across two or more chats,
you should use two conversations and synchronize them
as you better see fit.
"""
_id_counter = 0
_custom_counter = 0
def __init__(self, client, input_chat,
*, timeout, total_timeout, max_messages,
exclusive, replies_are_responses):
# This call resets the client
ChatGetter.__init__(self, input_chat=input_chat)
self._id = Conversation._id_counter
Conversation._id_counter += 1
self._client = client
self._timeout = timeout
self._total_timeout = total_timeout
self._total_due = None
self._outgoing = set()
self._last_outgoing = 0
self._incoming = []
self._last_incoming = 0
self._max_incoming = max_messages
self._last_read = None
self._custom = {}
self._pending_responses = {}
self._pending_replies = {}
self._pending_edits = {}
self._pending_reads = {}
self._exclusive = exclusive
# The user is able to expect two responses for the same message.
# {desired message ID: next incoming index}
self._response_indices = {}
if replies_are_responses:
self._reply_indices = self._response_indices
else:
self._reply_indices = {}
self._edit_dates = {}
async def send_message(self, *args, **kwargs):
"""
Sends a message in the context of this conversation. Shorthand
for `telethon.client.messages.MessageMethods.send_message` with
``entity`` already set.
"""
message = await self._client.send_message(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
async def send_file(self, *args, **kwargs):
"""
Sends a file in the context of this conversation. Shorthand
for `telethon.client.uploads.UploadMethods.send_file` with
``entity`` already set.
"""
message = await self._client.send_file(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
def mark_read(self, message=None):
"""
Marks as read the latest received message if ``message is None``.
Otherwise, marks as read until the given message (or message ID).
This is equivalent to calling `client.send_read_acknowledge
<telethon.client.messages.MessageMethods.send_read_acknowledge>`.
"""
if message is None:
if self._incoming:
message = self._incoming[-1].id
else:
message = 0
elif not isinstance(message, int):
message = message.id
return self._client.send_read_acknowledge(
self._input_chat, max_id=message)
async def get_response(self, message=None, *, timeout=None):
"""
Gets the next message that responds to a previous one.
Args:
message (`Message <telethon.tl.custom.message.Message>` | `int`, optional):
The message (or the message ID) for which a response
is expected. By default this is the last sent message.
timeout (`int` | `float`, optional):
If present, this `timeout` (in seconds) will override the
per-action timeout defined for the conversation.
"""
return await self._get_message(
message, self._response_indices, self._pending_responses, timeout,
lambda x, y: True
)
async def get_reply(self, message=None, *, timeout=None):
"""
Gets the next message that explicitly replies to a previous one.
"""
return await self._get_message(
message, self._reply_indices, self._pending_replies, timeout,
lambda x, y: x.reply_to_msg_id == y
)
def _get_message(
self, target_message, indices, pending, timeout, condition):
"""
Gets the next desired message under the desired condition.
Args:
target_message (`object`):
The target message for which we want to find another
response that applies based on `condition`.
indices (`dict`):
This dictionary remembers the last ID chosen for the
input `target_message`.
pending (`dict`):
This dictionary remembers {msg_id: Future} to be set
once `condition` is met.
timeout (`int`):
The timeout (in seconds) override to use for this operation.
condition (`callable`):
The condition callable that checks if an incoming
message is a valid response.
"""
start_time = time.time()
target_id = self._get_message_id(target_message)
# If there is no last-chosen ID, make sure to pick one *after*
# the input message, since we don't want responses back in time
if target_id not in indices:
for i, incoming in enumerate(self._incoming):
if incoming.id > target_id:
indices[target_id] = i
break
else:
indices[target_id] = len(self._incoming)
# We will always return a future from here, even if the result
# can be set immediately. Otherwise, needing to await only
# sometimes is an annoying edge case (i.e. we would return
# a `Message` but `get_response()` always `await`'s).
future = self._client.loop.create_future()
# If there are enough responses saved return the next one
last_idx = indices[target_id]
if last_idx < len(self._incoming):
incoming = self._incoming[last_idx]
if condition(incoming, target_id):
indices[target_id] += 1
future.set_result(incoming)
return future
# Otherwise the next incoming response will be the one to use
pending[target_id] = future
return self._get_result(future, start_time, timeout)
async def get_edit(self, message=None, *, timeout=None):
"""
Awaits for an edit after the last message to arrive.
The arguments are the same as those for `get_response`.
"""
start_time = time.time()
target_id = self._get_message_id(message)
target_date = self._edit_dates.get(target_id, 0)
earliest_edit = min(
(x for x in self._incoming
if x.edit_date
and x.id > target_id
and x.edit_date.timestamp() > target_date
),
key=lambda x: x.edit_date.timestamp(),
default=None
)
if earliest_edit and earliest_edit.edit_date.timestamp() > target_date:
self._edit_dates[target_id] = earliest_edit.edit_date.timestamp()
return earliest_edit
# Otherwise the next incoming response will be the one to use
future = asyncio.Future(loop=self._client.loop)
self._pending_edits[target_id] = future
return await self._get_result(future, start_time, timeout)
async def wait_read(self, message=None, *, timeout=None):
"""
Awaits for the sent message to be marked as read. Note that
receiving a response doesn't imply the message was read, and
this action will also trigger even without a response.
"""
start_time = time.time()
future = self._client.loop.create_future()
target_id = self._get_message_id(message)
if self._last_read is None:
self._last_read = target_id - 1
if self._last_read >= target_id:
return
self._pending_reads[target_id] = future
return await self._get_result(future, start_time, timeout)
async def wait_event(self, event, *, timeout=None):
"""
Waits for a custom event to occur. Timeouts still apply.
Unless you're certain that your code will run fast enough,
generally you should get a "handle" of this special coroutine
before acting. Generally, you should do this:
>>> from telethon import TelegramClient, events
>>>
>>> client = TelegramClient(...)
>>>
>>> async def main():
>>> async with client.conversation(...) as conv:
>>> response = conv.wait_event(events.NewMessage(incoming=True))
>>> await conv.send_message('Hi')
>>> response = await response
This way your event can be registered before acting,
since the response may arrive before your event was
registered. It depends on your use case since this
also means the event can arrive before you send
a previous action.
"""
start_time = time.time()
if isinstance(event, type):
event = event()
await event.resolve(self._client)
counter = Conversation._custom_counter
Conversation._custom_counter += 1
future = asyncio.Future(loop=self._client.loop)
# We need the `async def` here because we want to block on the future
# from `_get_result` by using `await` on it. If we returned the future
# immediately we would `del` from `_custom` too early.
async def result():
try:
return await self._get_result(future, start_time, timeout)
finally:
del self._custom[counter]
self._custom[counter] = (event, future)
return await result()
async def _check_custom(self, built):
for i, (ev, fut) in self._custom.items():
ev_type = type(ev)
inst = built[ev_type]
if inst and ev.filter(inst):
fut.set_result(inst)
def _on_new_message(self, response):
response = response.message
if response.chat_id != self.chat_id or response.out:
return
if len(self._incoming) == self._max_incoming:
self._cancel_all(ValueError('Too many incoming messages'))
return
self._incoming.append(response)
found = []
for msg_id in self._pending_responses:
found.append(msg_id)
self._response_indices[msg_id] = len(self._incoming)
for msg_id in found:
self._pending_responses.pop(msg_id).set_result(response)
found.clear()
for msg_id in self._pending_replies:
if msg_id == response.reply_to_msg_id:
found.append(msg_id)
self._reply_indices[msg_id] = len(self._incoming)
for msg_id in found:
self._pending_replies.pop(msg_id).set_result(response)
def _on_edit(self, message):
message = message.message
if message.chat_id != self.chat_id or message.out:
return
found = []
for msg_id, pending in self._pending_edits.items():
if msg_id < message.id:
found.append(msg_id)
edit_ts = message.edit_date.timestamp()
# We compare <= because edit_ts resolution is always to
# seconds, but we may have increased _edit_dates before.
# Since the dates are ever growing this is not a problem.
if edit_ts <= self._edit_dates.get(msg_id, 0):
self._edit_dates[msg_id] += _EDIT_COLLISION_DELTA
else:
self._edit_dates[msg_id] = message.edit_date.timestamp()
for msg_id in found:
self._pending_edits.pop(msg_id).set_result(message)
def _on_read(self, event):
if event.chat_id != self.chat_id or event.inbox:
return
self._last_read = event.max_id
remove_reads = []
for msg_id, pending in self._pending_reads.items():
if msg_id >= self._last_read:
remove_reads.append(msg_id)
pending.set_result(True)
for to_remove in remove_reads:
del self._pending_reads[to_remove]
def _get_message_id(self, message):
if message is not None: # 0 is valid but false-y, check for None
return message if isinstance(message, int) else message.id
elif self._last_outgoing:
return self._last_outgoing
else:
raise ValueError('No message was sent previously')
def _get_result(self, future, start_time, timeout):
due = self._total_due
if timeout is None:
timeout = self._timeout
if timeout is not None:
due = min(due, start_time + timeout)
return asyncio.wait_for(
future,
timeout=None if due == float('inf') else due - time.time(),
loop=self._client.loop
)
def _cancel_all(self, exception=None):
for pending in itertools.chain(
self._pending_responses.values(),
self._pending_replies.values(),
self._pending_edits.values()):
if exception:
pending.set_exception(exception)
else:
pending.cancel()
for _, fut in self._custom.values():
if exception:
fut.set_exception(exception)
else:
fut.cancel()
async def __aenter__(self):
self._input_chat = \
await self._client.get_input_entity(self._input_chat)
self._chat_peer = utils.get_peer(self._input_chat)
# Make sure we're the only conversation in this chat if it's exclusive
chat_id = utils.get_peer_id(self._chat_peer)
count = self._client._ids_in_conversations.get(chat_id, 0)
if self._exclusive and count:
raise errors.AlreadyInConversationError()
self._client._ids_in_conversations[chat_id] = count + 1
self._client._conversations[self._id] = self
self._last_outgoing = 0
self._last_incoming = 0
for d in (
self._outgoing, self._incoming,
self._pending_responses, self._pending_replies,
self._pending_edits, self._response_indices,
self._reply_indices, self._edit_dates, self._custom):
d.clear()
if self._total_timeout:
self._total_due = time.time() + self._total_timeout
else:
self._total_due = float('inf')
return self
def cancel(self):
"""Cancels the current conversation and exits the context manager."""
raise _ConversationCancelled()
async def __aexit__(self, exc_type, exc_val, exc_tb):
chat_id = utils.get_peer_id(self._chat_peer)
if self._client._ids_in_conversations[chat_id] == 1:
del self._client._ids_in_conversations[chat_id]
else:
self._client._ids_in_conversations[chat_id] -= 1
del self._client._conversations[self._id]
self._cancel_all()
return isinstance(exc_val, _ConversationCancelled)
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
class _ConversationCancelled(InterruptedError):
pass
| 6,426 | 35 | 324 |
f1a6af247fbbfca805569215362ba5bc2e531f8a | 1,142 | py | Python | tests/test_dat.py | astrogewgaw/priwo | cc3ef488b6e9a36fb5d633d6c2c6135274b2c7b7 | [
"MIT"
] | 7 | 2021-04-24T12:16:11.000Z | 2021-12-31T14:46:35.000Z | tests/test_dat.py | astrogewgaw/priwo | cc3ef488b6e9a36fb5d633d6c2c6135274b2c7b7 | [
"MIT"
] | null | null | null | tests/test_dat.py | astrogewgaw/priwo | cc3ef488b6e9a36fb5d633d6c2c6135274b2c7b7 | [
"MIT"
] | null | null | null | import numpy as np
from pathlib import Path
from deepdiff import DeepDiff
from priwo import read_dat, write_dat
from tempfile import NamedTemporaryFile
fnames = [
"test_fake_presto_radio.dat",
"test_fake_presto_radio_breaks.dat",
"test_fake_presto_xray.dat",
]
fdata = np.asarray(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
],
dtype=np.float32,
)
def test_read_dat(datadir):
"""
Test reading in a `*.dat` file.
"""
for fname in fnames:
data = read_dat(datadir.joinpath(fname))["data"]
assert DeepDiff(fdata, data) == {}
def test_write_dat(datadir):
"""
Test writing out a `*.dat` file.
"""
for fname in fnames:
with NamedTemporaryFile(suffix=".dat") as tfobj:
write_dat(
read_dat(datadir.joinpath(fname)),
Path(tfobj.name),
)
data = read_dat(tfobj.name)["data"]
assert DeepDiff(fdata, data) == {}
| 18.126984 | 56 | 0.525394 | import numpy as np
from pathlib import Path
from deepdiff import DeepDiff
from priwo import read_dat, write_dat
from tempfile import NamedTemporaryFile
# Fixture files exercised by both tests below.
fnames = [
    "test_fake_presto_radio.dat",
    "test_fake_presto_radio_breaks.dat",
    "test_fake_presto_xray.dat",
]

# Expected time-series payload of every fixture: the values 0.0 .. 15.0 as
# 32-bit floats (replaces the equivalent 16-element hand-written literal).
fdata = np.arange(16, dtype=np.float32)
def test_read_dat(datadir):
    """Reading every fixture `*.dat` file must yield the expected payload."""
    for name in fnames:
        parsed = read_dat(datadir.joinpath(name))
        assert DeepDiff(fdata, parsed["data"]) == {}
def test_write_dat(datadir):
    """Round-trip each fixture through write_dat and compare the payload."""
    for name in fnames:
        with NamedTemporaryFile(suffix=".dat") as tfobj:
            parsed = read_dat(datadir.joinpath(name))
            write_dat(parsed, Path(tfobj.name))
            roundtripped = read_dat(tfobj.name)["data"]
            assert DeepDiff(fdata, roundtripped) == {}
| 0 | 0 | 0 |
8e29b3597a7d87d4a47252a7d1068099575a2c48 | 144 | py | Python | src/avame/errors.py | nickchen-mitac/fork | 64dab56012da47465b4923f30f26925476c87afc | [
"Apache-2.0"
] | null | null | null | src/avame/errors.py | nickchen-mitac/fork | 64dab56012da47465b4923f30f26925476c87afc | [
"Apache-2.0"
] | null | null | null | src/avame/errors.py | nickchen-mitac/fork | 64dab56012da47465b4923f30f26925476c87afc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
| 20.571429 | 82 | 0.75 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class Timeout(Exception):
    """Raised when an operation exceeds its allotted time.

    (NOTE(review): inferred from the name only — confirm against callers.)
    """
    pass
| 0 | 13 | 23 |
6b6c4918437ff01fd308871c7f88d92fe127782e | 7,590 | py | Python | offrl/base.py | dlqudwns/RepB-SDE | d799c3bbfc9aeca9251dfa84255d1c1b90af42ce | [
"MIT"
] | null | null | null | offrl/base.py | dlqudwns/RepB-SDE | d799c3bbfc9aeca9251dfa84255d1c1b90af42ce | [
"MIT"
] | null | null | null | offrl/base.py | dlqudwns/RepB-SDE | d799c3bbfc9aeca9251dfa84255d1c1b90af42ce | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Clamp range applied to the policy's log-std head before exponentiating.
SCALE_DIAG_MIN_MAX = (-20, 2)
# Small constant guarding log(0) / atanh(+-1) in the squashing math.
EPS = 1e-6
def apply_squashing_func(sample, logp):
    """
    Squash the output of the gaussian distribution and account for that in the log probability.

    :param sample: (tf.Tensor) Action sampled from Gaussian distribution
    :param logp: (tf.Tensor) Log probability before squashing
    :return: (tf.Tensor, tf.Tensor) action squashed into (-1, 1) and its corrected log probability
    """
    # Squash the output
    squashed_action = tf.tanh(sample)
    # Subtract the log|det Jacobian| of tanh; 1e-6 avoids log(0) at saturation.
    # (tf.log is the TF1.x API.)
    squashed_action_logp = logp - tf.reduce_sum(tf.log(1 - squashed_action ** 2 + 1e-6), axis=1)
    # incurred by change of variable
    return squashed_action, squashed_action_logp
# Simple replay buffer
| 40.15873 | 100 | 0.645059 | import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Clamp range applied to the policy's log-std head before exponentiating.
SCALE_DIAG_MIN_MAX = (-20, 2)
# Small constant guarding log(0) / atanh(+-1) in the squashing math.
EPS = 1e-6
def apply_squashing_func(sample, logp):
    """
    Squash the output of the gaussian distribution and account for that in the log probability.

    :param sample: (tf.Tensor) Action sampled from Gaussian distribution
    :param logp: (tf.Tensor) Log probability before squashing
    :return: (tf.Tensor, tf.Tensor) action squashed into (-1, 1) and its corrected log probability
    """
    # Squash the output
    squashed_action = tf.tanh(sample)
    # Subtract the log|det Jacobian| of tanh; 1e-6 avoids log(0) at saturation.
    # (tf.log is the TF1.x API.)
    squashed_action_logp = logp - tf.reduce_sum(tf.log(1 - squashed_action ** 2 + 1e-6), axis=1)
    # incurred by change of variable
    return squashed_action, squashed_action_logp
class OnlineReplayBuffer:
    """Fixed-capacity transition store with wrap-around (ring-buffer) writes.

    Transitions live in preallocated numpy arrays; once `buffer_size` is
    reached, new samples overwrite the oldest slots.
    """

    def __init__(self, state_dim, action_dim, buffer_size):
        self.buffer_size = buffer_size
        self.obs = np.zeros((buffer_size, state_dim))
        self.action = np.zeros((buffer_size, action_dim))
        self.reward = np.zeros((buffer_size, 1))
        self.next_obs = np.zeros((buffer_size, state_dim))
        self.done = np.zeros((buffer_size, 1))
        self._pointer = 0  # next slot to write
        self.size = 0      # number of valid transitions currently stored
        self.buffer = [self.obs, self.action, self.reward, self.next_obs, self.done]

    def add_samples(self, *samples):
        """Write a batch of (obs, action, reward, next_obs, done) rows."""
        count = len(samples[0])
        slots = np.arange(self._pointer, self._pointer + count) % self.buffer_size
        for storage, rows in zip(self.buffer, samples):
            assert len(rows) == count
            storage[slots] = rows
        self._pointer = (self._pointer + count) % self.buffer_size
        self.size = min(self.size + count, self.buffer_size)

    def add_sample(self, *sample):
        """Write one transition; each field is given without a batch axis."""
        batched = [np.array(field)[None] for field in sample]
        self.add_samples(*batched)

    def can_sample(self, batch_size):
        """True once at least `batch_size` transitions are stored."""
        return self.size >= batch_size

    def sample(self, batch_size):
        """Draw `batch_size` transitions uniformly (with replacement)."""
        indices = np.random.randint(0, self.size, size=batch_size)
        return [storage[indices] for storage in self.buffer]

    def sample_obs(self, batch_size):
        """Draw `batch_size` observations uniformly (with replacement)."""
        indices = np.random.randint(0, self.size, size=batch_size)
        return self.obs[indices]

    def format_for_model_training(self):
        """Return (inputs, targets): [obs|action] -> [reward|next_obs - obs]."""
        valid = self.size
        obs = self.obs[:valid]
        inputs = np.concatenate([obs, self.action[:valid]], axis=-1)
        targets = np.concatenate(
            [self.reward[:valid], self.next_obs[:valid] - obs], axis=-1)
        return inputs, targets
# Simple replay buffer
class OfflineReplayBuffer:
    """Replay buffer over a fixed offline dataset.

    Observations are standardized with the dataset's per-dimension mean and
    (std + 1e-3); `sample`/`sample_obs` return standardized observations.
    """

    def __init__(self, obs, action, reward, next_obs, done):
        self.obs, self.action, self.reward, self.next_obs, self.done = \
            obs, action, reward, next_obs, done
        self.obs_mean = np.mean(self.obs, axis=0, keepdims=True)
        # The small epsilon keeps constant dimensions from dividing by zero.
        self.obs_std = np.std(self.obs, axis=0, keepdims=True) + 1e-3
        self.stan_obs = self.standardizer(np.array(self.obs))
        self.stan_next_obs = self.standardizer(np.array(self.next_obs))

    def standardizer(self, obs):
        """Map raw observations into the standardized space."""
        return (obs - self.obs_mean) / self.obs_std

    def unstandardizer(self, obs):
        """Inverse of `standardizer`."""
        return obs * self.obs_std + self.obs_mean

    def format_for_model_training(self):
        """Return (inputs, targets, terminals) for dynamics-model training."""
        inputs = np.concatenate([self.stan_obs, self.action], axis=-1)
        delta = self.stan_next_obs - self.stan_obs
        targets = np.concatenate([np.array(self.reward)[:, None], delta], axis=-1)
        terminals = np.reshape(np.array(self.done), [-1, 1])
        return inputs, targets, terminals

    def sample(self, batch_size):
        """Draw a uniform batch; observations come back standardized."""
        indices = np.random.randint(0, len(self.obs), size=batch_size)
        obs = np.array([self.obs[i] for i in indices])
        action = np.array([self.action[i] for i in indices])
        reward = np.array([self.reward[i] for i in indices])
        next_obs = np.array([self.next_obs[i] for i in indices])
        done = np.array([self.done[i] for i in indices])
        return (self.standardizer(obs), action, reward[:, None],
                self.standardizer(next_obs), done[:, None])

    def sample_obs(self, batch_size):
        """Draw a uniform batch of standardized observations only."""
        indices = np.random.randint(0, len(self.obs), size=batch_size)
        picked = np.array([self.obs[i] for i in indices])
        return self.standardizer(picked)
class SquahedGaussianActor(tf.keras.layers.Layer):
    """Gaussian policy head whose samples are squashed into (-1, 1) via tanh.

    (NOTE(review): the original "Squahed" spelling is kept so external
    references stay valid.)
    """

    def __init__(self, action_dim, hidden_dim=256):
        super(SquahedGaussianActor, self).__init__()
        self.action_dim = action_dim
        # Actor parameters
        self.a_l0 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='a/f0')
        self.a_l1 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='a/f1')
        self.a_l2_mu = tf.keras.layers.Dense(action_dim, name='a/f2_mu')
        self.a_l2_log_std = tf.keras.layers.Dense(action_dim, name='a/f2_log_std')

    def feedforward(self, obs):
        # Two ReLU hidden layers, then separate heads for mean and log-std.
        h = self.a_l0(obs)
        h = self.a_l1(h)
        mean = self.a_l2_mu(h)
        log_std = self.a_l2_log_std(h)
        # Clamp log-std to SCALE_DIAG_MIN_MAX before exponentiating.
        std = tf.exp(tf.clip_by_value(log_std, *SCALE_DIAG_MIN_MAX))
        return mean, std

    def call(self, inputs, **_):
        """Return (deterministic_action, sampled_action, sampled_logp, dist)."""
        obs, = inputs
        mean, std = self.feedforward(obs)
        dist = tfp.distributions.MultivariateNormalDiag(mean, std)
        # NOTE(review): stashes the batch shape on the distribution object —
        # presumably read by downstream code; confirm against callers.
        dist.shape = mean.shape
        sampled_action = dist.sample()
        sampled_action_logp = dist.log_prob(sampled_action)
        squahsed_action, squahsed_action_logp = \
            apply_squashing_func(sampled_action, sampled_action_logp)
        deterministic_action, _ = \
            apply_squashing_func(mean, dist.log_prob(mean))
        return deterministic_action, squahsed_action, squahsed_action_logp, dist

    def nlogp(self, dist, action):
        """Negative mean log-probability of an already-squashed action.

        The action is un-squashed with atanh (clipped by EPS to stay in the
        open interval) and the tanh change-of-variable term is subtracted.
        """
        before_squahed_action = tf.atanh(
            tf.clip_by_value(action, -1 + EPS, 1 - EPS))
        log_likelihood = dist.log_prob(before_squahed_action)
        log_likelihood -= tf.reduce_sum(
            tf.log(1 - action ** 2 + EPS), axis=1)
        return -tf.reduce_mean(log_likelihood)
class VNetwork(tf.keras.layers.Layer):
    """State-value network V(s): two ReLU hidden layers and a linear output."""

    def __init__(self, output_dim=1, hidden_dim=64):
        super(VNetwork, self).__init__()
        self.v_l0 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='v/f0')
        self.v_l1 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='v/f1')
        self.v_l2 = tf.keras.layers.Dense(output_dim, name='v/f2')

    def call(self, inputs, **_):
        # `inputs` is a 1-tuple holding the observation batch.
        obs, = inputs
        h = self.v_l0(obs)
        h = self.v_l1(h)
        return self.v_l2(h)
class QNetwork(tf.keras.layers.Layer):
    """Ensemble of `num_critics` Q(s, a) networks with identical architecture."""

    def __init__(self, output_dim=1, num_critics=2, hidden_dim=64):
        super(QNetwork, self).__init__()
        self.num_critics = num_critics
        # Parallel layer lists: qs_l0[i]/qs_l1[i]/qs_l2[i] form critic i.
        self.qs_l0, self.qs_l1, self.qs_l2 = [], [], []
        for i in range(self.num_critics):
            self.qs_l0.append(tf.keras.layers.Dense(hidden_dim, activation='relu', name=f'q{i}/f0'))
            self.qs_l1.append(tf.keras.layers.Dense(hidden_dim, activation='relu', name=f'q{i}/f1'))
            self.qs_l2.append(tf.keras.layers.Dense(output_dim, name=f'q{i}/f2'))

    def call(self, inputs, **_):
        """Return a list with each critic's output for the (obs, action) batch."""
        obs, action = inputs
        obs_action = tf.concat([obs, action], axis=1)
        outputs = []
        for i in range(self.num_critics):
            h = self.qs_l0[i](obs_action)
            h = self.qs_l1[i](h)
            outputs.append(self.qs_l2[i](h))
        return outputs
| 5,796 | 542 | 573 |
9bc0c380b53f320d7ac6cd4ede7d18d860040a4b | 3,826 | py | Python | dominio/mixins.py | MinisterioPublicoRJ/api-cadg | a8998c4c234a65192f1dca8ea9a17a1d4a496556 | [
"MIT"
] | 6 | 2020-02-11T18:45:58.000Z | 2020-05-26T12:37:28.000Z | dominio/mixins.py | MinisterioPublicoRJ/api-cadg | a8998c4c234a65192f1dca8ea9a17a1d4a496556 | [
"MIT"
] | 120 | 2019-07-01T14:45:32.000Z | 2022-01-25T19:10:16.000Z | dominio/mixins.py | MinisterioPublicoRJ/apimpmapas | 196ad25a4922448b8ae7a66012a2843c7b7194ad | [
"MIT"
] | null | null | null | import math
import unicodedata
from decouple import config
from django.conf import settings
from django.core.paginator import EmptyPage, Paginator
from django.http import HttpResponseForbidden
from django.views.decorators.cache import cache_page
from jwt import InvalidSignatureError, DecodeError
from login.jwtlogin import unpack_jwt
| 29.658915 | 79 | 0.602196 | import math
import unicodedata
from decouple import config
from django.conf import settings
from django.core.paginator import EmptyPage, Paginator
from django.http import HttpResponseForbidden
from django.views.decorators.cache import cache_page
from jwt import InvalidSignatureError, DecodeError
from login.jwtlogin import unpack_jwt
class PaginatorMixin:
    """Mixin adding page-count and page-extraction helpers.

    Subclasses must set PAGE_SIZE before calling `get_n_pages`.
    """

    PAGE_SIZE = None

    def get_n_pages(self, data):
        """Number of PAGE_SIZE-sized pages needed to hold `data`."""
        return math.ceil(len(data) / self.PAGE_SIZE)

    def paginate(self, model_response, page, page_size):
        """Return the items of `page`, or an empty list past the last page."""
        paginator = Paginator(model_response, page_size)
        try:
            return paginator.page(page).object_list
        except EmptyPage:
            return []
class CacheMixin:
    """Mixin caching view responses; the cache key is the snake_cased class name."""

    cache_timeout = None  # explicit timeout override (seconds)
    cache_config = None   # name of a config entry supplying the timeout

    def __getattr__(self, key):
        # Only `cache_key` is synthesized; anything else is a genuine miss.
        if key != "cache_key":
            raise AttributeError
        class_name = self.__class__.__name__
        # CamelCase -> snake_case, e.g. "CacheMixin" -> "cache_mixin".
        pieces = []
        for i, ch in enumerate(class_name):
            if ch.isupper() and i:
                pieces.append(f'_{ch.lower()}')
            else:
                pieces.append(ch.lower())
        return ''.join(pieces)

    def get_timeout(self):
        """Resolve the timeout: explicit attribute > config entry > settings default."""
        timeout = settings.CACHE_TIMEOUT
        if self.cache_timeout is not None:
            return self.cache_timeout
        if self.cache_config is not None:
            timeout = config(
                self.cache_config,
                cast=int,
                default=settings.CACHE_TIMEOUT
            )
        return timeout

    def dispatch(self, request, *args, **kwargs):
        """Wrap the parent dispatch in Django's cache_page decorator."""
        return cache_page(
            self.get_timeout(),
            key_prefix=self.cache_key
        )(super().dispatch)(request, *args, **kwargs)
class JWTAuthMixin:
    """Mixin gating dispatch on a JWT: the caller must be an admin, belong to
    the organ referenced in the URL, or hit a URL without an organ id."""

    orgao_url_kwarg = "orgao_id"  # URL kwarg naming the target organ

    def authorize_user_in_orgao(self, token_payload, *args, **kwargs):
        """Return True when the token authorizes access to the URL's organ."""
        # TODO: should we accept a POST from an admin for any organ?
        is_admin = token_payload.get("tipo_permissao", "regular") == "admin"
        valid_ids = [int(o) for o in
                     token_payload.get("ids_orgaos_lotados_validos", [])]
        raw_orgao = token_payload.get("orgao")
        payload_orgao = int(raw_orgao) if raw_orgao else None
        allowed = valid_ids + [payload_orgao]
        url_orgao = kwargs.get(self.orgao_url_kwarg)
        if url_orgao:
            url_orgao = int(url_orgao)
        return (
            is_admin
            or url_orgao in allowed
            or not url_orgao
        )

    def dispatch(self, request, *args, **kwargs):
        """Unpack the JWT and dispatch only when authorized; 403 otherwise."""
        try:
            self.token_payload = unpack_jwt(request)
            if not self.authorize_user_in_orgao(
                self.token_payload,
                *args,
                **kwargs
            ):
                return HttpResponseForbidden()
            return super().dispatch(request, *args, **kwargs)
        except (InvalidSignatureError, DecodeError):
            return HttpResponseForbidden()
class SearchMixin:
    """Mixin providing accent- and case-insensitive substring search over dicts."""

    search_fields = None  # keys to inspect; inferred from the data when unset

    def normalize_string(self, s):
        """Lower-case `s` and strip accents; non-strings normalize to ''."""
        if not isinstance(s, str):
            return ''
        decomposed = unicodedata.normalize('NFD', s)
        return decomposed.encode('ascii', 'ignore').decode('utf-8').lower()

    def string_in_fields(self, s, x):
        """Whether normalized `s` occurs in any searchable field of dict `x`."""
        for field in self.search_fields:
            try:
                if s in self.normalize_string(x[field]):
                    return True
            except Exception:
                # Missing keys / non-indexable values simply don't match.
                continue
        return False

    def search(self, search_string, data):
        """Filter `data` (a list of dicts) by a normalized substring match."""
        if data and not self.search_fields:
            self.search_fields = list(data[0].keys())
        if not search_string:
            return data
        needle = self.normalize_string(search_string)
        return [item for item in data if self.string_in_fields(needle, item)]
| 3,008 | 389 | 92 |
088dfd54f1787f73dd612012fa9319fa21c0177a | 4,746 | py | Python | unittests/test_history.py | fraca7/pyqtcmd | 7648ab030cbcb308b451f8b34b022b523a08229f | [
"MIT"
] | null | null | null | unittests/test_history.py | fraca7/pyqtcmd | 7648ab030cbcb308b451f8b34b022b523a08229f | [
"MIT"
] | null | null | null | unittests/test_history.py | fraca7/pyqtcmd | 7648ab030cbcb308b451f8b34b022b523a08229f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import unittest
from pyqtcmd import History, Command, ConsistencyError
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| 25.793478 | 54 | 0.665402 | #!/usr/bin/env python3
import unittest
from pyqtcmd import History, Command, ConsistencyError
class DummyCommand(Command):
    """Minimal Command for the tests: do/undo just toggle a `done` flag."""

    def __init__(self):
        self.done = False

    def do(self):
        self.done = True

    def undo(self):
        self.done = False
class HistoryBaseTestCase(unittest.TestCase):
    """Base fixture: a fresh History whose `changed` signal emissions are counted."""

    def setUp(self):
        self.history = History()
        self.changed = 0  # number of `changed` emissions observed so far
        self.history.changed.connect(self._changed)

    def tearDown(self):
        self.history.changed.disconnect(self._changed)

    def _changed(self):
        self.changed += 1
class HistoryInitialTestCase(HistoryBaseTestCase):
    """A brand-new history can neither undo nor redo and is unmodified."""

    def test_cannot_undo(self):
        self.assertFalse(self.history.can_undo())

    def test_cannot_redo(self):
        self.assertFalse(self.history.can_redo())

    def test_is_not_modified(self):
        self.assertFalse(self.history.is_modified())

    def test_undo_raises(self):
        with self.assertRaises(ConsistencyError):
            self.history.undo()

    def test_redo_raises(self):
        with self.assertRaises(ConsistencyError):
            self.history.redo()
class HistoryPastTestCase(HistoryBaseTestCase):
    """Behaviour right after a single command has been run."""

    def setUp(self):
        super().setUp()
        self.cmd = DummyCommand()
        self.history.run(self.cmd)

    def test_command_done(self):
        self.assertTrue(self.cmd.done)

    def test_command_can_undo(self):
        self.assertTrue(self.history.can_undo())

    def test_command_cannot_redo(self):
        self.assertFalse(self.history.can_redo())

    def test_command_changed(self):
        # run() emits `changed` once
        self.assertEqual(self.changed, 1)

    def test_command_is_modified(self):
        self.assertTrue(self.history.is_modified())
class HistoryFutureTestCase(HistoryBaseTestCase):
    """Behaviour after one command has been run and then undone."""

    def setUp(self):
        super().setUp()
        self.cmd = DummyCommand()
        self.history.run(self.cmd)
        self.history.undo()

    def test_command_undone(self):
        self.assertFalse(self.cmd.done)

    def test_command_cannot_undo(self):
        self.assertFalse(self.history.can_undo())

    def test_command_can_redo(self):
        self.assertTrue(self.history.can_redo())

    def test_command_changed(self):
        # run + undo -> two `changed` emissions
        self.assertEqual(self.changed, 2)

    def test_command_is_not_modified(self):
        self.assertFalse(self.history.is_modified())

    def test_redo_done(self):
        self.history.redo()
        self.assertTrue(self.cmd.done)

    def test_redo_can_undo(self):
        self.history.redo()
        self.assertTrue(self.history.can_undo())

    def test_redo_cannot_redo(self):
        self.history.redo()
        self.assertFalse(self.history.can_redo())

    def test_redo_changed(self):
        # run + undo + redo -> three `changed` emissions
        self.history.redo()
        self.assertEqual(self.changed, 3)

    def test_redo_is_modified(self):
        self.history.redo()
        self.assertTrue(self.history.is_modified())
class HistorySaveTestCase(HistoryBaseTestCase):
    """is_modified() is evaluated relative to the last save_point()."""

    def setUp(self):
        super().setUp()
        self.cmd1 = DummyCommand()
        self.cmd2 = DummyCommand()
        self.history.run(self.cmd1)
        self.history.save_point()
        self.history.run(self.cmd2)

    def test_is_modified(self):
        self.assertTrue(self.history.is_modified())

    def test_undo_is_not_modified(self):
        # one undo puts us exactly at the save point
        self.history.undo()
        self.assertFalse(self.history.is_modified())

    def test_undo_twice_is_modified(self):
        # two undos move past (before) the save point
        self.history.undo()
        self.history.undo()
        self.assertTrue(self.history.is_modified())
class HistoryResetNewTestCase(HistoryBaseTestCase):
    """reset() with the default is_new yields a pristine, unmodified history."""

    def setUp(self):
        super().setUp()
        self.history.run(DummyCommand())
        self.history.run(DummyCommand())
        self.history.undo()
        self.changed = 0  # only count emissions caused by reset()
        self.history.reset()

    def test_changed(self):
        self.assertEqual(self.changed, 1)

    def test_is_not_modified(self):
        self.assertFalse(self.history.is_modified())

    def test_cannot_undo(self):
        self.assertFalse(self.history.can_undo())

    def test_cannot_redo(self):
        self.assertFalse(self.history.can_redo())
class HistoryResetNotNewTestCase(HistoryBaseTestCase):
    """reset(is_new=False) clears undo/redo but keeps the modified flag set."""

    def setUp(self):
        super().setUp()
        self.history.run(DummyCommand())
        self.history.run(DummyCommand())
        self.history.undo()
        self.changed = 0  # only count emissions caused by reset()
        self.history.reset(is_new=False)

    def test_changed(self):
        self.assertEqual(self.changed, 1)

    def test_is_modified(self):
        self.assertTrue(self.history.is_modified())

    def test_cannot_undo(self):
        self.assertFalse(self.history.can_undo())

    def test_cannot_redo(self):
        self.assertFalse(self.history.can_redo())
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| 3,080 | 203 | 1,310 |
54fc1cb85e499884e6925b7ab89f59efd44ce1c8 | 519 | py | Python | semana_01/aulas/05-formata_strings.py | alexaldr/AceleraDev-Python | f655b31f5d672f617c43282dc91a19f761845f84 | [
"MIT"
] | 5 | 2020-05-19T17:12:02.000Z | 2020-05-31T16:19:04.000Z | semana-1/arquivos-aulas/05-formata_strings.py | lucashenrs/acelerapython-codenation | 6335d994aef6d878a89d5504c68e7eceea8984cb | [
"MIT"
] | 23 | 2021-03-19T04:59:51.000Z | 2022-02-10T15:15:12.000Z | semana_01/aulas/05-formata_strings.py | alexaldr/AceleraDev-Python | f655b31f5d672f617c43282dc91a19f761845f84 | [
"MIT"
] | 1 | 2020-07-29T17:56:12.000Z | 2020-07-29T17:56:12.000Z | idade = 29
# String-formatting demo: concatenation vs str.format() vs f-strings.
# (`idade = 29` is defined on the preceding line.)
print('Minha idade é: ' + str(idade))      # concatenation needs an explicit str()
print('Minha idade é: {}'.format(idade))   # str.format()
print(f'Minha idade é: {idade}')           # f-string (Python 3.6+)
nome = 'Élysson MR cdoiksabncdsaicbsdaoin dsaucubsdaocpiknbsdaoiyvcsdaopikbncsdaiyvbcds'
# :.15 truncates the string to 15 characters; :03 zero-pads the int to width 3
print(f'Meu nome é {nome:.15} e eu tenho {idade:03} anos')
dinheiro = 2.598
# :.2f rounds the float to two decimal places
print(f'Eu tenho {dinheiro:.2f} R$')
lista_itens = ['Garfo', 'Faca', 'copo', 'Prato']
# indexing (including negative indices) works inside the braces
print(f'Eu almoço com {lista_itens[0]} e {lista_itens[1]} no {lista_itens[-1]}')
# arbitrary expressions are allowed inside f-string braces
print(f'Eu terei {idade + 30} anos daqui a 30 anos')
| 22.565217 | 88 | 0.685934 | idade = 29
# String-formatting demo: concatenation vs str.format() vs f-strings.
# (`idade = 29` is defined on the preceding line.)
print('Minha idade é: ' + str(idade))      # concatenation needs an explicit str()
print('Minha idade é: {}'.format(idade))   # str.format()
print(f'Minha idade é: {idade}')           # f-string (Python 3.6+)
nome = 'Élysson MR cdoiksabncdsaicbsdaoin dsaucubsdaocpiknbsdaoiyvcsdaopikbncsdaiyvbcds'
# :.15 truncates the string to 15 characters; :03 zero-pads the int to width 3
print(f'Meu nome é {nome:.15} e eu tenho {idade:03} anos')
dinheiro = 2.598
# :.2f rounds the float to two decimal places
print(f'Eu tenho {dinheiro:.2f} R$')
lista_itens = ['Garfo', 'Faca', 'copo', 'Prato']
# indexing (including negative indices) works inside the braces
print(f'Eu almoço com {lista_itens[0]} e {lista_itens[1]} no {lista_itens[-1]}')
# arbitrary expressions are allowed inside f-string braces
print(f'Eu terei {idade + 30} anos daqui a 30 anos')
| 0 | 0 | 0 |
7d1cee70fe047374d76b7ebbe6bcabae1994465c | 1,692 | py | Python | qcs_api_client/models/edge.py | rigetti/qcs-api-client-python | 569cb9fa972dec1a706757374acb3df3ce649ec4 | [
"Apache-2.0"
] | 2 | 2021-12-15T23:24:40.000Z | 2022-01-03T01:14:17.000Z | qcs_api_client/models/edge.py | rigetti/qcs-api-client-python | 569cb9fa972dec1a706757374acb3df3ce649ec4 | [
"Apache-2.0"
] | 3 | 2022-01-10T21:55:13.000Z | 2022-02-22T19:41:01.000Z | qcs_api_client/models/edge.py | rigetti/qcs-api-client-python | 569cb9fa972dec1a706757374acb3df3ce649ec4 | [
"Apache-2.0"
] | 3 | 2021-12-15T23:24:42.000Z | 2022-01-09T11:16:17.000Z | from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, cast
import attr
from ..types import UNSET
from ..util.serialization import is_not_none
T = TypeVar("T", bound="Edge")
@attr.s(auto_attribs=True)
class Edge:
"""A degree-two logical connection in the quantum processor's architecture.
The existence of an edge in the ISA `Architecture` does not necessarily mean that a given 2Q
operation will be available on the edge. This information is conveyed by the presence of the
two `node_id` values in instances of `Instruction`.
Note that edges are undirected in this model. Thus edge :math:`(a, b)` is equivalent to edge
:math:`(b, a)`.
Attributes:
node_ids (List[int]): The integer ids of the computational nodes at the two ends of the edge. Order is not
important; an architecture edge is treated as undirected.
"""
node_ids: List[int]
@classmethod
| 30.763636 | 118 | 0.631797 | from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, cast
import attr
from ..types import UNSET
from ..util.serialization import is_not_none
T = TypeVar("T", bound="Edge")
@attr.s(auto_attribs=True)
class Edge:
    """A degree-two logical connection in the quantum processor's architecture.

    The existence of an edge in the ISA `Architecture` does not necessarily mean that a given 2Q
    operation will be available on the edge. This information is conveyed by the presence of the
    two `node_id` values in instances of `Instruction`.

    Note that edges are undirected in this model. Thus edge :math:`(a, b)` is equivalent to edge
    :math:`(b, a)`.

    Attributes:
        node_ids (List[int]): The integer ids of the computational nodes at the two ends of the edge. Order is not
            important; an architecture edge is treated as undirected.
    """

    node_ids: List[int]

    def to_dict(self, pick_by_predicate: Optional[Callable[[Any], bool]] = is_not_none) -> Dict[str, Any]:
        """Serialize to a plain dict, dropping UNSET entries and any value
        rejected by `pick_by_predicate` (by default, None values)."""
        node_ids = self.node_ids

        field_dict: Dict[str, Any] = {}
        field_dict.update(
            {
                "node_ids": node_ids,
            }
        )

        field_dict = {k: v for k, v in field_dict.items() if v != UNSET}
        if pick_by_predicate is not None:
            field_dict = {k: v for k, v in field_dict.items() if pick_by_predicate(v)}

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an Edge from its dict representation (inverse of `to_dict`)."""
        d = src_dict.copy()
        node_ids = cast(List[int], d.pop("node_ids"))

        edge = cls(
            node_ids=node_ids,
        )

        return edge
| 691 | 0 | 53 |
fde4fd856e1de3d079eb519cfbb73d0ba8fb622e | 838 | py | Python | problems/problem_0268.py | delirious-lettuce/LeetCode | 4fbf2627f86cbaf1054c3df59bb9ffe72096d405 | [
"MIT"
] | 3 | 2018-02-14T23:50:07.000Z | 2022-01-20T11:34:42.000Z | problems/problem_0268.py | delirious-lettuce/LeetCode | 4fbf2627f86cbaf1054c3df59bb9ffe72096d405 | [
"MIT"
] | null | null | null | problems/problem_0268.py | delirious-lettuce/LeetCode | 4fbf2627f86cbaf1054c3df59bb9ffe72096d405 | [
"MIT"
] | null | null | null | class Solution:
"""
https://leetcode.com/problems/missing-number/
Given an array containing n distinct numbers taken from
0, 1, 2, ..., n, find the one that is missing from the array.
Example 1
Input: [3,0,1]
Output: 2
Example 2
Input: [9,6,4,2,3,5,7,0,1]
Output: 8
Note:
Your algorithm should run in linear runtime complexity. Could you
implement it using only constant extra space complexity?
"""
@staticmethod
def missingNumber(nums):
"""
:type nums: List[int]
:rtype: int
"""
# # Version 1
# n = len(nums)
# return n * (n + 1) / 2 - sum(nums)
# Version 2
result = 0
for a, b in zip(nums, range(len(nums))):
result ^= a ^ b
return result ^ len(nums)
| 21.487179 | 69 | 0.538186 | class Solution:
"""
https://leetcode.com/problems/missing-number/
Given an array containing n distinct numbers taken from
0, 1, 2, ..., n, find the one that is missing from the array.
Example 1
Input: [3,0,1]
Output: 2
Example 2
Input: [9,6,4,2,3,5,7,0,1]
Output: 8
Note:
Your algorithm should run in linear runtime complexity. Could you
implement it using only constant extra space complexity?
"""
@staticmethod
def missingNumber(nums):
"""
:type nums: List[int]
:rtype: int
"""
# # Version 1
# n = len(nums)
# return n * (n + 1) / 2 - sum(nums)
# Version 2
result = 0
for a, b in zip(nums, range(len(nums))):
result ^= a ^ b
return result ^ len(nums)
| 0 | 0 | 0 |
542cdd5714446a55eb248c70b1478b75afd1c092 | 8,312 | py | Python | ParsersClasses/SortParser.py | UFRGS-CAROL/radiation-benchmarks-parsers | a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b | [
"Apache-2.0"
] | null | null | null | ParsersClasses/SortParser.py | UFRGS-CAROL/radiation-benchmarks-parsers | a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b | [
"Apache-2.0"
] | null | null | null | ParsersClasses/SortParser.py | UFRGS-CAROL/radiation-benchmarks-parsers | a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b | [
"Apache-2.0"
] | null | null | null | import re
from Parser import Parser
import csv
| 37.273543 | 149 | 0.520693 | import re
from Parser import Parser
import csv
class SortParser(Parser):
    """Parser for the Sort radiation-benchmark logs.

    Classifies the parsed ERR/INF strings of one SDC iteration into
    out-of-order, corrupted, key/value-link and sync/analysis errors, plus
    hardware-detected (INF) events, and exposes the counters as a CSV row
    through ``_placeOutputOnList``.
    """

    _extendHeader = False

    # Counters filled by _relativeErrorParser for the current SDC.
    _errOutOfOrder = None
    _errCorrupted = None
    _errLink = None
    _errSync = None
    _errDetected = None
    _itOOO = None
    _itCorrupted = None
    _itSync = None
    _itOOOCorr = None
    _itSyncCorr = None
    _itSyncOOO = None
    _itLink = None
    _itLinkOOO = None
    _itLinkSync = None
    _itLinkCorr = None
    _itMultiple = None
    _balanceMismatches = None
    _hardDetec = None
    _itHardDetec = None

    _csvHeader = ['logFileName', 'Machine', 'Benchmark', 'Header',
                  'SDC', 'LOGGED_ERRORS', 'ACC_ERR',  # 'ACC_TIME',
                  'ERR_OUTOFORDER', 'ERR_CORRUPTED', 'ERR_LINK', 'ERR_SYNC',
                  'IT_OOO', 'IT_CORRUPTED', 'IT_LINK', 'IT_SYNC',
                  'IT_OOO_CORR', 'IT_SYNC_CORR', 'IT_SYNC_OOO', 'IT_LINK_OOO',
                  'IT_LINK_CORR', 'IT_LINK_SYNC', 'IT_MULTIPLE', 'BALANCE_MISMATCHES',
                  'HARD_DETEC', 'IT_HARD_DETEC']

    # Patterns compiled once. FIX: the previous inline patterns contained
    # stray backslashes before ASCII letters ("\Elements", "\The"), which
    # Python >= 3.7 rejects at pattern compile time with
    # `re.error: bad escape`; raw strings without the stray backslashes
    # restore the intended literal text.
    _RE_NOT_ORDERED = re.compile(
        r".*ERR.*Elements not ordered.*index=(\d+) ([0-9\-]+)>([0-9\-]+)")
    _RE_HISTOGRAM_DIFF = re.compile(
        r".*ERR.*The histogram from element ([0-9\-]+) differs.*"
        r"srcHist=(\d+) dstHist=(\d+)")
    # e.g. "ERR The link between Val and Key arrays in incorrect. index=2090080
    #       wrong_key=133787990 val=54684 correct_key_pointed_by_val=-1979613866"
    _RE_LINK_KEY = re.compile(
        r".*ERR.*The link between Val and Key arrays in incorrect.*")

    def localityParser(self):
        """Locality analysis is not applicable for Sort; intentionally a no-op."""
        pass

    def jaccardCoefficient(self):
        """Jaccard analysis is not applicable for Sort; intentionally a no-op."""
        pass

    def _placeOutputOnList(self):
        """Assemble the CSV row (same order as _csvHeader) for the current SDC.

        Called by the parent class for every SDC being processed, so it only
        needs to fill ``self._outputListError``.
        """
        self._outputListError = [
            # 'logFileName', 'Machine', 'Benchmark', 'Header',
            self._logFileName, self._machine, self._benchmark, self._header,
            # 'SDC', 'LOGGED_ERRORS', 'ACC_ERR',
            self._sdcIteration, self._iteErrors, self._accIteErrors,
            # 'ERR_OUTOFORDER', 'ERR_CORRUPTED', 'ERR_LINK', 'ERR_SYNC',
            self._errOutOfOrder, self._errCorrupted, self._errLink, self._errSync,
            # 'IT_OOO', 'IT_CORRUPTED', 'IT_LINK', 'IT_SYNC',
            self._itOOO, self._itCorrupted, self._itLink, self._itSync,
            # 'IT_OOO_CORR', 'IT_SYNC_CORR', 'IT_SYNC_OOO', 'IT_LINK_OOO',
            self._itOOOCorr, self._itSyncCorr, self._itSyncOOO, self._itLinkOOO,
            # 'IT_LINK_CORR', 'IT_LINK_SYNC', 'IT_MULTIPLE', 'BALANCE_MISMATCHES',
            self._itLinkCorr, self._itLinkSync, self._itMultiple, self._balanceMismatches,
            # 'HARD_DETEC', 'IT_HARD_DETEC'
            self._hardDetec, self._itHardDetec,
        ]

    def parseErrMethod(self, errString):
        """Parse one ERR/INF log string.

        :param errString: raw line from the log
        :return: dict with key 'inf' (hardware-detected event, raw line kept
            under 'inf_err_string') or 'err', plus any of 'not_ordered',
            'histogram_diff' (the regex match kept under 'm_value') and
            'link_key'; None when the line is neither ERR nor INF.
        """
        ret = {}
        if "INF" in errString:
            ret['inf'] = 1
            ret['inf_err_string'] = errString
        elif "ERR" in errString:
            ret['err'] = 1
            if self._RE_NOT_ORDERED.match(errString):
                ret['not_ordered'] = 1
            m = self._RE_HISTOGRAM_DIFF.match(errString)
            if m:
                ret['histogram_diff'] = 1
                ret['m_value'] = m
            if self._RE_LINK_KEY.match(errString):
                ret['link_key'] = 1
        return ret if len(ret) > 0 else None

    def _relativeErrorParser(self, errList):
        """Classify all parsed errors of one SDC and fill the counters.

        Logic ported from Caio's parser. NOTE(review): indentation was
        reconstructed from a whitespace-mangled source; the combination
        classification is placed after the per-line loop, matching the
        per-iteration IT_* semantics of the CSV header — confirm against
        the upstream repository.
        """
        if len(errList) < 1:
            return
        balance = 0
        parsed_errors = 0
        balance_mismatches = 0
        # out-of-order, corrupted, link error, sync/analysis error, detected
        err_counters = [0, 0, 0, 0, 0]
        # ooo, corrupted, link, sync, ooo+corr, sync+corr, sync+ooo,
        # link+ooo, link+corr, link+sync, 3+ combinations, hard-detected
        it_err_counters = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        it_flags = [0, 0, 0, 0]
        for i in errList:
            if 'inf' in i:
                it_err_counters[11] += 1
                err_counters[4] += 1
            elif 'err' in i:
                if 'not_ordered' in i:
                    err_counters[0] += 1
                    it_flags[0] = 1
                    parsed_errors += 1
                if 'histogram_diff' in i:
                    m = i['m_value']
                    # Large source counts, or large even differences, are
                    # treated as sync/analysis noise instead of genuine
                    # element corruption.
                    if (int(m.group(2)) >= 32) or (
                            ((int(m.group(3)) - int(m.group(2))) >= 32) and
                            (((int(m.group(3)) - int(m.group(2))) % 2) == 0)):
                        err_counters[3] += 1
                        it_flags[3] = 1
                        parsed_errors += 1
                    else:
                        err_counters[1] += 1
                        it_flags[1] = 1
                        parsed_errors += 1
                        balance += int(m.group(2)) - int(m.group(3))
                if 'link_key' in i:
                    err_counters[2] += 1
                    it_flags[2] = 1
                    parsed_errors += 1
        # A non-zero balance means corrupted histogram entries did not cancel.
        if balance != 0:
            balance_mismatches += 1
        # Classify this iteration by which error types co-occurred.
        err_type_count = 0
        for flag in it_flags:
            if flag != 0:
                err_type_count += 1
        if err_type_count >= 3:
            it_err_counters[10] += 1  # three or more error types combined
            for f in range(len(it_flags)):
                it_flags[f] = 0
        if it_flags[0] and it_flags[1]:
            it_err_counters[4] += 1  # ooo and corrupted
            it_flags[0] = 0
            it_flags[1] = 0
        if it_flags[0] and it_flags[3]:
            it_err_counters[5] += 1  # sync and corrupted
            it_flags[0] = 0
            it_flags[3] = 0
        if it_flags[1] and it_flags[3]:
            it_err_counters[6] += 1  # sync and ooo
            it_flags[1] = 0
            it_flags[3] = 0
        if it_flags[2] and it_flags[0]:
            it_err_counters[7] += 1  # link and ooo
            it_flags[2] = 0
            it_flags[0] = 0
        if it_flags[2] and it_flags[1]:
            it_err_counters[8] += 1  # link and corrupted
            it_flags[2] = 0
            it_flags[1] = 0
        if it_flags[2] and it_flags[3]:
            it_err_counters[9] += 1  # link and sync
            it_flags[2] = 0
            it_flags[3] = 0
        # Any remaining single flag counts toward its own category.
        for f in range(len(it_flags)):
            if it_flags[f]:
                it_err_counters[f] += 1
                it_flags[f] = 0
        self._errOutOfOrder = err_counters[0]
        self._errCorrupted = err_counters[1]
        self._errLink = err_counters[2]
        self._errSync = err_counters[3]
        self._itOOO = it_err_counters[0]
        self._itCorrupted = it_err_counters[1]
        self._itLink = it_err_counters[2]
        self._itSync = it_err_counters[3]
        self._itOOOCorr = it_err_counters[4]
        self._itSyncCorr = it_err_counters[5]
        self._itSyncOOO = it_err_counters[6]
        self._itLinkOOO = it_err_counters[7]
        self._itLinkCorr = it_err_counters[8]
        self._itLinkSync = it_err_counters[9]
        self._itMultiple = it_err_counters[10]
        self._balanceMismatches = balance_mismatches
        self._hardDetec = err_counters[4]
        self._itHardDetec = it_err_counters[11]
| 6,548 | 1,693 | 23 |
eaba22f48d9843e48f5005a0b916bbec79ddb699 | 25 | py | Python | packages/raspi_ip/raspi_ip/__init__.py | atoy322/PiDrive | 8758f4b5dae4a0187ce0a769c4146628c88015de | [
"MIT"
] | null | null | null | packages/raspi_ip/raspi_ip/__init__.py | atoy322/PiDrive | 8758f4b5dae4a0187ce0a769c4146628c88015de | [
"MIT"
] | 2 | 2021-09-21T06:32:58.000Z | 2021-09-22T23:15:18.000Z | packages/raspi_ip/raspi_ip/__init__.py | atoy322/PiDrive | 8758f4b5dae4a0187ce0a769c4146628c88015de | [
"MIT"
] | null | null | null | from .raspi_ip import IP
| 12.5 | 24 | 0.8 | from .raspi_ip import IP
| 0 | 0 | 0 |
ac4d125ce541406a626cb354c5f68c7526fe5daf | 1,371 | py | Python | rising/transforms/functional/channel.py | NKPmedia/rising | 2a580e9c74c8fb690e27e8bacf09ab97184ab1ee | [
"MIT"
] | 1 | 2020-11-10T11:03:33.000Z | 2020-11-10T11:03:33.000Z | rising/transforms/functional/channel.py | NKPmedia/rising | 2a580e9c74c8fb690e27e8bacf09ab97184ab1ee | [
"MIT"
] | null | null | null | rising/transforms/functional/channel.py | NKPmedia/rising | 2a580e9c74c8fb690e27e8bacf09ab97184ab1ee | [
"MIT"
] | null | null | null | from typing import Optional
import torch
from rising.ops import torch_one_hot
__all__ = ["one_hot_batch"]
def one_hot_batch(target: torch.Tensor,
                  num_classes: Optional[int] = None,
                  dtype: Optional[torch.dtype] = None) -> torch.Tensor:
    """
    One-hot encode a batched label tensor.

    The input is assumed to carry the batch in its first dimension and a
    single (label) channel in its second dimension; that channel dimension
    is expanded to ``num_classes`` one-hot channels.

    Args:
        target: long tensor holding class indices
        num_classes: number of classes.
            If :attr:`num_classes` is None, ``target.max() + 1`` is used
        dtype: optionally changes the dtype of the onehot encoding
            (defaults to ``target``'s dtype)

    Returns:
        torch.Tensor: one hot encoded tensor

    Raises:
        TypeError: if ``target`` is not of dtype ``torch.long``
    """
    if target.dtype != torch.long:
        raise TypeError(
            f"Target tensor needs to be of type torch.long, found {target.dtype}")
    if target.ndim in [0, 1]:
        # scalars and flat index vectors are delegated to the generic helper
        return torch_one_hot(target, num_classes)
    if num_classes is None:
        # infer the class count from the largest label present
        num_classes = int(target.max().detach().item() + 1)
    out_dtype = dtype if dtype is not None else target.dtype
    batch, spatial = target.shape[0], target.shape[2:]
    encoded = torch.zeros(batch, num_classes, *spatial,
                          dtype=out_dtype, device=target.device)
    # write a 1 at the class index stored in the (singleton) channel dim
    return encoded.scatter_(1, target, 1.0)
| 34.275 | 82 | 0.624362 | from typing import Optional
import torch
from rising.ops import torch_one_hot
__all__ = ["one_hot_batch"]
def one_hot_batch(target: torch.Tensor,
num_classes: Optional[int] = None,
dtype: Optional[torch.dtype] = None) -> torch.Tensor:
"""
Compute one hot for input tensor (assumed to a be batch and thus saved
into first dimension -> input should only have one channel)
Args:
target: long tensor to be converted
num_classes: number of classes.
If :attr:`num_classes` is None, the maximum of target is used
dtype: optionally changes the dtype of the onehot encoding
Returns:
torch.Tensor: one hot encoded tensor
"""
if target.dtype != torch.long:
raise TypeError(
f"Target tensor needs to be of type torch.long, found {target.dtype}")
if target.ndim in [0, 1]:
return torch_one_hot(target, num_classes)
else:
if num_classes is None:
num_classes = int(target.max().detach().item() + 1)
_dtype, device, shape = target.dtype, target.device, target.shape
if dtype is None:
dtype = _dtype
target_onehot = torch.zeros(shape[0], num_classes, *shape[2:],
dtype=dtype, device=device)
return target_onehot.scatter_(1, target, 1.0)
| 0 | 0 | 0 |
36f0c59e08e6d5af57d119b9412a2304554292ca | 307 | py | Python | music/distance/aural/simple.py | jedhsu/music | dea68c4a82296cd4910e786f533b2cbf861377c3 | [
"MIT"
] | null | null | null | music/distance/aural/simple.py | jedhsu/music | dea68c4a82296cd4910e786f533b2cbf861377c3 | [
"MIT"
] | null | null | null | music/distance/aural/simple.py | jedhsu/music | dea68c4a82296cd4910e786f533b2cbf861377c3 | [
"MIT"
] | null | null | null | """
*Simple Aural Interval*
An interval that is less than equal to an octave.
Simple / compound intervals partition by ordering.
"""
from abc import ABCMeta
from ._interval import AuralInterval
__all__ = ["SimpleInterval"]
| 13.954545 | 52 | 0.716612 | """
*Simple Aural Interval*
An interval that is less than equal to an octave.
Simple / compound intervals partition by ordering.
"""
from abc import ABCMeta
from ._interval import AuralInterval
__all__ = ["SimpleInterval"]
class SimpleInterval(AuralInterval, metaclass=ABCMeta):
    """An aural interval spanning at most one octave.

    Simple and compound intervals partition intervals by size: a simple
    interval is less than or equal to an octave.

    NOTE(review): the original base class was the undefined name
    ``Interval`` (the module only imports ``AuralInterval``), which raised
    ``NameError`` at import time; ``AuralInterval`` is used instead.  The
    Python-2 ``__metaclass__`` attribute (silently ignored by Python 3) is
    replaced by the ``metaclass=`` class keyword.
    """
| 0 | 45 | 23 |
7977ac24bcafeb338b85e0748caeb55e887c5ea1 | 1,522 | py | Python | test/test_add_contact.py | ivangrebeniuk/python_training | bcf459539556d7d7152f7891e0d65bb744b4b475 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | ivangrebeniuk/python_training | bcf459539556d7d7152f7891e0d65bb744b4b475 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | ivangrebeniuk/python_training | bcf459539556d7d7152f7891e0d65bb744b4b475 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contact import Contact
import pytest
import random
import string
testdata = [Contact(first_name=random_string("first name", 10), middle_name=random_string("Middle name", 10), last_name=random_string("Last name", 10), nickname=random_string("Nickname", 10), title=random_string("Title", 5), company=random_string("Company", 15), address=random_string("Address", 20), home_phone=random_string("Home", 10),
mobile_phone=random_string("Mobile", 12), work_phone=random_string("Work", 15), fax=random_string("Fax", 10), email=random_string("Email", 20), email2=random_string("Email2", 20),
email3="", home_page="", b_day="5", b_month="February", b_year="1984", a_day="8", a_month="October", a_year="1999",
secondary_address=random_string("Addres2", 20), phone2=random_string("Phone2", 10), note=random_string("Note", 20)) for i in range(2)]
@pytest.mark.parametrize("contact", testdata, ids=[repr(x) for x in testdata])
| 58.538462 | 338 | 0.676084 | # -*- coding: utf-8 -*-
from model.contact import Contact
import pytest
import random
import string
def random_string(prefix, maxlen):
    """Return *prefix* followed by a random tail of 0..maxlen-1 characters.

    The tail is drawn from letters, digits and punctuation; the space
    character is added five times so it is picked more often.
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation + " " * 5
    tail_length = random.randrange(maxlen)
    tail = "".join(random.choice(alphabet) for _ in range(tail_length))
    return prefix + tail
testdata = [Contact(first_name=random_string("first name", 10), middle_name=random_string("Middle name", 10), last_name=random_string("Last name", 10), nickname=random_string("Nickname", 10), title=random_string("Title", 5), company=random_string("Company", 15), address=random_string("Address", 20), home_phone=random_string("Home", 10),
mobile_phone=random_string("Mobile", 12), work_phone=random_string("Work", 15), fax=random_string("Fax", 10), email=random_string("Email", 20), email2=random_string("Email2", 20),
email3="", home_page="", b_day="5", b_month="February", b_year="1984", a_day="8", a_month="October", a_year="1999",
secondary_address=random_string("Addres2", 20), phone2=random_string("Phone2", 10), note=random_string("Note", 20)) for i in range(2)]
@pytest.mark.parametrize("contact", testdata, ids=[repr(x) for x in testdata])
def test_add_contact(app, contact):
    """Add a randomly generated contact through the application fixture.

    Runs once per entry in ``testdata``; ``app`` is the pytest fixture
    wrapping the application under test.

    NOTE(review): the list-length verification below is commented out, so
    the test currently only checks that adding a contact does not raise.
    """
    #old_contacts = app.contact.get_contact_list()
    app.contact.add_new_contact(contact)
    #new_contacts = app.contact.get_contact_list()
    #assert len(old_contacts) + 1 == len(new_contacts)
| 402 | 0 | 45 |
28d567f2f90d422a7180e2b2898eb93681b8f4f1 | 1,049 | py | Python | No choice line edit.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | No choice line edit.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | No choice line edit.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Скрипт показывает пример ограничение ввода только тем текстом, что был указан.
"""
from PyQt5.QtWidgets import *
if __name__ == '__main__':
app = QApplication([])
w = Widget()
w.show()
app.exec()
| 21.854167 | 82 | 0.673975 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Скрипт показывает пример ограничение ввода только тем текстом, что был указан.
"""
from PyQt5.QtWidgets import *
class Widget(QWidget):
    """Window that forces the lower field to echo the upper field.

    The second line edit refuses free input: whatever is typed there is
    replaced by the corresponding prefix of the source line edit's text,
    so the user can only "type" the source string.
    """
    def __init__(self):
        super().__init__()
        self.setWindowTitle('No choice')
        # constrained field: every edit is rewritten by the handler below
        self.line_edit_no_choice = QLineEdit()
        self.line_edit_no_choice.textEdited.connect(self.on_text_edited_no_choice)
        # source field: editing it clears the constrained field
        self.line_edit_source = QLineEdit('Я придурок')
        self.line_edit_source.textEdited.connect(self.line_edit_no_choice.clear)
        layout = QFormLayout()
        layout.addRow('Скажи:', self.line_edit_source)
        layout.addWidget(self.line_edit_no_choice)
        self.setLayout(layout)
        self.line_edit_no_choice.setFocus()
    def on_text_edited_no_choice(self, text):
        # keep only as many characters as the user has typed, but take
        # them from the source text instead of the actual keystrokes
        text = self.line_edit_source.text()[:len(text)]
        self.line_edit_no_choice.setText(text)
if __name__ == '__main__':
app = QApplication([])
w = Widget()
w.show()
app.exec()
| 688 | 1 | 76 |
edf44f21f459c6ec4d199865de2cbb5e5d8dd291 | 2,091 | py | Python | run_neutronics.py | RemDelaporteMathurin/T_transport_LIBRA | 5002c3f670f85053751026cfa8079188cd4eb7ab | [
"MIT"
] | null | null | null | run_neutronics.py | RemDelaporteMathurin/T_transport_LIBRA | 5002c3f670f85053751026cfa8079188cd4eb7ab | [
"MIT"
] | null | null | null | run_neutronics.py | RemDelaporteMathurin/T_transport_LIBRA | 5002c3f670f85053751026cfa8079188cd4eb7ab | [
"MIT"
] | null | null | null | import openmc
import openmc_dagmc_wrapper as odw
import openmc_plasma_source as ops
import neutronics_material_maker as nmm
import math
import numpy as np
# DAGMC geometry file produced by the CAD-to-h5m step
my_h5m_filename = "dagmc_not_merged.h5m"
# materials: map DAGMC material tags to material definitions
# (FLiBe breeder enriched to 90% Li-6 at operating temperature/pressure)
material_tag_to_material_dict = {
    "lead": "Lead",
    "flibe": nmm.Material.from_library(name="FLiBe", enrichment=90, temperature=650+273.15, pressure=1e5, temperature_to_neutronics_code=False),
    "inner_tank_wall": "SS_316L_N_IG",
    "outer_tank_wall": "SS_316L_N_IG",
}
materials = odw.Materials(
    h5m_filename=my_h5m_filename,
    correspondence_dict=material_tag_to_material_dict,
)
# Geometry: quarter model with reflective boundaries at 0 and 90 degrees
geometry = odw.Geometry(
    h5m_filename=my_h5m_filename,
    reflective_angles=[0, math.pi/2]
)
bounding_box = geometry.corners()
# Tallies: tritium production on a regular xz mesh ...
t_prod = odw.MeshTally2D(tally_type="(n,Xt)", plane="xz", bounding_box=bounding_box)
t_prod.name = "(n,Xt)_regular"
# ... and on a cylindrical mesh covering the same quarter wedge
cylindrical_mesh = openmc.CylindricalMesh()
cylindrical_mesh.r_grid = np.linspace(bounding_box[0][0], bounding_box[1][0], num=400)
cylindrical_mesh.phi_grid = [0, math.pi/2]
cylindrical_mesh.z_grid = np.linspace(bounding_box[0][2], bounding_box[1][2], num=400)
t_prod_cyl = openmc.Tally(name="(n,Xt)_cylindrical")
t_prod_cyl.scores = ["(n,Xt)"]
t_prod_cyl.filters.append(openmc.MeshFilter(cylindrical_mesh))
# nuclear heating on the same cylindrical mesh
heating_cyl = openmc.Tally(name="heating_cylindrical")
heating_cyl.scores = ["heating"]
heating_cyl.filters.append(openmc.MeshFilter(cylindrical_mesh))
heating = odw.MeshTally2D(tally_type="heating", plane="yz", bounding_box=bounding_box)
# whole-model tritium breeding ratio
tbr = odw.CellTally(tally_type="TBR")
tallies = openmc.Tallies([t_prod, t_prod_cyl, heating, tbr, heating_cyl])
# settings: small run (4 batches x 1000 particles) with a DT point source
settings = odw.FusionSettings()
settings.batches = 4
settings.particles = 1000
settings.source = ops.FusionPointSource(fuel="DT", coordinate=(0.1, 0.1, 66))
my_model = openmc.Model(
    materials=materials, geometry=geometry, settings=settings, tallies=tallies
)
# Run the transport simulation; results land in a statepoint file
statepoint_file = my_model.run()
print(f'neutronics results are saved in {statepoint_file}')
| 31.208955 | 145 | 0.74462 | import openmc
import openmc_dagmc_wrapper as odw
import openmc_plasma_source as ops
import neutronics_material_maker as nmm
import math
import numpy as np
my_h5m_filename = "dagmc_not_merged.h5m"
# materials
material_tag_to_material_dict = {
"lead": "Lead",
"flibe": nmm.Material.from_library(name="FLiBe", enrichment=90, temperature=650+273.15, pressure=1e5, temperature_to_neutronics_code=False),
"inner_tank_wall": "SS_316L_N_IG",
"outer_tank_wall": "SS_316L_N_IG",
}
materials = odw.Materials(
h5m_filename=my_h5m_filename,
correspondence_dict=material_tag_to_material_dict,
)
# Geometry
geometry = odw.Geometry(
h5m_filename=my_h5m_filename,
reflective_angles=[0, math.pi/2]
)
bounding_box = geometry.corners()
# Tallies
t_prod = odw.MeshTally2D(tally_type="(n,Xt)", plane="xz", bounding_box=bounding_box)
t_prod.name = "(n,Xt)_regular"
cylindrical_mesh = openmc.CylindricalMesh()
cylindrical_mesh.r_grid = np.linspace(bounding_box[0][0], bounding_box[1][0], num=400)
cylindrical_mesh.phi_grid = [0, math.pi/2]
cylindrical_mesh.z_grid = np.linspace(bounding_box[0][2], bounding_box[1][2], num=400)
t_prod_cyl = openmc.Tally(name="(n,Xt)_cylindrical")
t_prod_cyl.scores = ["(n,Xt)"]
t_prod_cyl.filters.append(openmc.MeshFilter(cylindrical_mesh))
heating_cyl = openmc.Tally(name="heating_cylindrical")
heating_cyl.scores = ["heating"]
heating_cyl.filters.append(openmc.MeshFilter(cylindrical_mesh))
heating = odw.MeshTally2D(tally_type="heating", plane="yz", bounding_box=bounding_box)
tbr = odw.CellTally(tally_type="TBR")
tallies = openmc.Tallies([t_prod, t_prod_cyl, heating, tbr, heating_cyl])
# settings
settings = odw.FusionSettings()
settings.batches = 4
settings.particles = 1000
settings.source = ops.FusionPointSource(fuel="DT", coordinate=(0.1, 0.1, 66))
my_model = openmc.Model(
materials=materials, geometry=geometry, settings=settings, tallies=tallies
)
# Run
statepoint_file = my_model.run()
print(f'neutronics results are saved in {statepoint_file}')
| 0 | 0 | 0 |
68a91ef9f02cff56942c124b854af3a22afde949 | 6,092 | py | Python | empty_my_fridge/empty_my_fridge/model/recipes.py | edwarddubi/empty_my_fridge_django | 213c8be8de525df1c952f200fd0be5a3e37fdfe5 | [
"MIT"
] | null | null | null | empty_my_fridge/empty_my_fridge/model/recipes.py | edwarddubi/empty_my_fridge_django | 213c8be8de525df1c952f200fd0be5a3e37fdfe5 | [
"MIT"
] | 1 | 2020-07-31T06:09:12.000Z | 2020-07-31T06:09:12.000Z | empty_my_fridge/empty_my_fridge/model/recipes.py | edwarddubi/empty_my_fridge_django | 213c8be8de525df1c952f200fd0be5a3e37fdfe5 | [
"MIT"
] | null | null | null |
# get all recipes
# get individual recipe as Json | 29.429952 | 99 | 0.574852 |
class Recipes:
    """In-memory cache of the recipe collection plus page/browsing state.

    Wraps the Firebase ``recipe`` table (via ``db``) and tracks pagination,
    sorting, filtering, search and per-user "like" state used by the
    recipe pages.

    NOTE(review): the original class defined ``__init__`` twice; Python
    keeps only the last definition, so the zero-argument constructor was
    dead code.  Both forms are merged into a single constructor with
    ``None`` defaults, which is backward compatible with either call form.
    """

    def __init__(self, db=None, m_user=None, food_network=None):
        """Initialize an empty cache.

        Args:
            db: Firebase database handle (``None`` for a detached cache).
            m_user: current-user wrapper; supplies the uid for like lookups.
            food_network: scraper used to (re)populate the database.
        """
        self.recipe_list = None            # cached list of recipe dicts
        self.pos = 0                       # current position in recipe_list
        self.liked = False                 # last like-toggle state
        self.searched = False              # a search is currently active
        self.recipe_name_to_find = None    # current search term
        self.recipes_current_page = "1"    # pagination cursor (string)
        self.db = db
        self.m_user = m_user
        self.food_network = food_network
        self.visited_pages = ""            # comma-joined visited page ids
        self.scraped = False               # scraping already performed
        self.sorting_type = "name_A"       # sort order of the main list
        self.filter_list = None            # active category filters
        self.fridge_recipes = None         # recipes matching fridge items
        self.is_fridge = False             # fridge view active
        self.fridge_sorting_type = "name_A"
        self.isExact = False               # exact-match search flag

    # --- plain accessors, kept verbatim for interface compatibility ---
    def get_fridge_recipes(self):
        return self.fridge_recipes
    def set_fridge_recipes(self, fridge_recipes):
        self.fridge_recipes = fridge_recipes
    def get_is_fridge(self):
        return self.is_fridge
    def set_is_fridge(self, is_fridge):
        self.is_fridge = is_fridge
    def get_sorting_type(self):
        return self.sorting_type
    def set_sorting_type(self, type):
        self.sorting_type = type
    def get_fridge_sorting_type(self):
        return self.fridge_sorting_type
    def set_fridge_sorting_type(self, type):
        self.fridge_sorting_type = type
    def set_scraped(self, scraped):
        self.scraped = scraped
    def get_scraped(self):
        return self.scraped
    def set_visited_pages(self, page):
        # pages are appended as ",<page>"; duplicates are not filtered
        self.visited_pages += "," + page
    def get_visited_pages(self, page):
        # returns the substring index of the page id, -1 when not visited
        return self.visited_pages.find(page)
    def get_all_recipes(self):
        return self.recipe_list
    def set_all_recipes(self, recipe_list):
        self.recipe_list = recipe_list
    def set_recipe_list_position(self, pos):
        self.pos = pos
    def get_recipe_list_position(self):
        return self.pos
    def set_is_recipe_liked(self, liked):
        self.liked = liked
    def get_is_recipe_liked(self):
        return self.liked
    def set_is_searched_for_recipes(self, searched):
        self.searched = searched
    def get_is_searched_for_recipes(self):
        return self.searched
    def set_recipe_name_to_find(self, word):
        self.recipe_name_to_find = word
    def get_recipe_name_to_find(self):
        return self.recipe_name_to_find
    def set_recipes_current_page(self, page):
        self.recipes_current_page = page
    def get_recipes_current_page(self):
        return self.recipes_current_page
    def get_isExact(self):
        return self.isExact
    def set_isExact(self, isComplete):
        self.isExact = isComplete

    def set_recipe_liked(self, key):
        """Mark the cached recipe with id *key* as liked; bump its counter."""
        for recipe in self.recipe_list:
            if recipe["recipe_id"] == key:
                recipe["user_saved"] = True
                recipe["likes"] = recipe["likes"] + 1
                break

    def set_recipe_unLiked(self, key):
        """Mark the cached recipe with id *key* as un-liked; drop its counter."""
        for recipe in self.recipe_list:
            if recipe["recipe_id"] == key:
                recipe["user_saved"] = False
                recipe["likes"] = recipe["likes"] - 1
                break

    def get_all_likes(self, uid, page):
        """Refresh per-user like flags for one 48-recipe page of the cache.

        Args:
            uid: current user id (falsy when nobody is signed in).
            page: 1-based page number (string or int).
        """
        page_num = int(page)
        start = (page_num - 1) * 48        # 48 recipes per page
        favorite = False
        while start < len(self.recipe_list):
            recipe = self.recipe_list[start]
            recipe["no_user_signed_in"] = True
            # stop at the end of this page (the first clause is redundant
            # with the loop condition; kept for behavioural parity)
            if start == len(self.recipe_list) or start == page_num * 48:
                break
            try:
                key = recipe["recipe_id"]
                if uid:
                    recipe["no_user_signed_in"] = False
                    favorite = recipe["stars"][uid] is not None
                    recipe["user_saved"] = favorite
            except KeyError:
                # recipe has no "stars" entry for this uid: leave flags as-is
                pass
            start += 1

    def _get_all_recipes_(self):
        """Load every recipe from Firebase into ``self.recipe_list``.

        Malformed records (without a ``recipe_name``) are deleted from the
        database on the fly.
        """
        #self.db.child('all_ingredients').remove()
        #self.food_network.food_network(self.db)
        all_recipes = self.db.child("recipe").get()
        recipe_list = []
        if all_recipes.each() is not None:
            for recipe in all_recipes.each():
                key = str(recipe.key())
                try:
                    # probe for "recipe_name": a KeyError marks the record
                    # as malformed and removes it from the database
                    name = recipe.val()["recipe_name"]
                    _recipe_ = self.get_recipe(dict(recipe.val()), key, self.m_user._getUser_Id_())
                    recipe_list.append(_recipe_)
                except KeyError:
                    self.db.child("recipe").child(key).remove()
                    continue
        self.recipe_list = recipe_list

    def get_recipe(self, recipe, key, uid):
        """Annotate a raw recipe dict with id, like-count and user flags."""
        num_of_stars = 0
        favorite = False
        recipe["recipe_id"] = key
        recipe["no_user_signed_in"] = True
        try:
            if uid:
                recipe["no_user_signed_in"] = False
                favorite = recipe["stars"][uid] is not None
            recipe["user_saved"] = favorite
        except KeyError:
            # no "stars" entry (or none for this uid): not saved by user
            recipe["user_saved"] = False
        try:
            num_of_stars = len(recipe["stars"].items())
            recipe["likes"] = num_of_stars
        except KeyError:
            recipe["likes"] = 0
        return recipe

    def set_filter_list(self, filters):
        """Store the active category filters; falsy/empty input clears them."""
        if not filters:
            self.filter_list = None
        elif len(filters) == 0:
            self.filter_list = None
        else:
            self.filter_list = filters

    def get_filter_list(self):
        return self.filter_list
66c9af06eca2765f9b6c080889efdb3d978ecc05 | 752 | py | Python | axes/management/commands/axes_reset_logs.py | adamchainz/django-axes | bb33a0c5da91bd1424360ff0d391fbff7087f323 | [
"MIT"
] | null | null | null | axes/management/commands/axes_reset_logs.py | adamchainz/django-axes | bb33a0c5da91bd1424360ff0d391fbff7087f323 | [
"MIT"
] | null | null | null | axes/management/commands/axes_reset_logs.py | adamchainz/django-axes | bb33a0c5da91bd1424360ff0d391fbff7087f323 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand
from django.utils import timezone
from axes.models import AccessLog
| 28.923077 | 79 | 0.62234 | from django.core.management.base import BaseCommand
from django.utils import timezone
from axes.models import AccessLog
class Command(BaseCommand):
    """Management command that prunes old ``AccessLog`` rows."""

    help = 'Reset access log records older than given days.'

    def add_arguments(self, parser):
        # --age: records whose attempt date is older than this many
        # days are deleted
        parser.add_argument(
            '--age',
            type=int,
            default=30,
            help='Maximum age for records to keep in days',
        )

    def handle(self, *args, **options):
        cutoff = timezone.now().date() - timezone.timedelta(days=options['age'])
        deleted, _ = AccessLog.objects.filter(attempt_time__lte=cutoff).delete()
        message = f'{deleted} logs removed.' if deleted else 'No logs found.'
        self.stdout.write(message)
| 486 | 121 | 23 |
2afd5a24146c0c51425b992586dbcad2ddda8c4d | 1,538 | py | Python | predict_pennfudan_with_loss.py | lilinxi/210429_YoloV3_Experiment | 60b2c8f348f7e101353ae0440f6c948f6bd03c04 | [
"MIT"
] | null | null | null | predict_pennfudan_with_loss.py | lilinxi/210429_YoloV3_Experiment | 60b2c8f348f7e101353ae0440f6c948f6bd03c04 | [
"MIT"
] | null | null | null | predict_pennfudan_with_loss.py | lilinxi/210429_YoloV3_Experiment | 60b2c8f348f7e101353ae0440f6c948f6bd03c04 | [
"MIT"
] | null | null | null | import conf.config
import dataset.pennfudan_dataset
import model.yolov3
"""
(1)demo weights:
67.91% = person AP  ||  score_threhold=0.5 : F1=0.80 ; Recall=99.09% ; Precision=66.87%
mAP = 67.91%
---------------------------------------
tensor(2.9181) tensor(2.8104)
tensor(0.1682) tensor(0.1877)
tensor(20.8298) tensor(0.0001)
---------------------------------------
loss: tensor(13.4571)
(2)trained weights(Pennfudan_Test1_Epoch88-Train_Loss5.0447-Val_Loss2.9787.pth):
---------------------------------------
tensor(0.0108) tensor(0.0102)
tensor(0.0013) tensor(0.0012)
tensor(0.0731) tensor(0.)
---------------------------------------
loss: tensor(0.0483)
"""
if __name__ == "__main__":
    # 1. Configuration file
    Config = conf.config.PennFudanConfig
    # 2. Validation data loader
    BATCH_SIZE = 8
    pennfudan_dataloader = dataset.pennfudan_dataset.PennFudanDataset.TrainDataloader(
        config=Config,
        batch_size=BATCH_SIZE
    )
    # 3. Initialize the model
    yolov3 = model.yolov3.YoloV3(Config)
    # 4. Iterate over the dataset
    EPOCH = 1
    for epoch in range(EPOCH):
        print("Epoch:", epoch)
        for batch_index, (tensord_images, tensord_boxes_list) in enumerate(pennfudan_dataloader):
            print("batch_index:", batch_index)
            for step in range(BATCH_SIZE):
                print("step:", step)
                # 4. Predict with loss and visualize the result
                # (exit(-1) below stops after the first sample on purpose)
                image = yolov3.predict_with_loss(
                    tensord_images[step],
                    tensord_boxes_list[step],
                )
                image.show()
                exit(-1)
| 26.067797 | 97 | 0.556567 | import conf.config
import dataset.pennfudan_dataset
import model.yolov3
"""
(1)demo weights:
67.91% = person AP || score_threhold=0.5 : F1=0.80 ; Recall=99.09% ; Precision=66.87%
mAP = 67.91%
---------------------------------------
tensor(2.9181) tensor(2.8104)
tensor(0.1682) tensor(0.1877)
tensor(20.8298) tensor(0.0001)
---------------------------------------
loss: tensor(13.4571)
(2)trained weights(Pennfudan_Test1_Epoch88-Train_Loss5.0447-Val_Loss2.9787.pth):
---------------------------------------
tensor(0.0108) tensor(0.0102)
tensor(0.0013) tensor(0.0012)
tensor(0.0731) tensor(0.)
---------------------------------------
loss: tensor(0.0483)
"""
if __name__ == "__main__":
# 1. 配置文件
Config = conf.config.PennFudanConfig
# 2. 验证集
BATCH_SIZE = 8
pennfudan_dataloader = dataset.pennfudan_dataset.PennFudanDataset.TrainDataloader(
config=Config,
batch_size=BATCH_SIZE
)
# 3. 初始化模型
yolov3 = model.yolov3.YoloV3(Config)
# 4. 遍历数据集
EPOCH = 1
for epoch in range(EPOCH):
print("Epoch:", epoch)
for batch_index, (tensord_images, tensord_boxes_list) in enumerate(pennfudan_dataloader):
print("batch_index:", batch_index)
for step in range(BATCH_SIZE):
print("step:", step)
# 4. 预测结果并记录
image = yolov3.predict_with_loss(
tensord_images[step],
tensord_boxes_list[step],
)
image.show()
exit(-1)
| 0 | 0 | 0 |
42eaf6217f866f78656c77414228daa7e8a7c960 | 11,297 | py | Python | ephypype/nodes/import_data.py | EtienneCmb/ephypype | fdc13efb79545f072585d1e180d03702efd9e326 | [
"BSD-3-Clause"
] | null | null | null | ephypype/nodes/import_data.py | EtienneCmb/ephypype | fdc13efb79545f072585d1e180d03702efd9e326 | [
"BSD-3-Clause"
] | 1 | 2018-09-03T20:08:46.000Z | 2018-09-03T21:00:55.000Z | ephypype/nodes/import_data.py | EtienneCmb/ephypype | fdc13efb79545f072585d1e180d03702efd9e326 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""All nodes for import that are NOT specific to a ephy package."""
import os
from nipype.interfaces.base import BaseInterface,\
BaseInterfaceInputSpec, traits, TraitedSpec, isdefined
from nipype.interfaces.base import File
# ----------------- ImportMat ----------------------------- #
class ImportMatInputSpec(BaseInterfaceInputSpec):
    """Input specification for ImportMat."""

    # .mat file holding the time-series matrix
    tsmat_file = traits.File(exists=True,
                             desc='time series in .mat (matlab format)',
                             mandatory=True)
    # name of the matlab struct field holding the data (defaults to 'F')
    data_field_name = traits.String('F', desc='Name of structure in matlab',
                                    usedefault=True)
    # optional boolean struct field selecting the good channels
    good_channels_field_name = traits.String('ChannelFlag',
                                             desc='Boolean structure for\
                                              choosing nodes, name of\
                                              structure in matlab file')
class ImportMatOutputSpec(TraitedSpec):
"""Output spec for Import Mat."""
ts_file = traits.File(exists=True, desc="time series in .npy format")
class ImportMat(BaseInterface):
"""Import matlab file to numpy ndarry, and save it as numpy file .npy.
Parameters
----------
tsmat_file:
type = File, exists=True, desc='nodes * time series
in .mat (matlab format format', mandatory=True
data_field_name
type = String, default = 'F', desc='Name of the structure in matlab',
usedefault=True
good_channels_field_name
type = String, default = 'ChannelFlag',
desc='Boolean structure for choosing nodes,
name of structure in matlab file'
Returns
-------
ts_file
type = File, exists=True, desc="time series in .npy format"
"""
input_spec = ImportMatInputSpec
output_spec = ImportMatOutputSpec
# ------------------- ImportBrainVisionAscii -------------------
class ImportBrainVisionAsciiInputSpec(BaseInterfaceInputSpec):
"""Import brainvision ascii input spec."""
txt_file = File(exists=True,
desc='Ascii text file exported from BrainVision',
mandatory=True)
sample_size = traits.Float(desc='Size (nb of time points) of all samples',
mandatory=True)
sep_label_name = traits.String("",
desc='Separator between electrode name \
(normally a capital letter) and \
contact numbers',
usedefault=True)
repair = traits.Bool(True,
desc='Repair file if behaves strangely (adding \
space sometimes...)',
usedefault=True)
sep = traits.Str(
";", desc="Separator between time points", usedefault=True)
keep_electrodes = traits.String("",
desc='keep_electrodes',
usedefault=True)
class ImportBrainVisionAsciiOutputSpec(TraitedSpec):
"""Output specification for ImportBrainVisionAscii."""
splitted_ts_file = traits.File(
exists=True, desc='splitted time series in .npy format')
elec_names_file = traits.File(
exists=True, desc='electrode names in txt format')
class ImportBrainVisionAscii(BaseInterface):
"""Import IntraEEG Brain Vision (unsplitted) ascii time series txt file.
The splitted time series in .npy format, as well as electrode names in txt
format
Parameters
----------
txt_file
type = File, exists=True, desc='Ascii text file exported from
BrainVision', mandatory=True
sample_size
type = Int, desc = "Size (number of time points) of all samples",
mandatory = True
sep_label_name
type = String, default = "", desc='Separator between electrode name
(normally a capital letter) and contact numbers', usedefault=True
repair
type = Bool, default = True, desc='Repair file if behaves strangely
(adding space sometimes...)', usedefault = True
sep
type = String, default = ";","Separator between time points",
usedefault = True)
Returns
-------
splitted_ts_file
type = File, exists=True, desc="splitted time series in .npy format"
elec_names_file
type = File, exists=True, desc="electrode names in txt format"
"""
input_spec = ImportBrainVisionAsciiInputSpec
output_spec = ImportBrainVisionAsciiOutputSpec
# ------------------- ImportBrainVisionVhdr -------------------
class ImportBrainVisionVhdrInputSpec(BaseInterfaceInputSpec):
"""Import brainvision vhdr inut spec."""
vhdr_file = File(exists=True,
desc='Vhdr file exported from BrainVision',
mandatory=True)
sample_size = traits.Float(desc='Size (number of time points) of all \
samples', mandatory=True)
keep_electrodes = traits.String("",
desc='keep_electrodes',
usedefault=True)
class ImportBrainVisionVhdrOutputSpec(TraitedSpec):
"""Output specification for ImportBrainVisionVhdr."""
splitted_ts_file = traits.File(
exists=True, desc='splitted time series in .npy format')
elec_names_file = traits.File(
exists=True, desc='electrode names in txt format')
class ImportBrainVisionVhdr(BaseInterface):
"""Import IntraEEG Brain Vision (unsplitted) vhdr time series txt file.
Then splitted time series in .npy format, as well as electrode names in txt
format
Parameters
----------
vhdr_file
type = File, exists=True, desc='Ascii text file exported from
BrainVision', mandatory=True
sample_size
type = Int, desc = "Size (number of time points) of all samples",
mandatory = True
Returns
-------
splitted_ts_file
type = File, exists=True, desc="splitted time series in .npy format"
elec_names_file
type = File, exists=True, desc="electrode names in txt format"
"""
input_spec = ImportBrainVisionVhdrInputSpec
output_spec = ImportBrainVisionVhdrOutputSpec
# ------------------- Ep2ts -------------------
class Ep2tsInputSpec(BaseInterfaceInputSpec):
"""Input specification for Ep2ts."""
fif_file = File(exists=True, desc='fif file with epochs', mandatory=True)
class Ep2tsOutputSpec(TraitedSpec):
"""Output specification for Ep2ts."""
ts_file = traits.File(exists=True, desc="time series in .npy format")
class Ep2ts(BaseInterface):
    """Convert an Elekta fif raw or epochs file to numpy matrix format.

    Input: ``fif_file`` (epochs); output: ``ts_file`` (.npy time series).
    """

    input_spec = Ep2tsInputSpec
    output_spec = Ep2tsOutputSpec
class ConvertDs2FifInputSpec(BaseInterfaceInputSpec):
"""Input specification for ImportMat."""
ds_file = traits.Directory(exists=True,
desc='raw .ds file',
mandatory=True)
class ConvertDs2FifOutputSpec(TraitedSpec):
"""Output spec for Import Mat."""
fif_file = traits.File(exists=True, desc='raw .fif file')
class ConvertDs2Fif(BaseInterface):
    """Convert a raw CTF ``.ds`` recording directory to a raw ``.fif`` file.

    Input: ``ds_file`` (directory); output: ``fif_file``.
    """

    input_spec = ConvertDs2FifInputSpec
    output_spec = ConvertDs2FifOutputSpec
| 28.966667 | 79 | 0.619368 | # -*- coding: utf-8 -*-
"""All nodes for import that are NOT specific to a ephy package."""
import os
from nipype.interfaces.base import BaseInterface,\
BaseInterfaceInputSpec, traits, TraitedSpec, isdefined
from nipype.interfaces.base import File
# ----------------- ImportMat ----------------------------- #
class ImportMatInputSpec(BaseInterfaceInputSpec):
"""Input specification for ImportMat."""
tsmat_file = traits.File(exists=True,
desc='time series in .mat (matlab format)',
mandatory=True)
data_field_name = traits.String('F', desc='Name of structure in matlab',
usedefault=True)
good_channels_field_name = traits.String('ChannelFlag',
desc='Boolean structure for\
choosing nodes, name of\
structure in matlab file')
class ImportMatOutputSpec(TraitedSpec):
"""Output spec for Import Mat."""
ts_file = traits.File(exists=True, desc="time series in .npy format")
class ImportMat(BaseInterface):
    """Import matlab file to numpy ndarray, and save it as numpy file .npy.

    Parameters
    ----------
    tsmat_file:
        type = File, exists=True, desc='nodes * time series
        in .mat (matlab format)', mandatory=True
    data_field_name
        type = String, default = 'F', desc='Name of the structure in matlab',
        usedefault=True
    good_channels_field_name
        type = String, default = 'ChannelFlag',
        desc='Boolean structure for choosing nodes,
        name of structure in matlab file'

    Returns
    -------
    ts_file
        type = File, exists=True, desc="time series in .npy format"
    """

    input_spec = ImportMatInputSpec
    output_spec = ImportMatOutputSpec

    def _run_interface(self, runtime):
        # deferred import keeps nipype node construction cheap
        from ephypype.import_mat import import_tsmat_to_ts
        tsmat_file = self.inputs.tsmat_file
        data_field_name = self.inputs.data_field_name
        good_channels_field_name = self.inputs.good_channels_field_name
        # an undefined trait means "no channel selection": pass None through
        if not isdefined(good_channels_field_name):
            good_channels_field_name = None
        self.ts_file = import_tsmat_to_ts(
            tsmat_file, data_field_name, good_channels_field_name)
        return runtime
    def _list_outputs(self):
        # report the .npy path produced by _run_interface
        outputs = self._outputs().get()
        outputs['ts_file'] = self.ts_file
        return outputs
# ------------------- ImportBrainVisionAscii -------------------
class ImportBrainVisionAsciiInputSpec(BaseInterfaceInputSpec):
"""Import brainvision ascii input spec."""
txt_file = File(exists=True,
desc='Ascii text file exported from BrainVision',
mandatory=True)
sample_size = traits.Float(desc='Size (nb of time points) of all samples',
mandatory=True)
sep_label_name = traits.String("",
desc='Separator between electrode name \
(normally a capital letter) and \
contact numbers',
usedefault=True)
repair = traits.Bool(True,
desc='Repair file if behaves strangely (adding \
space sometimes...)',
usedefault=True)
sep = traits.Str(
";", desc="Separator between time points", usedefault=True)
keep_electrodes = traits.String("",
desc='keep_electrodes',
usedefault=True)
class ImportBrainVisionAsciiOutputSpec(TraitedSpec):
"""Output specification for ImportBrainVisionAscii."""
splitted_ts_file = traits.File(
exists=True, desc='splitted time series in .npy format')
elec_names_file = traits.File(
exists=True, desc='electrode names in txt format')
class ImportBrainVisionAscii(BaseInterface):
    """Import IntraEEG Brain Vision (unsplitted) ascii time series txt file.

    Produces the splitted time series in .npy format, as well as electrode
    names in txt format.

    Parameters
    ----------
    txt_file
        type = File, exists=True, desc='Ascii text file exported from
        BrainVision', mandatory=True
    sample_size
        type = Float, desc = "Size (number of time points) of all samples",
        mandatory = True
    sep_label_name
        type = String, default = "", desc='Separator between electrode name
        (normally a capital letter) and contact numbers', usedefault=True
    repair
        type = Bool, default = True, desc='Repair file if behaves strangely
        (adding space sometimes...)', usedefault = True
    sep
        type = String, default = ";", desc="Separator between time points",
        usedefault = True

    Returns
    -------
    splitted_ts_file
        type = File, exists=True, desc="splitted time series in .npy format"
    elec_names_file
        type = File, exists=True, desc="electrode names in txt format"
    """

    input_spec = ImportBrainVisionAsciiInputSpec
    output_spec = ImportBrainVisionAsciiOutputSpec

    def _run_interface(self, runtime):
        # deferred import keeps nipype node construction cheap
        from ephypype.import_txt import split_txt

        # NOTE(review): a leftover debug ``print(keep_electrodes)`` was
        # removed here; it only echoed an input trait to stdout.
        split_txt(txt_file=self.inputs.txt_file,
                  sample_size=self.inputs.sample_size,
                  sep_label_name=self.inputs.sep_label_name,
                  repair=self.inputs.repair,
                  sep=self.inputs.sep,
                  keep_electrodes=self.inputs.keep_electrodes)
        return runtime

    def _list_outputs(self):
        # split_txt writes its results into the node's working directory
        # under fixed file names; report their absolute paths
        outputs = self._outputs().get()
        outputs['elec_names_file'] = os.path.abspath(
            'correct_channel_names.txt')
        outputs['splitted_ts_file'] = os.path.abspath('splitted_ts.npy')
        return outputs
# ------------------- ImportBrainVisionVhdr -------------------
class ImportBrainVisionVhdrInputSpec(BaseInterfaceInputSpec):
    """Input traits for the ImportBrainVisionVhdr interface."""
    # BrainVision header file (.vhdr); must already exist on disk.
    vhdr_file = File(exists=True,
                     desc='Vhdr file exported from BrainVision',
                     mandatory=True)
    # Number of time points per sample used when splitting the recording.
    sample_size = traits.Float(desc='Size (number of time points) of all \
samples', mandatory=True)
    # "-"-separated list of electrode names to keep; empty keeps all
    # channels (see _run_interface, which splits on "-").
    keep_electrodes = traits.String("",
                                    desc='keep_electrodes',
                                    usedefault=True)
class ImportBrainVisionVhdrOutputSpec(TraitedSpec):
    """Output specification for ImportBrainVisionVhdr."""
    # Time series split into samples, saved as a .npy file.
    splitted_ts_file = traits.File(
        exists=True, desc='splitted time series in .npy format')
    # Plain-text file with one channel name per line.
    elec_names_file = traits.File(
        exists=True, desc='electrode names in txt format')
class ImportBrainVisionVhdr(BaseInterface):
    """Import IntraEEG Brain Vision (unsplitted) vhdr time series txt file.
    Then splitted time series in .npy format, as well as electrode names in txt
    format
    Parameters
    ----------
    vhdr_file
        type = File, exists=True, desc='Ascii text file exported from
        BrainVision', mandatory=True
    sample_size
        type = Int, desc = "Size (number of time points) of all samples",
        mandatory = True
    Returns
    -------
    splitted_ts_file
        type = File, exists=True, desc="splitted time series in .npy format"
    elec_names_file
        type = File, exists=True, desc="electrode names in txt format"
    """
    input_spec = ImportBrainVisionVhdrInputSpec
    output_spec = ImportBrainVisionVhdrOutputSpec
    def _run_interface(self, runtime):
        # Imported lazily so this module can be loaded without ephypype.
        from ephypype.import_txt import read_brainvision_vhdr
        import numpy as np
        vhdr_file = self.inputs.vhdr_file
        sample_size = self.inputs.sample_size
        keep_electrodes = self.inputs.keep_electrodes
        # Read the recording split into samples plus the channel names.
        np_splitted_ts, ch_names = read_brainvision_vhdr(
            vhdr_file=vhdr_file, sample_size=sample_size)
        np_ch_names = np.array(ch_names, dtype='str')
        # Optional channel selection: keep_electrodes is a "-"-separated
        # list of channel names; an empty string keeps every channel.
        if keep_electrodes != "":
            print(keep_electrodes)
            list_keep_electrodes = keep_electrodes.split("-")
            print(list_keep_electrodes)
            # 0/1 mask over channels, in the order returned by the reader.
            lst = [ch_name in list_keep_electrodes for ch_name in ch_names]
            keep = np.array(lst, dtype='int')
            print(keep)
            keep_ch_names = np_ch_names[keep == 1]
            print(keep_ch_names)
            # Axis 1 indexes channels here (mask applied on axis 1);
            # the other two axes are passed through unchanged.
            keep_np_splitted_ts = np_splitted_ts[:, keep == 1, :]
            print(keep_np_splitted_ts.shape)
            np_splitted_ts = keep_np_splitted_ts
            np_ch_names = keep_ch_names
        # Save results into the working directory; _list_outputs resolves
        # exactly these two file names.
        ch_names_file = os.path.abspath("correct_channel_names.txt")
        np.savetxt(ch_names_file, np_ch_names, fmt="%s")
        splitted_ts_file = os.path.abspath("splitted_ts.npy")
        np.save(splitted_ts_file, np_splitted_ts)
        return runtime
    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['elec_names_file'] = os.path.abspath(
            'correct_channel_names.txt')
        outputs['splitted_ts_file'] = os.path.abspath('splitted_ts.npy')
        return outputs
# ------------------- Ep2ts -------------------
class Ep2tsInputSpec(BaseInterfaceInputSpec):
    """Input specification for Ep2ts."""
    # Existing .fif file containing raw data or epochs.
    fif_file = File(exists=True, desc='fif file with epochs', mandatory=True)
class Ep2tsOutputSpec(TraitedSpec):
    """Output specification for Ep2ts."""
    # Path of the converted time series (.npy).
    ts_file = traits.File(exists=True, desc="time series in .npy format")
class Ep2ts(BaseInterface):
    """Convert an electa fif raw or epochs file to numpy matrix format."""
    input_spec = Ep2tsInputSpec
    output_spec = Ep2tsOutputSpec
    def _run_interface(self, runtime):
        # Imported lazily so this module loads even without ephypype.
        from ephypype.fif2ts import ep2ts
        self.ts_file = ep2ts(fif_file=self.inputs.fif_file)
        return runtime
    def _list_outputs(self):
        results = self._outputs().get()
        results['ts_file'] = self.ts_file
        return results
class ConvertDs2FifInputSpec(BaseInterfaceInputSpec):
    """Input specification for ConvertDs2Fif."""
    # CTF acquisition directory (.ds); must already exist.
    ds_file = traits.Directory(exists=True,
                               desc='raw .ds file',
                               mandatory=True)
class ConvertDs2FifOutputSpec(TraitedSpec):
    """Output specification for ConvertDs2Fif."""
    # Path of the converted raw .fif file.
    fif_file = traits.File(exists=True, desc='raw .fif file')
class ConvertDs2Fif(BaseInterface):
    """Convert a CTF .ds directory into a raw .fif file."""
    input_spec = ConvertDs2FifInputSpec
    output_spec = ConvertDs2FifOutputSpec
    def _run_interface(self, runtime):
        # Deferred import keeps ephypype an optional runtime dependency.
        from ephypype.import_ctf import convert_ds_to_raw_fif
        self.fif_file = convert_ds_to_raw_fif(self.inputs.ds_file)
        return runtime
    def _list_outputs(self):
        results = self._outputs().get()
        results["fif_file"] = self.fif_file
        return results
| 3,586 | 0 | 270 |
7ffd18602922ec689066ec7889bc59c75a5ca252 | 3,799 | py | Python | experiments/torch_topics_deepfool.py | nibydlo/modAL | c0fe0200001c8c34e3fabb099fb70cf1e4bfb680 | [
"MIT"
] | 2 | 2020-01-22T14:34:01.000Z | 2020-01-22T14:51:18.000Z | experiments/torch_topics_deepfool.py | nibydlo/modAL | c0fe0200001c8c34e3fabb099fb70cf1e4bfb680 | [
"MIT"
] | null | null | null | experiments/torch_topics_deepfool.py | nibydlo/modAL | c0fe0200001c8c34e3fabb099fb70cf1e4bfb680 | [
"MIT"
] | null | null | null | from copy import deepcopy
from functools import partial, update_wrapper
import torch
import numpy as np
import torch.optim as optim
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from experiments.models.topics_torch_models import NormModel, NormModelTrident
from experiments.datasets.topics_ds import get_unpacked_data
from experiments.al_experiment import Experiment
from experiments.models.torch_topics_decorator import TopicsDecorator, TridentDecorator
from modAL import KerasActiveLearner
from modAL.deepfool import deepfool_sampling
import os
# Pin the process to a single GPU before torch creates a CUDA context.
os.environ["CUDA_VISIBLE_DEVICES"] = str(1)
print('cuda device count:', torch.cuda.device_count())
# Multimodal topic data: image features, text features and labels.
x_img, x_txt, y = get_unpacked_data()
# Hold out 20% as a test split, stratified on the labels.
x_img_train, x_img_test, x_txt_train, x_txt_test, y_train, y_test = train_test_split(
    x_img,
    x_txt,
    y,
    test_size=0.2,
    random_state=42,
    stratify=y
)
# Carve a validation split out of the remaining training data.
x_img_train, x_img_val, x_txt_train, x_txt_val, y_train, y_val = train_test_split(
    x_img_train,
    x_txt_train,
    y_train,
    test_size=0.2,
    random_state=42,
    stratify=y_train
)
# Standardize each modality; scalers are fit on the training split only
# and then applied to validation and test splits.
img_sscaler = StandardScaler()
img_sscaler.fit(x_img_train)
x_img_train = img_sscaler.transform(x_img_train)
x_img_val = img_sscaler.transform(x_img_val)
x_img_test = img_sscaler.transform(x_img_test)
txt_sscaler = StandardScaler()
txt_sscaler.fit(x_txt_train)
x_txt_train = txt_sscaler.transform(x_txt_train)
x_txt_val = txt_sscaler.transform(x_txt_val)
x_txt_test = txt_sscaler.transform(x_txt_test)
n_labeled_examples = x_img_train.shape[0]
# Active-learning hyper-parameters.
POOL_SIZE = 100000  # unlabeled pool size handed to each experiment
INIT_SIZE = 2000  # size of the random initial labeled set
BATCH_SIZE = 20  # instances queried per AL iteration
N_QUERIES = 100  # number of AL iterations
INIT_EPOCHS = 45  # epochs used to train the initial model
# Pre-bind the strategy arguments; update_wrapper preserves the original
# function metadata (e.g. __name__) on the partial.
preset_deepfool = update_wrapper(partial(deepfool_sampling, n_instances=BATCH_SIZE, with_dropout=False), deepfool_sampling)
query_dict = {
    'deepfool_cuda': preset_deepfool
}
# Repeat the whole experiment for five random seeds.
for i in range(1, 6):
    print('i=', i)
    np.random.seed(i)
    # NOTE(review): randint samples with replacement, so the initial set may
    # contain duplicate indices (fewer than INIT_SIZE distinct examples) --
    # confirm this is intended.
    training_indices = np.random.randint(low=0, high=n_labeled_examples, size=INIT_SIZE)
    x_init_train = [x_img_train[training_indices], x_txt_train[training_indices]]
    y_init_train = y_train[training_indices]
    # Train one base model per seed; every strategy starts from a copy of it.
    general_model = NormModel(drop=0.5, d=128)
    general_optimizer = optim.Adam(general_model.parameters(), lr=1e-3, weight_decay=0.0005)
    general_decorated_model = TopicsDecorator(general_model, general_optimizer)
    general_decorated_model.fit(
        X=x_init_train,
        y=y_init_train,
        epochs=INIT_EPOCHS,
        validation_data=([x_img_val, x_txt_val], y_val)
    )
    # The remaining training examples form the unlabeled pool.
    x_pool = [np.delete(x_img_train, training_indices, axis=0), np.delete(x_txt_train, training_indices, axis=0)]
    y_pool = np.delete(y_train, training_indices, axis=0)
    for query_name in query_dict:
        print('query name =', query_name)
        decorated_model = deepcopy(general_decorated_model)
        # now here is KerasActiveLearner because maybe it is suitable also for decorated pytorch models
        learner = KerasActiveLearner(
            estimator=decorated_model,
            X_training=x_init_train,
            y_training=y_init_train,
            query_strategy=query_dict[query_name],
            epochs=0
        )
        experiment = Experiment(
            learner=learner,
            X_pool=x_pool.copy(),
            y_pool=y_pool.copy(),
            X_val=[x_img_val, x_txt_val],
            y_val=y_val,
            n_queries=N_QUERIES,
            random_seed=i,
            pool_size=POOL_SIZE,
            name='torch_topics_d128_' + query_name + '_i2000_b20_q100_sf512_' + str(i),
            bootstrap=False,
            epochs=1
        )
        experiment.run()
        experiment.save_state('statistic/topics/torch/d128/' + query_name + '_i2000_b20_q100_sf512_' + str(i))
| 29.679688 | 123 | 0.726244 | from copy import deepcopy
from functools import partial, update_wrapper
import torch
import numpy as np
import torch.optim as optim
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from experiments.models.topics_torch_models import NormModel, NormModelTrident
from experiments.datasets.topics_ds import get_unpacked_data
from experiments.al_experiment import Experiment
from experiments.models.torch_topics_decorator import TopicsDecorator, TridentDecorator
from modAL import KerasActiveLearner
from modAL.deepfool import deepfool_sampling
import os
os.environ["CUDA_VISIBLE_DEVICES"] = str(1)
print('cuda device count:', torch.cuda.device_count())
x_img, x_txt, y = get_unpacked_data()
# print('data loaded')
x_img_train, x_img_test, x_txt_train, x_txt_test, y_train, y_test = train_test_split(
x_img,
x_txt,
y,
test_size=0.2,
random_state=42,
stratify=y
)
x_img_train, x_img_val, x_txt_train, x_txt_val, y_train, y_val = train_test_split(
x_img_train,
x_txt_train,
y_train,
test_size=0.2,
random_state=42,
stratify=y_train
)
# print('data splited')
img_sscaler = StandardScaler()
img_sscaler.fit(x_img_train)
x_img_train = img_sscaler.transform(x_img_train)
x_img_val = img_sscaler.transform(x_img_val)
x_img_test = img_sscaler.transform(x_img_test)
txt_sscaler = StandardScaler()
txt_sscaler.fit(x_txt_train)
x_txt_train = txt_sscaler.transform(x_txt_train)
x_txt_val = txt_sscaler.transform(x_txt_val)
x_txt_test = txt_sscaler.transform(x_txt_test)
# print('data scaled')
n_labeled_examples = x_img_train.shape[0]
POOL_SIZE = 100000
INIT_SIZE = 2000
BATCH_SIZE = 20
N_QUERIES = 100
INIT_EPOCHS = 45
preset_deepfool = update_wrapper(partial(deepfool_sampling, n_instances=BATCH_SIZE, with_dropout=False), deepfool_sampling)
query_dict = {
'deepfool_cuda': preset_deepfool
}
for i in range(1, 6):
print('i=', i)
np.random.seed(i)
training_indices = np.random.randint(low=0, high=n_labeled_examples, size=INIT_SIZE)
x_init_train = [x_img_train[training_indices], x_txt_train[training_indices]]
y_init_train = y_train[training_indices]
general_model = NormModel(drop=0.5, d=128)
general_optimizer = optim.Adam(general_model.parameters(), lr=1e-3, weight_decay=0.0005)
general_decorated_model = TopicsDecorator(general_model, general_optimizer)
general_decorated_model.fit(
X=x_init_train,
y=y_init_train,
epochs=INIT_EPOCHS,
validation_data=([x_img_val, x_txt_val], y_val)
)
x_pool = [np.delete(x_img_train, training_indices, axis=0), np.delete(x_txt_train, training_indices, axis=0)]
y_pool = np.delete(y_train, training_indices, axis=0)
for query_name in query_dict:
print('query name =', query_name)
decorated_model = deepcopy(general_decorated_model)
# now here is KerasActiveLearner because maybe it is suitable also for decorated pytorch models
learner = KerasActiveLearner(
estimator=decorated_model,
X_training=x_init_train,
y_training=y_init_train,
query_strategy=query_dict[query_name],
epochs=0
)
experiment = Experiment(
learner=learner,
X_pool=x_pool.copy(),
y_pool=y_pool.copy(),
X_val=[x_img_val, x_txt_val],
y_val=y_val,
n_queries=N_QUERIES,
random_seed=i,
pool_size=POOL_SIZE,
name='torch_topics_d128_' + query_name + '_i2000_b20_q100_sf512_' + str(i),
bootstrap=False,
epochs=1
)
experiment.run()
experiment.save_state('statistic/topics/torch/d128/' + query_name + '_i2000_b20_q100_sf512_' + str(i))
| 0 | 0 | 0 |
43befbf17ea0a9cbbc089006fafe91f4a9b1e2b7 | 8,873 | py | Python | json_logging/json_logging/__init__.py | harrystech/arthur-tools | b4d1772d3d7bba423efa74b7e5d697c40a095cd2 | [
"MIT"
] | 2 | 2020-01-23T18:05:15.000Z | 2020-05-26T14:53:08.000Z | json_logging/json_logging/__init__.py | harrystech/arthur-tools | b4d1772d3d7bba423efa74b7e5d697c40a095cd2 | [
"MIT"
] | 194 | 2020-01-13T22:26:34.000Z | 2022-03-29T12:02:58.000Z | json_logging/json_logging/__init__.py | harrystech/arthur-tools | b4d1772d3d7bba423efa74b7e5d697c40a095cd2 | [
"MIT"
] | null | null | null | """
Enable logging (from Lambda functions) in JSON format which makes post-processing much easier.
Since we assume this will be used by Lambda functions, we also add the request id in log lines.
"""
import json
import logging
import logging.config
import sys
import traceback
from contextlib import ContextDecorator
from datetime import datetime, timezone
from logging import NullHandler # noqa: F401
from typing import Any, Dict, Optional, Tuple, Union
class ContextFilter(logging.Filter):
    """
    Logging Filter class that adds contextual information to log records.
    A single filter instance is assumed per runtime, which is why the
    contextual values live on the class rather than on instances.
    """
    _context: Dict[str, Optional[str]] = {
        "aws_request_id": None,
        "correlation_id": None,
        "function_name": None,
        "function_version": None,
        "invoked_function_arn": None,
        "log_group_name": None,
        "log_stream_name": None,
        "request_id": None,
    }
    def filter(self, record: logging.LogRecord) -> bool:
        """Attach every non-None context field to the record; always keep it."""
        for name, current in self._context.items():
            if current is None:
                continue
            setattr(record, name, current)
        return True
    @classmethod
    def update_from_lambda_context(cls, context: Any) -> None:
        """Copy matching attributes off an AWS Lambda context object."""
        for name in cls._context:
            cls._context[name] = getattr(context, name, cls._context[name])
    @classmethod
    def update_context(cls, **kwargs: Optional[str]) -> None:
        """
        Update any of the fields stored in the global context filter.
        Unknown field names raise a ValueError; assigning None drops the
        field from subsequent log records.
        """
        for name, value in kwargs.items():
            if name not in cls._context:
                raise ValueError(f"unexpected field: '{name}'")
            cls._context[name] = value
class DefaultJsonFormat(json.JSONEncoder):
    """Default to using 'str()' except for dates which are ISO 8601.

    Datetimes are rendered with millisecond precision and a "+00:00" offset
    is shortened to the conventional "Z" suffix. Anything else that json
    cannot serialize natively is emitted as str(obj).
    """
    def default(self, obj: Any) -> str:
        # Called by json only for objects the base encoder cannot handle;
        # without this override the encoder raises TypeError on datetimes.
        if isinstance(obj, datetime):
            s = obj.isoformat(timespec="milliseconds")
            if s.endswith("+00:00"):
                # Make 99% of our timestamps easier to read by replacing
                # the time offset with "Z".
                return s[:-6] + "Z"
            return s
        else:
            return str(obj)
class JsonFormatter(logging.Formatter):
    """
    Format the message to be easily reverted into an object by using JSON format.
    Notes:
    * The "format" is ignored since we convert based on available info.
    * The timestamps are in UTC.
    This format of "gmtime" is compatible with "strict_date_time" in Elasticsearch,
    (as "yyyy-MM-dd'T'HH:mm:ss.SSSZZ") and other log collection tools.
    """
    attribute_mapping = {
        # LogRecord attributes for which we want new names:
        "filename": "source.filename",
        "funcName": "source.function",
        "levelname": "log_level",
        "levelno": "log_severity",
        "lineno": "source.line_number",
        "module": "source.module",
        "name": "logger",
        "pathname": "source.pathname",
        "process": "process.id",
        "processName": "process.name",
        "threadName": "thread.name",
        # Common context attributes which we want to rename:
        "function_name": "lambda.function_name",
        "function_version": "lambda.function_version",
        "invoked_function_arn": "lambda.invoked_function_arn",
        "log_group_name": "cwl.log_group_name",
        "log_stream_name": "cwl.log_stream_name",
        # LogRecord attributes which we want to suppress or rewrite ourselves:
        "args": None,
        "created": None,
        "msecs": None,
        "msg": None,
        "relativeCreated": None,
        "thread": None,
    }
    # Use "set_output_format()" to change this value.
    output_format = "compact"
    # The two accessors below were lost (leaving bare @property decorators
    # stacked on format, which made format non-callable); restored here.
    @property
    def indent(self) -> Optional[str]:
        """Indentation unit for "pretty" output; None disables indenting."""
        return {"compact": None, "pretty": " "}[self.output_format]
    @property
    def separators(self) -> Tuple[str, str]:
        """(item, key) separators; the compact form drops the space after ':'."""
        return {"compact": (",", ":"), "pretty": (",", ": ")}[self.output_format]
    def format(self, record: logging.LogRecord) -> str:
        """Format log record by creating a JSON-format in a string."""
        assembled = {}
        for attr, value in record.__dict__.items():
            if value is None:
                continue
            if attr in self.attribute_mapping:
                new_name = self.attribute_mapping[attr]
                if new_name is not None:
                    assembled[new_name] = value
                continue
            # This lets anything, I mean anything, from "extra={}" slip through.
            assembled[attr] = value
        # The "message" is added here so an accidentally specified message in the extra kwargs
        # is ignored.
        assembled["message"] = record.getMessage()
        # We show elapsed milliseconds as int, not float.
        assembled["elapsed_ms"] = int(record.relativeCreated)
        # Finally, always add a timestamp as epoch msecs and in a human readable format.
        # (Go to https://www.epochconverter.com/ to convert the timestamp in milliseconds.)
        assembled["timestamp"] = int(record.created * 1000.0)
        assembled["gmtime"] = datetime.fromtimestamp(record.created, timezone.utc)
        return json.dumps(
            assembled,
            cls=DefaultJsonFormat,
            indent=self.indent,
            separators=self.separators,
            sort_keys=True,
        )
# We don't create this dict earlier so that we can use the classes (instead of their names
# as strings).
# dictConfig-style logging configuration: a single "console" handler that
# writes JSON lines to stdout, with the context filter attached.
LOGGING_STREAM_CONFIG = {
    "version": 1,
    # Keep loggers created before configure_logging() runs alive.
    "disable_existing_loggers": False,
    "formatters": {"json_formatter": {"()": JsonFormatter}},
    "filters": {"context_filter": {"()": ContextFilter}},
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "json_formatter",
            "filters": ["context_filter"],
            "stream": "ext://sys.stdout",
        }
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        # Loggers from packages that we use and want to be less noisy:
        "botocore": {
            "qualname": "botocore",
            "handlers": ["console"],
            "level": "WARNING",
            "propagate": 0,
        },
        "elasticsearch": {
            "qualname": "elasticsearch",
            "handlers": ["console"],
            "level": "WARNING",
            "propagate": 0,
        },
        "urllib3": {
            "qualname": "urllib3",
            "handlers": ["console"],
            "level": "WARNING",
            "propagate": 0,
        },
    },
}
def configure_logging(level: Union[int, str] = "INFO") -> None:
    """Configure logging module to use JSON formatter for logs.

    The root level is applied *after* dictConfig because the config dict
    itself pins the root logger to INFO.
    """
    logging.config.dictConfig(LOGGING_STREAM_CONFIG)
    # Route warnings.warn(...) messages through logging as well.
    logging.captureWarnings(True)
    logging.root.setLevel(level)
# Just for developer convenience -- this avoids having too many imports of "logging" packages.
def update_from_lambda_context(context: Any) -> None:
    """Update values in the logging context from the context of a AWS Lambda function.

    Thin module-level convenience wrapper around ContextFilter.
    """
    ContextFilter.update_from_lambda_context(context)
def update_context(**kwargs: Optional[str]) -> None:
    """Update values in the logging context to be included with every log record.

    Thin module-level convenience wrapper around ContextFilter.
    """
    ContextFilter.update_context(**kwargs)
class log_stack_trace(ContextDecorator):
    """This context enables logging a stacktrace automatically when an exception occurs.

    Usable both as ``with log_stack_trace(logger): ...`` and as a decorator
    (via ContextDecorator). The exception message is logged at ERROR level
    with the formatted stack trace attached as a ``stack_trace`` list in
    ``extra``; the exception then propagates to the caller.
    """
    def __init__(self, logger: logging.Logger) -> None:
        self._logger = logger
    def __enter__(self) -> "log_stack_trace":
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
        if exc_type:
            tb = traceback.TracebackException(exc_type, exc_val, exc_tb)
            # First (and only) line of the "ExcType: message" form.
            message = next(tb.format_exception_only()).strip()
            stack_trace_lines = ("".join(tb.format())).splitlines()
            self._logger.error(message, extra={"stack_trace": stack_trace_lines})
        # Returning a falsy value re-raises the original exception.
        return None
| 35.778226 | 100 | 0.618618 | """
Enable logging (from Lambda functions) in JSON format which makes post-processing much easier.
Since we assume this will be used by Lambda functions, we also add the request id in log lines.
"""
import json
import logging
import logging.config
import sys
import traceback
from contextlib import ContextDecorator
from datetime import datetime, timezone
from logging import NullHandler # noqa: F401
from typing import Any, Dict, Optional, Tuple, Union
class ContextFilter(logging.Filter):
"""
Logging Filter class that adds contextual information to log records.
We assume there will be only one instance of this filter for any runtime which
means that we will store some values with the class, not the instances.
"""
_context: Dict[str, Optional[str]] = {
"aws_request_id": None,
"correlation_id": None,
"function_name": None,
"function_version": None,
"invoked_function_arn": None,
"log_group_name": None,
"log_stream_name": None,
"request_id": None,
}
def filter(self, record: logging.LogRecord) -> bool:
"""Modify record in place for additional fields, then return True to continue processing."""
for field, value in self._context.items():
if value is not None:
setattr(record, field, value)
return True
@classmethod
def update_from_lambda_context(cls, context: Any) -> None:
"""Update fields stored in the global context filter based on attributes of the context."""
for field, value in cls._context.items():
cls._context[field] = getattr(context, field, value)
@classmethod
def update_context(cls, **kwargs: Optional[str]) -> None:
"""
Update any of the fields stored in the global context filter.
Note that trying to set a field that's not been defined raises a ValueError.
Setting a field to None removes it from the output.
"""
for field, value in kwargs.items():
if field in cls._context:
cls._context[field] = value
else:
raise ValueError(f"unexpected field: '{field}'")
class DefaultJsonFormat(json.JSONEncoder):
"""Default to using 'str()' except for dates which are ISO 8601."""
def default(self, obj: Any) -> str:
if isinstance(obj, datetime):
s = obj.isoformat(timespec="milliseconds")
if s.endswith("+00:00"):
# Make 99% of our timestamps easier to read by replacing the time offset with "Z".
return s[:-6] + "Z"
return s
else:
return str(obj)
class JsonFormatter(logging.Formatter):
"""
Format the message to be easily reverted into an object by using JSON format.
Notes:
* The "format" is ignored since we convert based on available info.
* The timestamps are in UTC.
This format of "gmtime" is compatible with "strict_date_time" in Elasticsearch,
(as "yyyy-MM-dd'T'HH:mm:ss.SSSZZ") and other log collection tools.
"""
attribute_mapping = {
# LogRecord attributes for which we want new names:
"filename": "source.filename",
"funcName": "source.function",
"levelname": "log_level",
"levelno": "log_severity",
"lineno": "source.line_number",
"module": "source.module",
"name": "logger",
"pathname": "source.pathname",
"process": "process.id",
"processName": "process.name",
"threadName": "thread.name",
# Common context attributes which we want to rename:
"function_name": "lambda.function_name",
"function_version": "lambda.function_version",
"invoked_function_arn": "lambda.invoked_function_arn",
"log_group_name": "cwl.log_group_name",
"log_stream_name": "cwl.log_stream_name",
# LogRecord attributes which we want to suppress or rewrite ourselves:
"args": None,
"created": None,
"msecs": None,
"msg": None,
"relativeCreated": None,
"thread": None,
}
# Use "set_output_format()" to change this value.
output_format = "compact"
@property
def indent(self) -> Optional[str]:
return {"compact": None, "pretty": " "}[self.output_format]
@property
def separators(self) -> Tuple[str, str]:
return {"compact": (",", ":"), "pretty": (",", ": ")}[self.output_format]
def format(self, record: logging.LogRecord) -> str:
"""Format log record by creating a JSON-format in a string."""
assembled = {}
for attr, value in record.__dict__.items():
if value is None:
continue
if attr in self.attribute_mapping:
new_name = self.attribute_mapping[attr]
if new_name is not None:
assembled[new_name] = value
continue
# This lets anything, I mean anything, from "extra={}" slip through.
assembled[attr] = value
# The "message" is added here so an accidentally specified message in the extra kwargs
# is ignored.
assembled["message"] = record.getMessage()
# We show elapsed milliseconds as int, not float.
assembled["elapsed_ms"] = int(record.relativeCreated)
# Finally, always add a timestamp as epoch msecs and in a human readable format.
# (Go to https://www.epochconverter.com/ to convert the timestamp in milliseconds.)
assembled["timestamp"] = int(record.created * 1000.0)
assembled["gmtime"] = datetime.fromtimestamp(record.created, timezone.utc)
return json.dumps(
assembled,
cls=DefaultJsonFormat,
indent=self.indent,
separators=self.separators,
sort_keys=True,
)
# We don't create this dict earlier so that we can use the classes (instead of their names
# as strings).
LOGGING_STREAM_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"json_formatter": {"()": JsonFormatter}},
"filters": {"context_filter": {"()": ContextFilter}},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "json_formatter",
"filters": ["context_filter"],
"stream": "ext://sys.stdout",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
# Loggers from packages that we use and want to be less noisy:
"botocore": {
"qualname": "botocore",
"handlers": ["console"],
"level": "WARNING",
"propagate": 0,
},
"elasticsearch": {
"qualname": "elasticsearch",
"handlers": ["console"],
"level": "WARNING",
"propagate": 0,
},
"urllib3": {
"qualname": "urllib3",
"handlers": ["console"],
"level": "WARNING",
"propagate": 0,
},
},
}
def configure_logging(level: Union[int, str] = "INFO") -> None:
"""Configure logging module to use JSON formatter for logs."""
logging.config.dictConfig(LOGGING_STREAM_CONFIG)
logging.captureWarnings(True)
logging.root.setLevel(level)
# Just for developer convenience -- this avoids having too many imports of "logging" packages.
def getLogger(name: str = None) -> logging.Logger:
return logging.getLogger(name)
def set_output_format(pretty: bool = False, pretty_if_tty: bool = False) -> None:
if pretty or (pretty_if_tty and sys.stdout.isatty()):
JsonFormatter.output_format = "pretty"
else:
JsonFormatter.output_format = "compact"
def update_from_lambda_context(context: Any) -> None:
"""Update values in the logging context from the context of a AWS Lambda function."""
ContextFilter.update_from_lambda_context(context)
def update_context(**kwargs: Optional[str]) -> None:
"""Update values in the logging context to be included with every log record."""
ContextFilter.update_context(**kwargs)
class log_stack_trace(ContextDecorator):
"""This context enables logging a stacktrace automatically when an exception occurs."""
def __init__(self, logger: logging.Logger) -> None:
self._logger = logger
def __enter__(self) -> "log_stack_trace":
return self
def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore
if exc_type:
tb = traceback.TracebackException(exc_type, exc_val, exc_tb)
message = next(tb.format_exception_only()).strip()
stack_trace_lines = ("".join(tb.format())).splitlines()
self._logger.error(message, extra={"stack_trace": stack_trace_lines})
return None
| 1,282 | 0 | 205 |
5fa7626bffa829023dff17d79a49ca9e81fba492 | 2,149 | py | Python | hacker_earth/python_problems/string_addition.py | Faraaz54/python_training_problems | 24c7b42daaf54366759e1d7c4b42f9936316e94b | [
"MIT"
] | null | null | null | hacker_earth/python_problems/string_addition.py | Faraaz54/python_training_problems | 24c7b42daaf54366759e1d7c4b42f9936316e94b | [
"MIT"
] | null | null | null | hacker_earth/python_problems/string_addition.py | Faraaz54/python_training_problems | 24c7b42daaf54366759e1d7c4b42f9936316e94b | [
"MIT"
] | null | null | null |
sol = []
for _ in xrange(0, int(raw_input())):
s = raw_input()
#w = [i for i in s]
#print w
#t = [s[len(s) - 1 - count] for count in xrange(len(s))]
#print t
T = []
st = 0
for i in xrange(0, len(s)):
v = (ord(s[i]) + ord(s[len(s) - 1 - i])) - 96
if v <= 122:
st = chr(v)
elif v > 122:
v = v - 26
st = chr(v)
'''k = v % 218
a = 97
if k == 194 or k == 2:
st = chr(a+1)
elif k == 195 or k == 3:
st = chr(a+2)
elif k == 196 or k == 4:
st = chr(a+3)
elif k == 197 or k == 5:
st = chr(a+4)
elif k == 198 or k == 6:
st = chr(a+5)
elif k == 199 or k == 7:
st = chr(a+6)
elif k == 200 or k == 8:
st = chr(a+7)
elif k == 201 or k == 9:
st = chr(a+8)
elif k == 202 or k == 10:
st = chr(a+9)
elif k == 203 or k == 11:
st = chr(a+10)
elif k == 204 or k == 12:
st = chr(a+11)
elif k == 205 or k == 13:
st = chr(a+12)
elif k == 206 or k == 14:
st = chr(a+13)
elif k == 207 or k == 15:
st = chr(a+14)
elif k == 208 or k == 16:
st = chr(a+15)
elif k == 209 or k == 17:
st = chr(a+16)
elif k == 210 or k == 18:
st = chr(a+17)
elif k == 211 or k == 19:
st = chr(a+18)
elif k == 212 or k == 20:
st = chr(a+19)
elif k == 213 or k == 21:
st = chr(a+20)
elif k == 214 or k == 22:
st = chr(a+21)
elif k == 215 or k == 23:
st = chr(a+22)
elif k == 216 or k == 24:
st = chr(a+23)
elif k == 217 or k == 25:
st = chr(a+24)
elif k == 0 or k == 26:
st = chr(a+25)
else:
st = chr(a)'''
T.append(st)
sol.append(''.join(T))
for i in sol:
print i
| 25.891566 | 61 | 0.33504 |
sol = []
for _ in xrange(0, int(raw_input())):
s = raw_input()
#w = [i for i in s]
#print w
#t = [s[len(s) - 1 - count] for count in xrange(len(s))]
#print t
T = []
st = 0
for i in xrange(0, len(s)):
v = (ord(s[i]) + ord(s[len(s) - 1 - i])) - 96
if v <= 122:
st = chr(v)
elif v > 122:
v = v - 26
st = chr(v)
'''k = v % 218
a = 97
if k == 194 or k == 2:
st = chr(a+1)
elif k == 195 or k == 3:
st = chr(a+2)
elif k == 196 or k == 4:
st = chr(a+3)
elif k == 197 or k == 5:
st = chr(a+4)
elif k == 198 or k == 6:
st = chr(a+5)
elif k == 199 or k == 7:
st = chr(a+6)
elif k == 200 or k == 8:
st = chr(a+7)
elif k == 201 or k == 9:
st = chr(a+8)
elif k == 202 or k == 10:
st = chr(a+9)
elif k == 203 or k == 11:
st = chr(a+10)
elif k == 204 or k == 12:
st = chr(a+11)
elif k == 205 or k == 13:
st = chr(a+12)
elif k == 206 or k == 14:
st = chr(a+13)
elif k == 207 or k == 15:
st = chr(a+14)
elif k == 208 or k == 16:
st = chr(a+15)
elif k == 209 or k == 17:
st = chr(a+16)
elif k == 210 or k == 18:
st = chr(a+17)
elif k == 211 or k == 19:
st = chr(a+18)
elif k == 212 or k == 20:
st = chr(a+19)
elif k == 213 or k == 21:
st = chr(a+20)
elif k == 214 or k == 22:
st = chr(a+21)
elif k == 215 or k == 23:
st = chr(a+22)
elif k == 216 or k == 24:
st = chr(a+23)
elif k == 217 or k == 25:
st = chr(a+24)
elif k == 0 or k == 26:
st = chr(a+25)
else:
st = chr(a)'''
T.append(st)
sol.append(''.join(T))
for i in sol:
print i
| 0 | 0 | 0 |
839d371d56939d9dda6a230c854a47fd94faa3e6 | 5,063 | py | Python | esphomeyaml/components/logger.py | Bierchermuesli/esphomeyaml | f087e313d4ca5370b14f7e8173c8f1719e90dfa6 | [
"MIT"
] | null | null | null | esphomeyaml/components/logger.py | Bierchermuesli/esphomeyaml | f087e313d4ca5370b14f7e8173c8f1719e90dfa6 | [
"MIT"
] | null | null | null | esphomeyaml/components/logger.py | Bierchermuesli/esphomeyaml | f087e313d4ca5370b14f7e8173c8f1719e90dfa6 | [
"MIT"
] | null | null | null | import re
import voluptuous as vol
from esphomeyaml.automation import ACTION_REGISTRY, LambdaAction
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_ARGS, CONF_BAUD_RATE, CONF_FORMAT, CONF_ID, CONF_LEVEL, \
CONF_LOGS, CONF_TAG, CONF_TX_BUFFER_SIZE
from esphomeyaml.core import ESPHomeYAMLError, Lambda
from esphomeyaml.helpers import App, Pvariable, RawExpression, TemplateArguments, add, \
esphomelib_ns, global_ns, process_lambda, statement, Component
# Config-level names mapped to the esphomelib log-level constants.
LOG_LEVELS = {
    'NONE': global_ns.ESPHOMELIB_LOG_LEVEL_NONE,
    'ERROR': global_ns.ESPHOMELIB_LOG_LEVEL_ERROR,
    'WARN': global_ns.ESPHOMELIB_LOG_LEVEL_WARN,
    'INFO': global_ns.ESPHOMELIB_LOG_LEVEL_INFO,
    'DEBUG': global_ns.ESPHOMELIB_LOG_LEVEL_DEBUG,
    'VERBOSE': global_ns.ESPHOMELIB_LOG_LEVEL_VERBOSE,
    'VERY_VERBOSE': global_ns.ESPHOMELIB_LOG_LEVEL_VERY_VERBOSE,
}
# Level names mapped to the matching ESP_LOG* macro ('NONE' has none).
LOG_LEVEL_TO_ESP_LOG = {
    'ERROR': global_ns.ESP_LOGE,
    'WARN': global_ns.ESP_LOGW,
    'INFO': global_ns.ESP_LOGI,
    'DEBUG': global_ns.ESP_LOGD,
    'VERBOSE': global_ns.ESP_LOGV,
    'VERY_VERBOSE': global_ns.ESP_LOGVV,
}
# Ordered from least to most verbose; list position is used to compare
# severities.
LOG_LEVEL_SEVERITY = ['NONE', 'ERROR', 'WARN', 'INFO', 'DEBUG', 'VERBOSE', 'VERY_VERBOSE']
# pylint: disable=invalid-name
# Validator: upper-cases its input and checks it is a known level name.
is_log_level = vol.All(vol.Upper, cv.one_of(*LOG_LEVELS))
LogComponent = esphomelib_ns.class_('LogComponent', Component)
def validate_local_no_higher_than_global(value):
    """Reject per-tag log levels that are more verbose than the global level.

    Restored here: CONFIG_SCHEMA below references this validator, and
    without it the module raises NameError at import time.
    """
    global_level = value.get(CONF_LEVEL, 'DEBUG')
    for tag, level in value.get(CONF_LOGS, {}).iteritems():
        if LOG_LEVEL_SEVERITY.index(level) > LOG_LEVEL_SEVERITY.index(global_level):
            raise ESPHomeYAMLError(u"The local log level {} for {} must be less severe than the "
                                   u"global log level {}.".format(level, tag, global_level))
    return value
# Component configuration schema; the extra validator enforces that no
# per-tag level is more verbose than the global one.
CONFIG_SCHEMA = vol.All(vol.Schema({
    cv.GenerateID(): cv.declare_variable_id(LogComponent),
    vol.Optional(CONF_BAUD_RATE, default=115200): cv.positive_int,
    vol.Optional(CONF_TX_BUFFER_SIZE): cv.validate_bytes,
    vol.Optional(CONF_LEVEL): is_log_level,
    vol.Optional(CONF_LOGS): vol.Schema({
        cv.string: is_log_level,
    })
}), validate_local_no_higher_than_global)
CONF_LOGGER_LOG = 'logger.log'
# Schema for the logger.log action: a printf-style format string plus
# optional lambda arguments, level and tag.
# NOTE(review): maybe_simple_message and validate_printf are not defined in
# this chunk -- confirm they are provided elsewhere in this module.
LOGGER_LOG_ACTION_SCHEMA = vol.All(maybe_simple_message({
    vol.Required(CONF_FORMAT): cv.string,
    vol.Optional(CONF_ARGS, default=list): vol.All(cv.ensure_list, [cv.lambda_]),
    vol.Optional(CONF_LEVEL, default="DEBUG"): vol.All(vol.Upper, cv.one_of(*LOG_LEVEL_TO_ESP_LOG)),
    vol.Optional(CONF_TAG, default="main"): cv.string,
}), validate_printf)
@ACTION_REGISTRY.register(CONF_LOGGER_LOG, LOGGER_LOG_ACTION_SCHEMA)
| 38.946154 | 100 | 0.673119 | import re
import voluptuous as vol
from esphomeyaml.automation import ACTION_REGISTRY, LambdaAction
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_ARGS, CONF_BAUD_RATE, CONF_FORMAT, CONF_ID, CONF_LEVEL, \
CONF_LOGS, CONF_TAG, CONF_TX_BUFFER_SIZE
from esphomeyaml.core import ESPHomeYAMLError, Lambda
from esphomeyaml.helpers import App, Pvariable, RawExpression, TemplateArguments, add, \
esphomelib_ns, global_ns, process_lambda, statement, Component
LOG_LEVELS = {
'NONE': global_ns.ESPHOMELIB_LOG_LEVEL_NONE,
'ERROR': global_ns.ESPHOMELIB_LOG_LEVEL_ERROR,
'WARN': global_ns.ESPHOMELIB_LOG_LEVEL_WARN,
'INFO': global_ns.ESPHOMELIB_LOG_LEVEL_INFO,
'DEBUG': global_ns.ESPHOMELIB_LOG_LEVEL_DEBUG,
'VERBOSE': global_ns.ESPHOMELIB_LOG_LEVEL_VERBOSE,
'VERY_VERBOSE': global_ns.ESPHOMELIB_LOG_LEVEL_VERY_VERBOSE,
}
LOG_LEVEL_TO_ESP_LOG = {
'ERROR': global_ns.ESP_LOGE,
'WARN': global_ns.ESP_LOGW,
'INFO': global_ns.ESP_LOGI,
'DEBUG': global_ns.ESP_LOGD,
'VERBOSE': global_ns.ESP_LOGV,
'VERY_VERBOSE': global_ns.ESP_LOGVV,
}
LOG_LEVEL_SEVERITY = ['NONE', 'ERROR', 'WARN', 'INFO', 'DEBUG', 'VERBOSE', 'VERY_VERBOSE']
# pylint: disable=invalid-name
is_log_level = vol.All(vol.Upper, cv.one_of(*LOG_LEVELS))
def validate_local_no_higher_than_global(value):
    """Reject per-tag log levels that are more verbose than the global level.

    A per-tag level with a higher severity index (= more verbose) than the
    globally compiled level could never produce output, so it is an error.
    Raises ESPHomeYAMLError on violation; returns the config unchanged
    otherwise (voluptuous validator contract).
    """
    global_level = value.get(CONF_LEVEL, 'DEBUG')
    # .items() instead of the Python-2-only .iteritems() keeps this working
    # on both Python 2 and Python 3.
    for tag, level in value.get(CONF_LOGS, {}).items():
        if LOG_LEVEL_SEVERITY.index(level) > LOG_LEVEL_SEVERITY.index(global_level):
            raise ESPHomeYAMLError(u"The local log level {} for {} must be less severe than the "
                                   u"global log level {}.".format(level, tag, global_level))
    return value
LogComponent = esphomelib_ns.class_('LogComponent', Component)
CONFIG_SCHEMA = vol.All(vol.Schema({
cv.GenerateID(): cv.declare_variable_id(LogComponent),
vol.Optional(CONF_BAUD_RATE, default=115200): cv.positive_int,
vol.Optional(CONF_TX_BUFFER_SIZE): cv.validate_bytes,
vol.Optional(CONF_LEVEL): is_log_level,
vol.Optional(CONF_LOGS): vol.Schema({
cv.string: is_log_level,
})
}), validate_local_no_higher_than_global)
def to_code(config):
    """Generate the C++ setup expressions for the logger component.

    Creates the log component variable and applies the optional buffer
    size, global log level and per-tag log levels from *config*.
    """
    rhs = App.init_log(config.get(CONF_BAUD_RATE))
    log = Pvariable(config[CONF_ID], rhs)
    if CONF_TX_BUFFER_SIZE in config:
        add(log.set_tx_buffer_size(config[CONF_TX_BUFFER_SIZE]))
    if CONF_LEVEL in config:
        add(log.set_global_log_level(LOG_LEVELS[config[CONF_LEVEL]]))
    # .items() works on both Python 2 and 3 (.iteritems() is Python-2-only).
    for tag, level in config.get(CONF_LOGS, {}).items():
        add(log.set_log_level(tag, LOG_LEVELS[level]))
def required_build_flags(config):
    """Return the compiler define for the configured global log level.

    Gives back None when no explicit level was configured.
    """
    if CONF_LEVEL not in config:
        return None
    return u'-DESPHOMELIB_LOG_LEVEL={}'.format(str(LOG_LEVELS[config[CONF_LEVEL]]))
def maybe_simple_message(schema):
    """Wrap *schema* so a bare string is accepted as ``{CONF_FORMAT: string}``.

    Returns a validator callable suitable for voluptuous.
    """
    def validator(value):
        payload = value if isinstance(value, dict) else {CONF_FORMAT: value}
        return vol.Schema(schema)(payload)
    return validator
def validate_printf(value):
    """Validate that the printf-style placeholders in the format string
    match the number of supplied args (voluptuous validator)."""
    # https://stackoverflow.com/questions/30011379/how-can-i-parse-a-c-format-string-in-python
    # pylint: disable=anomalous-backslash-in-string
    cfmt = u"""\
( # start of capture group 1
% # literal "%"
(?: # first option
(?:[-+0 #]{0,5}) # optional flags
(?:\d+|\*)? # width
(?:\.(?:\d+|\*))? # precision
(?:h|l|ll|w|I|I32|I64)? # size
[cCdiouxXeEfgGaAnpsSZ] # type
) | # OR
%%) # literal "%%"
"""
    # re.X makes the pattern above a verbose regex: whitespace and the
    # inline "#" comments inside the string are ignored when matching.
    matches = re.findall(cfmt, value[CONF_FORMAT], flags=re.X)
    if len(matches) != len(value[CONF_ARGS]):
        raise vol.Invalid(u"Found {} printf-patterns ({}), but {} args were given!"
                          u"".format(len(matches), u', '.join(matches), len(value[CONF_ARGS])))
    return value
CONF_LOGGER_LOG = 'logger.log'
LOGGER_LOG_ACTION_SCHEMA = vol.All(maybe_simple_message({
vol.Required(CONF_FORMAT): cv.string,
vol.Optional(CONF_ARGS, default=list): vol.All(cv.ensure_list, [cv.lambda_]),
vol.Optional(CONF_LEVEL, default="DEBUG"): vol.All(vol.Upper, cv.one_of(*LOG_LEVEL_TO_ESP_LOG)),
vol.Optional(CONF_TAG, default="main"): cv.string,
}), validate_printf)
@ACTION_REGISTRY.register(CONF_LOGGER_LOG, LOGGER_LOG_ACTION_SCHEMA)
def logger_log_action_to_code(config, action_id, arg_type):
    """Generate the C++ expression for a ``logger.log`` action.

    Generator-based: yields None while iterating ``process_lambda`` and
    finally yields the finished ``Pvariable`` for the action.
    """
    template_arg = TemplateArguments(arg_type)
    esp_log = LOG_LEVEL_TO_ESP_LOG[config[CONF_LEVEL]]
    args = [RawExpression(unicode(x)) for x in config[CONF_ARGS]]
    text = unicode(statement(esp_log(config[CONF_TAG], config[CONF_FORMAT], *args)))
    for lambda_ in process_lambda(Lambda(text), [(arg_type, 'x')]):
        yield None
    # ``lambda_`` here is the last value produced by the loop above.
    rhs = LambdaAction.new(template_arg, lambda_)
    type = LambdaAction.template(template_arg)  # NOTE(review): shadows the builtin `type`
    yield Pvariable(action_id, rhs, type=type)
| 2,707 | 0 | 137 |
df28fa0ee16b08f7c8e724f545139a01db78d3c1 | 3,695 | py | Python | celery_conf/tasks.py | kwrobert/qualipy | a514e8d2e7d3b24d111556a1af91689ace36af9a | [
"MIT"
] | null | null | null | celery_conf/tasks.py | kwrobert/qualipy | a514e8d2e7d3b24d111556a1af91689ace36af9a | [
"MIT"
] | null | null | null | celery_conf/tasks.py | kwrobert/qualipy | a514e8d2e7d3b24d111556a1af91689ace36af9a | [
"MIT"
] | null | null | null | import json
import inspect
import qualipy
from celery import Celery
from celery import group
app = Celery()
# celery config file here:
app.config_from_object('sampleconfig')
@app.task
def process_image(image, filters, ROI=None, return_predictions=False,
                  combine_results=False, sort_filters=True):
    """Processes one image with process-function and returns the resulting value.

    Celery task wrapper: all arguments are forwarded unchanged to
    :func:`qualipy.process`.
    """
    return qualipy.process(image, filters, ROI, return_predictions,
                           combine_results, sort_filters)
def celery_process(images, filters, ROIs=None, return_predictions=False,
                   combine_results=False, sort_filters=True):
    """Process a list of images by dividing the task into smaller celery-tasks.
    Returns a celery.result.ResultSet

    When *ROIs* is given it must be parallel to *images* (one ROI per
    image); otherwise every image is processed without a ROI.
    """
    if ROIs is None:
        # One celery sub-task per image, fanned out as a group.
        return group(process_image.s(img, filters, None, return_predictions,
                                     combine_results, sort_filters)
                     for img in images)()
    if len(images) != len(ROIs):
        raise ValueError("image and ROI lists need to be of same length")
    return group(process_image.s(img, filters, ROI, return_predictions,
                                 combine_results, sort_filters)
                 for img, ROI in zip(images, ROIs))()
def get_job_status(job):
    """Return the completion of *job* (celery.result.ResultSet) as a
    percentage of finished tasks.

    An empty result set is reported as 100.0 (nothing left to do) instead
    of raising ZeroDivisionError as the previous version did.
    """
    total = len(job.results)
    if total == 0:
        # No sub-tasks at all: treat the job as fully complete.
        return 100.0
    return (float(job.completed_count()) / total) * 100
def celery_process_request(request_json):
    """Works the same as process_request-function, but
    returns a celery.result.ResultSet instead of list of results.

    Raises ValueError for malformed JSON, a missing images/filters entry,
    an invalid ROI, or an unknown/mis-parameterized filter.
    """
    import qualipy.filters
    # All classes exposed by qualipy.filters, as (name, class) pairs,
    # used to resolve the filter names from the request.
    filter_classes = inspect.getmembers(qualipy.filters, inspect.isclass)
    try:
        request = json.loads(request_json)
    except:  # NOTE(review): bare except also swallows non-JSON errors
        raise ValueError("Invalid JSON format")
    if 'images' not in request or 'filters' not in request:
        raise ValueError("images or filters array not in JSON")
    images, ROIs = __parse_images_and_ROIs(request['images'])
    filters = __collect_filters(request['filters'], filter_classes)
    # Optional request flags with their defaults.
    return_predictions = __get_argument(request, 'return_predictions', False)
    combine_results = __get_argument(request, 'combine_results', True)
    sort_filters = __get_argument(request, 'sort_filters', True)
    return group(process_image.s(img, filters, ROI, return_predictions,
                                 combine_results, sort_filters)
                 for img, ROI in zip(images, ROIs))()
| 32.412281 | 81 | 0.665765 | import json
import inspect
import qualipy
from celery import Celery
from celery import group
app = Celery()
# celery config file here:
app.config_from_object('sampleconfig')
@app.task
def process_image(image, filters, ROI=None, return_predictions=False,
combine_results=False, sort_filters=True):
"""Processes one image with process-function and returns the resulting value.
"""
return qualipy.process(image, filters, ROI, return_predictions,
combine_results, sort_filters)
def celery_process(images, filters, ROIs=None, return_predictions=False,
combine_results=False, sort_filters=True):
"""Process a list of images by dividing the task into smaller celery-tasks.
Returns a celery.result.ResultSet
"""
if ROIs is None:
return group(process_image.s(img, filters, None, return_predictions,
combine_results, sort_filters)
for img in images)()
if len(images) != len(ROIs):
raise ValueError("image and ROI lists need to be of same length")
return group(process_image.s(img, filters, ROI, return_predictions,
combine_results, sort_filters)
for img, ROI in zip(images, ROIs))()
def get_job_status(job):
"""Returns the status of the job(celery.result.ResultSet) as a percentage
of completed tasks
"""
total = len(job.results)
return (float(job.completed_count()) / total) * 100
def celery_process_request(request_json):
"""Works the same as process_request-function, but
returns a celery.result.ResultSet instead of list of results.
"""
import qualipy.filters
filter_classes = inspect.getmembers(qualipy.filters, inspect.isclass)
try:
request = json.loads(request_json)
except:
raise ValueError("Invalid JSON format")
if 'images' not in request or 'filters' not in request:
raise ValueError("images or filters array not in JSON")
images, ROIs = __parse_images_and_ROIs(request['images'])
filters = __collect_filters(request['filters'], filter_classes)
return_predictions = __get_argument(request, 'return_predictions', False)
combine_results = __get_argument(request, 'combine_results', True)
sort_filters = __get_argument(request, 'sort_filters', True)
return group(process_image.s(img, filters, ROI, return_predictions,
combine_results, sort_filters)
for img, ROI in zip(images, ROIs))()
def __get_filter(name, filter_classes):
    """Return the first filter class whose ``name`` attribute equals
    *name*, or None when no such filter exists."""
    candidates = (cls for _, cls in filter_classes if cls.name == name)
    return next(candidates, None)
def __get_argument(request, arg_name, default):
    """Return ``request[arg_name]`` when the key is present, else *default*."""
    return request.get(arg_name, default)
def __parse_images_and_ROIs(request_images):
    """Split the request's image mapping into parallel lists.

    Returns (images, ROIs) where each ROI is a 4-tuple or None.
    Raises ValueError for a ROI that is neither None nor a 4-element list.
    """
    images, ROIs = [], []
    # .items() instead of the Python-2-only .iteritems() keeps this
    # working on both Python 2 and Python 3.
    for image, ROI in request_images.items():
        if ROI is None or (isinstance(ROI, list) and len(ROI) == 4):
            images.append(image)
            ROIs.append(None if ROI is None else tuple(ROI))
        else:
            raise ValueError("invalid ROI for image %s" % image)
    return images, ROIs
def __collect_filters(request_filters, filter_classes):
    """Instantiate the filters named in *request_filters* with their params.

    Raises ValueError for an unknown filter name or invalid parameters.
    """
    filters = []
    # .items() works on both Python 2 and 3 (.iteritems() is Python-2-only).
    for filter_name, params in request_filters.items():
        filter_obj = __get_filter(filter_name, filter_classes)
        if filter_obj is None:
            # Name the offending filter instead of raising a bare ValueError.
            raise ValueError("unknown filter %s" % filter_name)
        try:
            filters.append(filter_obj(**params))
        except TypeError:
            raise ValueError("Invalid parameters for filter %s" % filter_name)
    return filters
| 1,022 | 0 | 92 |
a269b81a626604236eb11f9b26a66abc6af99700 | 3,545 | py | Python | VISION/CalibrationStereo/StereoCalibration.py | Beam-create/S4_projet_QUADRUS_V2 | 304b434b552a4a5377dd2f49c4bb4c8e9fcb714b | [
"MIT"
] | null | null | null | VISION/CalibrationStereo/StereoCalibration.py | Beam-create/S4_projet_QUADRUS_V2 | 304b434b552a4a5377dd2f49c4bb4c8e9fcb714b | [
"MIT"
] | 52 | 2022-02-12T17:12:17.000Z | 2022-03-30T00:39:18.000Z | VISION/CalibrationStereo/StereoCalibration.py | Beam-create/S4_projet_QUADRUS_V2 | 304b434b552a4a5377dd2f49c4bb4c8e9fcb714b | [
"MIT"
] | 2 | 2022-01-19T15:44:53.000Z | 2022-01-20T21:44:41.000Z | import numpy as np
import cv2
import glob
################ CHESSBOARD #############################
# Inner-corner count of the calibration chessboard and the image size (px).
chessboardSize = (8,6)
frameSize = (720,540)
# Termination criteria for the sub-pixel corner refinement.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# 3D object points of the corners in the board's own plane (z = 0).
objp = np.zeros((chessboardSize[0] * chessboardSize[1], 3), np.float32)
objp[:,:2] = np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)
objp = objp*26.2 # scale by the square width in mm
objpoints = []   # 3D points, identical for every accepted image pair
imgpointsL = []  # detected 2D corners, left camera
imgpointsR = []  # detected 2D corners, right camera
# sorted() keeps the left/right file lists paired by index.
imagesLeft = sorted(glob.glob('Images/Gauche/*.png'))
imagesRight = sorted(glob.glob('Images/Droite/*.png'))
counter = 0
for imgLeft, imgRight in zip(imagesLeft, imagesRight):
    imgL = cv2.imread(imgLeft)
    imgR = cv2.imread(imgRight)
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
    retL, cornersL = cv2.findChessboardCorners(grayL, chessboardSize, None)
    retR, cornersR = cv2.findChessboardCorners(grayR, chessboardSize, None)
    # Keep only pairs where the board was found in BOTH views.
    # (parses as `retL and (retR == True)`, equivalent for boolean retL)
    if retL and retR == True:
        objpoints.append(objp)
        cornersL = cv2.cornerSubPix(grayL, cornersL, (11,11), (-1,-1), criteria)
        imgpointsL.append(cornersL)
        cornersR = cv2.cornerSubPix(grayR, cornersR, (11,11), (-1,-1), criteria)
        imgpointsR.append(cornersR)
        print(counter)
        counter += 1
        # Visual feedback: show the detected corners on the left image.
        cv2.drawChessboardCorners(imgL, chessboardSize, cornersL, retL)
        cv2.imshow('img left', imgL)
        cv2.waitKey(300)
cv2.destroyAllWindows()
############## CALIBRATION #######################################################
# Calibrate each camera individually (intrinsics + distortion coefficients).
print("Calibration des cameras")
retL, cameraMatrixL, distL, rvecsL, tvecsL = cv2.calibrateCamera(objpoints, imgpointsL, frameSize, None, None)
heightL, widthL, channelsL = imgL.shape
newCameraMatrixL, roi_L = cv2.getOptimalNewCameraMatrix(cameraMatrixL, distL, (widthL, heightL), 1, (widthL, heightL))
retR, cameraMatrixR, distR, rvecsR, tvecsR = cv2.calibrateCamera(objpoints, imgpointsR, frameSize, None, None)
heightR, widthR, channelsR = imgR.shape
newCameraMatrixR, roi_R = cv2.getOptimalNewCameraMatrix(cameraMatrixR, distR, (widthR, heightR), 1, (widthR, heightR))
########## STEREO CALIBRATION #############################################
# Fix the per-camera intrinsics and solve for the rotation/translation
# between the two cameras.
print("Stereo Calibration")
flags = cv2.CALIB_FIX_INTRINSIC + cv2.CALIB_SAME_FOCAL_LENGTH
criteria_stereo= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv2.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria_stereo, flags)
########## STEREO RECTIFICATION #################################################
# Compute rectification transforms and remap tables that row-align the
# two image planes.
print("Stereo Rectification")
rectifyScale= 1
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R= cv2.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, rectifyScale,(0,0))
stereoMapL = cv2.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv2.CV_16SC2)
stereoMapR = cv2.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv2.CV_16SC2)
# Persist the remap tables and the disparity-to-depth matrix Q.
print("Saving parameters!")
cv2_file = cv2.FileStorage('stereoMap.xml', cv2.FILE_STORAGE_WRITE)
cv2_file.write('stereoMapL_x',stereoMapL[0])
cv2_file.write('stereoMapL_y',stereoMapL[1])
cv2_file.write('stereoMapR_x',stereoMapR[0])
cv2_file.write('stereoMapR_y',stereoMapR[1])
cv2_file.write('q',Q)
cv2_file.release()
| 33.130841 | 257 | 0.691396 | import numpy as np
import cv2
import glob
################ ÉCHÉQUIER #############################
chessboardSize = (8,6)
frameSize = (720,540)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((chessboardSize[0] * chessboardSize[1], 3), np.float32)
objp[:,:2] = np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)
objp = objp*26.2 #On multiplie par la largeur des carrés en mm
objpoints = []
imgpointsL = []
imgpointsR = []
imagesLeft = sorted(glob.glob('Images/Gauche/*.png'))
imagesRight = sorted(glob.glob('Images/Droite/*.png'))
counter = 0
for imgLeft, imgRight in zip(imagesLeft, imagesRight):
imgL = cv2.imread(imgLeft)
imgR = cv2.imread(imgRight)
grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
retL, cornersL = cv2.findChessboardCorners(grayL, chessboardSize, None)
retR, cornersR = cv2.findChessboardCorners(grayR, chessboardSize, None)
if retL and retR == True:
objpoints.append(objp)
cornersL = cv2.cornerSubPix(grayL, cornersL, (11,11), (-1,-1), criteria)
imgpointsL.append(cornersL)
cornersR = cv2.cornerSubPix(grayR, cornersR, (11,11), (-1,-1), criteria)
imgpointsR.append(cornersR)
print(counter)
counter += 1
cv2.drawChessboardCorners(imgL, chessboardSize, cornersL, retL)
cv2.imshow('img left', imgL)
cv2.waitKey(300)
cv2.destroyAllWindows()
############## CALIBRATION #######################################################
print("Calibration des cameras")
retL, cameraMatrixL, distL, rvecsL, tvecsL = cv2.calibrateCamera(objpoints, imgpointsL, frameSize, None, None)
heightL, widthL, channelsL = imgL.shape
newCameraMatrixL, roi_L = cv2.getOptimalNewCameraMatrix(cameraMatrixL, distL, (widthL, heightL), 1, (widthL, heightL))
retR, cameraMatrixR, distR, rvecsR, tvecsR = cv2.calibrateCamera(objpoints, imgpointsR, frameSize, None, None)
heightR, widthR, channelsR = imgR.shape
newCameraMatrixR, roi_R = cv2.getOptimalNewCameraMatrix(cameraMatrixR, distR, (widthR, heightR), 1, (widthR, heightR))
########## CALIBRATION STEREO #############################################
print("Stereo Calibration")
flags = cv2.CALIB_FIX_INTRINSIC + cv2.CALIB_SAME_FOCAL_LENGTH
criteria_stereo= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv2.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria_stereo, flags)
########## RECTIFICATION STEREO #################################################
print("Stereo Rectification")
rectifyScale= 1
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R= cv2.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, rectifyScale,(0,0))
stereoMapL = cv2.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv2.CV_16SC2)
stereoMapR = cv2.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv2.CV_16SC2)
print("Saving parameters!")
cv2_file = cv2.FileStorage('stereoMap.xml', cv2.FILE_STORAGE_WRITE)
cv2_file.write('stereoMapL_x',stereoMapL[0])
cv2_file.write('stereoMapL_y',stereoMapL[1])
cv2_file.write('stereoMapR_x',stereoMapR[0])
cv2_file.write('stereoMapR_y',stereoMapR[1])
cv2_file.write('q',Q)
cv2_file.release()
| 0 | 0 | 0 |
af5d2d20165054a32572a1b993db106fb4ae9b6e | 2,992 | py | Python | experiment-Gaussians.py | rougier/VSOM | 78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa | [
"BSD-3-Clause"
] | 17 | 2020-11-20T06:27:15.000Z | 2022-01-11T22:20:28.000Z | experiment-Gaussians.py | rougier/VSOM | 78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa | [
"BSD-3-Clause"
] | null | null | null | experiment-Gaussians.py | rougier/VSOM | 78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa | [
"BSD-3-Clause"
] | 1 | 2022-01-03T04:41:57.000Z | 2022-01-03T04:41:57.000Z | # -----------------------------------------------------------------------------
# Copyright (c) 2019 Nicolas P. Rougier
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import sys
import som, mnist, plot
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
seed = 1
topology = "random"
n_unit = 512
n_neighbor = 3
n_samples = 25000
n_epochs = 25000
sigma = 0.25, 0.01
lrate = 0.50, 0.01
if seed is None:
seed = np.random.randint(0,1000)
np.random.seed(seed)
print("Building network (might take some time)... ", end="")
sys.stdout.flush()
som = som.SOM(n_unit, topology, n_neighbor)
print("done!")
print("Random seed: {0}".format(seed))
print("Number of units: {0}".format(som.size))
if type == "random":
print("Number of neighbors: {0}".format(n_neighbor))
rows, cols = 16,16
xshape = rows, cols
X = np.zeros((n_samples,rows*cols))
Y = None
T = np.random.uniform(low=-np.pi/2, high=np.pi/2, size=n_samples)
S = np.random.uniform(low=0.5, high=2.0, size=n_samples)
for i in range(n_samples):
X[i] = gaussian(shape=(rows,cols),
sigma=(S[i],2), theta=T[i]).ravel()
som.fit(X, Y, n_epochs, sigma=sigma, lrate=lrate)
figsize = 2.5*np.array([6,7])
fig = plt.figure(figsize=figsize, dpi=50)
ax = plt.subplot2grid((7, 6), (0, 0), colspan=3, rowspan=3, aspect=1)
plot.network(ax, som)
plot.letter(ax, "A")
ax = plt.subplot2grid((7, 6), (0, 3), colspan=3, rowspan=3, aspect=1)
plot.weights_img(ax, som, xshape, zoom=1.0)
plot.letter(ax, "B")
# Collect minimal/maximal response from the map across all stimuli
# vmin, vmax = None, None
# for x in X:
# D = -np.sqrt(((som.codebook["X"] - x.ravel())**2).sum(axis=-1))
# vmin = D.min() if vmin is None else min(D.min(), vmin)
# vmax = D.max() if vmax is None else max(D.max(), vmax)
X = X[np.random.randint(0,len(X),6)]
for i,x in enumerate(X):
ax = plt.subplot2grid((7, 6), (3+2*(i//3), 2*(i%3)),
colspan=2, rowspan=2, aspect=1)
plot.activation(ax, som, np.array(x).reshape(xshape), zoom=2)
plot.letter(ax, chr(ord("C")+i))
plt.tight_layout()
plt.savefig("experiment-Gaussians.pdf", dpi=300)
plt.show()
| 34.790698 | 82 | 0.541778 | # -----------------------------------------------------------------------------
# Copyright (c) 2019 Nicolas P. Rougier
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import sys
import som, mnist, plot
import numpy as np
import matplotlib.pyplot as plt
def gaussian(shape=(16,16), center=(0,0), sigma=(1,1), theta=0):
    """Sample an anisotropic 2D Gaussian rotated by *theta* (radians)
    on a regular grid spanning [-5, 5) in both axes.

    *shape* sets the grid resolution, *center* the peak location and
    *sigma* the (x, y) standard deviations before rotation.
    """
    x0, y0 = center
    sx, sy = sigma
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Coefficients of the rotated quadratic form.
    a = cos_t**2/2/sx**2 + sin_t**2/2/sy**2
    b = -np.sin(2*theta)/4/sx**2 + np.sin(2*theta)/4/sy**2
    c = sin_t**2/2/sx**2 + cos_t**2/2/sy**2
    X, Y = np.meshgrid(np.arange(-5, +5, 10./shape[0]),
                       np.arange(-5, +5, 10./shape[1]))
    return np.exp(-(a*(X-x0)**2 + 2*b*(X-x0)*(Y-y0) + c*(Y-y0)**2))
if __name__ == '__main__':
    # Experiment parameters.
    seed = 1
    topology = "random"
    n_unit = 512
    n_neighbor = 3
    n_samples = 25000
    n_epochs = 25000
    sigma = 0.25, 0.01
    lrate = 0.50, 0.01
    # A fixed seed makes the run reproducible; set seed = None above for a
    # randomly drawn one.
    if seed is None:
        seed = np.random.randint(0, 1000)
    np.random.seed(seed)
    print("Building network (might take some time)... ", end="")
    sys.stdout.flush()
    som = som.SOM(n_unit, topology, n_neighbor)
    print("done!")
    print("Random seed: {0}".format(seed))
    print("Number of units: {0}".format(som.size))
    # BUG FIX: the original tested `type == "random"`, comparing the builtin
    # `type` against a string -- always False, so the neighbor count was
    # never printed. The intended variable is `topology`.
    if topology == "random":
        print("Number of neighbors: {0}".format(n_neighbor))
    # Training set: randomly oriented and stretched Gaussian blobs sampled
    # on a 16x16 grid, flattened to vectors.
    rows, cols = 16, 16
    xshape = rows, cols
    X = np.zeros((n_samples, rows*cols))
    Y = None
    T = np.random.uniform(low=-np.pi/2, high=np.pi/2, size=n_samples)
    S = np.random.uniform(low=0.5, high=2.0, size=n_samples)
    for i in range(n_samples):
        X[i] = gaussian(shape=(rows, cols),
                        sigma=(S[i], 2), theta=T[i]).ravel()
    som.fit(X, Y, n_epochs, sigma=sigma, lrate=lrate)
    # Figure layout: network topology (A), learned prototypes (B) and six
    # sample activations (C-H).
    figsize = 2.5*np.array([6, 7])
    fig = plt.figure(figsize=figsize, dpi=50)
    ax = plt.subplot2grid((7, 6), (0, 0), colspan=3, rowspan=3, aspect=1)
    plot.network(ax, som)
    plot.letter(ax, "A")
    ax = plt.subplot2grid((7, 6), (0, 3), colspan=3, rowspan=3, aspect=1)
    plot.weights_img(ax, som, xshape, zoom=1.0)
    plot.letter(ax, "B")
    # Six random training samples shown with their map activation.
    X = X[np.random.randint(0, len(X), 6)]
    for i, x in enumerate(X):
        ax = plt.subplot2grid((7, 6), (3+2*(i//3), 2*(i % 3)),
                              colspan=2, rowspan=2, aspect=1)
        plot.activation(ax, som, np.array(x).reshape(xshape), zoom=2)
        plot.letter(ax, chr(ord("C")+i))
    plt.tight_layout()
    plt.savefig("experiment-Gaussians.pdf", dpi=300)
    plt.show()
| 466 | 0 | 23 |
23434da633307b6722c28b1fdadb1f0d38de4516 | 1,243 | py | Python | tests/test_nfa.py | peppermintpatty5/regex-eq | e1665c0e3292547d0ce5727756bf9646f53d1247 | [
"MIT"
] | null | null | null | tests/test_nfa.py | peppermintpatty5/regex-eq | e1665c0e3292547d0ce5727756bf9646f53d1247 | [
"MIT"
] | null | null | null | tests/test_nfa.py | peppermintpatty5/regex-eq | e1665c0e3292547d0ce5727756bf9646f53d1247 | [
"MIT"
] | null | null | null | """
Unit tests for module `regular.nfa`
"""
import unittest
from regular.nfa import NFA
class TestNFA(unittest.TestCase):
"""
Test cases for class `NFA`
"""
def test_overlapping_states(self):
"""
The operations for union and concatenation cannot be performed if the NFAs have
any states in common.
"""
q1, q2 = object(), object()
a = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q2}}, q1, {q2}))
b = NFA(({q1, q2}, {"b"}, {(q1, "b"): {q2}}, q1, {q2}))
self.assertRaises(ValueError, a.update_concat, b)
self.assertRaises(ValueError, a.update_union, b)
def test_emptiness(self):
"""
The language of an NFA which has no reachable accepting states is the empty
language.
"""
q1, q2 = object(), object()
n1 = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q2}}, q1, set()))
n2 = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q1}}, q1, {q2}))
n3 = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q2}}, q1, {q2}))
self.assertTrue(n1.is_empty())
self.assertTrue(n2.is_empty())
self.assertFalse(n3.is_empty())
self.assertTrue(NFA.empty().is_empty())
if __name__ == "__main__":
unittest.main()
| 25.895833 | 87 | 0.529364 | """
Unit tests for module `regular.nfa`
"""
import unittest
from regular.nfa import NFA
class TestNFA(unittest.TestCase):
    """
    Test cases for class `NFA`
    """
    def test_overlapping_states(self):
        """
        The operations for union and concatenation cannot be performed if the NFAs have
        any states in common.
        """
        # Both NFAs are deliberately built over the SAME state objects q1/q2.
        q1, q2 = object(), object()
        a = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q2}}, q1, {q2}))
        b = NFA(({q1, q2}, {"b"}, {(q1, "b"): {q2}}, q1, {q2}))
        self.assertRaises(ValueError, a.update_concat, b)
        self.assertRaises(ValueError, a.update_union, b)
    def test_emptiness(self):
        """
        The language of an NFA which has no reachable accepting states is the empty
        language.
        """
        q1, q2 = object(), object()
        # n1: no accepting states at all.
        n1 = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q2}}, q1, set()))
        # n2: accepting state q2 exists but is unreachable (self-loop on q1).
        n2 = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q1}}, q1, {q2}))
        # n3: q2 is accepting and reachable via "a", so language is non-empty.
        n3 = NFA(({q1, q2}, {"a"}, {(q1, "a"): {q2}}, q1, {q2}))
        self.assertTrue(n1.is_empty())
        self.assertTrue(n2.is_empty())
        self.assertFalse(n3.is_empty())
        self.assertTrue(NFA.empty().is_empty())
if __name__ == "__main__":
unittest.main()
| 0 | 0 | 0 |
7305eb789285f29b3032a57825aed59ae37428c1 | 6,407 | py | Python | loldib/getratings/models/NA/na_xayah/na_xayah_jng.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_xayah/na_xayah_jng.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_xayah/na_xayah_jng.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
| 15.364508 | 46 | 0.761667 | from getratings.models.ratings import Ratings
class NA_Xayah_Jng_Aatrox(Ratings):
pass
class NA_Xayah_Jng_Ahri(Ratings):
pass
class NA_Xayah_Jng_Akali(Ratings):
pass
class NA_Xayah_Jng_Alistar(Ratings):
pass
class NA_Xayah_Jng_Amumu(Ratings):
pass
class NA_Xayah_Jng_Anivia(Ratings):
pass
class NA_Xayah_Jng_Annie(Ratings):
pass
class NA_Xayah_Jng_Ashe(Ratings):
pass
class NA_Xayah_Jng_AurelionSol(Ratings):
pass
class NA_Xayah_Jng_Azir(Ratings):
pass
class NA_Xayah_Jng_Bard(Ratings):
pass
class NA_Xayah_Jng_Blitzcrank(Ratings):
pass
class NA_Xayah_Jng_Brand(Ratings):
pass
class NA_Xayah_Jng_Braum(Ratings):
pass
class NA_Xayah_Jng_Caitlyn(Ratings):
pass
class NA_Xayah_Jng_Camille(Ratings):
pass
class NA_Xayah_Jng_Cassiopeia(Ratings):
pass
class NA_Xayah_Jng_Chogath(Ratings):
pass
class NA_Xayah_Jng_Corki(Ratings):
pass
class NA_Xayah_Jng_Darius(Ratings):
pass
class NA_Xayah_Jng_Diana(Ratings):
pass
class NA_Xayah_Jng_Draven(Ratings):
pass
class NA_Xayah_Jng_DrMundo(Ratings):
pass
class NA_Xayah_Jng_Ekko(Ratings):
pass
class NA_Xayah_Jng_Elise(Ratings):
pass
class NA_Xayah_Jng_Evelynn(Ratings):
pass
class NA_Xayah_Jng_Ezreal(Ratings):
pass
class NA_Xayah_Jng_Fiddlesticks(Ratings):
pass
class NA_Xayah_Jng_Fiora(Ratings):
pass
class NA_Xayah_Jng_Fizz(Ratings):
pass
class NA_Xayah_Jng_Galio(Ratings):
pass
class NA_Xayah_Jng_Gangplank(Ratings):
pass
class NA_Xayah_Jng_Garen(Ratings):
pass
class NA_Xayah_Jng_Gnar(Ratings):
pass
class NA_Xayah_Jng_Gragas(Ratings):
pass
class NA_Xayah_Jng_Graves(Ratings):
pass
class NA_Xayah_Jng_Hecarim(Ratings):
pass
class NA_Xayah_Jng_Heimerdinger(Ratings):
pass
class NA_Xayah_Jng_Illaoi(Ratings):
pass
class NA_Xayah_Jng_Irelia(Ratings):
pass
class NA_Xayah_Jng_Ivern(Ratings):
pass
class NA_Xayah_Jng_Janna(Ratings):
pass
class NA_Xayah_Jng_JarvanIV(Ratings):
pass
class NA_Xayah_Jng_Jax(Ratings):
pass
class NA_Xayah_Jng_Jayce(Ratings):
pass
class NA_Xayah_Jng_Jhin(Ratings):
pass
class NA_Xayah_Jng_Jinx(Ratings):
pass
class NA_Xayah_Jng_Kalista(Ratings):
pass
class NA_Xayah_Jng_Karma(Ratings):
pass
class NA_Xayah_Jng_Karthus(Ratings):
pass
class NA_Xayah_Jng_Kassadin(Ratings):
pass
class NA_Xayah_Jng_Katarina(Ratings):
pass
class NA_Xayah_Jng_Kayle(Ratings):
pass
class NA_Xayah_Jng_Kayn(Ratings):
pass
class NA_Xayah_Jng_Kennen(Ratings):
pass
class NA_Xayah_Jng_Khazix(Ratings):
pass
class NA_Xayah_Jng_Kindred(Ratings):
pass
class NA_Xayah_Jng_Kled(Ratings):
pass
class NA_Xayah_Jng_KogMaw(Ratings):
pass
class NA_Xayah_Jng_Leblanc(Ratings):
pass
class NA_Xayah_Jng_LeeSin(Ratings):
pass
class NA_Xayah_Jng_Leona(Ratings):
pass
class NA_Xayah_Jng_Lissandra(Ratings):
pass
class NA_Xayah_Jng_Lucian(Ratings):
pass
class NA_Xayah_Jng_Lulu(Ratings):
pass
class NA_Xayah_Jng_Lux(Ratings):
pass
class NA_Xayah_Jng_Malphite(Ratings):
pass
class NA_Xayah_Jng_Malzahar(Ratings):
pass
class NA_Xayah_Jng_Maokai(Ratings):
pass
class NA_Xayah_Jng_MasterYi(Ratings):
pass
class NA_Xayah_Jng_MissFortune(Ratings):
pass
class NA_Xayah_Jng_MonkeyKing(Ratings):
pass
class NA_Xayah_Jng_Mordekaiser(Ratings):
pass
class NA_Xayah_Jng_Morgana(Ratings):
pass
class NA_Xayah_Jng_Nami(Ratings):
pass
class NA_Xayah_Jng_Nasus(Ratings):
pass
class NA_Xayah_Jng_Nautilus(Ratings):
pass
class NA_Xayah_Jng_Nidalee(Ratings):
pass
class NA_Xayah_Jng_Nocturne(Ratings):
pass
class NA_Xayah_Jng_Nunu(Ratings):
pass
class NA_Xayah_Jng_Olaf(Ratings):
pass
class NA_Xayah_Jng_Orianna(Ratings):
pass
class NA_Xayah_Jng_Ornn(Ratings):
pass
class NA_Xayah_Jng_Pantheon(Ratings):
pass
class NA_Xayah_Jng_Poppy(Ratings):
pass
class NA_Xayah_Jng_Quinn(Ratings):
pass
class NA_Xayah_Jng_Rakan(Ratings):
pass
class NA_Xayah_Jng_Rammus(Ratings):
pass
# Auto-generated matchup placeholder classes: NA region, Xayah paired with
# each jungle champion. Each subclass only tags one (region, champion, role,
# jungler) combination onto the shared `Ratings` base class, which is defined
# earlier in this file (outside this chunk) — presumably a Django model or
# similar; confirm against the full file.
class NA_Xayah_Jng_RekSai(Ratings):
    pass
class NA_Xayah_Jng_Renekton(Ratings):
    pass
class NA_Xayah_Jng_Rengar(Ratings):
    pass
class NA_Xayah_Jng_Riven(Ratings):
    pass
class NA_Xayah_Jng_Rumble(Ratings):
    pass
class NA_Xayah_Jng_Ryze(Ratings):
    pass
class NA_Xayah_Jng_Sejuani(Ratings):
    pass
class NA_Xayah_Jng_Shaco(Ratings):
    pass
class NA_Xayah_Jng_Shen(Ratings):
    pass
class NA_Xayah_Jng_Shyvana(Ratings):
    pass
class NA_Xayah_Jng_Singed(Ratings):
    pass
class NA_Xayah_Jng_Sion(Ratings):
    pass
class NA_Xayah_Jng_Sivir(Ratings):
    pass
class NA_Xayah_Jng_Skarner(Ratings):
    pass
class NA_Xayah_Jng_Sona(Ratings):
    pass
class NA_Xayah_Jng_Soraka(Ratings):
    pass
class NA_Xayah_Jng_Swain(Ratings):
    pass
class NA_Xayah_Jng_Syndra(Ratings):
    pass
class NA_Xayah_Jng_TahmKench(Ratings):
    pass
class NA_Xayah_Jng_Taliyah(Ratings):
    pass
class NA_Xayah_Jng_Talon(Ratings):
    pass
class NA_Xayah_Jng_Taric(Ratings):
    pass
class NA_Xayah_Jng_Teemo(Ratings):
    pass
class NA_Xayah_Jng_Thresh(Ratings):
    pass
class NA_Xayah_Jng_Tristana(Ratings):
    pass
class NA_Xayah_Jng_Trundle(Ratings):
    pass
class NA_Xayah_Jng_Tryndamere(Ratings):
    pass
class NA_Xayah_Jng_TwistedFate(Ratings):
    pass
class NA_Xayah_Jng_Twitch(Ratings):
    pass
class NA_Xayah_Jng_Udyr(Ratings):
    pass
class NA_Xayah_Jng_Urgot(Ratings):
    pass
class NA_Xayah_Jng_Varus(Ratings):
    pass
class NA_Xayah_Jng_Vayne(Ratings):
    pass
class NA_Xayah_Jng_Veigar(Ratings):
    pass
class NA_Xayah_Jng_Velkoz(Ratings):
    pass
class NA_Xayah_Jng_Vi(Ratings):
    pass
class NA_Xayah_Jng_Viktor(Ratings):
    pass
class NA_Xayah_Jng_Vladimir(Ratings):
    pass
class NA_Xayah_Jng_Volibear(Ratings):
    pass
class NA_Xayah_Jng_Warwick(Ratings):
    pass
class NA_Xayah_Jng_Xayah(Ratings):
    pass
class NA_Xayah_Jng_Xerath(Ratings):
    pass
class NA_Xayah_Jng_XinZhao(Ratings):
    pass
class NA_Xayah_Jng_Yasuo(Ratings):
    pass
class NA_Xayah_Jng_Yorick(Ratings):
    pass
class NA_Xayah_Jng_Zac(Ratings):
    pass
class NA_Xayah_Jng_Zed(Ratings):
    pass
class NA_Xayah_Jng_Ziggs(Ratings):
    pass
class NA_Xayah_Jng_Zilean(Ratings):
    pass
class NA_Xayah_Jng_Zyra(Ratings):
    pass
| 0 | 2,908 | 3,450 |
be5b801ffebc8f1fe9e4eeadfe3427720a90651a | 8,234 | py | Python | tarterus/door.py | redcartel/tarterus-webclient | 79939a365a29ba85728066265dc665a9af04120a | [
"MIT"
] | 1 | 2019-01-14T18:55:14.000Z | 2019-01-14T18:55:14.000Z | tarterus/door.py | redcartel/tarterus-webclient | 79939a365a29ba85728066265dc665a9af04120a | [
"MIT"
] | null | null | null | tarterus/door.py | redcartel/tarterus-webclient | 79939a365a29ba85728066265dc665a9af04120a | [
"MIT"
] | 1 | 2019-06-18T20:42:35.000Z | 2019-06-18T20:42:35.000Z | from tarterus.graphpaper import right, left, back #turn_positive
# from tarterus.graphpaper import is_positive, turn_across
from tarterus.graphpaper import advance # , empty, middle_value
from tarterus.passage import passage_width_table
from tarterus.room import find_loc
# TODO: stairs & trapped doors
# TODO: "blocking void"
DICE_ARRAY = [18, 20, 12]
# TODO: passages from actual door table
# def table_passage(engine, origin, x, y, direction, width, dsquare, dice):
# engine.log(":: door: table_passage")
# simp = is_simple(engine.maparray, origin, x, y, direction)
# if simp == "forbidden":
# return (False,)
# # if the next tile is a wall, make two doors back to back
# elif simp == "wall":
# engine.maparray[x, y] = dsquare
# x0, y0 = advance(x, y, direction, 1)
# engine.immediate_add(['door', 'door', x0, y0,
# direction, 1, (dsquare[0], -1)])
# engine.dispatch_immediate()
# return (True,)
# # if the next tile is a room or hall floor, just connect the door
# elif simp == "simple":
# engine.maparray[x, y] = dsquare
# return (True,)
# # reach this point, other side is void
# # TODO: table_width_passage, not so many 5' halls
# width0 = passage_width_table(dice[1])['width']
# engine.immediate_add(['hall', 'door', x, y,
# direction, width0, ('hall', -1)])
# engine.log(":: immediate add hall from door")
# if engine.dispatch_immediate()[0] is True:
# engine.log("\tsuccess in table_passage")
# engine.maparray[x, y] = dsquare
# return (True,)
# else:
# engine.log("\tfail")
# return (False,)
# test if a minimal room (20' x 20') will fit originating from the door
# TODO: add priority elements to the engine queue, draw room immediately after
# the door
# passage extends 10 feet, then T intersection 10 ft to left and to right
# draws a door if there is an immediate exit on the other side
# returns (a, b) a True if the door is drawn, if not, b is True if a further
# passage can be drawn
| 34.165975 | 79 | 0.568496 | from tarterus.graphpaper import right, left, back #turn_positive
# from tarterus.graphpaper import is_positive, turn_across
from tarterus.graphpaper import advance # , empty, middle_value
from tarterus.passage import passage_width_table
from tarterus.room import find_loc
# TODO: stairs & trapped doors
# TODO: "blocking void"
DICE_ARRAY = [18, 20, 12]
def is_simple(maparray, origin, x, y, direction):
    """Classify the tile one step ahead of (x, y) in `direction`.

    Returns one of:
      "forbidden" - ahead is off the usable map area or a corner tile
      "void"      - ahead is unexplored space
      "wall"      - ahead is a horizontal/vertical wall tile
      "simple"    - ahead is any other (already-drawn) tile
    `origin` is accepted for signature compatibility but not used here.
    """
    x0, y0 = advance(x, y, direction, 1)
    # Keep a one-tile margin inside the map border.
    if not (1 < x0 < maparray.w - 1):
        return "forbidden"
    if not (1 < y0 < maparray.h - 1):
        return "forbidden"
    tile = maparray[x0, y0][0]
    if tile in ('bcor', 'tcor'):
        return "forbidden"
    if tile == "void":
        return "void"
    if tile in ('hwal', 'vwal'):
        return "wall"
    return "simple"
# TODO: passages from actual door table
# def table_passage(engine, origin, x, y, direction, width, dsquare, dice):
# engine.log(":: door: table_passage")
# simp = is_simple(engine.maparray, origin, x, y, direction)
# if simp == "forbidden":
# return (False,)
# # if the next tile is a wall, make two doors back to back
# elif simp == "wall":
# engine.maparray[x, y] = dsquare
# x0, y0 = advance(x, y, direction, 1)
# engine.immediate_add(['door', 'door', x0, y0,
# direction, 1, (dsquare[0], -1)])
# engine.dispatch_immediate()
# return (True,)
# # if the next tile is a room or hall floor, just connect the door
# elif simp == "simple":
# engine.maparray[x, y] = dsquare
# return (True,)
# # reach this point, other side is void
# # TODO: table_width_passage, not so many 5' halls
# width0 = passage_width_table(dice[1])['width']
# engine.immediate_add(['hall', 'door', x, y,
# direction, width0, ('hall', -1)])
# engine.log(":: immediate add hall from door")
# if engine.dispatch_immediate()[0] is True:
# engine.log("\tsuccess in table_passage")
# engine.maparray[x, y] = dsquare
# return (True,)
# else:
# engine.log("\tfail")
# return (False,)
# test if a minimal room (20' x 20') will fit originating from the door
def room_will_fit(engine, x, y, direction):
    """Return True when a minimal (20' x 20', i.e. 6x6-tile) room can be
    placed off the door at (x, y) facing `direction`.

    Delegates to find_loc; a False x-coordinate from find_loc means no
    placement exists.
    """
    loc_x, _ = find_loc(engine.maparray, x, y, 6, 6, direction, 1, [5, 6])
    engine.log(":: room_will_fit : {}, {}, {}".format(loc_x, y, direction))
    if loc_x is False:
        engine.log("\tfailed")
        return False
    engine.log("\tpassed")
    return True
# TODO: add priority elements to the engine queue, draw room immediately after
# the door
def table_chamber_9_18(engine, origin, x, y, direction, width, dsquare, dice):
    """Door-table outcome for rolls 9-18: place a chamber behind the door.

    Returns a one-element tuple: (True,) when the room was drawn and the
    door tile written, (False,) otherwise — matching the other table_*
    handlers whose results dispatch_door returns directly.
    """
    engine.log(":: door: table_room")
    if not room_will_fit(engine, x, y, direction):
        return (False,)
    engine.immediate_add(['room', 'door', x, y,
                          direction, 1, ('room', -1)])
    if engine.dispatch_immediate()[0] is True:
        engine.maparray[x, y] = dsquare
        # Bug fix: the success path previously fell through and returned
        # None; callers expect a (bool,) tuple like the other handlers.
        return (True,)
    return (False,)
# passage extends 10 feet, then T intersection 10 ft to left and to right
def table_passage_1_2(engine, origin, x, y, direction, width, dsquare, dice):
    """Door-table outcome for rolls 1-2: extend a passage ('do12') behind
    the door. Returns (True,) on success, (False,) on failure."""
    engine.log(":: door: table_passage_1_2")
    # An 'exit' origin consumes the first die for the hall width, anything
    # else consumes the second.
    width_die = dice[0] if origin == "exit" else dice[1]
    hall_width = passage_width_table(width_die)['width']
    engine.immediate_add(['hall', 'do12', x, y,
                          direction, hall_width, ('hall', -1)])
    if engine.dispatch_immediate()[0] is True:
        engine.maparray[x, y] = dsquare
        return (True,)
    engine.log("\tfail")
    return (False,)
def table_passage_3_8(engine, origin, x, y, direction, width, dsquare, dice):
    """Door-table outcome for rolls 3-8: extend a passage ('do38') behind
    the door. Returns (True,) on success, (False,) on failure."""
    # Bug fix: the log line was copy-pasted from table_passage_1_2 and
    # reported the wrong handler name.
    engine.log(":: door: table_passage_3_8")
    if origin == "exit":
        width0 = passage_width_table(dice[0])['width']
    else:
        width0 = passage_width_table(dice[1])['width']
    engine.immediate_add(['hall', 'do38', x, y,
                          direction, width0, ('hall', -1)])
    ret = engine.dispatch_immediate()
    if ret[0] is True:
        engine.maparray[x, y] = dsquare
        return (True,)
    else:
        engine.log("\tfail")
        return (False,)
# draws a door if there is an immediate exit on the other side
# returns (a, b) a True if the door is drawn, if not, b is True if a further
# passage can be drawn
def simple_door(engine, origin, x, y, direction, width, dsquare, dice):
    """Try to draw a door at (x, y) using only local information.

    Returns a (drawn, can_continue) pair:
      (True, True)   - door drawn, nothing more to do
      (False, False) - placement impossible, caller should give up
      (False, True)  - other side is void; caller should roll on the
                       normal door tables to generate what lies beyond
    NOTE(review): if is_simple ever returned an unexpected value this
    would implicitly return None — currently unreachable given is_simple's
    four outcomes.
    """
    simp = is_simple(engine.maparray, origin, x, y, direction)
    if simp == "forbidden":
        return (False, False)
    elif simp == "wall":
        # A wall one tile ahead: draw a matching second door back-to-back
        # so the doorway pierces the full wall thickness.
        x0, y0 = advance(x, y, direction, 1)
        engine.immediate_add(['door', 'door', x0, y0,
                              direction, 1, (dsquare[0], -1)])
        if engine.dispatch_immediate()[0] is True:
            engine.maparray[x, y] = dsquare
            return (True, True)
        else:
            return (False, False)
    elif simp == "simple":
        # Room/hall floor directly behind: just connect the door.
        engine.maparray[x, y] = dsquare
        return (True, True)
    elif simp == "void":
        return (False, True)
def dispatch_door(engine, element, dice):
    """Engine entry point for a 'door' queue element.

    `element` is [tag, origin, x, y, direction, width, dsquare]; `dice`
    supplies pre-rolled values (dice[0] selects the door-table outcome,
    the remainder is passed on to the handlers). Returns a one-element
    (bool,) tuple.
    NOTE(review): a die_roll of 19-20 falls through every branch and
    returns None — confirm whether those rolls are meant to be handled.
    """
    origin = element[1]
    x = element[2]
    y = element[3]
    direction = element[4]
    # Reject positions on or outside the map border.
    if x <= 1 or x >= engine.maparray.w:
        return (False,)
    elif y <= 1 or y >= engine.maparray.h:
        return (False,)
    # Doors may only replace void or wall tiles.
    if engine.maparray[x, y][0] not in ["void", "vwal", "hwal"]:
        return (False,)
    # don't build doors next to doors or open spaces
    x0, y0 = advance(x, y, left(direction), 1)
    x1, y1 = advance(x, y, right(direction), 1)
    if engine.maparray[x0, y0][0] in ['room', 'door', 'hall', 'open']:
        return (False,)
    if engine.maparray[x1, y1][0] in ['room', 'door', 'hall', 'open']:
        return (False,)
    width = element[5]
    dsquare = element[6]
    # First die picks the table outcome; the rest go to the handler.
    die_roll = dice[0]
    dice = dice[1:]
    # A -1 description id means "not yet described": generate one now.
    if dsquare[1] == -1:
        dsquare = engine.generate_description(dsquare)
        engine.describe(dsquare[1], {"type": "door",
                                     "d": direction})
    r1, r2 = simple_door(engine, origin, x, y, direction, width, dsquare, dice)
    if r1 is True:
        return (True,)
    elif r1 is False and r2 is False:
        return (False,)
    # (False, True) use normal tables
    if die_roll <= 2:
        return table_passage_1_2(engine, origin, x, y,
                                 direction, width, dsquare, dice)
    elif die_roll <= 8:
        return table_passage_3_8(engine, origin, x, y,
                                 direction, width, dsquare, dice)
    elif die_roll <= 18:
        return table_chamber_9_18(engine, origin, x, y,
                                  direction, width, dsquare, dice)
def describe_door(engine, d):
    """Roll 2d20 and return an HTML snippet describing the door.

    `d` is a dict with at least a 'd' key holding the door's facing
    direction ("n"/"e"/"s"/"w").
    """
    dice = engine.roll([20, 20])
    door_description = door_type(dice, d['d'])
    return door_description
def door_type(dice, direction):
    """Build an HTML description for a door from two d20 rolls.

    dice[0] selects the material/kind; dice[1] selects which side a
    locked/barred door is secured from and whether it is locked or barred.
    Returns None for dice[0] > 20 (out of table range).
    """
    dword = {"n": "north", "e": "east", "s": "south", "w": "west"}
    # Even second die: secured from the facing side; odd: from the far side.
    side = dword[direction] if dice[1] % 2 == 0 else dword[back(direction)]
    lockword = "locked from the " if dice[1] // 2 >= 10 else "barred from the "
    d_pass = dword[back(direction)] + " / " + dword[direction]
    die = dice[0]
    # (upper-bound, description) pairs, checked in order — first match wins.
    outcomes = (
        (10, "<p> A wooden door going " + d_pass + "</p>"),
        (12, "<p> A wooden door " + lockword + side + "</p>"),
        (13, "<p> A stone door going " + d_pass + "</p>"),
        (14, "<p> A stone door " + lockword + side + "</p>"),
        (15, "<p> An iron door going " + d_pass + "</p>"),
        (16, "<p> An iron door " + lockword + side + "</p>"),
        (17, "<p> A portcullis going " + d_pass + "</p>"),
        (18, "<p> A portcullis locked from the " + side + "</p>"),
        (19, "<p> A secret door going " + d_pass + "</p>"),
        (20, "<p> A secret door " + lockword + side + "</p>"),
    )
    for limit, description in outcomes:
        if die <= limit:
            return description
| 5,897 | 0 | 203 |
7f340048ad5a0970872ba10e09820f668d89059f | 750 | py | Python | test.py | rilango/anndata | 0621bb947ea2dde45827ad493e7947ad5f97eeba | [
"BSD-3-Clause"
] | null | null | null | test.py | rilango/anndata | 0621bb947ea2dde45827ad493e7947ad5f97eeba | [
"BSD-3-Clause"
] | null | null | null | test.py | rilango/anndata | 0621bb947ea2dde45827ad493e7947ad5f97eeba | [
"BSD-3-Clause"
] | null | null | null | import rapids_scanpy_funcs
import anndata
import cupy
import scanpy as sc
min_genes_per_cell = 200
max_genes_per_cell = 6000
adata = sc.read('/data/anndata/krasnow_hlca_10x_UMIs.sparse.h5ad')
a = adata.T
genes = a.var_names
# print(type(genes))
# print(genes)
#a = anndata.read_h5ad('/data/anndata/krasnow_hlca_10x_UMIs.sparse.h5ad', as_sparse_fmt=cupy.sparse.csr_matrix)
#print(type(a.X))
#print(dir(a.X))
#print(a.X.shape)
#print(a.X.nnz)
#print(a.X)
# print(type(a.X))
sparse_gpu_array = rapids_scanpy_funcs.filter_cells(\
a.X, \
min_genes=min_genes_per_cell, \
max_genes=max_genes_per_cell)
#print(sparse_gpu_array)
#print('-----')
#print(type(a.var))
#print(a.var)
#print(type(a.var_names))
#print(a.var_names)
| 19.230769 | 111 | 0.725333 | import rapids_scanpy_funcs
import anndata
import cupy
import scanpy as sc
min_genes_per_cell = 200
max_genes_per_cell = 6000
adata = sc.read('/data/anndata/krasnow_hlca_10x_UMIs.sparse.h5ad')
a = adata.T
genes = a.var_names
# print(type(genes))
# print(genes)
#a = anndata.read_h5ad('/data/anndata/krasnow_hlca_10x_UMIs.sparse.h5ad', as_sparse_fmt=cupy.sparse.csr_matrix)
#print(type(a.X))
#print(dir(a.X))
#print(a.X.shape)
#print(a.X.nnz)
#print(a.X)
# print(type(a.X))
sparse_gpu_array = rapids_scanpy_funcs.filter_cells(\
a.X, \
min_genes=min_genes_per_cell, \
max_genes=max_genes_per_cell)
#print(sparse_gpu_array)
#print('-----')
#print(type(a.var))
#print(a.var)
#print(type(a.var_names))
#print(a.var_names)
| 0 | 0 | 0 |
a1b35406f7e59655f593a0a9f524cbff400ee90a | 850 | py | Python | directoalartista/apps/plancontrol/models.py | mpampols/directoalartista.com | 833eea7f4db5a2343dba4314793d593cd66cf1fb | [
"MIT"
] | null | null | null | directoalartista/apps/plancontrol/models.py | mpampols/directoalartista.com | 833eea7f4db5a2343dba4314793d593cd66cf1fb | [
"MIT"
] | null | null | null | directoalartista/apps/plancontrol/models.py | mpampols/directoalartista.com | 833eea7f4db5a2343dba4314793d593cd66cf1fb | [
"MIT"
] | 1 | 2018-03-29T02:16:18.000Z | 2018-03-29T02:16:18.000Z | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
User = settings.AUTH_USER_MODEL
from datetime import datetime, timedelta
from directoalartista.apps.genericuser.models import GenericUser
from django.contrib.auth import get_user_model
#User = get_user_model()
| 26.5625 | 80 | 0.722353 | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
User = settings.AUTH_USER_MODEL
from datetime import datetime, timedelta
from directoalartista.apps.genericuser.models import GenericUser
from django.contrib.auth import get_user_model
#User = get_user_model()
class ArtistPlan(models.Model):
    """Records an artist user's subscription plan change, its PayPal-style
    subscription id, and when it expires."""
    # Fields for Artist user
    # Plan codes are stored as strings; lower numbers are higher tiers.
    ARTIST_PLANS = (
        ("4", "Free"),
        ("3", "Starter"),
        ("2", "Unlimited")
    )
    # NOTE(review): Django >= 2.0 requires on_delete on ForeignKey —
    # confirm the Django version this project pins.
    user = models.ForeignKey(User)
    new_plan = models.CharField(max_length=15, blank=True, choices=ARTIST_PLANS)
    subscr_id = models.CharField(max_length=20, blank=True, null=True)
    change_date = models.DateField(auto_now_add=True)
    expiration_date = models.DateField(blank=True, null=True)
    def __unicode__(self):
        # Python-2-style string representation; shows the owning user's email.
        return self.user.email
8f22a8ccbf3f5deb0ac40abecf7cf72f45a67953 | 808 | py | Python | Python/CLRS/2-3_Horner.py | mixterjim/Learn | f1f2132f2d90d50d6963ba4563f037186c9dc955 | [
"MIT"
] | 3 | 2015-05-14T14:32:27.000Z | 2019-05-01T08:24:17.000Z | Python/CLRS/2-3_Horner.py | mixterjim/Learn | f1f2132f2d90d50d6963ba4563f037186c9dc955 | [
"MIT"
] | 1 | 2015-05-14T10:54:20.000Z | 2015-05-14T10:54:47.000Z | Python/CLRS/2-3_Horner.py | mixterjim/Learn | f1f2132f2d90d50d6963ba4563f037186c9dc955 | [
"MIT"
] | null | null | null | import random
import time
x = 2
A = random_int_list(1, 100000, 1000)
B = A
start = time.clock()
HORNER(B, x)
end = time.clock()
print("Horner: %f s" % (end - start))
B = A
start = time.clock()
NAIVE(B, x)
end = time.clock()
print("Navie: %f s" % (end - start))
| 19.238095 | 65 | 0.576733 | import random
import time
def random_int_list(start, stop, length):
    """Return `length` random integers drawn uniformly from [start, stop].

    start/stop may be given in either order and are truncated to ints;
    a falsy length yields an empty list.
    """
    lo, hi = sorted((int(start), int(stop)))
    count = int(abs(length)) if length else 0
    return [random.randint(lo, hi) for _ in range(count)]
def HORNER(list, x):
    """Evaluate the polynomial whose coefficients are `list` (constant
    term first) at `x` using Horner's rule — O(n) multiplications."""
    result = 0
    for coefficient in reversed(list):
        result = coefficient + x * result
    return result
def NAIVE(list, x):
    """Evaluate the polynomial with coefficients `list` (constant term
    first) at `x` by direct summation of list[i] * x**i — the slow
    baseline that HORNER is benchmarked against."""
    return sum(coefficient * (x ** power)
               for power, coefficient in enumerate(list))
# Benchmark HORNER vs NAIVE polynomial evaluation on 1000 random
# coefficients. B = A only aliases the same list; neither evaluator
# mutates it, so no copy is needed.
x = 2
A = random_int_list(1, 100000, 1000)

# Bug fix: time.clock() was removed in Python 3.8; time.perf_counter()
# is the portable high-resolution replacement.
B = A
start = time.perf_counter()
HORNER(B, x)
end = time.perf_counter()
print("Horner: %f s" % (end - start))

B = A
start = time.perf_counter()
NAIVE(B, x)
end = time.perf_counter()
# Bug fix: corrected the "Navie" typo in the printed label.
print("Naive: %f s" % (end - start))
| 472 | 0 | 69 |
d42adb0f53f3d2a64f24cc2b95a0007e8e05e75d | 1,426 | py | Python | app/weather/__init__.py | zhounanshu/semd | 42a893ddf441bc770126bef1d62a5840ef1f8dc7 | [
"MIT"
] | 1 | 2016-05-31T05:56:12.000Z | 2016-05-31T05:56:12.000Z | app/weather/__init__.py | zhounanshu/SEMD | 42a893ddf441bc770126bef1d62a5840ef1f8dc7 | [
"MIT"
] | null | null | null | app/weather/__init__.py | zhounanshu/SEMD | 42a893ddf441bc770126bef1d62a5840ef1f8dc7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask.ext.restful import Api
Weather = Blueprint('Weather', __name__)
weather_api = Api(Weather)
from .views import *
# weather_api.add_resource(viewRelti, '/v1/weather/realtime')
# weather_api.add_resource(viewForecast, '/v1/wather/forecast')
weather_api.add_resource(alarm, '/v1/weather/alarm')
# weather_api.add_resource(rain, '/v1/weather/rain')
weather_api.add_resource(get_realtime, '/v1/weather/realtime')
weather_api.add_resource(get_forecast, '/v1/weather/forecast')
# weather_api.add_resource(get_alarm, '/v1/weather/alarm')
weather_api.add_resource(get_rain, '/v1/weather/rain')
# weather_api.add_resource(autoStation, '/v1/map/view/autostation')
weather_api.add_resource(get_qpf, '/v1/weather/qpf')
weather_api.add_resource(alarm_img, '/v1/alarm/img')
# upload weather station
weather_api.add_resource(realWether, '/v1/weather/realtime/upload')
weather_api.add_resource(realAqi, '/v1/aqi/realtime/upload')
weather_api.add_resource(foreWeat, '/v1/weather/forecast/upload')
weather_api.add_resource(wea_Station, '/v1/weather/station/upload')
# 根据经纬度自动定位
weather_api.add_resource(weatherLocation, '/v1/weather/location/realtime')
# 获取区县天气信息
weather_api.add_resource(get_disAla, '/v1/weather/area/alarm')
# 扫描区县天气情况
weather_api.add_resource(hasAlarm, '/v1/weather/has/alarm')
weather_api.add_resource(threeHour, '/v1/threehour/weather')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Weather blueprint: registers every weather-related REST resource
# (defined in .views) onto a Flask-RESTful Api under /v1/... routes.
from flask import Blueprint
from flask.ext.restful import Api

Weather = Blueprint('Weather', __name__)
weather_api = Api(Weather)

from .views import *

# weather_api.add_resource(viewRelti, '/v1/weather/realtime')
# weather_api.add_resource(viewForecast, '/v1/wather/forecast')
weather_api.add_resource(alarm, '/v1/weather/alarm')
# weather_api.add_resource(rain, '/v1/weather/rain')
weather_api.add_resource(get_realtime, '/v1/weather/realtime')
weather_api.add_resource(get_forecast, '/v1/weather/forecast')
# weather_api.add_resource(get_alarm, '/v1/weather/alarm')
weather_api.add_resource(get_rain, '/v1/weather/rain')
# weather_api.add_resource(autoStation, '/v1/map/view/autostation')
weather_api.add_resource(get_qpf, '/v1/weather/qpf')
weather_api.add_resource(alarm_img, '/v1/alarm/img')
# upload weather station
weather_api.add_resource(realWether, '/v1/weather/realtime/upload')
weather_api.add_resource(realAqi, '/v1/aqi/realtime/upload')
weather_api.add_resource(foreWeat, '/v1/weather/forecast/upload')
weather_api.add_resource(wea_Station, '/v1/weather/station/upload')
# Auto-locate weather by latitude/longitude
weather_api.add_resource(weatherLocation, '/v1/weather/location/realtime')
# Fetch district/county weather alarm information
weather_api.add_resource(get_disAla, '/v1/weather/area/alarm')
# Scan districts for active weather alarms
weather_api.add_resource(hasAlarm, '/v1/weather/has/alarm')
weather_api.add_resource(threeHour, '/v1/threehour/weather')
| 0 | 0 | 0 |
4d3a56a9d3b747e92621da1b436cf297bbe05c05 | 1,154 | py | Python | tests/helpers.py | rekt-hard/invenio-userprofiles | 69cbe381a35aca98d398a9673af351b672d41b70 | [
"MIT"
] | null | null | null | tests/helpers.py | rekt-hard/invenio-userprofiles | 69cbe381a35aca98d398a9673af351b672d41b70 | [
"MIT"
] | null | null | null | tests/helpers.py | rekt-hard/invenio-userprofiles | 69cbe381a35aca98d398a9673af351b672d41b70 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Helper functions for tests."""
from flask import url_for
def sign_up(app, client, email=None, password=None):
"""Register a user."""
with app.test_request_context():
register_url = url_for('security.register')
res = client.post(register_url, data=dict(
email=email or app.config['TEST_USER_EMAIL'],
password=password or app.config['TEST_USER_PASSWORD'],
), environ_base={'REMOTE_ADDR': '127.0.0.1'})
assert res.status_code == 302 # redirect after signedup
def login(app, client, email=None, password=None):
"""Log the user in with the test client."""
with app.test_request_context():
login_url = url_for('security.login')
res = client.post(login_url, data=dict(
email=email or app.config['TEST_USER_EMAIL'],
password=password or app.config['TEST_USER_PASSWORD'],
))
assert res.status_code == 302 # redirect after login
| 32.055556 | 72 | 0.681976 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Helper functions for tests."""
from flask import url_for
def sign_up(app, client, email=None, password=None):
    """Register a user via the Flask-Security registration endpoint,
    falling back to the app's TEST_USER_* config when no credentials
    are given."""
    with app.test_request_context():
        form = dict(
            email=email or app.config['TEST_USER_EMAIL'],
            password=password or app.config['TEST_USER_PASSWORD'],
        )
        res = client.post(url_for('security.register'), data=form,
                          environ_base={'REMOTE_ADDR': '127.0.0.1'})
        assert res.status_code == 302  # redirect after signedup
def login(app, client, email=None, password=None):
    """Log the user in through the security login endpoint, defaulting
    to the app's TEST_USER_* config credentials."""
    with app.test_request_context():
        form = dict(
            email=email or app.config['TEST_USER_EMAIL'],
            password=password or app.config['TEST_USER_PASSWORD'],
        )
        res = client.post(url_for('security.login'), data=form)
        assert res.status_code == 302  # redirect after login
| 0 | 0 | 0 |
2ef8e91a362c0314b58c09f0e9bbfe4f66eed0a0 | 526 | py | Python | hydra/plugins/__init__.py | blackc03r/hydra | 3c30c201b8d0791752bdcf2c80414f83ffe1dc1a | [
"MIT"
] | null | null | null | hydra/plugins/__init__.py | blackc03r/hydra | 3c30c201b8d0791752bdcf2c80414f83ffe1dc1a | [
"MIT"
] | null | null | null | hydra/plugins/__init__.py | blackc03r/hydra | 3c30c201b8d0791752bdcf2c80414f83ffe1dc1a | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Plugins API package
"""
from hydra.plugins.completion_plugin import CompletionPlugin
from hydra.plugins.launcher import Launcher
from hydra.plugins.plugin import Plugin
from hydra.plugins.search_path_plugin import SearchPathPlugin
from hydra.plugins.step_sweeper import StepSweeper
from hydra.plugins.sweeper import Sweeper
__all__ = [
"CompletionPlugin",
"Launcher",
"Plugin",
"SearchPathPlugin",
"StepSweeper",
"Sweeper",
]
| 26.3 | 70 | 0.771863 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Plugins API package
"""
from hydra.plugins.completion_plugin import CompletionPlugin
from hydra.plugins.launcher import Launcher
from hydra.plugins.plugin import Plugin
from hydra.plugins.search_path_plugin import SearchPathPlugin
from hydra.plugins.step_sweeper import StepSweeper
from hydra.plugins.sweeper import Sweeper
__all__ = [
"CompletionPlugin",
"Launcher",
"Plugin",
"SearchPathPlugin",
"StepSweeper",
"Sweeper",
]
| 0 | 0 | 0 |
07577bb0e7fed97b97efc99c236826232b74cb72 | 1,316 | py | Python | config/dev.py | BruceWW/flask-basic | 3252f47e6b3fca170b57819f8fdbdeb0f868654e | [
"MIT"
] | 1 | 2019-09-28T17:03:39.000Z | 2019-09-28T17:03:39.000Z | config/dev.py | BruceWW/flask-basic | 3252f47e6b3fca170b57819f8fdbdeb0f868654e | [
"MIT"
] | null | null | null | config/dev.py | BruceWW/flask-basic | 3252f47e6b3fca170b57819f8fdbdeb0f868654e | [
"MIT"
] | 1 | 2019-11-19T02:39:40.000Z | 2019-11-19T02:39:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/11 20:20
# @Author : Lin Luo
# @Site :
# @File : dev
# @Software: PyCharm
from . import BaseConfig
from redis import Redis
class Config(BaseConfig):
"""开发环境配置参数"""
SQLALCHEMY_ECHO = True
DEBUG = True
EMV = 'dev'
# DB
DB_USERNAME = 'root'
DB_PASSWORD = 'root'
DB_HOST = '127.0.0.1'
DB_PORT = '3306'
DB_DATABASE = 'api_manager'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://%s:%s@%s:%s/%s' % (
DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT, DB_DATABASE)
# redis 库
STORAGE_REDIS_HOST = '127.0.0.1'
STORAGE_REDIS_PORT = '6379'
STORAGE_REDIS_PASSWORD = ''
STORAGE_REDIS_NUM = '1'
STORAGE_REDIS_URL = 'redis://%s@%s:%s/%s' % (
STORAGE_REDIS_PASSWORD, STORAGE_REDIS_HOST, STORAGE_REDIS_PORT, STORAGE_REDIS_NUM)
# 缓存配置
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = '127.0.0.1'
CACHE_REDIS_PORT = '6379'
CACHE_REDIS_PASSWORD = ''
CACHE_REDIS_DB = '2'
# 会话管理库
SESSION_REDIS_HOST = '127.0.0.1'
SESSION_REDIS_PORT = '6379'
SESSION_REDIS_PASSWORD = ''
SESSION_REDIS_NUM = '3'
SESSION_REDIS = Redis(host=SESSION_REDIS_HOST, port=SESSION_REDIS_PORT, db=SESSION_REDIS_NUM,
password=SESSION_REDIS_PASSWORD)
| 26.32 | 97 | 0.636018 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/11 20:20
# @Author : Lin Luo
# @Site :
# @File : dev
# @Software: PyCharm
from . import BaseConfig
from redis import Redis
class Config(BaseConfig):
    """Development environment configuration parameters."""
    SQLALCHEMY_ECHO = True
    DEBUG = True
    EMV = 'dev'
    # DB (MySQL via pymysql)
    DB_USERNAME = 'root'
    DB_PASSWORD = 'root'
    DB_HOST = '127.0.0.1'
    DB_PORT = '3306'
    DB_DATABASE = 'api_manager'
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://%s:%s@%s:%s/%s' % (
        DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT, DB_DATABASE)
    # Redis storage database (general key-value storage, db 1)
    STORAGE_REDIS_HOST = '127.0.0.1'
    STORAGE_REDIS_PORT = '6379'
    STORAGE_REDIS_PASSWORD = ''
    STORAGE_REDIS_NUM = '1'
    STORAGE_REDIS_URL = 'redis://%s@%s:%s/%s' % (
        STORAGE_REDIS_PASSWORD, STORAGE_REDIS_HOST, STORAGE_REDIS_PORT, STORAGE_REDIS_NUM)
    # Cache configuration (Flask-Caching backed by redis, db 2)
    CACHE_TYPE = 'redis'
    CACHE_REDIS_HOST = '127.0.0.1'
    CACHE_REDIS_PORT = '6379'
    CACHE_REDIS_PASSWORD = ''
    CACHE_REDIS_DB = '2'
    # Session management store (server-side sessions in redis, db 3)
    SESSION_REDIS_HOST = '127.0.0.1'
    SESSION_REDIS_PORT = '6379'
    SESSION_REDIS_PASSWORD = ''
    SESSION_REDIS_NUM = '3'
    SESSION_REDIS = Redis(host=SESSION_REDIS_HOST, port=SESSION_REDIS_PORT, db=SESSION_REDIS_NUM,
                          password=SESSION_REDIS_PASSWORD)
| 0 | 0 | 0 |
623e7e5221c700158814fc6dc6ff3ae2884806bf | 651 | py | Python | images/tests.py | chelseaayoo/Personal-Gallery | 1c1d8a8c494751b87428866c6a5d029d4b136ce1 | [
"MIT"
] | null | null | null | images/tests.py | chelseaayoo/Personal-Gallery | 1c1d8a8c494751b87428866c6a5d029d4b136ce1 | [
"MIT"
] | null | null | null | images/tests.py | chelseaayoo/Personal-Gallery | 1c1d8a8c494751b87428866c6a5d029d4b136ce1 | [
"MIT"
] | null | null | null | from django.test import TestCase
from .models import Image, Category, Location
# Create your tests here.
#Set up method | 31 | 109 | 0.683564 | from django.test import TestCase
from .models import Image, Category, Location
# Create your tests here.
class ImageTestClass(TestCase):
    """Unit tests for the Image model and its related Location and
    Category models."""
    #Set up method
    def setUp(self):
        """Create and save Location/Category fixtures and build an
        (unsaved) Image that references them."""
        self.location = Location(name="Kisumu")
        self.location.save()
        self.category = Category(name="Vacation")
        self.category.save()
        self.image = Image(image = "",name = "newI", desc = "Image",loc=self.location,category=self.category)
    def test_instance(self):
        """Each fixture should be an instance of its model class."""
        self.assertTrue(isinstance(self.image,Image))
        self.assertTrue(isinstance(self.location,Location))
        self.assertTrue(isinstance(self.category,Category))
5abde49cc8d6f0837cc649c0b25fec4d80eb1bd3 | 4,850 | py | Python | bin/prepare_diann_parameters.py | ypriverol/quantms | 2626e228c1276a8fa5b41a2eda00a68253e657e6 | [
"MIT"
] | 1 | 2022-03-14T02:02:01.000Z | 2022-03-14T02:02:01.000Z | bin/prepare_diann_parameters.py | ypriverol/quantms | 2626e228c1276a8fa5b41a2eda00a68253e657e6 | [
"MIT"
] | 14 | 2022-03-15T21:46:45.000Z | 2022-03-31T11:12:42.000Z | bin/prepare_diann_parameters.py | ypriverol/quantms | 2626e228c1276a8fa5b41a2eda00a68253e657e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re
import click
from sdrf_pipelines.openms.unimod import UnimodDatabase
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.command('generate')
@click.option("--enzyme", "-e", help="")
@click.option("--fix_mod", "-f", help="")
@click.option("--var_mod", "-v", help="")
@click.option("--precursor_tolerence", "-p", help="")
@click.option("--precursor_tolerence_unit", "-pu", help="")
@click.option("--fragment_tolerence", "-fr", help="")
@click.option("--fragment_tolerence_unit", "-fu", help="")
@click.pass_context
cli.add_command(generate_cfg)
if __name__ == "__main__":
cli()
| 36.19403 | 140 | 0.569485 | #!/usr/bin/env python
import re
import click
from sdrf_pipelines.openms.unimod import UnimodDatabase
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
    """Top-level click command group; subcommands are attached below
    via cli.add_command."""
    pass
@click.command('generate')
@click.option("--enzyme", "-e", help="")
@click.option("--fix_mod", "-f", help="")
@click.option("--var_mod", "-v", help="")
@click.option("--precursor_tolerence", "-p", help="")
@click.option("--precursor_tolerence_unit", "-pu", help="")
@click.option("--fragment_tolerence", "-fr", help="")
@click.option("--fragment_tolerence_unit", "-fu", help="")
@click.pass_context
def generate_cfg(ctx, enzyme, fix_mod, var_mod, precursor_tolerence, precursor_tolerence_unit, fragment_tolerence, fragment_tolerence_unit):
    """Translate SDRF-style search settings (enzyme, fixed/variable
    modifications, mass tolerances) into DIA-NN command-line fragments
    and write them to diann_config.cfg (search) and library_config.cfg
    (spectral-library generation)."""
    cut = enzyme_cut(enzyme)
    unimod_database = UnimodDatabase()
    fix_ptm, var_ptm = convert_mod(unimod_database, fix_mod, var_mod)
    # mass_tolerence returns (MS1 precursor, MS2 fragment) tolerances.
    # In DIA-NN, --mass-acc is the MS2 accuracy and --mass-acc-ms1 the
    # MS1 accuracy. Bug fix: the original unpacking assigned the MS1
    # value to --mass-acc and the MS2 value to --mass-acc-ms1 (swapped).
    ms1_tolerance, ms2_tolerance = mass_tolerence(precursor_tolerence, precursor_tolerence_unit,
                                                  fragment_tolerence, fragment_tolerence_unit)
    mass_acc = " --mass-acc " + str(ms2_tolerance)
    mass_acc_ms1 = " --mass-acc-ms1 " + str(ms1_tolerance)
    var_ptm_str = " --var-mod "
    fix_ptm_str = " --fixed-mod "
    # Concatenate one --fixed-mod/--var-mod flag per converted modification.
    diann_fix_ptm = ""
    diann_var_ptm = ""
    for mod in fix_ptm:
        diann_fix_ptm += (fix_ptm_str + mod)
    for mod in var_ptm:
        diann_var_ptm += (var_ptm_str + mod)
    with open("diann_config.cfg", "w") as f:
        f.write("--dir ./mzMLs --cut " + cut + diann_fix_ptm + diann_var_ptm + mass_acc + mass_acc_ms1 +
                " --matrices --report-lib-info")

    with open("library_config.cfg", "w") as f:
        f.write("--cut " + cut + diann_fix_ptm + diann_var_ptm + " --gen-spec-lib --smart-profiling")
def _diann_mods(unimod_database, mods, pattern):
    """Translate one comma-separated SDRF modification string (e.g.
    "Carbamidomethyl (C),Oxidation (M)") into DIA-NN modification specs
    of the form "Name,monoisotopic_mass,site[,label]".

    Names not found in the Unimod database are skipped with a warning.
    Sites "Protein N-term" and "N-term" map to DIA-NN's "*n" and "n";
    isobaric/label reagents (TMT/Label/iTRAQ/mTRAQ) get a trailing
    ",label" marker.
    """
    converted = []
    if mods == "":
        return converted
    for mod in mods.split(","):
        name = mod.split(" ")[0]
        diann_mod = None
        for m in unimod_database.modifications:
            if m.get_name() == name:
                diann_mod = m.get_name() + "," + str(m._delta_mono_mass)
                break
        if diann_mod is None:
            print("Warning: Currently only supported unimod modifications for DIA pipeline. Skipped: " + mod)
            continue
        # The target site is the parenthesised part after the name.
        site = re.findall(pattern, " ".join(mod.split(" ")[1:]))[0]
        if site == "Protein N-term":
            site = "*n"
        elif site == "N-term":
            site = "n"
        if ("TMT" in diann_mod or "Label" in diann_mod
                or "iTRAQ" in diann_mod or "mTRAQ" in diann_mod):
            converted.append(diann_mod + "," + site + "," + "label")
        else:
            converted.append(diann_mod + "," + site)
    return converted


def convert_mod(unimod_database, fix_mod, var_mod):
    """Convert fixed and variable SDRF modification strings into DIA-NN
    specs. Returns (fix_ptm, var_ptm) lists.

    Refactor: the fixed and variable loops were identical copy-pasted
    code; both now delegate to _diann_mods.
    """
    pattern = re.compile(r"\((.*?)\)")
    fix_ptm = _diann_mods(unimod_database, fix_mod, pattern)
    var_ptm = _diann_mods(unimod_database, var_mod, pattern)
    return fix_ptm, var_ptm
def enzyme_cut(enzyme):
    """Map an enzyme name to DIA-NN's --cut specificity string.

    Unknown enzymes yield the literal "--cut" (i.e. unspecific cleavage
    on the generated command line).
    """
    cut_sites = {
        "Trypsin": "K*,R*,!*P",
        "Trypsin/P": "K*,R*,*P",
        "Arg-C": "R*,!*P",
        "Asp-N": "*B,*D",
        "Chymotrypsin": "F*,W*,Y*,L*,!*P",
        "Lys-C": "K*,!*P",
    }
    return cut_sites.get(enzyme, "--cut")
def mass_tolerence(prec, precursor_tolerence_unit, frag, fragment_tolerence_unit):
    """Return (MS1, MS2) mass tolerances in ppm.

    Units other than "ppm" are not supported by DIA-NN; they fall back
    to the defaults (10 ppm MS1, 20 ppm MS2) with a printed warning.
    """
    ms1_tolerence = prec
    if precursor_tolerence_unit != "ppm":
        # Default 10 ppm
        print("Warning: " + precursor_tolerence_unit + " unit not supported for DIA-NN. Default 10 ppm")
        ms1_tolerence = 10
    ms2_tolerence = frag
    if fragment_tolerence_unit != "ppm":
        # Default 20 ppm
        ms2_tolerence = 20
        print("Warning: " + fragment_tolerence_unit + " unit not supported for DIA-NN. Default 20 ppm")
    return ms1_tolerence, ms2_tolerence
# Attach the 'generate' subcommand to the click group and expose the
# group as the script entry point.
cli.add_command(generate_cfg)
if __name__ == "__main__":
    cli()
| 4,057 | 0 | 113 |
13c14d38a65bb64feeef34679a6670837801479a | 878 | py | Python | var/spack/repos/builtin/packages/py-python-memcached/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/py-python-memcached/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/py-python-memcached/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPythonMemcached(PythonPackage):
    """This software is a 100% Python interface to the memcached memory cache
    daemon. It is the client side software which allows storing values in one
    or more, possibly remote, memcached servers. Search google for memcached
    for more information."""
    # Project landing page and the sdist URL Spack's fetcher downloads.
    homepage = "https://pypi.org/project/python-memcached/"
    url = "https://pypi.io/packages/source/p/python-memcached/python-memcached-1.59.tar.gz"
    # Known release with its PyPI sdist sha256 checksum.
    version('1.59', sha256='a2e28637be13ee0bf1a8b6843e7490f9456fd3f2a4cb60471733c7b5d5557e4f')
    # setuptools is needed only to build; six (Python 2/3 compatibility
    # library) at version 1.4.0 or newer is needed at build and run time.
    depends_on('py-setuptools', type='build')
    depends_on('py-six@1.4.0:', type=('build', 'run'))
| 39.909091 | 96 | 0.740319 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPythonMemcached(PythonPackage):
    """This software is a 100% Python interface to the memcached memory cache
    daemon. It is the client side software which allows storing values in one
    or more, possibly remote, memcached servers. Search google for memcached
    for more information."""
    # Project landing page and the sdist URL Spack's fetcher downloads.
    homepage = "https://pypi.org/project/python-memcached/"
    url = "https://pypi.io/packages/source/p/python-memcached/python-memcached-1.59.tar.gz"
    # Known release with its PyPI sdist sha256 checksum.
    version('1.59', sha256='a2e28637be13ee0bf1a8b6843e7490f9456fd3f2a4cb60471733c7b5d5557e4f')
    # setuptools is needed only to build; six (Python 2/3 compatibility
    # library) at version 1.4.0 or newer is needed at build and run time.
    depends_on('py-setuptools', type='build')
    depends_on('py-six@1.4.0:', type=('build', 'run'))
| 0 | 0 | 0 |
4770c53a3edd8e714e805978e1cd2db93ad17272 | 2,431 | py | Python | Indicators/__init__.py | Desil-sketch/Indicators-for-Jesse | ffe33a217002ea3034696fe38acfa72611d52b4f | [
"MIT"
] | 1 | 2021-12-08T06:34:48.000Z | 2021-12-08T06:34:48.000Z | Indicators/__init__.py | Desil-sketch/Indicators-for-Jesse | ffe33a217002ea3034696fe38acfa72611d52b4f | [
"MIT"
] | null | null | null | Indicators/__init__.py | Desil-sketch/Indicators-for-Jesse | ffe33a217002ea3034696fe38acfa72611d52b4f | [
"MIT"
] | 1 | 2021-11-04T17:40:19.000Z | 2021-11-04T17:40:19.000Z | from .wto import wto
from .weis_wave_volume import weis
from .weighted_std import wstd
from .vwmacd import vwmacd
from .vv_ma_angles import vv_ma_angle
from .voss import vpf
from .volatility import volatility
from .vix_fix import vixfix
from .vidyatv import vidyatv
from .vhf import vhf
from .var_macd import varmacd
from .vama_volume_adjusted_moving_average import vama
from .tva import tva
from .trader_pressure_index import tpx
from .tmo_with_ttm_squeeze import tmo
from .supply_demand_volume import sdv
from .stochcrsi import stochcrsi
from .smi import smi
from .roofing_filters import ef
from .reverse_engineered_bands import reb
from .relative_strength_volatility_variable_bands import rsvv
from .rangefilter import rangefilter
from .quadratic_regression_slope import qrs
from .qqe import qqe
from .priceratio import priceratio
from .pivots import pivots
from .pine_rsi import rsi
from .percentile_trend_channel import ptc
from .optimized_trend_tracker import ott
from .nick_rypock_trailing_reverse import nrtr
from .murreys_math_oscillator import mm
from .multi_z_score import zscore
from .mesatrig import mesatrig
from .mcginley_dynamic_improved import mgd
from .macz import macz
from .lowpass import lowpass
from .lelec import lelec
from .jmarsx import jmarsx
from .jma import jma
from .index_adaptive_keltner_channels import akc
from .hvpsma import hvpsma
from .highest_lowest_stoch import hlstoch
from .halftrend import halftrend
from .godmode_osc import godmode
from .fisher_multi_pack_dw import fishmulti
from .ehlers_predictive_moving_average import epma
from .ehlers_modified_optimum_elliptic_filter import moef
from .ehlers_kalman_crossover import ekc
from .dvdiqqe import dvdiqqe
from .doublemom import doublemom
from .double_weighted_moving_average import dwma
from .donchian_hl_width_cycles import dhl
from .dickinson_moving_average import dima
from .decaying_rate_of_change_non_linear_filter import drfilt
from .correlation_trend_john_ehlers import cti
from .consolidation import consolidation
from .compound_ratio_ma import compma
from .cmotv import cmotv
from .chandelierexit import chandelierexit
from .cci_cycle_schaff_trend import cst
from .bernoulli_process_binary_entropy import bpbe
from .better_bollinger_bands import bbb
from .average_sentiment_oscillator import avo
from .alma import alma
from .ehlernet import ehlernet
from .ehlers_distance_coefficient_filter import edc
| 36.283582 | 62 | 0.852735 | from .wto import wto
from .weis_wave_volume import weis
from .weighted_std import wstd
from .vwmacd import vwmacd
from .vv_ma_angles import vv_ma_angle
from .voss import vpf
from .volatility import volatility
from .vix_fix import vixfix
from .vidyatv import vidyatv
from .vhf import vhf
from .var_macd import varmacd
from .vama_volume_adjusted_moving_average import vama
from .tva import tva
from .trader_pressure_index import tpx
from .tmo_with_ttm_squeeze import tmo
from .supply_demand_volume import sdv
from .stochcrsi import stochcrsi
from .smi import smi
from .roofing_filters import ef
from .reverse_engineered_bands import reb
from .relative_strength_volatility_variable_bands import rsvv
from .rangefilter import rangefilter
from .quadratic_regression_slope import qrs
from .qqe import qqe
from .priceratio import priceratio
from .pivots import pivots
from .pine_rsi import rsi
from .percentile_trend_channel import ptc
from .optimized_trend_tracker import ott
from .nick_rypock_trailing_reverse import nrtr
from .murreys_math_oscillator import mm
from .multi_z_score import zscore
from .mesatrig import mesatrig
from .mcginley_dynamic_improved import mgd
from .macz import macz
from .lowpass import lowpass
from .lelec import lelec
from .jmarsx import jmarsx
from .jma import jma
from .index_adaptive_keltner_channels import akc
from .hvpsma import hvpsma
from .highest_lowest_stoch import hlstoch
from .halftrend import halftrend
from .godmode_osc import godmode
from .fisher_multi_pack_dw import fishmulti
from .ehlers_predictive_moving_average import epma
from .ehlers_modified_optimum_elliptic_filter import moef
from .ehlers_kalman_crossover import ekc
from .dvdiqqe import dvdiqqe
from .doublemom import doublemom
from .double_weighted_moving_average import dwma
from .donchian_hl_width_cycles import dhl
from .dickinson_moving_average import dima
from .decaying_rate_of_change_non_linear_filter import drfilt
from .correlation_trend_john_ehlers import cti
from .consolidation import consolidation
from .compound_ratio_ma import compma
from .cmotv import cmotv
from .chandelierexit import chandelierexit
from .cci_cycle_schaff_trend import cst
from .bernoulli_process_binary_entropy import bpbe
from .better_bollinger_bands import bbb
from .average_sentiment_oscillator import avo
from .alma import alma
from .ehlernet import ehlernet
from .ehlers_distance_coefficient_filter import edc
| 0 | 0 | 0 |
c7093f3820d04b8103121cee9f4cdaf96e748579 | 23,970 | py | Python | python/uw/utilities/fitter.py | coclar/pointlike | 7088724b5a40cf787371aff69e64c9bec701f578 | [
"BSD-3-Clause"
] | null | null | null | python/uw/utilities/fitter.py | coclar/pointlike | 7088724b5a40cf787371aff69e64c9bec701f578 | [
"BSD-3-Clause"
] | null | null | null | python/uw/utilities/fitter.py | coclar/pointlike | 7088724b5a40cf787371aff69e64c9bec701f578 | [
"BSD-3-Clause"
] | null | null | null | """
Basic fitter utilities
Authors: Matthew Kerr, Toby Burnett
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/utilities/fitter.py,v 1.10 2013/07/28 15:27:44 burnett Exp $
"""
import types
import numpy as np
from scipy import optimize #for fmin,fmin_powell,fmin_bfgs
from numpy import linalg #for inv
import numdifftools
class Fitted(object):
    """ base class for a function object to define fit properties """
    # NOTE(review): the two bare @property decorators below look like the
    # remnants of removed members (a later, complete copy of this class in
    # this file defines `bounds` and `parameter_names` properties here).
    # As written they stack onto `minimize`, turning it into a
    # property-of-a-property and making it uncallable -- confirm against the
    # original source.
    @property
    @property
    def minimize(self, **kwargs):
        """ minimize the function using optimize.fmin_l_bfgs_b
        """
        # When use_gradient is True, scipy expects the objective (self) to
        # return (value, gradient); otherwise the gradient is approximated.
        use_gradient = kwargs.pop('use_gradient',True)#, self.gradient(self.get_parameters()) is None)
        ret =optimize.fmin_l_bfgs_b(self, self.get_parameters(),
                bounds=self.bounds,
                fprime= None, # expect gradient calculated by function
                approx_grad = not use_gradient,
                args = (use_gradient,), # pass to the function
                **kwargs)
        # warnflag==0 means L-BFGS-B converged; only then commit the result.
        if ret[2]['warnflag']==0:
            self.set_parameters(ret[0])
        else:
            print ('Fit failure:\n%s' % ret[2])
        return ret
    def hessian(self, pars=None, **kwargs):
        """
        Return the Hessian matrix
        For sigmas and correlation coefficients, invert to covariance
            cov = self.hessian().I
            sigs = np.sqrt(cov.diagonal())
            corr = cov / np.outer(sigs,sigs)
        """
        # numdifftools performs the numerical differentiation of self(p).
        if pars is None: pars = self.get_parameters()
        return np.matrix(numdifftools.Hessian(self, **kwargs)(pars))
class Minimizer(object):
    """ this is mostly extracted as is from uw.like.specfitter and turned into a utility
    """
    # NOTE(review): this copy of the class appears to have been
    # content-filtered: __init__ and __call__ reference self.get_parameters(),
    # self.set_parameters(), self.minuit() and self.__set_error__(), and
    # mycov() calls revised_step(), none of which are defined in this copy.
    # A complete variant of the class appears later in this file -- confirm
    # against the original source before relying on this one.
    def __init__(self, fn, parameters=None, args=(), quiet=True):
        """ fn : function object
            note that it will be minimized, so should be negative of log likelihood
        """
        # `parameters`, when given, overrides fn's own parameter array;
        # `args` are extra positional arguments forwarded to fn.
        self.quiet = quiet
        self.par = parameters
        self.args = args
        self.fn = fn
        npar = len(self.get_parameters())
        # covariance starts as zeros; filled in by the error-estimation step.
        self.cov_matrix=np.zeros([npar,npar])
    def gradient(self,parameters,*args):
        """ access gradient if defined by the function
        """
        assert hasattr(self.fn, 'gradient'), 'Minimize: use_gradient set, but function did not define a gradient'
        return self.fn.gradient(parameters)
    def get_free_errors(self):
        """Return the diagonal elements of the covariance matrix -- useful for step sizes in minimization, if known.
        """
        assert False, 'get_free_errors not implemented yet'
    def __call__(self, method='simplex', tolerance = 0.01, save_values = True,
                estimate_errors=True, error_for_steps=False,
                use_gradient = True, gtol = 1e-1, **kwargs):
        """Maximize likelihood and estimate errors.
            method -- ['simplex'] fitter; 'powell' or 'simplex' or 'minuit'
            tolerance -- (approximate) absolute tolerance
        """
        if method.lower() not in ['simplex','powell','minuit', 'l-bfgs-b']:
            raise Exception('Unknown fitting method for F.fit(): "%s"' % method)
        # a gradient can only be used if the wrapped function provides one
        use_gradient = use_gradient and hasattr(self.fn, 'gradient')
        use_bounds = kwargs.pop('use_bounds', self.fn.bounds is not None)
        if method == 'minuit':
            return self.minuit()
        # scipy
        # ll_0 is used only to scale the fractional tolerance (ftol) below.
        ll_0 = self.fn(self.get_parameters(), *self.args)
        if ll_0==0: ll_0=1.0
        if use_gradient and not use_bounds:
            # unconstrained BFGS; repeat until the value change is below the
            # absolute tolerance (at most 10 restarts)
            f0 = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,full_output=1,maxiter=500,gtol=gtol,disp=0)
            for i in range(10):
                f = self._save_bfgs = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,
                        full_output=1,maxiter=500,gtol=gtol,disp=0)
                if abs(f0[1] - f[1]) < tolerance: break # note absolute tolerance
                if not self.quiet:
                    print ('Did not converge on first gradient iteration. Trying again.')
                    print (f0[1],f[1],abs(f0[1]-f[1]))
                f0 = f
        elif use_gradient:
            # gradient plus box constraints -> L-BFGS-B
            if not self.quiet: print ('using optimize.fmin_l_bfgs_b with parameter bounds %s\n, kw= %s'% (self.fn.bounds, kwargs))
            ret = optimize.fmin_l_bfgs_b(self.fn, self.get_parameters(),
                bounds=self.fn.bounds,
                fprime=self.gradient ,
                **kwargs)
            if ret[2]['warnflag']>0:
                print ('Fit failure:\n%s' % ret[2])
            if not self.quiet:
                print (ret[2])
            f = ret
        else:
            # gradient-free: Powell or Nelder-Mead simplex
            minimizer = optimize.fmin_powell if method == 'powell' else optimize.fmin
            f = minimizer(self.fn, self.get_parameters(),full_output=1,
                maxiter=10000, maxfun=20000, ftol=0.01/abs(ll_0), disp=0 if self.quiet else 1)
        if not self.quiet: print ('Function value at minimum: %.8g'%f[1])
        self.set_parameters(f[0])
        self.fitvalue=f[1]
        if estimate_errors:
            self.__set_error__(use_gradient)
        if estimate_errors:
            # negative variances indicate a failed error estimate -> NaN
            diag = self.cov_matrix.diagonal().copy()
            bad = diag<0
            if np.any(bad):
                if not self.quiet: print ('Minimizer warning: bad errors for values %s'\
                    %np.asarray(self.fn.parameter_names)[bad]) # %np.arange(len(bad))[bad]
                diag[bad]=np.nan
            return f[1], f[0], np.sqrt(diag)
        return f[1], f[0]
    def __set_error_minuit(self,m,method='HESSE'):
        """Compute errors for minuit fit."""
        #Not sure yet if there will be problems with including the backgrounds.
        self.cov_matrix = m.errors(method=method)
        print ('Minuit error not done?')
        #self.bgm.set_covariance_matrix(self.cov_matrix,current_position = 0)
        #self.psm.set_covariance_matrix(self.cov_matrix,current_position = len(self.bgm.parameters()))
    def sigmas(self):
        """ quietly return nan for negative diagonal terms """
        # NOTE(review): on modern NumPy ndarray.diagonal() returns a
        # read-only view, so the in-place assignment below may raise;
        # a .copy() would be needed -- confirm target NumPy version.
        diag = self.cov_matrix.diagonal()
        bad = diag<0
        if np.any(bad): diag[bad]=np.nan
        return np.sqrt(diag)
    def correlations(self, percent=False):
        """Return the linear correlation coefficients for the estimated covariance matrix.
            any rows or columns with a zero error (failed fit) will be nan
        """
        s = self.sigmas()
        s[s==0] = np.nan
        t =self.cov_matrix / np.outer(s,s)
        return t*100. if percent else t
    @staticmethod
    def hessian(mf, pars, quiet=True, *args):
        """Calculate the Hessian matrix using finite differences (adapted from specfitter.SpectralModelFitter.hessian)
            mf: minimizing function
            pars: parameters at the minimum,
            args: additional arguments for mf.
            returns matrix, error code array
        """
        p = pars.copy()
        npar = len(pars)
        deltas = np.abs(0.01 * p) #initial guess
        hessian = np.zeros([npar,npar])
        bad_mask = np.asarray([False] * npar)
        return_code = np.zeros(npar)
        l0 = mf(p, *args)
        #find good values with which to estimate the covariance matrix -- look at diagonal deviations
        #iterate until change in function consistent with ~1 sigma conditional error
        for i in range(npar):
            if not quiet: print ('Working on parameter %d'%(i))
            h,l = p.copy(),p.copy()
            for j in range(10):
                h[:] = p[:]; l[:] = p[:];
                h[i] += deltas[i]
                l[i] -= deltas[i]
                delta_f_1 = mf(h, *args) - l0
                delta_f_2 = mf(l, *args) - l0
                delta_f = max(delta_f_1 + delta_f_2,0) #twice difference, really
                deltas[i] /= max(delta_f**0.5,0.33) # can change by half decade
                if delta_f < 5 and delta_f > 0.5: break
            if delta_f < 5e-3:
                # no constraint on parameter -- ignore it in further fittingor :
                bad_mask[i] = True
                return_code[i] = 1
            if (delta_f_1/delta_f_2 > 10 or delta_f_1/delta_f_2 < 1./10):
                # significant asymmetry in likelihood
                bad_mask[i] = True
                return_code[i] = 2
            if (delta_f_2 < 5e-3 and delta_f_1 > 0.5):
                # not actually at maximum of likelihood -- upper limit condition
                bad_mask[i] = True
                return_code[i] = 3
                if not quiet: print ('fail, need upper limit')
                # NOTE(review): dropping into the debugger here will hang any
                # non-interactive run -- confirm this is intentional.
                import pdb; pdb.set_trace()
        for i in range(npar):
            if bad_mask[i]:
                # zero out rows/columns of parameters we could not constrain
                hessian[i,:] = 0 #no correlation?
                hessian[:,i] = 0
                continue
            for j in range(i,npar): #Second partials by finite difference
                xhyh,xhyl,xlyh,xlyl=p.copy(),p.copy(),p.copy(),p.copy()
                xdelt = deltas[i]
                ydelt = deltas[j]
                xhyh[i] += xdelt; xhyh[j] += ydelt
                xhyl[i] += xdelt; xhyl[j] -= ydelt
                xlyh[i] -= xdelt; xlyh[j] += ydelt
                xlyl[i] -= xdelt; xlyl[j] -= ydelt
                hessian[i][j]=hessian[j][i]=(mf(xhyh, *args)-mf(xhyl, *args)
                                             -mf(xlyh, *args)+mf(xlyl, *args))/\
                                            (4*xdelt*ydelt)
        mf(p, *args) #call likelihood with original values; this resets model and any other values that might be used later
        return hessian,return_code
    @staticmethod
    def mycov(grad,par,full_output=False,init_step=0.04,min_step=1e-6,max_step=1,max_iters=5,target=0.5,min_func=1e-4,max_func=4):
        """Perform finite differences on the _analytic_ gradient provided by user to calculate hessian/covariance matrix.
        Positional args:
            grad : a function to return a gradient
            par : vector of parameters (should be function minimum for covariance matrix calculation)
        Keyword args:
            full_output [False] : if True, return information about convergence, else just the covariance matrix
            init_step [1e-3] : initial step size (0.04 ~ 10% in log10 space); can be a scalar or vector
            min_step [1e-6] : the minimum step size to take in parameter space
            max_step [1] : the maximum step size to take in parameter sapce
            max_iters [5] : maximum number of iterations to attempt to converge on a good step size
            target [0.5] : the target change in the function value for step size
            min_func [1e-4] : the minimum allowable change in (abs) function value to accept for convergence
            max_func [4] : the maximum allowable change in (abs) function value to accept for convergence
        """
        nparams = len(par)
        # clamp the initial step strictly inside [min_step, max_step]
        step_size = np.ones(nparams)*init_step
        step_size = np.maximum(step_size,min_step*1.1)
        step_size = np.minimum(step_size,max_step*0.9)
        hess = np.zeros([nparams,nparams])
        min_flags = np.asarray([False]*nparams)
        max_flags = np.asarray([False]*nparams)
        iters = np.zeros(nparams)
        for i in range(nparams):
            converged = False
            for j in range(max_iters):
                iters[i] += 1
                di = step_size[i]
                # central difference of the gradient along parameter i
                par[i] += di
                g_up = grad(par)
                par[i] -= 2*di
                g_dn = grad(par)
                par[i] += di
                delta_f = (g_up - g_dn)[i]
                # NOTE(review): revised_step is not defined in this copy of
                # the class; it exists as a nested helper in the complete
                # variant later in this file -- calling mycov here raises
                # NameError. Confirm against the original source.
                converged,new_step = revised_step(delta_f,di,i)
                #print ('Parameter %d -- Iteration %d -- Step size: %.2e -- delta: %.2e'%(i,j,di,delta_f))
                if converged: break
                else: step_size[i] = new_step
            hess[i,:] = (g_up - g_dn) / (2*di) # central difference
            if not converged:
                print ('Warning: step size for parameter %d (%.2g) did not result in convergence.'%(i,di))
        try:
            cov = np.linalg.inv(hess)
        except:
            print ('Error inverting hessian.')
            #cov = np.zeros([nparams,nparams])
            raise Exception('Error inverting hessian')
        if full_output:
            return cov,hess,step_size,iters,min_flags,max_flags
        else:
            return cov
class Projector(Fitted):
    """ adapt a function object to create a projection, a function of a subset of its parameters
    Require that it has a methods __call__, set_parmeters, get_parameters, and perhaps gradient
    """
    def __init__(self, fn, select=[0], par=None, ):
        """
        parameters:
            fn: function of par: should be minimizable
            par: array type or None
                default parameters to use: if None, get from fn.get_parameters)
            select: list of free parameter
            TODO: use mask instead or optionally
        """
        # NOTE(review): mutable default `select=[0]` -- harmless here since
        # it is only read, never mutated, but worth normalizing to None.
        self.fn=fn
        self.select = select
        # boolean mask over fn's full parameter vector: True = free/projected
        self.mask = np.zeros(len(fn.get_parameters()),bool)
        self.mask[select]=True
        # fpar holds the full parameter vector; par is just the selected subset
        self.fpar= fn.get_parameters().copy()
        self.par = np.asarray(par[:]) if par is not None else self.fpar[self.mask]
        assert len(self.par)==sum(self.mask), 'wrong number of specified parameters'
    def __call__(self, x):
        """ len of x must be number of selected parameters"""
        # scatter the selected values into the full vector, then evaluate
        self.fpar[self.mask]=x
        ret= self.fn(self.fpar)
        #print ('value(%.2f)=%.2f' % (x,ret))
        return ret
    def gradient(self, x):
        """ the function object may not support this
        """
        # gradient of the projection = selected components of the full gradient
        self.fpar[self.mask]=x
        t = self.fn.gradient(self.fpar)[self.mask]
        #print ('gradient(%.2f)=%.2f' % (x, t))
        return t
    # NOTE(review): the two bare @property decorators below look like
    # remnants of removed members (the `parameter_names` and `get/set`
    # accessors in the complete variant of this file); as written they wrap
    # `fmin` as a property-of-a-property, making it uncallable -- confirm
    # against the original source.
    @property
    @property
    def fmin(self, x=None, **kwargs):
        """ run simple fmin """
        # try/except adds nothing here (bare re-raise); kept as-is.
        try:
            par = optimize.fmin(self, [x] if x is not None else self.par, **kwargs)
            self.set_parameters(par)
        except:
            raise
    def minimize(self, par0=None, **fit_kw):
        """ create Minimizer of this, run it, update original parameters
        parameters:
            par0 : array type of float or None
                pass to Minimizer
        return value, parameter values, errors
        """
        self.fitter = Minimizer(self, par0)
        c2, par, dpar = self.fitter(**fit_kw)
        self.par = par
        self.set_parameters(par)
        return c2, par, dpar
class Profile(Fitted):
    """ Manage a function of one parameter, projected from a multi-parameter function,
        with option evaluate by either optimizing on the remaining parameters or not
    """
    # NOTE(review): this module uses Python 2 constructs (types.StringType,
    # list-returning range) and will not run unmodified on Python 3.
    def __init__(self, fn, index, par=None, profile=True):
        """
        parameters
        ---------
        fn : function of a set of parameters
            Must implement Fitted interface
        index : integer or string
            the index to examine, or its parameter name
        par: arary type or None
            initial set of parameters for fn if not None
        profile: bool
            set False to not apply profile
        """
        # local reference to the basic function, copy of original parametes
        self.fn = fn
        # a string index is resolved to its position in fn.parameter_names
        if type(index)==types.StringType:
            try:
                self.index = list(fn.parameter_names).index(index)
            except ValueError:
                raise FitterException('parameter name "%s" not one of %s' % (index, fn.parameter_names))
            except Exception as msg:
                raise
        else: self.index = index
        self.fpar = par if par is not None else fn.get_parameters().copy()
        npar = len(self.fpar)
        # mask is False only at the examined parameter (True = "the rest")
        self.mask = np.ones(npar,bool)
        self.mask[self.index]=False
        # set up function of the selected parameter (self) and a function of the rest
        select = range(npar)
        assert self.index in select, 'Expect index to select to be one of parameters'
        self.par = self.fpar[self.index:self.index+1]
        select.remove(self.index)
        self.pfun = Projector(fn, select)
        self.profile = profile
        # set up a fitter for the remaining parameters
        self.fitter = Minimizer(self.pfun)
    # BUG FIX: removed a dangling bare `@property` decorator that followed
    # __init__ with no definition after it at class scope; a decorator
    # followed by a dedented module-level `def` is a SyntaxError and
    # prevented the module from importing at all. (It was presumably left
    # behind when a property body was stripped from this copy.)
def test(x0=1.1, pars=[1.0, 1.5], **kwargs):
    """ test with a parabola corresponding to a Gaussian with mean, sigma in pars
    >>> pars=[1.0, 1.5]; x0=1.1
    >>> testf = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2
    >>> func = TestFunc(testf, [x0])
    >>> m = Minimizer(func) # create minimizer object
    >>> m() # run default fit
    (1.0000000000211928, array([ 0.99999023]), array([ 1.5]))
    """
    # pars is never mutated, so the mutable default list is harmless here.
    # NOTE(review): TestFunc is not defined in this copy of the module --
    # confirm it exists in the full source.
    testf = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2
    print ('input parameters:', pars)
    func = TestFunc(testf, [x0])
    m = Minimizer(func)
    #m = Minimizer(testf, [x0], )
    f = m(use_gradient=False)
    print ('solution at %.2f, +/- %.2f ' % (m.get_parameters(), np.sqrt(m.cov_matrix.diagonal())))
    return func, m, f
# Script entry: print the module documentation and run the embedded doctests.
if __name__ == "__main__":
    print (__doc__)
    import doctest
    doctest.testmod()
| 40.218121 | 142 | 0.561786 | """
Basic fitter utilities
Authors: Matthew Kerr, Toby Burnett
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/utilities/fitter.py,v 1.10 2013/07/28 15:27:44 burnett Exp $
"""
import types
import numpy as np
from scipy import optimize #for fmin,fmin_powell,fmin_bfgs
from numpy import linalg #for inv
import numdifftools
class FitterException(Exception):
    """Raised for misuse of the fitter interfaces (e.g. unimplemented
    accessors or an unknown parameter name)."""
    pass
class Fitted(object):
    """ base class for a function object to define fit properties """
    @property
    def bounds(self):
        # No box constraints by default; subclasses may return (lo, hi)
        # pairs understood by scipy's fmin_l_bfgs_b.
        return None
    @property
    def parameter_names(self):
        # Subclasses may supply names parallel to the parameter array.
        return None
    def get_parameters(self):
        # Subclasses must override to expose the current parameter array.
        raise FitterException('get_parameters is not implemented')
    def set_parameters(self, par):
        # Subclasses must override to accept an updated parameter array.
        raise FitterException('set_parameters is not implemented')
    def minimize(self, **kwargs):
        """ minimize the function using optimize.fmin_l_bfgs_b
        """
        # When use_gradient is True, scipy expects the objective (self) to
        # return (value, gradient); otherwise the gradient is approximated.
        use_gradient = kwargs.pop('use_gradient',True)#, self.gradient(self.get_parameters()) is None)
        ret =optimize.fmin_l_bfgs_b(self, self.get_parameters(),
                bounds=self.bounds,
                fprime= None, # expect gradient calculated by function
                approx_grad = not use_gradient,
                args = (use_gradient,), # pass to the function
                **kwargs)
        # warnflag==0 means L-BFGS-B converged; only then commit the result.
        if ret[2]['warnflag']==0:
            self.set_parameters(ret[0])
        else:
            print ('Fit failure:\n%s' % ret[2])
        return ret
    def hessian(self, pars=None, **kwargs):
        """
        Return the Hessian matrix
        For sigmas and correlation coefficients, invert to covariance
            cov = self.hessian().I
            sigs = np.sqrt(cov.diagonal())
            corr = cov / np.outer(sigs,sigs)
        """
        # numdifftools performs the numerical differentiation of self(p).
        if pars is None: pars = self.get_parameters()
        return np.matrix(numdifftools.Hessian(self, **kwargs)(pars))
def test(fn = None, p0=None, pars=None):
    """Build a TestFunc fixture for exercising the fitters.

    When *fn* is omitted, a parabola with minimum 1.0 at p[0]==1.0 is used
    (curvature set by pars=[1.0, 2.]). NOTE(review): the *p0* argument is
    unused and *pars* is overwritten in the default branch; TestFunc is not
    defined in this part of the file -- confirm it exists in the full module.
    """
    if fn is None:
        pars=[1.0, 2.]
        fn = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2
    return TestFunc(fn, [1.1])
class Minimizer(object):
""" this is mostly extracted as is from uw.like.specfitter and turned into a utility
"""
def __init__(self, fn, parameters=None, args=(), quiet=True):
""" fn : function object
note that it will be minimized, so should be negative of log likelihood
"""
self.quiet = quiet
self.par = parameters
self.args = args
self.fn = fn
npar = len(self.get_parameters())
self.cov_matrix=np.zeros([npar,npar])
def gradient(self,parameters,*args):
""" access gradient if defined by the function
"""
assert hasattr(self.fn, 'gradient'), 'Minimize: use_gradient set, but function did not define a gradient'
return self.fn.gradient(parameters)
def get_parameters(self):
return self.fn.get_parameters() if self.par is None else self.par
def set_parameters(self, par):
if self.par is None:
self.fn.set_parameters(par)
else:
self.par = par
def get_free_errors(self):
"""Return the diagonal elements of the covariance matrix -- useful for step sizes in minimization, if known.
"""
assert False, 'get_free_errors not implemented yet'
def optimize(self, optimizer, **kwargs):
return optimizer( self.fn, self.get_parameters(), **kwargs)
def __call__(self, method='simplex', tolerance = 0.01, save_values = True,
estimate_errors=True, error_for_steps=False,
use_gradient = True, gtol = 1e-1, **kwargs):
"""Maximize likelihood and estimate errors.
method -- ['simplex'] fitter; 'powell' or 'simplex' or 'minuit'
tolerance -- (approximate) absolute tolerance
"""
if method.lower() not in ['simplex','powell','minuit', 'l-bfgs-b']:
raise Exception('Unknown fitting method for F.fit(): "%s"' % method)
use_gradient = use_gradient and hasattr(self.fn, 'gradient')
use_bounds = kwargs.pop('use_bounds', self.fn.bounds is not None)
if method == 'minuit':
return self.minuit()
# scipy
ll_0 = self.fn(self.get_parameters(), *self.args)
if ll_0==0: ll_0=1.0
if use_gradient and not use_bounds:
f0 = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,full_output=1,maxiter=500,gtol=gtol,disp=0)
for i in range(10):
f = self._save_bfgs = optimize.fmin_bfgs(self.fn,self.get_parameters(),self.gradient,
full_output=1,maxiter=500,gtol=gtol,disp=0)
if abs(f0[1] - f[1]) < tolerance: break # note absolute tolerance
if not self.quiet:
print ('Did not converge on first gradient iteration. Trying again.')
print (f0[1],f[1],abs(f0[1]-f[1]))
f0 = f
elif use_gradient:
if not self.quiet: print ('using optimize.fmin_l_bfgs_b with parameter bounds %s\n, kw= %s'% (self.fn.bounds, kwargs))
ret = optimize.fmin_l_bfgs_b(self.fn, self.get_parameters(),
bounds=self.fn.bounds,
fprime=self.gradient ,
**kwargs)
if ret[2]['warnflag']>0:
print ('Fit failure:\n%s' % ret[2])
if not self.quiet:
print (ret[2])
f = ret
else:
minimizer = optimize.fmin_powell if method == 'powell' else optimize.fmin
f = minimizer(self.fn, self.get_parameters(),full_output=1,
maxiter=10000, maxfun=20000, ftol=0.01/abs(ll_0), disp=0 if self.quiet else 1)
if not self.quiet: print ('Function value at minimum: %.8g'%f[1])
self.set_parameters(f[0])
self.fitvalue=f[1]
if estimate_errors:
self.__set_error__(use_gradient)
if estimate_errors:
diag = self.cov_matrix.diagonal().copy()
bad = diag<0
if np.any(bad):
if not self.quiet: print ('Minimizer warning: bad errors for values %s'\
%np.asarray(self.fn.parameter_names)[bad]) # %np.arange(len(bad))[bad]
diag[bad]=np.nan
return f[1], f[0], np.sqrt(diag)
return f[1], f[0]
    def minuit(self):
        """Minimize with the (pointlike-wrapped) Minuit engine.

        NOTE(review): this body references use_gradient, error_for_steps,
        tolerance, save_values and estimate_errors, none of which are local
        to this method or attributes of self -- they are parameters of
        __call__. As written, calling minuit() directly raises NameError;
        it looks like this code was extracted from __call__ without its
        arguments. Confirm against the original source.
        """
        from uw.utilities.minuit import Minuit
        temp_params = self.get_parameters()
        npars = temp_params.shape[0]
        param_names = ['p%i'%i for i in range(npars)]
        if use_gradient :
            gradient = self.gradient
            force_gradient = 1
        else:
            gradient = None
            force_gradient = 0
        if error_for_steps:
            # seed Minuit step sizes from the known errors, with sane clamps
            steps = self.get_free_errors()
            steps[steps<1e-6] = 0.04 # for models without error estimates, put in the defaults
            steps[steps > 1] = 1 # probably don't want to step more than 100%...
            m = Minuit(self.fn,temp_params,up=.5,maxcalls=20000,tolerance=tolerance,printMode=-self.quiet,param_names=param_names,steps=steps)
        else:
            m = Minuit(self.fn,temp_params,up=.5,maxcalls=20000,tolerance=tolerance,printMode=-self.quiet,param_names=param_names)
        params,fval = m.minimize()
        if save_values:
            if estimate_errors == True:
                self.__set_error_minuit(m,'HESSE')
            self.fn(params) # reset values to the ones found by minimization step
            self.fitvalue= fval
        return fval
def __set_error_minuit(self,m,method='HESSE'):
"""Compute errors for minuit fit."""
#Not sure yet if there will be problems with including the backgrounds.
self.cov_matrix = m.errors(method=method)
print ('Minuit error not done?')
#self.bgm.set_covariance_matrix(self.cov_matrix,current_position = 0)
#self.psm.set_covariance_matrix(self.cov_matrix,current_position = len(self.bgm.parameters()))
def sigmas(self):
""" quietly return nan for negative diagonal terms """
diag = self.cov_matrix.diagonal()
bad = diag<0
if np.any(bad): diag[bad]=np.nan
return np.sqrt(diag)
def correlations(self, percent=False):
"""Return the linear correlation coefficients for the estimated covariance matrix.
any rows or columns with a zero error (failed fit) will be nan
"""
s = self.sigmas()
s[s==0] = np.nan
t =self.cov_matrix / np.outer(s,s)
return t*100. if percent else t
def __set_error__(self,use_gradient=False):
npar = len(self.get_parameters())
if use_gradient:
save_pars = self.get_parameters().copy()
cov_matrix,hessian = Minimizer.mycov(self.gradient,self.get_parameters(),full_output=True)[:2]
self.set_parameters(save_pars)
mask = hessian.diagonal()>0
else:
hessian, bad_mask = Minimizer.hessian(self.fn, self.get_parameters(), quiet=self.quiet)
cov_matrix = None
mask = bad_mask==0
if np.all(-mask):
self.cov_matrix = np.zeros([npar,npar])
success = False
return
full = np.all(mask)
if not full:
h = hessian[mask].T[mask]
hessian = h
success = False
npar = len(self.get_parameters())
try:
if linalg.det(hessian)<=0:
full=False
if not self.quiet: print ('Attempting to invert full hessian...')
self.cov_matrix =t = cov_matrix if cov_matrix is not None else linalg.inv(hessian)
if np.any(np.isnan(self.cov_matrix)):
if not self.quiet: print ('Found NaN in covariance matrix!')
raise Exception('Found NaN in covariance matrix!')
# now expand if necesary
if not full:
# must be better way to expand a matrix
self.cov_matrix =np.zeros([npar,npar])
k = np.arange(npar)[mask]
for i in range(len(k)):
ki = k[i]
self.cov_matrix[k[i],k[i]] = t[i,i]
for j in range(i+1, len(k)):
self.cov_matrix[ki,k[j]] =self.cov_matrix[k[j],ki] = t[i,j]
success = True
except linalg.LinAlgError as e:
if not qself.quiet:
print ('Error generating cov matrix, %s' % e)
self.cov_matrix = np.zeros([npar,npar])
success = False
return success
@staticmethod
def hessian(mf, pars, quiet=True, *args):
"""Calculate the Hessian matrix using finite differences (adapted from specfitter.SpectralModelFitter.hessian)
mf: minimizing function
pars: parameters at the minimum,
args: additional arguments for mf.
returns matrix, error code array
"""
p = pars.copy()
npar = len(pars)
deltas = np.abs(0.01 * p) #initial guess
hessian = np.zeros([npar,npar])
bad_mask = np.asarray([False] * npar)
return_code = np.zeros(npar)
l0 = mf(p, *args)
#find good values with which to estimate the covariance matrix -- look at diagonal deviations
#iterate until change in function consistent with ~1 sigma conditional error
for i in range(npar):
if not quiet: print ('Working on parameter %d'%(i))
h,l = p.copy(),p.copy()
for j in range(10):
h[:] = p[:]; l[:] = p[:];
h[i] += deltas[i]
l[i] -= deltas[i]
delta_f_1 = mf(h, *args) - l0
delta_f_2 = mf(l, *args) - l0
delta_f = max(delta_f_1 + delta_f_2,0) #twice difference, really
deltas[i] /= max(delta_f**0.5,0.33) # can change by half decade
if delta_f < 5 and delta_f > 0.5: break
if delta_f < 5e-3:
# no constraint on parameter -- ignore it in further fittingor :
bad_mask[i] = True
return_code[i] = 1
if (delta_f_1/delta_f_2 > 10 or delta_f_1/delta_f_2 < 1./10):
# significant asymmetry in likelihood
bad_mask[i] = True
return_code[i] = 2
if (delta_f_2 < 5e-3 and delta_f_1 > 0.5):
# not actually at maximum of likelihood -- upper limit condition
bad_mask[i] = True
return_code[i] = 3
if not quiet: print ('fail, need upper limit')
import pdb; pdb.set_trace()
for i in range(npar):
if bad_mask[i]:
hessian[i,:] = 0 #no correlation?
hessian[:,i] = 0
continue
for j in range(i,npar): #Second partials by finite difference
xhyh,xhyl,xlyh,xlyl=p.copy(),p.copy(),p.copy(),p.copy()
xdelt = deltas[i]
ydelt = deltas[j]
xhyh[i] += xdelt; xhyh[j] += ydelt
xhyl[i] += xdelt; xhyl[j] -= ydelt
xlyh[i] -= xdelt; xlyh[j] += ydelt
xlyl[i] -= xdelt; xlyl[j] -= ydelt
hessian[i][j]=hessian[j][i]=(mf(xhyh, *args)-mf(xhyl, *args)
-mf(xlyh, *args)+mf(xlyl, *args))/\
(4*xdelt*ydelt)
mf(p, *args) #call likelihood with original values; this resets model and any other values that might be used later
return hessian,return_code
@staticmethod
def mycov(grad,par,full_output=False,init_step=0.04,min_step=1e-6,max_step=1,max_iters=5,target=0.5,min_func=1e-4,max_func=4):
"""Perform finite differences on the _analytic_ gradient provided by user to calculate hessian/covariance matrix.
Positional args:
grad : a function to return a gradient
par : vector of parameters (should be function minimum for covariance matrix calculation)
Keyword args:
full_output [False] : if True, return information about convergence, else just the covariance matrix
init_step [1e-3] : initial step size (0.04 ~ 10% in log10 space); can be a scalar or vector
min_step [1e-6] : the minimum step size to take in parameter space
max_step [1] : the maximum step size to take in parameter sapce
max_iters [5] : maximum number of iterations to attempt to converge on a good step size
target [0.5] : the target change in the function value for step size
min_func [1e-4] : the minimum allowable change in (abs) function value to accept for convergence
max_func [4] : the maximum allowable change in (abs) function value to accept for convergence
"""
nparams = len(par)
step_size = np.ones(nparams)*init_step
step_size = np.maximum(step_size,min_step*1.1)
step_size = np.minimum(step_size,max_step*0.9)
hess = np.zeros([nparams,nparams])
min_flags = np.asarray([False]*nparams)
max_flags = np.asarray([False]*nparams)
def revised_step(delta_f,current_step,index):
if (current_step == max_step):
max_flags[i] = True
return True,0
elif (current_step == min_step):
min_flags[i] = True
return True,0
else:
adf = abs(delta_f)
if adf < 1e-8:
# need to address a step size that results in a likelihood change that's too
# small compared to precision
pass
if (adf < min_func) or (adf > max_func):
new_step = current_step/(adf/target)
new_step = min(new_step,max_step)
new_step = max(new_step,min_step)
return False,new_step
else:
return True,0
iters = np.zeros(nparams)
for i in range(nparams):
converged = False
for j in range(max_iters):
iters[i] += 1
di = step_size[i]
par[i] += di
g_up = grad(par)
par[i] -= 2*di
g_dn = grad(par)
par[i] += di
delta_f = (g_up - g_dn)[i]
converged,new_step = revised_step(delta_f,di,i)
#print ('Parameter %d -- Iteration %d -- Step size: %.2e -- delta: %.2e'%(i,j,di,delta_f))
if converged: break
else: step_size[i] = new_step
hess[i,:] = (g_up - g_dn) / (2*di) # central difference
if not converged:
print ('Warning: step size for parameter %d (%.2g) did not result in convergence.'%(i,di))
try:
cov = np.linalg.inv(hess)
except:
print ('Error inverting hessian.')
#cov = np.zeros([nparams,nparams])
raise Exception('Error inverting hessian')
if full_output:
return cov,hess,step_size,iters,min_flags,max_flags
else:
return cov
class Projector(Fitted):
    """ Adapt a function object to create a projection: a function of a subset
    of its parameters.
    The wrapped object must implement __call__, set_parameters, get_parameters,
    and perhaps gradient.
    """
    def __init__(self, fn, select=None, par=None):
        """
        parameters:
            fn: function of par: should be minimizable
            select: list of indices of the free parameters (default [0])
                TODO: use mask instead or optionally
            par: array type or None
                default parameters to use: if None, get from fn.get_parameters()
        """
        if select is None:
            select = [0]  # avoid a shared mutable default argument
        self.fn = fn
        self.select = select
        self.mask = np.zeros(len(fn.get_parameters()), bool)
        self.mask[select] = True
        self.fpar = fn.get_parameters().copy()
        self.par = np.asarray(par[:]) if par is not None else self.fpar[self.mask]
        assert len(self.par) == sum(self.mask), 'wrong number of specified parameters'

    def get_parameters(self):
        """Return the current values of the selected (free) parameters."""
        return self.par

    def set_parameters(self, par=None):
        """Set the free parameters and push the full set into the wrapped function."""
        p = par if par is not None else self.par
        self.par = p
        self.fpar[self.mask] = p
        self.fn.set_parameters(self.fpar)  # note this sets the original set

    def __call__(self, x):
        """Evaluate the wrapped function; len(x) must equal the number of selected parameters."""
        self.fpar[self.mask] = x
        ret = self.fn(self.fpar)
        return ret

    def gradient(self, x):
        """Gradient with respect to the selected parameters only.
        The wrapped function object may not support this.
        """
        self.fpar[self.mask] = x
        t = self.fn.gradient(self.fpar)[self.mask]
        return t

    @property
    def parameter_names(self):
        # names of the selected parameters, if the wrapped function has any
        return None if not hasattr(self.fn, 'parameter_names') else self.fn.parameter_names[self.mask]

    @property
    def bounds(self):
        # bounds of the selected parameters, if the wrapped function has any
        return None if self.fn.bounds is None else np.array(self.fn.bounds)[self.mask]

    def fmin(self, x=None, **kwargs):
        """ run simple fmin on the projection and store the result """
        # the previous try/except that immediately re-raised was a no-op
        par = optimize.fmin(self, [x] if x is not None else self.par, **kwargs)
        self.set_parameters(par)

    def minimize(self, par0=None, **fit_kw):
        """ create Minimizer of this, run it, update original parameters
        parameters:
            par0 : array type of float or None
                pass to Minimizer
        return value, parameter values, errors
        """
        self.fitter = Minimizer(self, par0)
        c2, par, dpar = self.fitter(**fit_kw)
        self.par = par
        self.set_parameters(par)
        return c2, par, dpar
class Profile(Fitted):
    """ Manage a function of one parameter, projected from a multi-parameter function,
        with the option to evaluate by either optimizing on the remaining parameters or not
    """
    def __init__(self, fn, index, par=None, profile=True):
        """
        parameters
        ---------
        fn : function of a set of parameters
            Must implement Fitted interface
        index : integer or string
            the index to examine, or its parameter name
        par : array type or None
            initial set of parameters for fn if not None
        profile : bool
            set False to not apply profile
        """
        # local reference to the basic function, copy of original parameters
        self.fn = fn
        if isinstance(index, str):
            # translate a parameter name into its index
            # (isinstance(str) replaces the Python-2-only types.StringType check)
            try:
                self.index = list(fn.parameter_names).index(index)
            except ValueError:
                raise FitterException('parameter name "%s" not one of %s' % (index, fn.parameter_names))
        else:
            self.index = index
        self.fpar = par if par is not None else fn.get_parameters().copy()
        npar = len(self.fpar)
        self.mask = np.ones(npar, bool)
        self.mask[self.index] = False
        # set up a function of the selected parameter (self) and a function of the rest
        select = list(range(npar))  # list() so .remove also works under Python 3
        assert self.index in select, 'Expect index to select to be one of parameters'
        self.par = self.fpar[self.index:self.index+1]
        select.remove(self.index)
        self.pfun = Projector(fn, select)
        self.profile = profile
        # set up a fitter for the remaining parameters
        self.fitter = Minimizer(self.pfun)

    def __call__(self, x):
        self.fpar[self.index] = x[0]
        if self.profile:
            # optimize the remaining parameters at this value of x
            v, p, s = self.fitter()  # fit value, parameters, errors
            self.fpar[self.mask] = p
            r = self.fn(self.fpar)
            print (v, r)
        else:
            # evaluate without optimizing the other parameters
            r = self.fn(self.fpar)
        return r

    @property
    def parameter_names(self):
        return self.fn.parameter_names[self.index:self.index+1]

    def get_parameters(self):
        return self.par

    def set_parameters(self, par=None):
        p = par if par is not None else self.par
        self.par = p
        self.fpar[self.index] = p
class TestFunc(Fitted):
    """Minimal Fitted implementation that wraps a plain callable for testing."""

    def __init__(self, fn, pars):
        # keep both the callable and its current parameter vector
        self.fn = fn
        self.pars = pars

    @property
    def bounds(self):
        # a single free parameter, restricted to the interval [0.9, 2]
        return [(0.9, 2)]

    def __call__(self, pars):
        # delegate evaluation to the wrapped callable
        return self.fn(pars)

    def set_parameters(self, pars):
        self.pars = pars

    def get_parameters(self):
        return self.pars
def test(x0=1.1, pars=[1.0, 1.5], **kwargs):
    """ test with a parabola corresponding to a Gaussian with mean, sigma in pars
    >>> pars=[1.0, 1.5]; x0=1.1
    >>> testf = lambda p: 1.+ 0.5*((p[0]-pars[0])/pars[1])**2
    >>> func = TestFunc(testf, [x0])
    >>> m = Minimizer(func) # create minimizer object
    >>> m() # run default fit
    (1.0000000000211928, array([ 0.99999023]), array([ 1.5]))
    """
    def parabola(p):
        # negative log of a Gaussian: 1 + ((x - mean)/sigma)**2 / 2
        return 1. + 0.5 * ((p[0] - pars[0]) / pars[1]) ** 2
    print ('input parameters:', pars)
    func = TestFunc(parabola, [x0])
    m = Minimizer(func)
    f = m(use_gradient=False)
    print ('solution at %.2f, +/- %.2f ' % (m.get_parameters(), np.sqrt(m.cov_matrix.diagonal())))
    return func, m, f
if __name__ == "__main__":
    # print the module documentation, then run the embedded doctests
    print (__doc__)
    import doctest
    doctest.testmod()
| 5,797 | 164 | 589 |
be296d19d60e06bb3d6093baff8e0f188b1419b0 | 1,384 | py | Python | L1Trigger/L1THGCal/python/hgcalTowerMapProducer_cfi.py | Michael-Krohn/cmssw | 25064b40a13dc451b498e850214fcbe141f7cb75 | [
"Apache-2.0"
] | 2 | 2018-06-01T05:18:55.000Z | 2021-04-08T21:44:06.000Z | L1Trigger/L1THGCal/python/hgcalTowerMapProducer_cfi.py | Michael-Krohn/cmssw | 25064b40a13dc451b498e850214fcbe141f7cb75 | [
"Apache-2.0"
] | 26 | 2018-10-30T12:47:58.000Z | 2022-03-29T08:39:00.000Z | L1Trigger/L1THGCal/python/hgcalTowerMapProducer_cfi.py | p2l1pfp/cmssw | 9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9 | [
"Apache-2.0"
] | 2 | 2020-03-20T18:46:13.000Z | 2021-03-12T09:23:07.000Z | import FWCore.ParameterSet.Config as cms
import math
# Trigger-tower geometry: uniform eta/phi binning (no mapping file read),
# 18 eta bins covering 1.479 <= eta <= 3.0 and 72 phi bins over the full
# circle. Empty binsEta/binsPhi vectors mean the uniform min/max/nBins
# grid is used rather than explicit bin edges.
L1TTriggerTowerConfig_etaphi = cms.PSet(readMappingFile=cms.bool(False),
                                        minEta=cms.double(1.479),
                                        maxEta=cms.double(3.0),
                                        minPhi=cms.double(-1*math.pi),
                                        maxPhi=cms.double(math.pi),
                                        nBinsEta=cms.int32(18),
                                        nBinsPhi=cms.int32(72),
                                        binsEta=cms.vdouble(),
                                        binsPhi=cms.vdouble())

# 2D tower-map parameters: per-layer weighting disabled, so trigger-cell
# energies are summed with unit weight into the eta/phi grid defined above.
towerMap2D_parValues = cms.PSet( useLayerWeights = cms.bool(False),
                                 layerWeights = cms.vdouble(),
                                 L1TTriggerTowerConfig = L1TTriggerTowerConfig_etaphi
                                 )

# Processor wrapper consumed by the producer below.
tower_map = cms.PSet( ProcessorName = cms.string('HGCalTowerMapProcessor'),
                      towermap_parameters = towerMap2D_parValues.clone()
                      )

# EDProducer that builds tower maps from the HGCAL VFE trigger-cell sums.
hgcalTowerMapProducer = cms.EDProducer(
    "HGCalTowerMapProducer",
    InputTriggerCells = cms.InputTag('hgcalVFEProducer:HGCalVFEProcessorSums'),
    ProcessorParameters = tower_map.clone()
    )

# Same producer, reconfigured to read the HFNose VFE input collection.
hgcalTowerMapProducerHFNose = hgcalTowerMapProducer.clone(
    InputTriggerCells = cms.InputTag('hfnoseVFEProducer:HFNoseVFEProcessorSums')
    )
| 43.25 | 85 | 0.547688 | import FWCore.ParameterSet.Config as cms
import math
L1TTriggerTowerConfig_etaphi = cms.PSet(readMappingFile=cms.bool(False),
minEta=cms.double(1.479),
maxEta=cms.double(3.0),
minPhi=cms.double(-1*math.pi),
maxPhi=cms.double(math.pi),
nBinsEta=cms.int32(18),
nBinsPhi=cms.int32(72),
binsEta=cms.vdouble(),
binsPhi=cms.vdouble())
towerMap2D_parValues = cms.PSet( useLayerWeights = cms.bool(False),
layerWeights = cms.vdouble(),
L1TTriggerTowerConfig = L1TTriggerTowerConfig_etaphi
)
tower_map = cms.PSet( ProcessorName = cms.string('HGCalTowerMapProcessor'),
towermap_parameters = towerMap2D_parValues.clone()
)
hgcalTowerMapProducer = cms.EDProducer(
"HGCalTowerMapProducer",
InputTriggerCells = cms.InputTag('hgcalVFEProducer:HGCalVFEProcessorSums'),
ProcessorParameters = tower_map.clone()
)
hgcalTowerMapProducerHFNose = hgcalTowerMapProducer.clone(
InputTriggerCells = cms.InputTag('hfnoseVFEProducer:HFNoseVFEProcessorSums')
)
| 0 | 0 | 0 |
1d9d233c7e37bec6b3e1f0f38264ff51b1dac383 | 2,069 | py | Python | testgrids/eosfit.py | ajulik1997/testgrids | 0d52e9b4fccd755a9421934d40b8b312a198699f | [
"MIT"
] | null | null | null | testgrids/eosfit.py | ajulik1997/testgrids | 0d52e9b4fccd755a9421934d40b8b312a198699f | [
"MIT"
] | null | null | null | testgrids/eosfit.py | ajulik1997/testgrids | 0d52e9b4fccd755a9421934d40b8b312a198699f | [
"MIT"
] | null | null | null | # eosfit.py fits E(V) data to a Birch-Murnaghan equation of state.
# Current version: 3.1
#
# Copyright (C) 2012 Kurt Lejaeghere <Kurt.Lejaeghere@UGent.be>, Center for
# Molecular Modeling (CMM), Ghent University, Ghent, Belgium
#
# eosfit.py is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# eosfit.py is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eosfit.py; if not, see <http://www.gnu.org/licenses/>.
# The following code is based on the source code of eos.py from the Atomic
# Simulation Environment (ASE) <https://wiki.fysik.dtu.dk/ase/>.
# Python and numpy are required to use this script.
# Edit for use in script by Alexander Liptak <Alexander.Liptak.2015@live.rhul.ac.uk>
import numpy as np
| 36.298246 | 104 | 0.670372 | # eosfit.py fits E(V) data to a Birch-Murnaghan equation of state.
# Current version: 3.1
#
# Copyright (C) 2012 Kurt Lejaeghere <Kurt.Lejaeghere@UGent.be>, Center for
# Molecular Modeling (CMM), Ghent University, Ghent, Belgium
#
# eosfit.py is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# eosfit.py is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eosfit.py; if not, see <http://www.gnu.org/licenses/>.
# The following code is based on the source code of eos.py from the Atomic
# Simulation Environment (ASE) <https://wiki.fysik.dtu.dk/ase/>.
# Python and numpy are required to use this script.
# Edit for use in script by Alexander Liptak <Alexander.Liptak.2015@live.rhul.ac.uk>
import numpy as np
def BM(volumes, energies):
    """Fit E(V) data to a Birch-Murnaghan equation of state.

    The energies are fitted with a third-order polynomial in x = V**(-2/3),
    which is equivalent to the Birch-Murnaghan form.

    Parameters
    ----------
    volumes : array-like
        Cell volumes.
    energies : array-like
        Total energies, same length as ``volumes``.

    Returns
    -------
    (volume0, bulk_modulus0, bulk_deriv0, residuals0)
        Equilibrium volume, bulk modulus (in the energy/volume units of the
        input), pressure derivative of the bulk modulus, and the fit residual
        normalized by the variance of the energies.

    Raises
    ------
    ValueError
        If the fitted polynomial has no physical minimum.
    """
    # fit a cubic in x = V**(-2/3); full=True also returns the residuals
    fitdata = np.polyfit(np.power(volumes, -2. / 3.), energies, 3, full=True)
    residuals0 = np.divide(fitdata[1], np.sum(np.power(np.subtract(energies, np.average(energies)), 2)))
    deriv0 = np.poly1d(fitdata[0])
    deriv1 = np.polyder(deriv0, 1)
    deriv2 = np.polyder(deriv1, 1)
    deriv3 = np.polyder(deriv2, 1)
    # locate the physical minimum: a real, positive stationary point of the
    # cubic with positive curvature (np.roots may also return complex roots)
    volume0 = 0
    x = 0
    for root in np.roots(deriv1):
        if np.isreal(root):
            root = np.real(root)
            if root > 0 and deriv2(root) > 0:
                x = root
                volume0 = x ** (-3. / 2.)
                break
    if volume0 == 0:
        # raise instead of exit() so callers can handle the failure
        raise ValueError('No minimum could be found')
    # transform the x-derivatives back into volume derivatives
    derivV2 = 4. / 9. * x ** 5. * deriv2(x)
    derivV3 = (-20. / 9. * x ** (13. / 2.) * deriv2(x) -
               8. / 27. * x ** (15. / 2.) * deriv3(x))
    bulk_modulus0 = derivV2 / x ** (3. / 2.)
    bulk_deriv0 = -1 - x ** (-3. / 2.) * derivV3 / derivV2
    return volume0, bulk_modulus0, bulk_deriv0, residuals0
16ef86cedd96dea29913b4087cc47bb49e7abf68 | 13,774 | py | Python | synthesisdatabase/managers/download_manager.py | olivettigroup/synthesis-database-public | 3a5b96558249c3079b7acaf9e96bd0282c0341ce | [
"MIT"
] | 7 | 2017-02-28T01:01:02.000Z | 2021-05-24T04:48:01.000Z | synthesisdatabase/managers/download_manager.py | olivettigroup/synthesis-database-public | 3a5b96558249c3079b7acaf9e96bd0282c0341ce | [
"MIT"
] | 1 | 2019-03-07T06:38:28.000Z | 2019-04-04T18:14:38.000Z | synthesisdatabase/managers/download_manager.py | olivettigroup/synthesis-database-public | 3a5b96558249c3079b7acaf9e96bd0282c0341ce | [
"MIT"
] | 4 | 2017-05-01T18:57:39.000Z | 2020-06-04T06:01:53.000Z | import sys
sys.path.append("..")
from models import (Paper, Paragraph)
from time import sleep
from pymongo import MongoClient
from pymatgen.matproj.rest import (MPRester)
from articledownloader.articledownloader import (ArticleDownloader)
from os import (environ, path, remove, listdir, strerror)
from autologging import (logged, traced)
from json import (dumps, loads)
from bson.objectid import (ObjectId)
from re import (search, sub)
from functools import (wraps)
from bs4 import BeautifulSoup
import bibtexparser
import time
reload(sys)
sys.setdefaultencoding('utf8')
@logged
| 35.776623 | 142 | 0.610861 | import sys
sys.path.append("..")
from models import (Paper, Paragraph)
from time import sleep
from pymongo import MongoClient
from pymatgen.matproj.rest import (MPRester)
from articledownloader.articledownloader import (ArticleDownloader)
from os import (environ, path, remove, listdir, strerror)
from autologging import (logged, traced)
from json import (dumps, loads)
from bson.objectid import (ObjectId)
from re import (search, sub)
from functools import (wraps)
from bs4 import BeautifulSoup
import bibtexparser
import time
reload(sys)
sys.setdefaultencoding('utf8')
@logged
class DownloadManager:
    """Fetch article DOIs (Materials Project / CrossRef), download full texts,
    and store extracted papers and their paragraphs in MongoDB.

    NOTE(review): Python 2 code (uses ``except Exception, e`` and
    ``str.translate(None, ...)``); ``self.__logger`` is injected by the
    ``@logged`` decorator from autologging.
    """
    # Class-level (shared) state: every instance uses the same downloader,
    # Mongo connection, and DOI bookkeeping containers.
    ad = ArticleDownloader(environ.get('ELS_API_KEY'), environ.get('CRF_API_KEY'))
    connection = MongoClient()
    dl_doi_pdf_map = {}   # maps '<safe_doi>.pdf' filename -> original DOI
    doi_fails = []        # DOIs whose download attempt failed
    dl_dois = []          # DOIs queued for download
    rows_per_query = 0    # maximum results per CrossRef query

    def __init__(self, db):
        """db: name of the MongoDB database to read from / write into."""
        self.db = db

    def set_dois_per_query(self, num_docs):
        """Set how many DOIs a single search query may return."""
        self.rows_per_query = int(num_docs)

    def get_dois(self, queries, mode, wait_time=0):
        """Collect DOIs into self.dl_dois.

        queries   : list of query objects/strings appropriate for the mode
        mode      : 'mp' (Materials Project), 'cr' (CrossRef search),
                    or 'issn' (CrossRef lookup by journal ISSN)
        wait_time : seconds to sleep between CrossRef requests (rate limiting)
        """
        if mode == 'mp':
            self.__logger.info( 'Searching with MP queries' )
            mpr = MPRester(environ.get('MAPI_KEY'), endpoint="https://www.materialsproject.org/rest")
            mpids = []
            for query in queries:
                try:
                    entries = mpr.get_entries(query)
                    for entry in entries:
                        mpids.extend(entry.data['task_ids'])
                    # resolve each task id to a materials id, then pull the
                    # bibtex references attached to that material
                    for mpid in mpids:
                        mpid = mpr.get_materials_id_from_task_id(mpid)['materials_id']
                        bibtex = mpr.get_materials_id_references(mpid)
                        parsed_bibtex = bibtexparser.loads(bibtex)
                        for item in parsed_bibtex.entries:
                            if 'doi' in item:
                                if item['doi'] not in self.dl_dois:
                                    self.dl_dois.append(item['doi'])
                except:
                    # best-effort: a failed query is logged and skipped
                    self.__logger.warning( 'FAILURE: Failed to get DOIs from MP:' + str(query) )
        elif mode == 'cr':
            self.__logger.info( 'Searching with CR queries' )
            for query in queries:
                dl_dois = []
                try:
                    dl_dois = self.ad.get_dois_from_search(query, rows=self.rows_per_query)
                except Exception, e:
                    self.__logger.warning( 'FAILURE: Failed to get DOIs from CR: ' + str(query) )
                    self.__logger.warning( 'EXCEPTION: ' + str(e) )
                sleep(wait_time)
                self.dl_dois.extend(dl_dois)
        elif mode == 'issn':
            self.__logger.info( 'Searching with ISSN queries' )
            for query in queries:
                dl_dois = []
                try:
                    dl_dois = self.ad.get_dois_from_journal_issn(query, rows=self.rows_per_query, pub_after=1900)
                except Exception, e:
                    self.__logger.warning( 'FAILURE: Failed to get DOIs from CR by ISSN: ' + str(query) )
                    self.__logger.warning( 'EXCEPTION: ' + str(e) )
                sleep(wait_time)
                self.dl_dois.extend(dl_dois)

    def build_doi_pdf_map(self, doi_pdf_map):
        """Build the filename -> DOI map for the queued DOIs, merge in any
        previously saved map from *doi_pdf_map* (JSON file), and write the
        merged map back to the same path.
        """
        self.dl_dois = set(self.dl_dois)
        self.__logger.info( str(len(self.dl_dois)) + ' DOIs retrieved. Setting up PDF map...')
        for doi in self.dl_dois:
            # strip characters that are unsafe in filenames (Python 2 translate)
            safe_doi = str(doi).translate(None, '/.()')
            self.dl_doi_pdf_map[str(safe_doi) + '.pdf'] = doi
        #Load up old DOIs
        if path.isfile(doi_pdf_map):
            with open(doi_pdf_map, 'rb') as f:
                old_map = loads(f.read())
                # keep existing entries; only add keys we do not already have
                for key in old_map:
                    if not key in self.dl_doi_pdf_map:
                        self.dl_doi_pdf_map[key] = old_map[key]
        with open(doi_pdf_map, 'wb') as f:
            f.write(dumps(self.dl_doi_pdf_map))
        self.__logger.info(dumps(self.dl_doi_pdf_map, sort_keys=True, indent=2))

    def get_articles(self, files_dir, doi_pdf_map, doi_fail_log, file_ext='.pdf'):
        """Download every queued DOI into *files_dir* (skipping files that are
        already present), record failures in *doi_fail_log* (JSON), and log
        summary counts. ``doi_pdf_map`` is unused here — presumably kept for
        interface symmetry; confirm with callers.
        """
        total_dls, success_dls, fail_dls = 0, 0, 0
        self.__logger.info('Attempting to download...')
        for doi in self.dl_dois:
            safe_doi = str(doi).translate(None, '/.()')
            filename = path.join(files_dir, str(safe_doi) + file_ext)
            # skip when the file or its extracted directory already exists
            if not path.isfile(filename) and not path.isdir(filename + ".d"):
                try:
                    download_success = self.get_article(doi, filename)
                    if download_success:
                        success_dls += 1
                    else:
                        self.__logger.warning('FAILURE: Unable to download file: ' + str(doi))
                        remove(filename)   # delete the empty/partial file
                        self.doi_fails.append(doi)
                        fail_dls += 1
                except:
                    self.__logger.warning('FAILURE: Error while trying to download file: ' + str(doi))
                    remove(filename)
                    self.doi_fails.append(doi)
                    fail_dls += 1
                total_dls += 1
        self.__logger.info(dumps(self.doi_fails, sort_keys=True, indent=2))
        with open(doi_fail_log, 'wb') as f:
            f.write(dumps(self.doi_fails))
        self.__logger.info( 'Total attempted downloads: ' + str(total_dls) )
        self.__logger.info( 'Total successful downloads: ' + str(success_dls) )
        self.__logger.info( 'Total failed downloads: ' + str(fail_dls) )

    def get_article(self, doi, filename, els_delay=1, wil_delay=15):
        """Download one article into *filename*, routing to the right
        publisher endpoint by DOI prefix. Returns True on success, False on
        a blacklisted DOI, an empty download, or an unrecognized publisher.

        els_delay / wil_delay: per-publisher sleep (seconds) for rate limiting.
        """
        writefile = open(filename, 'wb')
        download_success = False
        #Use DOI prefixes to optimize downloading attempts
        els_dois = ['10\.1016', '10\.1006']
        wil_dois = ['10\.1002', '10\.1111']
        spr_dois = ['10\.1007', '10\.1140', '10\.1891']
        rsc_dois = ['10\.1039']
        ecs_dois = ['10\.1149']
        nat_dois = ['10\.1038']
        #Use blacklist (unsubbed journals) to speed up downloading
        blacklist = [
            '10\.1002\/chin',
            '10\.1002\/ange',
            '10\.1002\/apj',
            '10\.1002\/elsc',
            '10\.1002\/ffj',
            '10\.1002\/cjoc',
        ]
        blacklist_match = any([search(d, doi) for d in blacklist])
        # NOTE(review): this early return leaves writefile open and the empty
        # file on disk; callers appear to clean up via remove() on failure
        if blacklist_match: return False
        els_match = any([search(d, doi) for d in els_dois])
        wil_match = any([search(d, doi) for d in wil_dois])
        spr_match = any([search(d, doi) for d in spr_dois])
        rsc_match = any([search(d, doi) for d in rsc_dois])
        ecs_match = any([search(d, doi) for d in ecs_dois])
        nat_match = any([search(d, doi) for d in nat_dois])
        if wil_match:
            download_success = self.ad.get_html_from_doi(doi, writefile, 'wiley')
            sleep(wil_delay)
        if els_match:
            download_success = self.ad.get_html_from_doi(doi, writefile, 'elsevier')
            sleep(els_delay)
        elif rsc_match:
            download_success = self.ad.get_html_from_doi(doi, writefile, 'rsc')
        # elif ecs_match:
        #     download_success = self.ad.get_pdf_from_doi(doi, writefile, 'ecs')
        elif spr_match:
            download_success = self.ad.get_html_from_doi(doi, writefile, 'springer')
        elif nat_match:
            download_success = self.ad.get_html_from_doi(doi, writefile, 'nature')
        if writefile.tell() == 0:
            writefile.close()
            return False #Empty file reporting
        writefile.close()
        return download_success

    def is_title_relevant(self, title):
        """Heuristic relevance filter: reject titles containing bio/medical
        keywords; returns False when any keyword appears (case-insensitive).
        """
        title = title.lower()
        irrelevant_words = [
            "medical",
            "dna",
            "rna",
            "protein",
            "bacteria",
            "biomedicine",
            "bioassay",
            "cellular",
        ]
        for word in irrelevant_words:
            if word in title:
                return False
        return True

    def save_papers(self, pdf_files_dir, html_files_dir, doi_pdf_map, collection='papers', overwrite=False, file_locs=[], para_classifier=None):
        """Parse downloaded articles into Paper documents with classified
        paragraphs and upsert them into MongoDB.

        pdf_files_dir   : directory of watr-works output dirs (docseg.json)
        html_files_dir  : directory of raw publisher HTML files
        doi_pdf_map     : JSON file mapping '<safe_doi>.pdf' -> DOI
        collection      : target Mongo collection name
        overwrite       : if True, update papers already present in the DB
        file_locs       : filenames to process (NOTE(review): mutable default
                          argument — harmless here since it is never mutated)
        para_classifier : object with predict_one(text) flagging recipe paragraphs
        """
        self.dl_doi_pdf_map = loads(open(doi_pdf_map, 'rb').read())
        for filename in file_locs:
            doi = ''
            is_html_file = bool(filename[-4:] == 'html')
            # recover the DOI from the filename via the map
            if is_html_file:
                if filename[:-5] + '.pdf' in self.dl_doi_pdf_map:
                    doi = self.dl_doi_pdf_map[filename[:-5] + '.pdf']
            else: #PDF file
                if filename[:-2] in self.dl_doi_pdf_map:
                    doi = self.dl_doi_pdf_map[filename[:-2]]
            if doi == '':
                # fall back to deriving the DOI from the filename itself
                if not is_html_file:
                    doi = filename[:-6] #strip the file suffix and use safe_doi as doi
                else:
                    doi = filename[:-5]
                self.__logger.info("INFO: Used backup DOI (not in map): " + str(doi))
            if self.connection[self.db][collection].find({'doi': doi}).count() == 1 and not overwrite:
                self.__logger.info("SKIPPED: Not overwriting and paper already in DB: " + str(doi))
                continue
            # load the watr-works docseg output for PDF-derived paragraphs
            try:
                paper = open(path.join(pdf_files_dir, filename, 'docseg.json')).read()
                try:
                    plaintext = loads(unicode(paper), strict=False)
                except:
                    self.__logger.warning("FAILURE: Invalid JSON from watr-works: " + str(doi))
                    pass
            except:
                self.__logger.warning("FAILURE: No docseg found from watr-works: " + str(doi))
                pass
            safe_doi = str(doi).translate(None, '/.()')
            # resolve the title: cached map first, then CrossRef, else empty
            title = None
            if title is None:
                title_match = self.connection[self.db].doi_title_abstract_map.find_one({'doi': doi}, {'title': True})
                if title_match is not None:
                    title = title_match['title']
            if title is None: title = self.ad.get_title_from_doi(doi, 'crossref')
            if title is None: title = unicode('')
            if not self.is_title_relevant(title):
                self.__logger.info("WARNING: Irrelevant title detected; paper skipped: " + str(doi))
                continue
            try:
                # resolve the abstract: cached map first, then Elsevier API
                abstract = unicode('')
                if abstract == '':
                    abstract_match = self.connection[self.db].doi_title_abstract_map.find_one({'doi': doi},{'abstract':True})
                    if abstract_match is not None:
                        abstract = abstract_match['abstract']
                if abstract == '':
                    #Use DOI prefixes to optimize downloading attempts
                    els_dois = ['10\.1016', '10\.1006']
                    if any([search(d, doi) for d in els_dois]):
                        abstract = self.ad.get_abstract_from_doi(doi, 'elsevier')
                    else:
                        abstract = unicode('')
                if abstract is None: abstract = unicode('')
                new_paper = Paper()
                del new_paper['_id'] #prevents duplication; ID assigned on insertion
                new_paper['doi'] = doi
                new_paper['abstract'] = abstract
                new_paper['title'] = title
                if not is_html_file:
                    new_paper['pdf_loc'] = unicode(path.join(pdf_files_dir, filename, safe_doi + '.pdf'))
                else:
                    new_paper['pdf_loc'] = unicode(path.join(html_files_dir, filename, safe_doi + '.html'))
                new_paper['modified'] = int(time.time())
                new_paper['paragraphs'] = []
                #Compute paragraphs
                html_paragraphs_used = False
                recipe_found = False
                #Override to use HTML paragraphs when available
                if path.isfile(path.join(html_files_dir, safe_doi + '.html')):
                    html_text = open(path.join(html_files_dir, safe_doi + '.html'), 'rb').read()
                    soup = BeautifulSoup(html_text, 'html.parser')
                    paragraphs = soup.find_all('p') + soup.find_all('div', {'class':'NLM_p'}) + soup.find_all('span')
                    paragraphs = [p.getText() for p in paragraphs]
                    paragraphs = [p.replace('\n','').replace('\t','') for p in paragraphs]
                    # keep only substantial paragraphs (length > 80 chars)
                    paragraphs = [p for p in paragraphs if len(p) > 80]
                    # only trust the HTML extraction when it yields enough text
                    if len(paragraphs) > 20:
                        for paragraph in paragraphs:
                            new_paragraph = Paragraph()
                            new_paragraph['_id'] = unicode(ObjectId())
                            if para_classifier.predict_one(paragraph):
                                new_paragraph['type'] = unicode('recipe')
                                recipe_found = True
                            new_paragraph['text'] = paragraph
                            new_paper['paragraphs'].append(new_paragraph)
                        html_paragraphs_used = True
                        self.__logger.info("INFO: Used HTML paragraphs for paper: " + str(doi))
                if not html_paragraphs_used:
                    # fall back to the watr-works docseg paragraph labels
                    para_label_ids = []
                    for line in plaintext['labels']:
                        if line[0] == 'ds:para-begin':
                            para_label_ids.append(line[1][0])
                    para_label_iter = iter(para_label_ids)
                    try:
                        next_label = next(para_label_iter)
                    except:
                        self.__logger.warning("WARNING: No paragraphs detected in file: " + str(doi))
                        continue
                    current_para = ''
                    for line in plaintext['lines']:
                        # a line carrying the next paragraph label flushes the
                        # accumulated text as a finished paragraph
                        if line[2] == next_label:
                            if current_para != '':
                                new_paragraph = Paragraph()
                                new_paragraph['_id'] = unicode(ObjectId())
                                if para_classifier.predict_one(current_para):
                                    new_paragraph['type'] = unicode('recipe')
                                    recipe_found = True
                                new_paragraph['text'] = current_para
                                new_paper['paragraphs'].append(new_paragraph)
                                current_para = ''
                            try:
                                next_label = next(para_label_iter)
                            except:
                                break
                        for token in line[0]:
                            # strip markup braces and sub/superscript markers
                            if search('{.*?}', token) is not None: token = sub('[{}_^]', '', token)
                            current_para += token + ' '
            except Exception, e:
                self.__logger.warning('FAILURE: Unable to save paper: ' + str(doi))
                self.__logger.warning('ERR_MSG: ' + str(e))
                continue
            if len(new_paper['paragraphs']) == 0:
                self.__logger.warning('WARNING: No paragraphs found; skipping paper: ' + str(doi))
                continue
            if not recipe_found:
                self.__logger.warning('WARNING: No recipe found; skipping paper: ' + str(doi))
                continue
            # insert new papers; update existing ones only when overwriting
            # NOTE(review): `used_backup_doi` below is never assigned in this
            # method — the overwrite branch would raise NameError when reached;
            # it likely should be a flag set where the backup DOI is derived.
            if self.connection[self.db][collection].find({'doi': doi}).count() == 0:
                self.connection[self.db][collection].insert_one(new_paper)
            elif self.connection[self.db][collection].find({'doi': doi}).count() == 1 and overwrite and not used_backup_doi:
                self.connection[self.db][collection].update_one({'doi': doi}, {'$set': new_paper})
            if self.connection[self.db].doi_title_abstract_map.find({'doi': doi}).count() == 0:
                self.connection[self.db].doi_title_abstract_map.insert_one({
                    'doi': new_paper['doi'],
                    'title': new_paper['title'],
                    'abstract': new_paper['abstract']
                })
| 12,781 | 388 | 22 |
a26d42f799f99e7095be3f0c3901520eec0d8829 | 24,371 | py | Python | 11/solution.py | AlecRosenbaum/adventofcode2017 | 9214a64db77492790d30bbd22e835535d05abb25 | [
"MIT"
] | null | null | null | 11/solution.py | AlecRosenbaum/adventofcode2017 | 9214a64db77492790d30bbd22e835535d05abb25 | [
"MIT"
] | null | null | null | 11/solution.py | AlecRosenbaum/adventofcode2017 | 9214a64db77492790d30bbd22e835535d05abb25 | [
"MIT"
] | null | null | null | """
Day 11 challenge
"""
import attr
import math
from functools import reduce
@attr.s
DIRECTIONS = {
"nw": Offset(horizontal=-.5, vertical=.5),
"n": Offset(vertical=1),
"ne": Offset(horizontal=.5, vertical=.5),
"se": Offset(horizontal=.5, vertical=-.5),
"s": Offset(vertical=-1),
"sw": Offset(horizontal=-.5, vertical=-.5),
}
if __name__ == "__main__":
puzzle_input = """s,s,sw,se,s,nw,nw,ne,n,ne,n,n,n,n,n,n,n,ne,n,ne,ne,se,ne,n,ne,n,n,ne,se,sw,se,s,se,se,se,se,s,se,se,s,se,se,nw,se,se,se,s,s,nw,s,s,se,nw,s,n,s,nw,s,s,s,s,s,s,s,s,s,s,s,sw,s,s,s,s,s,sw,sw,s,sw,s,nw,sw,sw,s,sw,ne,sw,sw,s,se,sw,sw,sw,sw,sw,sw,sw,nw,sw,sw,sw,se,sw,nw,nw,sw,sw,sw,s,sw,nw,se,nw,se,nw,sw,nw,nw,se,n,sw,s,s,s,nw,sw,sw,nw,se,nw,sw,sw,sw,nw,sw,sw,nw,nw,nw,nw,ne,n,nw,nw,ne,nw,nw,nw,nw,nw,se,nw,nw,n,nw,nw,nw,sw,n,nw,nw,nw,nw,n,s,nw,ne,nw,s,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,s,sw,n,n,nw,nw,n,n,nw,nw,n,nw,n,n,nw,n,s,n,nw,ne,n,nw,n,nw,n,n,n,n,se,s,n,s,n,s,n,n,n,nw,n,s,n,n,n,n,n,ne,n,n,n,n,s,n,n,n,n,sw,n,n,n,nw,n,n,n,n,nw,se,n,ne,n,n,ne,n,ne,ne,n,n,n,n,ne,n,n,nw,n,n,n,n,ne,se,se,ne,ne,ne,n,ne,n,ne,ne,nw,ne,ne,n,n,n,ne,ne,ne,n,ne,nw,n,s,ne,ne,ne,ne,ne,n,s,ne,ne,ne,n,ne,ne,ne,sw,ne,ne,ne,s,n,ne,ne,n,ne,ne,ne,ne,ne,se,ne,ne,se,ne,ne,ne,ne,se,ne,se,ne,nw,nw,sw,s,n,ne,ne,ne,ne,ne,sw,ne,ne,ne,sw,ne,ne,ne,ne,sw,se,ne,ne,ne,ne,se,s,se,s,nw,ne,ne,n,se,ne,ne,ne,sw,ne,s,s,nw,se,nw,ne,s,ne,se,ne,n,ne,n,s,n,ne,ne,s,ne,se,se,ne,sw,nw,s,n,nw,n,se,ne,se,se,sw,ne,ne,sw,se,se,se,se,sw,ne,se,s,ne,ne,n,se,ne,sw,ne,ne,se,se,nw,se,ne,ne,nw,sw,se,s,s,se,se,se,s,se,nw,se,ne,se,se,se,se,se,se,se,sw,nw,se,se,se,se,se,se,sw,se,sw,ne,se,se,se,se,se,se,se,se,s,se,se,se,se,se,se,ne,se,se,s,sw,s,se,se,se,se,se,se,se,s,se,sw,se,se,n,s,se,s,ne,se,se,se,s,se,s,se,se,ne,se,se,sw,s,se,se,se,se,nw,se,n,ne,s,s,nw,se,se,s,se,n,se,se,s,se,se,s,se,se,ne,se,se,se,s,s,sw,s,s,se,s,se,s,se,s,se,se,se,s,se,s,nw,s,s,se,se,se,se,sw,sw,s,se,s,se,se,s,n,se,se,se,se,s,se,se,s,se,se,se,sw,s,s,s,se,se,s,s,se,s,s,se,s,s,n,s,nw,s,n,s,sw,s,nw,s,s,se,se,sw,s,s,s,sw,se,s,n,s,se,n,s,se,se,se,s,s,s,se,ne,s,se,n,se,s,se,se,s,ne,sw,se,s,s,se,s,s,s,s,s,s,s,s,se,s,nw,s,s,s,s,s,s,s,s,s,s,s,ne,ne,s,s,s,s,s,s,s,s,s,ne,ne,s,s,s,s,s,s,s,s,nw,s,s,se,sw,s,sw,s,s,nw,s,s,s,s,s,s,s,s,s,n,ne,se,s,s,s,s,n,se,s,sw,s,sw,sw,sw,s,s,sw,s,s,s,nw,sw,s,s,s,s,s,ne,sw,s,s,sw,s,s,s,s,s,s,sw,s,s,se,s,s,sw,n,sw,s,s,sw,s,s,s,s,s,sw,s,ne,s,s,s
,s,sw,ne,s,ne,n,sw,s,s,s,sw,s,sw,nw,s,s,ne,sw,sw,nw,s,s,sw,sw,s,ne,s,s,sw,se,s,s,sw,s,s,sw,s,sw,sw,s,s,s,s,sw,sw,sw,s,n,ne,s,ne,s,sw,s,se,s,sw,sw,s,sw,sw,sw,sw,s,s,s,s,se,s,sw,sw,sw,sw,n,s,sw,s,s,sw,sw,s,s,n,sw,s,sw,sw,ne,sw,sw,s,sw,sw,sw,sw,sw,s,s,sw,se,sw,sw,sw,sw,s,s,sw,s,sw,sw,nw,sw,sw,se,sw,s,s,nw,nw,s,s,sw,sw,s,n,s,sw,sw,se,s,sw,sw,ne,sw,sw,sw,sw,sw,ne,sw,s,sw,sw,n,sw,sw,sw,sw,s,sw,sw,sw,sw,sw,n,nw,s,sw,s,s,n,ne,sw,sw,sw,sw,n,sw,se,sw,sw,s,se,sw,sw,sw,sw,sw,sw,s,ne,ne,ne,sw,sw,sw,ne,s,sw,sw,sw,sw,nw,s,sw,sw,s,s,sw,sw,n,nw,nw,sw,sw,sw,se,nw,nw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,n,sw,sw,sw,nw,nw,se,sw,sw,sw,sw,sw,ne,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,s,sw,sw,se,sw,s,sw,sw,nw,nw,nw,sw,sw,nw,sw,se,ne,sw,sw,sw,sw,ne,sw,sw,nw,sw,se,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,n,nw,sw,sw,sw,s,sw,sw,nw,n,s,sw,n,nw,nw,sw,sw,nw,sw,sw,se,sw,sw,nw,sw,sw,s,sw,nw,sw,nw,sw,nw,nw,nw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,s,sw,nw,nw,ne,n,nw,sw,sw,nw,sw,sw,nw,nw,nw,sw,sw,sw,ne,s,sw,nw,nw,sw,nw,sw,s,nw,n,nw,sw,sw,nw,nw,sw,nw,nw,n,sw,nw,sw,nw,sw,n,sw,nw,sw,sw,sw,sw,n,sw,n,nw,nw,s,sw,se,sw,sw,nw,n,sw,sw,sw,n,sw,nw,sw,ne,nw,sw,sw,s,n,nw,sw,nw,nw,nw,sw,sw,sw,nw,nw,ne,sw,s,sw,nw,n,sw,sw,sw,nw,ne,ne,sw,nw,nw,sw,s,s,sw,sw,nw,ne,sw,nw,sw,nw,nw,sw,sw,sw,sw,nw,nw,s,se,nw,sw,nw,ne,s,nw,nw,ne,sw,nw,nw,n,nw,nw,sw,sw,sw,nw,nw,nw,sw,nw,nw,n,sw,sw,nw,s,n,sw,nw,nw,sw,nw,n,nw,nw,nw,nw,nw,nw,sw,sw,n,n,sw,sw,nw,nw,nw,nw,ne,nw,nw,nw,sw,nw,nw,nw,nw,ne,nw,nw,nw,nw,n,nw,nw,nw,s,nw,nw,sw,nw,s,nw,ne,ne,nw,nw,sw,nw,nw,nw,nw,sw,nw,se,sw,nw,sw,nw,nw,ne,nw,n,nw,nw,sw,nw,nw,nw,sw,nw,ne,s,nw,nw,sw,s,nw,sw,sw,nw,nw,nw,sw,s,nw,nw,nw,nw,se,nw,s,nw,nw,nw,se,ne,ne,nw,nw,nw,nw,nw,sw,nw,ne,ne,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,ne,nw,nw,s,nw,nw,ne,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,ne,nw,n,nw,nw,ne,n,nw,sw,nw,n,nw,n,sw,nw,ne,s,se,n,ne,se,nw,ne,nw,ne,nw,ne,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,ne,nw,s,se,nw,n,n,nw,ne,nw,nw,nw,nw,ne,nw,nw,s,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,n,nw,nw,s,nw,nw,nw,nw,nw,sw
,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,nw,nw,nw,nw,s,nw,se,n,n,n,nw,se,nw,nw,s,s,nw,nw,n,nw,nw,s,nw,nw,nw,se,nw,nw,nw,se,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,n,nw,n,ne,nw,nw,nw,se,ne,nw,nw,nw,n,nw,nw,ne,n,n,nw,nw,sw,nw,nw,nw,nw,se,nw,n,s,nw,nw,n,n,nw,se,n,nw,nw,nw,n,nw,nw,nw,n,nw,se,n,se,sw,s,n,s,nw,nw,nw,nw,n,n,s,nw,nw,se,nw,nw,se,nw,n,n,nw,n,se,nw,n,n,nw,n,nw,n,nw,nw,n,nw,n,s,nw,nw,nw,nw,ne,ne,se,sw,nw,n,n,nw,s,n,nw,nw,n,n,nw,n,nw,nw,nw,nw,nw,n,nw,n,n,sw,n,se,nw,n,n,nw,n,nw,nw,n,s,sw,nw,ne,nw,n,sw,nw,nw,n,nw,sw,s,nw,n,n,nw,se,n,nw,n,ne,n,nw,nw,n,nw,nw,n,nw,n,nw,nw,nw,n,se,sw,nw,nw,nw,sw,nw,nw,nw,nw,se,n,n,ne,n,nw,nw,n,nw,nw,n,sw,n,se,nw,nw,n,n,n,nw,n,nw,n,nw,n,ne,n,n,nw,n,n,n,nw,se,sw,n,sw,n,nw,nw,n,n,n,se,nw,sw,ne,n,se,nw,nw,n,n,n,n,n,n,nw,n,n,nw,sw,nw,n,sw,n,n,se,sw,n,n,n,nw,sw,nw,n,n,n,n,nw,n,n,nw,n,s,n,n,sw,n,nw,ne,s,nw,ne,n,n,n,ne,s,n,n,n,n,n,n,se,nw,nw,n,n,nw,n,n,s,se,n,nw,n,n,n,n,n,n,nw,n,n,n,nw,nw,nw,n,n,n,nw,nw,sw,n,se,n,s,n,n,n,n,n,n,ne,n,se,n,n,n,se,n,nw,n,nw,n,n,n,n,n,n,n,nw,n,n,n,n,n,n,ne,n,n,nw,n,n,sw,n,nw,n,n,sw,n,n,n,nw,se,n,n,n,nw,n,s,n,n,n,n,n,n,n,s,n,n,n,n,nw,n,n,sw,sw,nw,n,nw,nw,sw,n,n,n,n,n,n,n,n,n,n,n,s,n,n,n,n,nw,n,n,n,n,n,n,n,s,n,nw,n,sw,nw,ne,n,nw,n,sw,n,n,n,n,n,ne,n,nw,n,n,n,n,n,n,n,ne,n,n,n,n,ne,n,n,n,n,ne,n,n,n,n,ne,n,n,s,n,n,se,n,n,n,n,n,n,n,nw,n,ne,nw,sw,ne,nw,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,ne,n,n,se,n,ne,n,ne,n,n,n,n,ne,n,ne,se,s,ne,nw,se,n,n,n,n,se,n,n,ne,s,n,nw,n,nw,nw,n,n,n,n,n,n,n,n,n,s,n,n,n,s,n,nw,n,n,n,n,n,n,n,n,n,n,n,n,ne,ne,n,sw,n,se,n,n,n,n,n,n,n,se,n,ne,n,ne,n,n,n,n,n,sw,n,n,s,nw,n,n,n,ne,ne,n,n,n,ne,n,n,se,n,n,n,n,ne,n,n,n,s,n,se,ne,n,n,n,n,n,n,n,n,s,ne,s,nw,n,ne,s,ne,n,n,n,n,ne,n,n,n,n,n,n,n,n,n,ne,n,se,ne,n,ne,ne,ne,ne,se,n,n,ne,n,n,n,n,nw,ne,sw,ne,n,n,ne,se,n,n,n,n,se,n,n,n,ne,n,se,ne,ne,n,s,nw,n,ne,n,n,nw,n,n,ne,n,ne,n,sw,n,se,n,ne,nw,ne,ne,n,n,n,ne,n,ne,nw,n,s,n,n,n,n,ne,n,n,ne,n,nw,n,n,n,ne,n,s,n,n,n,n,n,ne,ne,n,sw,n,ne,n,n,sw,ne,n,ne,ne,n,n,ne,ne,ne,n,ne,ne,ne,n,ne,sw,n,n,ne,ne,ne,se,n,ne,ne,ne,n,nw,n,ne,n,n,n,ne,n
,n,n,n,n,ne,ne,n,ne,s,nw,ne,n,ne,ne,ne,n,n,n,n,nw,n,n,ne,ne,ne,n,ne,n,ne,ne,n,ne,ne,nw,ne,nw,n,n,ne,se,ne,se,ne,n,nw,n,n,s,n,se,ne,ne,n,ne,n,ne,s,n,n,sw,ne,ne,se,n,ne,n,n,n,n,sw,ne,ne,nw,n,n,ne,ne,ne,n,ne,n,sw,ne,ne,ne,ne,n,ne,se,ne,sw,n,n,n,ne,ne,sw,ne,ne,ne,n,ne,ne,n,ne,se,ne,s,nw,n,sw,n,ne,n,n,n,n,ne,n,sw,ne,ne,nw,n,ne,se,ne,ne,ne,ne,n,ne,ne,n,ne,n,ne,ne,ne,n,s,s,ne,ne,ne,s,ne,ne,ne,sw,n,n,ne,n,s,ne,n,n,nw,n,se,sw,ne,ne,ne,s,n,n,ne,ne,n,ne,ne,nw,ne,ne,ne,s,se,ne,ne,ne,n,ne,nw,n,ne,ne,sw,n,n,ne,ne,ne,n,ne,ne,se,ne,ne,n,ne,ne,ne,sw,s,n,n,n,se,n,s,ne,ne,ne,sw,ne,ne,se,ne,ne,ne,ne,ne,n,s,se,ne,ne,ne,n,ne,sw,se,s,ne,n,ne,ne,n,n,n,ne,n,ne,ne,se,ne,ne,n,ne,ne,ne,ne,ne,s,ne,ne,ne,nw,ne,ne,ne,ne,ne,n,ne,s,ne,ne,ne,n,ne,sw,n,n,n,ne,ne,n,ne,s,n,n,n,ne,ne,n,ne,ne,ne,sw,se,sw,ne,ne,s,ne,nw,ne,nw,se,nw,n,ne,se,n,ne,ne,ne,ne,ne,s,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,sw,ne,ne,ne,ne,n,ne,s,ne,s,ne,ne,n,ne,se,ne,ne,nw,n,ne,ne,ne,s,ne,sw,ne,n,ne,ne,n,ne,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,se,n,ne,ne,ne,ne,sw,ne,n,ne,se,ne,ne,ne,se,se,ne,sw,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,sw,n,n,ne,ne,n,ne,ne,ne,sw,ne,n,ne,ne,ne,se,ne,ne,nw,nw,sw,ne,ne,ne,ne,sw,ne,se,ne,n,ne,ne,ne,nw,ne,ne,ne,ne,ne,ne,sw,ne,ne,nw,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,s,sw,ne,ne,s,sw,sw,ne,ne,ne,nw,ne,n,se,ne,ne,ne,ne,n,ne,nw,ne,ne,n,se,ne,ne,ne,ne,sw,ne,ne,s,ne,s,ne,ne,ne,ne,ne,ne,ne,nw,ne,ne,ne,se,ne,ne,ne,ne,se,ne,ne,sw,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,n,ne,ne,nw,se,ne,ne,ne,ne,se,ne,sw,ne,ne,n,se,ne,ne,se,ne,sw,nw,ne,ne,ne,ne,ne,ne,s,ne,se,sw,ne,s,ne,se,se,se,ne,ne,s,ne,ne,s,ne,se,ne,nw,s,ne,se,ne,ne,ne,sw,ne,se,ne,ne,ne,sw,ne,ne,ne,ne,nw,ne,ne,nw,ne,ne,s,ne,ne,se,ne,ne,nw,ne,ne,se,se,se,ne,se,ne,se,se,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,s,se,ne,sw,ne,ne,se,ne,ne,ne,ne,s,ne,se,ne,ne,se,n,n,ne,se,s,ne,s,ne,se,nw,ne,se,ne,ne,se,ne,se,se,ne,ne,se,ne,ne,ne,n,se,ne,ne,ne,ne,s,se,se,n,ne,se,se,ne,ne,se,ne,se,se,ne,ne,sw,ne,ne,ne,ne,ne,ne,nw,se,ne,se,ne,se,ne,s,n
e,ne,sw,ne,ne,ne,ne,ne,s,se,ne,ne,s,se,ne,ne,ne,nw,ne,ne,ne,se,ne,ne,ne,ne,se,ne,ne,ne,se,ne,ne,se,n,s,ne,ne,ne,se,s,n,se,se,ne,sw,ne,ne,n,ne,se,n,se,n,s,n,s,ne,se,ne,ne,ne,ne,n,ne,se,sw,se,ne,se,nw,ne,ne,ne,se,se,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,n,s,ne,ne,ne,ne,ne,se,se,se,se,sw,s,n,ne,s,ne,ne,sw,se,se,ne,ne,ne,ne,ne,ne,se,se,s,ne,se,ne,nw,n,ne,se,se,ne,se,ne,ne,se,se,se,ne,ne,sw,se,nw,se,nw,se,se,se,se,ne,n,n,ne,se,se,ne,ne,se,ne,ne,se,ne,sw,ne,se,ne,n,se,nw,sw,ne,ne,se,ne,se,ne,ne,se,ne,se,ne,ne,ne,se,ne,se,se,n,nw,ne,ne,ne,ne,sw,n,ne,ne,ne,nw,ne,se,se,ne,ne,s,nw,n,ne,ne,ne,ne,ne,ne,ne,se,ne,sw,se,ne,s,n,ne,ne,se,ne,se,se,ne,se,ne,se,se,ne,se,se,se,ne,ne,ne,ne,se,ne,ne,nw,ne,ne,se,ne,s,se,ne,se,ne,ne,ne,ne,n,se,ne,se,se,ne,ne,ne,nw,ne,se,se,nw,ne,se,se,ne,sw,ne,ne,ne,n,ne,ne,ne,n,se,ne,se,ne,n,n,se,ne,se,se,se,ne,se,se,sw,se,ne,se,ne,sw,sw,ne,ne,ne,s,n,ne,ne,nw,ne,n,se,se,se,se,ne,nw,ne,ne,ne,se,ne,se,n,n,se,n,se,se,se,se,se,ne,sw,ne,se,ne,se,se,se,ne,sw,se,s,se,se,ne,se,se,se,n,ne,se,se,ne,ne,ne,ne,se,se,ne,se,se,ne,se,ne,ne,se,se,nw,se,se,ne,se,sw,se,ne,n,ne,se,se,se,nw,se,se,se,ne,se,ne,se,se,ne,se,se,ne,ne,se,se,sw,ne,se,se,sw,se,se,s,ne,ne,se,ne,s,ne,se,se,ne,se,nw,n,se,se,s,se,ne,se,ne,ne,sw,ne,ne,n,se,s,n,ne,se,se,ne,nw,ne,ne,se,se,ne,sw,ne,ne,ne,se,sw,ne,se,se,ne,n,ne,se,nw,se,se,se,ne,se,ne,ne,ne,s,ne,nw,ne,ne,ne,se,se,se,nw,se,ne,se,se,se,ne,se,ne,se,se,se,ne,se,se,se,n,se,ne,ne,se,se,se,se,sw,ne,se,se,se,ne,se,nw,n,ne,ne,ne,sw,n,se,n,se,se,n,ne,se,se,ne,se,ne,se,nw,se,se,se,s,sw,ne,se,sw,se,se,se,ne,ne,se,ne,se,se,se,se,ne,se,se,ne,se,se,ne,se,ne,se,se,se,ne,se,ne,ne,se,s,ne,ne,nw,se,ne,n,ne,se,se,ne,se,n,ne,ne,se,ne,se,se,se,ne,se,se,ne,ne,nw,ne,s,se,se,se,se,se,sw,nw,n,se,se,s,se,se,se,nw,se,n,nw,se,ne,ne,se,nw,se,se,se,se,se,se,ne,se,se,se,se,ne,se,se,ne,se,se,se,se,ne,se,se,se,se,ne,ne,nw,se,se,se,se,se,se,s,se,se,se,n,se,ne,ne,ne,se,se,se,se,ne,se,se,ne,se,se,se,se,se,se,se,sw,se,ne,ne,s,se,se,ne,se,se,se,ne,ne,se,se,se,se,se,se,se,s,sw,se,ne,se,se,s
e,se,se,se,se,n,se,sw,se,se,se,ne,se,se,se,se,se,s,ne,nw,se,se,se,se,se,sw,se,ne,sw,se,ne,se,se,ne,se,ne,n,ne,se,se,ne,se,ne,nw,nw,se,se,se,se,se,se,se,se,se,se,sw,se,se,se,se,se,nw,se,n,se,se,ne,se,se,nw,se,se,se,se,se,ne,nw,nw,se,se,se,se,ne,se,se,se,ne,ne,se,se,se,se,se,se,se,se,se,se,se,sw,s,se,se,ne,nw,se,se,se,se,se,se,sw,sw,se,ne,sw,nw,se,se,se,se,se,n,se,se,ne,se,se,se,nw,se,ne,se,se,se,s,se,se,n,se,se,ne,se,s,se,ne,se,se,se,nw,se,se,n,se,s,n,se,se,se,nw,se,s,sw,se,ne,se,se,se,nw,se,sw,se,se,sw,s,se,n,ne,sw,se,n,nw,se,ne,se,se,se,se,se,ne,se,se,se,se,se,se,se,se,n,se,se,se,ne,se,se,se,sw,se,nw,se,se,ne,se,se,se,se,n,ne,se,se,se,n,se,se,se,se,se,se,se,s,s,se,se,se,s,ne,se,se,se,se,se,se,se,se,se,se,se,se,se,n,n,se,se,se,se,se,se,se,se,se,s,sw,se,se,se,n,nw,se,se,se,se,ne,ne,se,n,se,se,sw,ne,sw,se,se,ne,se,se,se,se,se,se,se,se,se,n,se,nw,se,se,se,sw,s,se,se,se,se,se,se,se,se,ne,s,se,se,se,nw,s,se,n,se,se,se,s,sw,se,se,se,se,nw,ne,se,se,se,ne,s,se,sw,se,se,se,se,se,se,s,se,s,se,sw,se,ne,s,se,se,nw,se,se,nw,n,se,se,se,nw,nw,se,se,se,se,se,nw,s,se,se,ne,se,se,se,se,se,se,se,sw,se,se,se,se,se,se,nw,se,se,s,se,se,se,se,s,s,se,se,se,s,se,se,se,s,s,n,se,se,se,se,n,n,se,sw,nw,se,s,se,nw,se,s,nw,nw,se,s,se,se,se,se,se,se,sw,nw,se,se,s,se,se,se,se,se,se,n,n,ne,se,s,s,se,se,se,se,se,se,s,se,se,s,se,se,n,se,se,s,se,s,se,s,se,sw,se,se,sw,se,ne,se,sw,se,se,se,s,nw,se,ne,n,se,se,nw,se,ne,se,se,se,s,se,se,nw,se,s,se,se,se,nw,se,se,sw,s,se,s,se,se,nw,s,se,se,s,se,se,s,se,se,se,se,se,sw,s,se,se,s,sw,nw,ne,nw,se,nw,se,s,se,se,se,se,se,s,se,se,se,se,sw,s,sw,se,se,se,s,sw,sw,s,n,se,s,se,nw,se,se,se,ne,se,se,se,se,s,se,se,s,nw,s,se,s,nw,se,se,se,se,se,n,s,se,ne,n,se,se,nw,se,s,se,n,se,nw,s,s,s,se,nw,s,s,se,s,se,se,nw,s,se,s,se,se,se,se,n,se,se,s,se,se,se,se,s,s,se,s,se,se,s,se,s,n,se,n,se,se,s,se,s,se,se,s,s,se,se,s,se,se,se,se,s,s,s,se,s,nw,s,se,se,se,ne,nw,se,se,se,se,se,se,n,se,se,se,se,se,se,nw,se,se,se,s,s,nw,se,ne,se,s,se,ne,se,se,nw,se,se,se,sw,n,se,sw,se,se,nw,ne,s,se,sw,se
,s,s,s,se,s,se,n,sw,sw,se,se,se,ne,se,s,se,sw,n,se,se,se,s,s,se,s,se,n,s,ne,se,se,s,se,se,s,sw,s,se,se,ne,s,n,se,se,se,s,s,s,se,se,s,s,ne,se,s,se,nw,se,s,se,se,s,s,s,se,n,se,se,ne,se,se,s,sw,se,s,ne,se,se,se,s,s,se,se,se,se,se,se,se,s,ne,se,s,se,s,s,nw,nw,s,s,nw,s,se,se,ne,se,se,se,n,s,s,s,s,se,se,s,s,s,nw,sw,se,s,s,n,se,s,s,s,s,n,s,se,s,s,se,sw,nw,nw,se,se,se,s,sw,se,se,se,s,se,se,s,s,s,se,ne,s,se,s,s,se,s,ne,se,se,se,se,se,se,ne,se,ne,s,se,se,se,se,se,s,s,s,n,se,nw,nw,s,se,sw,se,se,s,se,se,nw,s,s,s,ne,nw,se,se,se,n,ne,se,s,se,ne,se,ne,sw,ne,se,s,sw,se,se,se,s,s,ne,s,se,se,sw,s,s,s,s,se,se,s,s,se,s,se,s,s,nw,s,s,s,s,nw,nw,se,s,s,sw,s,se,nw,s,se,s,se,s,se,n,sw,n,ne,s,s,s,se,se,nw,s,n,se,s,s,s,se,s,s,s,sw,se,se,se,se,se,se,sw,s,nw,se,n,s,se,sw,nw,se,se,se,se,s,ne,se,ne,s,s,se,se,se,s,s,s,s,n,se,sw,n,s,nw,s,se,s,se,se,se,s,se,n,s,se,nw,se,s,s,se,se,se,s,s,n,s,se,s,s,s,se,se,se,s,s,s,s,se,se,s,se,s,s,s,s,s,s,s,s,nw,nw,se,n,sw,s,s,ne,s,nw,s,se,s,s,sw,s,se,nw,se,s,s,s,s,s,s,s,se,se,s,se,ne,s,se,se,se,s,s,s,se,ne,s,ne,s,s,se,s,s,ne,s,s,se,s,s,s,s,s,se,n,sw,n,s,se,se,s,s,nw,s,sw,se,n,s,se,s,s,sw,s,s,s,s,s,nw,s,nw,se,se,s,s,nw,se,s,s,sw,sw,s,se,se,s,s,s,se,s,se,s,s,s,se,s,ne,s,s,se,s,s,se,se,s,s,s,n,s,s,s,s,se,s,s,se,s,s,se,s,s,s,nw,se,s,s,se,se,se,nw,s,se,ne,s,se,s,n,nw,se,sw,se,se,s,se,s,se,s,sw,s,se,se,se,nw,s,s,s,s,sw,s,s,s,n,s,sw,s,s,se,se,se,s,se,s,s,s,se,s,se,se,nw,s,s,se,ne,s,se,s,se,se,se,s,s,s,s,se,s,s,s,s,se,s,s,s,s,se,se,se,sw,s,se,s,s,nw,s,s,se,s,se,s,se,s,sw,s,ne,s,s,ne,s,sw,s,s,ne,n,s,se,se,s,s,s,s,se,se,s,s,se,se,s,nw,s,s,n,s,ne,se,n,s,s,s,s,s,sw,s,n,n,s,s,sw,s,sw,n,se,s,s,s,s,nw,se,s,s,s,s,s,s,se,s,s,sw,s,s,s,se,se,ne,s,s,s,ne,se,se,s,s,se,n,n,se,n,sw,s,sw,se,nw,n,s,n,s,nw,sw,s,se,se,s,s,s,s,s,s,se,s,s,se,s,s,s,s,s,sw,se,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,nw,se,s,se,s,s,s,s,s,s,s,s,se,nw,se,s,s,s,se,nw,s,s,ne,s,s,se,se,sw,s,ne,n,s,s,se,n,ne,se,s,s,s,s,nw,s,ne,s,s,n,s,s,s,s,s,se,s,s,s,s,se,s,s,s,s,n,s,se,s,n,s,s,sw,s,s,s,s,s,se,s,ne,s,sw,n,s,n,se,nw,nw,s,sw,ne,
n,sw,n,sw,s,n,s,se,s,se,s,s,s,s,nw,se,s,s,s,se,sw,s,se,s,s,s,s,s,s,s,s,s,s,se,ne,nw,s,s,s,s,s,se,s,ne,sw,se,se,sw,sw,s,se,ne,s,nw,ne,n,s,s,s,s,s,ne,s,s,s,s,se,s,se,s,s,s,sw,s,s,se,s,s,s,s,n,se,s,ne,s,s,s,s,se,sw,se,s,s,s,s,s,se,s,n,n,s,s,sw,s,s,s,s,ne,s,sw,s,s,s,s,s,sw,s,s,n,s,n,s,s,s,n,se,s,s,s,s,s,s,s,s,s,s,s,s,ne,sw,s,s,ne,nw,se,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,sw,s,s,s,nw,s,s,se,se,s,n,s,se,s,s,s,se,s,nw,s,s,s,s,se,s,s,s,ne,s,s,s,se,s,s,s,s,s,s,s,n,s,s,s,s,s,ne,s,s,s,s,sw,s,s,s,s,s,s,s,s,s,s,ne,se,sw,s,sw,s,s,s,se,s,s,s,s,se,s,s,ne,s,s,s,s,se,ne,se,n,n,n,n,n,n,nw,nw,sw,sw,nw,ne,sw,ne,sw,nw,sw,ne,s,sw,sw,sw,sw,s,sw,sw,sw,sw,nw,s,s,s,nw,s,s,s,se,s,se,s,se,n,se,s,se,s,nw,sw,se,se,s,se,se,se,se,se,se,se,se,nw,se,se,se,se,se,ne,se,ne,s,se,se,se,se,se,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,s,ne,nw,ne,sw,ne,ne,ne,ne,ne,ne,ne,se,n,ne,ne,se,ne,ne,n,se,ne,ne,sw,ne,ne,n,ne,n,ne,n,n,n,sw,n,ne,n,ne,ne,se,ne,n,n,ne,n,n,s,n,n,ne,n,s,n,n,n,s,n,ne,n,n,n,n,n,se,nw,nw,n,n,nw,n,se,n,sw,s,n,nw,nw,se,nw,nw,nw,nw,n,n,se,n,n,n,nw,nw,n,nw,ne,n,nw,n,nw,sw,nw,n,n,n,ne,se,nw,n,nw,n,n,nw,n,nw,n,nw,n,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,se,sw,sw,nw,se,nw,s,nw,nw,nw,nw,nw,nw,nw,s,n,nw,nw,nw,sw,nw,se,nw,nw,nw,nw,sw,sw,nw,s,sw,nw,s,nw,nw,sw,nw,sw,sw,nw,sw,se,nw,nw,nw,nw,nw,sw,sw,nw,se,sw,sw,nw,sw,nw,se,sw,nw,nw,nw,n,nw,sw,nw,ne,nw,nw,s,nw,nw,nw,sw,nw,sw,nw,sw,sw,sw,sw,n,sw,nw,s,sw,nw,ne,sw,n,s,sw,sw,sw,sw,ne,sw,sw,sw,nw,sw,s,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,se,n,sw,s,sw,sw,n,sw,sw,s,s,sw,sw,sw,sw,sw,sw,sw,s,s,sw,sw,sw,ne,sw,sw,s,sw,s,nw,sw,sw,se,se,sw,sw,sw,s,s,s,sw,nw,sw,nw,s,sw,sw,sw,sw,sw,sw,s,sw,s,sw,sw,sw,s,sw,s,sw,s,sw,s,sw,s,sw,s,sw,sw,s,s,s,sw,n,sw,s,n,s,s,s,sw,s,n,s,sw,sw,sw,sw,s,s,s,s,s,s,s,s,sw,sw,s,sw,se,s,s,s,sw,s,s,sw,s,ne,s,s,s,s,s,s,s,s,s,n,se,s,s,nw,s,s,s,se,s,s,s,s,s,s,s,s,s,s,nw,se,s,s,s,sw,s,s,s,s,s,s,s,ne,n,s,se,s,s,s,s,s,s,se,se,nw,nw,s,s,s,s,s,se,sw,s,s,se,s,s,s,s,s,s,s,s,s,n,s,s,s,se,s,s,s,s,s,s,se,s,s,n,s,s,se,s,s,s,se,s,s,nw,s,s,s,n,s,s,se,s,se,s,n,ne,s,s,s,s,se,ne,n,s,s
e,s,se,s,s,se,s,s,s,s,s,se,s,sw,s,se,se,s,se,s,s,ne,s,ne,ne,se,s,ne,se,s,ne,s,se,ne,se,se,se,sw,s,se,se,se,se,se,n,s,s,se,s,se,s,s,se,se,s,s,se,se,nw,se,ne,s,s,se,se,se,se,se,s,se,ne,se,se,s,ne,se,sw,se,se,se,se,se,nw,se,se,se,se,se,se,se,s,ne,n,sw,ne,se,se,se,se,se,ne,se,se,se,se,se,se,se,se,se,n,se,s,s,se,se,sw,nw,s,se,se,ne,se,se,ne,s,se,se,se,se,se,se,se,se,n,se,se,se,se,se,se,ne,se,ne,nw,se,se,ne,nw,nw,se,se,se,se,sw,se,se,sw,n,se,se,sw,se,se,se,se,nw,se,se,se,se,se,nw,sw,ne,ne,ne,se,ne,ne,nw,se,se,nw,se,se,se,se,se,ne,se,s,se,se,s,ne,se,ne,se,se,se,ne,s,se,ne,ne,ne,ne,se,n,se,se,se,se,ne,se,n,ne,se,se,se,ne,sw,nw,nw,ne,se,ne,se,se,se,n,se,ne,se,se,se,se,ne,se,ne,n,se,se,ne,nw,ne,ne,se,se,n,se,se,s,se,sw,ne,ne,ne,se,se,se,ne,ne,se,n,ne,ne,se,ne,ne,ne,se,ne,sw,n,ne,s,se,se,se,sw,ne,se,se,se,sw,ne,se,ne,ne,ne,sw,ne,ne,s,ne,s,ne,se,ne,ne,se,se,se,nw,ne,ne,ne,se,ne,se,se,ne,ne,ne,se,sw,ne,ne,ne,ne,se,ne,sw,ne,sw,ne,n,nw,sw,ne,ne,ne,sw,ne,se,sw,n,ne,ne,se,n,se,ne,n,ne,se,ne,nw,ne,ne,s,n,se,ne,nw,ne,se,ne,ne,ne,se,ne,se,se,ne,ne,ne,se,se,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,se,nw,ne,ne,ne,ne,ne,ne,ne,se,se,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,ne,ne,nw,n,n,ne,nw,ne,ne,sw,se,ne,s,ne,nw,ne,nw,ne,ne,ne,ne,ne,ne,ne,n,n,ne,n,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,n,n,ne,s,ne,ne,ne,nw,se,ne,nw,nw,ne,n,n,ne,ne,nw,ne,n,ne,sw,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,n,ne,ne,ne,ne,ne,n,n,ne,ne,se,sw,ne,n,ne,n,ne,ne,sw,ne,ne,n,ne,ne,se,sw,ne,ne,ne,s,n,n,s,ne,n,se,ne,ne,sw,s,ne,nw,n,n,ne,nw,ne,n,n,s,s,n,ne,ne,n,n,nw,ne,n,ne,ne,ne,ne,n,s,n,se,sw,se,ne,ne,ne,n,ne,ne,ne,n,nw,ne,n,n,ne,nw,ne,ne,sw,n,n,se,ne,n,ne,nw,se,ne,ne,ne,n,ne,ne,n,ne,ne,ne,ne,n,ne,ne,s,n,s,nw,n,n,nw,ne,ne,ne,s,n,s,ne,ne,ne,n,se,ne,ne,nw,n,n,n,ne,s,sw,n,ne,n,n,sw,n,ne,n,ne,n,sw,ne,ne,ne,sw,n,ne,ne,sw,ne,ne,n,n,se,ne,ne,s,ne,ne,n,nw,s,n,n,ne,nw,sw,ne,n,ne,ne,n,ne,n,se,n,ne,sw,sw,n,n,n,ne,ne,n,ne,n,s,ne,se,ne,nw,n,n,n,n,se,nw,ne,n,n,n,n,nw,nw,ne,n,se,ne,n,n,n,ne,n,ne,ne,ne,sw,n,s,n,n,n,ne,se,n,n,ne,ne,ne,sw,ne,ne,n,n,n,n,n,n,n
e,ne,n,s,n,n,n,n,ne,ne,ne,n,n,se,nw,n,n,n,n,s,n,ne,n,n,n,n,n,n,n,n,sw,n,n,ne,n,n,n,s,ne,nw,n,nw,sw,n,n,n,n,s,n,n,ne,n,n,n,n,n,n,n,s,s,n,n,n,ne,n,s,ne,n,se,n,n,n,sw,n,n,n,ne,n,n,n,n,sw,se,n,sw,n,n,n,sw,ne,s,n,n,sw,s,se,n,n,n,n,ne,n,n,n,n,se,n,n,n,n,n,n,se,n,sw,n,n,n,nw,s,sw,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,sw,n,n,se,n,nw,se,n,sw,n,n,n,n,n,n,nw,n,n,n,n,n,n,n,n,n,sw,n,nw,ne,n,n,n,n,n,n,n,nw,n,n,sw,n,n,n,n,n,n,n,n,ne,n,s,n,n,n,n,ne,n,s,ne,n,n,n,nw,n,n,n,n,n,se,nw,n,nw,sw,n,n,ne,n,n,n,n,nw,n,n,n,nw,n,n,nw,n,n,n,sw,ne,n,n,s,n,n,n,nw,n,ne,n,nw,n,n,nw,nw,n,nw,nw,n,n,n,nw,sw,n,n,nw,sw,sw,n,nw,n,s,nw,n,s,nw,nw,nw,nw,n,se,n,n,nw,sw,nw,n,nw,n,ne,n,n,nw,nw,n,n,nw,nw,ne,nw,se,nw,nw,sw,n,n,n,sw,sw,n,n,n,n,nw,n,nw,nw,n,nw,n,nw,s,n,n,nw,nw,n,n,nw,n,sw,nw,nw,n,n,nw,nw,s,n,n,n,n,s,nw,n,n,n,n,n,nw,n,n,nw,n,nw,n,n,n,nw,n,n,nw,ne,nw,nw,n,nw,nw,n,n,nw,n,nw,ne,n,nw,n,n,nw,nw,nw,ne,nw,nw,n,n,n,se,ne,n,nw,n,n,n,n,nw,nw,n,nw,n,n,s,n,n,n,n,nw,n,nw,nw,se,n,nw,n,n,nw,nw,nw,nw,n,n,n,s,nw,nw,nw,ne,s,nw,nw,nw,n,nw,nw,sw,nw,n,nw,nw,n,nw,nw,n,nw,sw,n,n,n,n,nw,nw,nw,n,n,n,n,nw,n,n,ne,n,nw,nw,nw,ne,ne,n,n,nw,nw,ne,n,nw,nw,nw,nw,n,n,n,nw,nw,n,n,n,ne,nw,nw,nw,se,n,n,nw,n,nw,n,nw,nw,n,s,nw,n,nw,nw,nw,nw,nw,ne,nw,nw,n,nw,n,n,n,nw,n,n,ne,nw,n,n,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,s,nw,se,n,nw,nw,nw,nw,nw,nw,s,nw,n,nw,n,nw,nw,nw,nw,nw,se,s,nw,se,nw,n,ne,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,n,ne,nw,se,n,nw,nw,nw,nw,sw,n,nw,nw,n,nw,n,nw,nw,ne,n,nw,n,ne,se,n,sw,nw,ne,nw,sw,nw,ne,n,nw,ne,se,nw,nw,nw,nw,nw,ne,nw,n,nw,s,nw,nw,nw,ne,nw,nw,nw,nw,s,nw,n,sw,s,nw,se,nw,ne,nw,n,nw,sw,nw,n,nw,nw,sw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,s,s,nw,nw,nw,se,nw,nw,nw,nw,sw,nw,nw,n,nw,n,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,s,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,sw,nw,nw,nw,nw,nw,s,nw,sw,nw,nw,nw,se,nw,nw,sw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,n,nw,sw,n,nw,nw,se,nw,sw,se,sw,nw,sw,n,nw,nw,nw,ne,nw,nw,nw,se,nw,ne,ne,nw,nw,nw,nw,nw,s,nw,ne,n,nw,ne,nw,sw,nw,nw,sw,nw,se,nw,nw,ne,nw,ne,nw,sw,sw,nw,nw,nw,nw,nw,sw,nw
,nw,nw,se,sw,sw,nw,s,sw,nw,nw,nw,nw,ne,nw,ne,nw,nw,sw,nw,nw,nw,sw,nw,nw,nw,ne,sw,s,nw,ne,nw,nw,se,se,n,nw,nw,sw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,n,nw,sw,nw,s,nw,sw,nw,nw,nw,sw,nw,nw,se,nw,n,nw,nw,ne,sw,nw,ne,nw,se,s,nw,nw,n,n,nw,nw,nw,nw,ne,se,sw,nw,nw,nw,nw,se,nw,nw,nw,sw,nw,nw,s,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,sw,nw,sw,nw,nw,nw,ne,nw,sw,nw,sw,nw,sw,nw,nw,nw,sw,ne,sw,nw,sw,nw,se,nw,se,nw,nw,n,sw,nw,nw,nw,nw,sw,sw,nw,sw,nw,nw,nw,nw,nw,sw,sw,nw,nw,nw,sw,nw,nw,nw,nw,ne,nw,n,nw,sw,sw,nw,nw,nw,nw,nw,sw,nw,sw,nw,sw,sw,nw,nw,sw,sw,nw,nw,nw,ne,sw,ne,sw,sw,nw,nw,nw,nw,nw,nw,nw,sw,se,nw,nw,sw,sw,nw,nw,s,nw,nw,sw,nw,nw,nw,n,sw,nw,sw,sw,n,nw,nw,nw,sw,sw,nw,s,nw,nw,nw,sw,nw,n,nw,nw,nw,nw,n,nw,nw,nw,sw,nw,nw,nw,se,nw,sw,nw,sw,sw,sw,nw,ne,s,sw,ne,nw,nw,s,nw,sw,nw,s,nw,sw,sw,sw,s,nw,se,nw,nw,nw,sw,sw,sw,n,nw,sw,nw,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,s,nw,nw,nw,nw,sw,sw,sw,nw,nw,sw,sw,nw,nw,sw,nw,sw,nw,sw,sw,sw,sw,sw,nw,sw,s,nw,nw,sw,sw,sw,sw,nw,sw,sw,nw,sw,ne,nw,sw,nw,nw,sw,nw,se,nw,n,sw,nw,s,nw,ne,nw,se,sw,sw,sw,n,ne,sw,nw,sw,nw,sw,se,s,sw,sw,nw,sw,sw,nw,nw,sw,nw,sw,nw,sw,nw,sw,sw,sw,ne,sw,nw,sw,nw,nw,sw,nw,n,nw,sw,n,nw,nw,sw,sw,se,nw,nw,sw,nw,sw,sw,sw,sw,nw,se,sw,sw,nw,nw,sw,sw,sw,nw,sw,nw,nw,nw,sw,n,nw,sw,n,nw,s,nw,nw,sw,s,sw,sw,nw,sw,sw,sw,nw,ne,sw,ne,sw,s,sw,sw,nw,sw,nw,sw,sw,nw,nw,nw,sw,sw,ne,sw,n,sw,sw,sw,n,sw,sw,nw,sw,n,n,sw,sw,sw,nw,s,sw,sw,sw,sw,nw,nw,sw,nw,sw,sw,s,sw,nw,sw,sw,sw,s,sw,sw,n,nw,sw,n,sw,sw,s,nw,se,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,nw,sw,sw,sw,s,ne,se,nw,sw,sw,sw,nw,s,nw,nw,sw,sw,s,sw,sw,se,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,nw,nw,nw,nw,nw,nw,s,nw,sw,sw,sw,sw,sw,n,sw,se,sw,sw,s,sw,sw,sw,sw,sw,s,s,sw,s,ne,sw,sw,s,sw,sw,nw,sw,sw,sw,sw,ne,se,sw,sw,sw,sw,n,sw,sw,sw,sw,s,nw,se,nw,nw,sw,sw,nw,se,nw,se,sw,sw,sw,nw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,ne,se,nw,nw,sw,sw,sw,n,s,sw,nw,nw,se,sw,ne,sw,n,s,sw,sw,nw,sw,sw,sw,sw,sw,ne,sw,sw,se,sw,sw,sw,sw,nw,sw,sw,ne,sw,nw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,se,sw,se,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,se,sw,sw,se,sw,sw,ne"
""
print(solution_part_one(puzzle_input))
print(solution_part_two(puzzle_input))
| 276.943182 | 22,006 | 0.620697 | """
Day 11 challenge
"""
import attr
import math
from functools import reduce
@attr.s
class Offset(object):
    """A 2-D displacement on the hex grid.

    Diagonal moves use half-unit horizontal/vertical components, so both
    attributes are multiples of 0.5.
    """
    horizontal = attr.ib(default=0)
    vertical = attr.ib(default=0)

    def __add__(self, other):
        """Return the component-wise sum of two offsets as a new Offset."""
        new_h = self.horizontal + other.horizontal
        new_v = self.vertical + other.vertical
        return Offset(horizontal=new_h, vertical=new_v)

    def absolute_offset(self):
        """Return the Euclidean length of this offset (0 only at the origin)."""
        return math.sqrt(self.horizontal ** 2 + self.vertical ** 2)
# Unit moves on the hex grid, modelled on a square lattice: "n"/"s" change
# the vertical coordinate by a full unit, while the four diagonal moves
# change both coordinates by half a unit each.
DIRECTIONS = {
    "nw": Offset(horizontal=-.5, vertical=.5),
    "n": Offset(vertical=1),
    "ne": Offset(horizontal=.5, vertical=.5),
    "se": Offset(horizontal=.5, vertical=-.5),
    "s": Offset(vertical=-1),
    "sw": Offset(horizontal=-.5, vertical=-.5),
}
def solution_part_one(arg):
    """Return the fewest hex-grid steps needed to reach the path's endpoint.

    *arg* is a comma-separated string of directions (n, ne, se, s, sw, nw).
    In this coordinate model n/s move a full unit vertically and the four
    diagonals move half a unit both horizontally and vertically.

    Instead of greedily walking the net offset back to the origin one step
    at a time (the previous O(distance) search), the distance is computed
    in closed form: cancelling the horizontal component takes 2*|h|
    diagonal steps, which together also cover |h| of the vertical
    component; any leftover vertical distance is covered by n/s steps.
    """
    deltas = {
        "n": (0.0, 1.0),
        "ne": (0.5, 0.5),
        "se": (0.5, -0.5),
        "s": (0.0, -1.0),
        "sw": (-0.5, -0.5),
        "nw": (-0.5, 0.5),
    }
    h = v = 0.0
    for token in arg.split(","):
        # strip() tolerates stray whitespace around tokens in the input.
        dh, dv = deltas[token.strip()]
        h += dh
        v += dv
    h, v = abs(h), abs(v)
    # Every value here is an exact multiple of 0.5, so the float
    # arithmetic is exact and int() truncation is safe.
    return int(2 * h + max(0.0, v - h))
def solution_part_two(arg):
    """Return the furthest hex-grid distance (in steps) ever reached.

    *arg* is a comma-separated string of directions (n, ne, se, s, sw, nw).
    The previous implementation rebuilt the full greedy walk for every
    prefix of the path (O(n^2 * distance)); this version walks the path
    once, updating the net offset and its closed-form step distance after
    every move, and returns the running maximum — O(n) overall.
    """
    deltas = {
        "n": (0.0, 1.0),
        "ne": (0.5, 0.5),
        "se": (0.5, -0.5),
        "s": (0.0, -1.0),
        "sw": (-0.5, -0.5),
        "nw": (-0.5, 0.5),
    }
    h = v = 0.0
    furthest = 0
    for token in arg.split(","):
        # strip() tolerates stray whitespace around tokens in the input.
        dh, dv = deltas[token.strip()]
        h += dh
        v += dv
        ah, av = abs(h), abs(v)
        # Closed-form hex distance: 2*|h| diagonal steps cancel the
        # horizontal component (also covering |h| vertically); the rest
        # of the vertical component uses n/s steps.  Exact arithmetic —
        # all values are multiples of 0.5.
        dist = int(2 * ah + max(0.0, av - ah))
        if dist > furthest:
            furthest = dist
    return furthest
if __name__ == "__main__":
puzzle_input = """s,s,sw,se,s,nw,nw,ne,n,ne,n,n,n,n,n,n,n,ne,n,ne,ne,se,ne,n,ne,n,n,ne,se,sw,se,s,se,se,se,se,s,se,se,s,se,se,nw,se,se,se,s,s,nw,s,s,se,nw,s,n,s,nw,s,s,s,s,s,s,s,s,s,s,s,sw,s,s,s,s,s,sw,sw,s,sw,s,nw,sw,sw,s,sw,ne,sw,sw,s,se,sw,sw,sw,sw,sw,sw,sw,nw,sw,sw,sw,se,sw,nw,nw,sw,sw,sw,s,sw,nw,se,nw,se,nw,sw,nw,nw,se,n,sw,s,s,s,nw,sw,sw,nw,se,nw,sw,sw,sw,nw,sw,sw,nw,nw,nw,nw,ne,n,nw,nw,ne,nw,nw,nw,nw,nw,se,nw,nw,n,nw,nw,nw,sw,n,nw,nw,nw,nw,n,s,nw,ne,nw,s,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,s,sw,n,n,nw,nw,n,n,nw,nw,n,nw,n,n,nw,n,s,n,nw,ne,n,nw,n,nw,n,n,n,n,se,s,n,s,n,s,n,n,n,nw,n,s,n,n,n,n,n,ne,n,n,n,n,s,n,n,n,n,sw,n,n,n,nw,n,n,n,n,nw,se,n,ne,n,n,ne,n,ne,ne,n,n,n,n,ne,n,n,nw,n,n,n,n,ne,se,se,ne,ne,ne,n,ne,n,ne,ne,nw,ne,ne,n,n,n,ne,ne,ne,n,ne,nw,n,s,ne,ne,ne,ne,ne,n,s,ne,ne,ne,n,ne,ne,ne,sw,ne,ne,ne,s,n,ne,ne,n,ne,ne,ne,ne,ne,se,ne,ne,se,ne,ne,ne,ne,se,ne,se,ne,nw,nw,sw,s,n,ne,ne,ne,ne,ne,sw,ne,ne,ne,sw,ne,ne,ne,ne,sw,se,ne,ne,ne,ne,se,s,se,s,nw,ne,ne,n,se,ne,ne,ne,sw,ne,s,s,nw,se,nw,ne,s,ne,se,ne,n,ne,n,s,n,ne,ne,s,ne,se,se,ne,sw,nw,s,n,nw,n,se,ne,se,se,sw,ne,ne,sw,se,se,se,se,sw,ne,se,s,ne,ne,n,se,ne,sw,ne,ne,se,se,nw,se,ne,ne,nw,sw,se,s,s,se,se,se,s,se,nw,se,ne,se,se,se,se,se,se,se,sw,nw,se,se,se,se,se,se,sw,se,sw,ne,se,se,se,se,se,se,se,se,s,se,se,se,se,se,se,ne,se,se,s,sw,s,se,se,se,se,se,se,se,s,se,sw,se,se,n,s,se,s,ne,se,se,se,s,se,s,se,se,ne,se,se,sw,s,se,se,se,se,nw,se,n,ne,s,s,nw,se,se,s,se,n,se,se,s,se,se,s,se,se,ne,se,se,se,s,s,sw,s,s,se,s,se,s,se,s,se,se,se,s,se,s,nw,s,s,se,se,se,se,sw,sw,s,se,s,se,se,s,n,se,se,se,se,s,se,se,s,se,se,se,sw,s,s,s,se,se,s,s,se,s,s,se,s,s,n,s,nw,s,n,s,sw,s,nw,s,s,se,se,sw,s,s,s,sw,se,s,n,s,se,n,s,se,se,se,s,s,s,se,ne,s,se,n,se,s,se,se,s,ne,sw,se,s,s,se,s,s,s,s,s,s,s,s,se,s,nw,s,s,s,s,s,s,s,s,s,s,s,ne,ne,s,s,s,s,s,s,s,s,s,ne,ne,s,s,s,s,s,s,s,s,nw,s,s,se,sw,s,sw,s,s,nw,s,s,s,s,s,s,s,s,s,n,ne,se,s,s,s,s,n,se,s,sw,s,sw,sw,sw,s,s,sw,s,s,s,nw,sw,s,s,s,s,s,ne,sw,s,s,sw,s,s,s,s,s,s,sw,s,s,se,s,s,sw,n,sw,s,s,sw,s,s,s,s,s,sw,s,ne,s,s,s
,s,sw,ne,s,ne,n,sw,s,s,s,sw,s,sw,nw,s,s,ne,sw,sw,nw,s,s,sw,sw,s,ne,s,s,sw,se,s,s,sw,s,s,sw,s,sw,sw,s,s,s,s,sw,sw,sw,s,n,ne,s,ne,s,sw,s,se,s,sw,sw,s,sw,sw,sw,sw,s,s,s,s,se,s,sw,sw,sw,sw,n,s,sw,s,s,sw,sw,s,s,n,sw,s,sw,sw,ne,sw,sw,s,sw,sw,sw,sw,sw,s,s,sw,se,sw,sw,sw,sw,s,s,sw,s,sw,sw,nw,sw,sw,se,sw,s,s,nw,nw,s,s,sw,sw,s,n,s,sw,sw,se,s,sw,sw,ne,sw,sw,sw,sw,sw,ne,sw,s,sw,sw,n,sw,sw,sw,sw,s,sw,sw,sw,sw,sw,n,nw,s,sw,s,s,n,ne,sw,sw,sw,sw,n,sw,se,sw,sw,s,se,sw,sw,sw,sw,sw,sw,s,ne,ne,ne,sw,sw,sw,ne,s,sw,sw,sw,sw,nw,s,sw,sw,s,s,sw,sw,n,nw,nw,sw,sw,sw,se,nw,nw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,n,sw,sw,sw,nw,nw,se,sw,sw,sw,sw,sw,ne,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,s,sw,sw,se,sw,s,sw,sw,nw,nw,nw,sw,sw,nw,sw,se,ne,sw,sw,sw,sw,ne,sw,sw,nw,sw,se,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,n,nw,sw,sw,sw,s,sw,sw,nw,n,s,sw,n,nw,nw,sw,sw,nw,sw,sw,se,sw,sw,nw,sw,sw,s,sw,nw,sw,nw,sw,nw,nw,nw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,s,sw,nw,nw,ne,n,nw,sw,sw,nw,sw,sw,nw,nw,nw,sw,sw,sw,ne,s,sw,nw,nw,sw,nw,sw,s,nw,n,nw,sw,sw,nw,nw,sw,nw,nw,n,sw,nw,sw,nw,sw,n,sw,nw,sw,sw,sw,sw,n,sw,n,nw,nw,s,sw,se,sw,sw,nw,n,sw,sw,sw,n,sw,nw,sw,ne,nw,sw,sw,s,n,nw,sw,nw,nw,nw,sw,sw,sw,nw,nw,ne,sw,s,sw,nw,n,sw,sw,sw,nw,ne,ne,sw,nw,nw,sw,s,s,sw,sw,nw,ne,sw,nw,sw,nw,nw,sw,sw,sw,sw,nw,nw,s,se,nw,sw,nw,ne,s,nw,nw,ne,sw,nw,nw,n,nw,nw,sw,sw,sw,nw,nw,nw,sw,nw,nw,n,sw,sw,nw,s,n,sw,nw,nw,sw,nw,n,nw,nw,nw,nw,nw,nw,sw,sw,n,n,sw,sw,nw,nw,nw,nw,ne,nw,nw,nw,sw,nw,nw,nw,nw,ne,nw,nw,nw,nw,n,nw,nw,nw,s,nw,nw,sw,nw,s,nw,ne,ne,nw,nw,sw,nw,nw,nw,nw,sw,nw,se,sw,nw,sw,nw,nw,ne,nw,n,nw,nw,sw,nw,nw,nw,sw,nw,ne,s,nw,nw,sw,s,nw,sw,sw,nw,nw,nw,sw,s,nw,nw,nw,nw,se,nw,s,nw,nw,nw,se,ne,ne,nw,nw,nw,nw,nw,sw,nw,ne,ne,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,ne,nw,nw,s,nw,nw,ne,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,ne,nw,n,nw,nw,ne,n,nw,sw,nw,n,nw,n,sw,nw,ne,s,se,n,ne,se,nw,ne,nw,ne,nw,ne,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,ne,nw,s,se,nw,n,n,nw,ne,nw,nw,nw,nw,ne,nw,nw,s,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,n,nw,nw,s,nw,nw,nw,nw,nw,sw
,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,nw,nw,nw,nw,s,nw,se,n,n,n,nw,se,nw,nw,s,s,nw,nw,n,nw,nw,s,nw,nw,nw,se,nw,nw,nw,se,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,n,nw,n,ne,nw,nw,nw,se,ne,nw,nw,nw,n,nw,nw,ne,n,n,nw,nw,sw,nw,nw,nw,nw,se,nw,n,s,nw,nw,n,n,nw,se,n,nw,nw,nw,n,nw,nw,nw,n,nw,se,n,se,sw,s,n,s,nw,nw,nw,nw,n,n,s,nw,nw,se,nw,nw,se,nw,n,n,nw,n,se,nw,n,n,nw,n,nw,n,nw,nw,n,nw,n,s,nw,nw,nw,nw,ne,ne,se,sw,nw,n,n,nw,s,n,nw,nw,n,n,nw,n,nw,nw,nw,nw,nw,n,nw,n,n,sw,n,se,nw,n,n,nw,n,nw,nw,n,s,sw,nw,ne,nw,n,sw,nw,nw,n,nw,sw,s,nw,n,n,nw,se,n,nw,n,ne,n,nw,nw,n,nw,nw,n,nw,n,nw,nw,nw,n,se,sw,nw,nw,nw,sw,nw,nw,nw,nw,se,n,n,ne,n,nw,nw,n,nw,nw,n,sw,n,se,nw,nw,n,n,n,nw,n,nw,n,nw,n,ne,n,n,nw,n,n,n,nw,se,sw,n,sw,n,nw,nw,n,n,n,se,nw,sw,ne,n,se,nw,nw,n,n,n,n,n,n,nw,n,n,nw,sw,nw,n,sw,n,n,se,sw,n,n,n,nw,sw,nw,n,n,n,n,nw,n,n,nw,n,s,n,n,sw,n,nw,ne,s,nw,ne,n,n,n,ne,s,n,n,n,n,n,n,se,nw,nw,n,n,nw,n,n,s,se,n,nw,n,n,n,n,n,n,nw,n,n,n,nw,nw,nw,n,n,n,nw,nw,sw,n,se,n,s,n,n,n,n,n,n,ne,n,se,n,n,n,se,n,nw,n,nw,n,n,n,n,n,n,n,nw,n,n,n,n,n,n,ne,n,n,nw,n,n,sw,n,nw,n,n,sw,n,n,n,nw,se,n,n,n,nw,n,s,n,n,n,n,n,n,n,s,n,n,n,n,nw,n,n,sw,sw,nw,n,nw,nw,sw,n,n,n,n,n,n,n,n,n,n,n,s,n,n,n,n,nw,n,n,n,n,n,n,n,s,n,nw,n,sw,nw,ne,n,nw,n,sw,n,n,n,n,n,ne,n,nw,n,n,n,n,n,n,n,ne,n,n,n,n,ne,n,n,n,n,ne,n,n,n,n,ne,n,n,s,n,n,se,n,n,n,n,n,n,n,nw,n,ne,nw,sw,ne,nw,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,ne,n,n,se,n,ne,n,ne,n,n,n,n,ne,n,ne,se,s,ne,nw,se,n,n,n,n,se,n,n,ne,s,n,nw,n,nw,nw,n,n,n,n,n,n,n,n,n,s,n,n,n,s,n,nw,n,n,n,n,n,n,n,n,n,n,n,n,ne,ne,n,sw,n,se,n,n,n,n,n,n,n,se,n,ne,n,ne,n,n,n,n,n,sw,n,n,s,nw,n,n,n,ne,ne,n,n,n,ne,n,n,se,n,n,n,n,ne,n,n,n,s,n,se,ne,n,n,n,n,n,n,n,n,s,ne,s,nw,n,ne,s,ne,n,n,n,n,ne,n,n,n,n,n,n,n,n,n,ne,n,se,ne,n,ne,ne,ne,ne,se,n,n,ne,n,n,n,n,nw,ne,sw,ne,n,n,ne,se,n,n,n,n,se,n,n,n,ne,n,se,ne,ne,n,s,nw,n,ne,n,n,nw,n,n,ne,n,ne,n,sw,n,se,n,ne,nw,ne,ne,n,n,n,ne,n,ne,nw,n,s,n,n,n,n,ne,n,n,ne,n,nw,n,n,n,ne,n,s,n,n,n,n,n,ne,ne,n,sw,n,ne,n,n,sw,ne,n,ne,ne,n,n,ne,ne,ne,n,ne,ne,ne,n,ne,sw,n,n,ne,ne,ne,se,n,ne,ne,ne,n,nw,n,ne,n,n,n,ne,n
,n,n,n,n,ne,ne,n,ne,s,nw,ne,n,ne,ne,ne,n,n,n,n,nw,n,n,ne,ne,ne,n,ne,n,ne,ne,n,ne,ne,nw,ne,nw,n,n,ne,se,ne,se,ne,n,nw,n,n,s,n,se,ne,ne,n,ne,n,ne,s,n,n,sw,ne,ne,se,n,ne,n,n,n,n,sw,ne,ne,nw,n,n,ne,ne,ne,n,ne,n,sw,ne,ne,ne,ne,n,ne,se,ne,sw,n,n,n,ne,ne,sw,ne,ne,ne,n,ne,ne,n,ne,se,ne,s,nw,n,sw,n,ne,n,n,n,n,ne,n,sw,ne,ne,nw,n,ne,se,ne,ne,ne,ne,n,ne,ne,n,ne,n,ne,ne,ne,n,s,s,ne,ne,ne,s,ne,ne,ne,sw,n,n,ne,n,s,ne,n,n,nw,n,se,sw,ne,ne,ne,s,n,n,ne,ne,n,ne,ne,nw,ne,ne,ne,s,se,ne,ne,ne,n,ne,nw,n,ne,ne,sw,n,n,ne,ne,ne,n,ne,ne,se,ne,ne,n,ne,ne,ne,sw,s,n,n,n,se,n,s,ne,ne,ne,sw,ne,ne,se,ne,ne,ne,ne,ne,n,s,se,ne,ne,ne,n,ne,sw,se,s,ne,n,ne,ne,n,n,n,ne,n,ne,ne,se,ne,ne,n,ne,ne,ne,ne,ne,s,ne,ne,ne,nw,ne,ne,ne,ne,ne,n,ne,s,ne,ne,ne,n,ne,sw,n,n,n,ne,ne,n,ne,s,n,n,n,ne,ne,n,ne,ne,ne,sw,se,sw,ne,ne,s,ne,nw,ne,nw,se,nw,n,ne,se,n,ne,ne,ne,ne,ne,s,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,sw,ne,ne,ne,ne,n,ne,s,ne,s,ne,ne,n,ne,se,ne,ne,nw,n,ne,ne,ne,s,ne,sw,ne,n,ne,ne,n,ne,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,se,n,ne,ne,ne,ne,sw,ne,n,ne,se,ne,ne,ne,se,se,ne,sw,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,sw,n,n,ne,ne,n,ne,ne,ne,sw,ne,n,ne,ne,ne,se,ne,ne,nw,nw,sw,ne,ne,ne,ne,sw,ne,se,ne,n,ne,ne,ne,nw,ne,ne,ne,ne,ne,ne,sw,ne,ne,nw,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,s,sw,ne,ne,s,sw,sw,ne,ne,ne,nw,ne,n,se,ne,ne,ne,ne,n,ne,nw,ne,ne,n,se,ne,ne,ne,ne,sw,ne,ne,s,ne,s,ne,ne,ne,ne,ne,ne,ne,nw,ne,ne,ne,se,ne,ne,ne,ne,se,ne,ne,sw,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,n,ne,ne,nw,se,ne,ne,ne,ne,se,ne,sw,ne,ne,n,se,ne,ne,se,ne,sw,nw,ne,ne,ne,ne,ne,ne,s,ne,se,sw,ne,s,ne,se,se,se,ne,ne,s,ne,ne,s,ne,se,ne,nw,s,ne,se,ne,ne,ne,sw,ne,se,ne,ne,ne,sw,ne,ne,ne,ne,nw,ne,ne,nw,ne,ne,s,ne,ne,se,ne,ne,nw,ne,ne,se,se,se,ne,se,ne,se,se,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,s,se,ne,sw,ne,ne,se,ne,ne,ne,ne,s,ne,se,ne,ne,se,n,n,ne,se,s,ne,s,ne,se,nw,ne,se,ne,ne,se,ne,se,se,ne,ne,se,ne,ne,ne,n,se,ne,ne,ne,ne,s,se,se,n,ne,se,se,ne,ne,se,ne,se,se,ne,ne,sw,ne,ne,ne,ne,ne,ne,nw,se,ne,se,ne,se,ne,s,n
e,ne,sw,ne,ne,ne,ne,ne,s,se,ne,ne,s,se,ne,ne,ne,nw,ne,ne,ne,se,ne,ne,ne,ne,se,ne,ne,ne,se,ne,ne,se,n,s,ne,ne,ne,se,s,n,se,se,ne,sw,ne,ne,n,ne,se,n,se,n,s,n,s,ne,se,ne,ne,ne,ne,n,ne,se,sw,se,ne,se,nw,ne,ne,ne,se,se,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,n,s,ne,ne,ne,ne,ne,se,se,se,se,sw,s,n,ne,s,ne,ne,sw,se,se,ne,ne,ne,ne,ne,ne,se,se,s,ne,se,ne,nw,n,ne,se,se,ne,se,ne,ne,se,se,se,ne,ne,sw,se,nw,se,nw,se,se,se,se,ne,n,n,ne,se,se,ne,ne,se,ne,ne,se,ne,sw,ne,se,ne,n,se,nw,sw,ne,ne,se,ne,se,ne,ne,se,ne,se,ne,ne,ne,se,ne,se,se,n,nw,ne,ne,ne,ne,sw,n,ne,ne,ne,nw,ne,se,se,ne,ne,s,nw,n,ne,ne,ne,ne,ne,ne,ne,se,ne,sw,se,ne,s,n,ne,ne,se,ne,se,se,ne,se,ne,se,se,ne,se,se,se,ne,ne,ne,ne,se,ne,ne,nw,ne,ne,se,ne,s,se,ne,se,ne,ne,ne,ne,n,se,ne,se,se,ne,ne,ne,nw,ne,se,se,nw,ne,se,se,ne,sw,ne,ne,ne,n,ne,ne,ne,n,se,ne,se,ne,n,n,se,ne,se,se,se,ne,se,se,sw,se,ne,se,ne,sw,sw,ne,ne,ne,s,n,ne,ne,nw,ne,n,se,se,se,se,ne,nw,ne,ne,ne,se,ne,se,n,n,se,n,se,se,se,se,se,ne,sw,ne,se,ne,se,se,se,ne,sw,se,s,se,se,ne,se,se,se,n,ne,se,se,ne,ne,ne,ne,se,se,ne,se,se,ne,se,ne,ne,se,se,nw,se,se,ne,se,sw,se,ne,n,ne,se,se,se,nw,se,se,se,ne,se,ne,se,se,ne,se,se,ne,ne,se,se,sw,ne,se,se,sw,se,se,s,ne,ne,se,ne,s,ne,se,se,ne,se,nw,n,se,se,s,se,ne,se,ne,ne,sw,ne,ne,n,se,s,n,ne,se,se,ne,nw,ne,ne,se,se,ne,sw,ne,ne,ne,se,sw,ne,se,se,ne,n,ne,se,nw,se,se,se,ne,se,ne,ne,ne,s,ne,nw,ne,ne,ne,se,se,se,nw,se,ne,se,se,se,ne,se,ne,se,se,se,ne,se,se,se,n,se,ne,ne,se,se,se,se,sw,ne,se,se,se,ne,se,nw,n,ne,ne,ne,sw,n,se,n,se,se,n,ne,se,se,ne,se,ne,se,nw,se,se,se,s,sw,ne,se,sw,se,se,se,ne,ne,se,ne,se,se,se,se,ne,se,se,ne,se,se,ne,se,ne,se,se,se,ne,se,ne,ne,se,s,ne,ne,nw,se,ne,n,ne,se,se,ne,se,n,ne,ne,se,ne,se,se,se,ne,se,se,ne,ne,nw,ne,s,se,se,se,se,se,sw,nw,n,se,se,s,se,se,se,nw,se,n,nw,se,ne,ne,se,nw,se,se,se,se,se,se,ne,se,se,se,se,ne,se,se,ne,se,se,se,se,ne,se,se,se,se,ne,ne,nw,se,se,se,se,se,se,s,se,se,se,n,se,ne,ne,ne,se,se,se,se,ne,se,se,ne,se,se,se,se,se,se,se,sw,se,ne,ne,s,se,se,ne,se,se,se,ne,ne,se,se,se,se,se,se,se,s,sw,se,ne,se,se,s
e,se,se,se,se,n,se,sw,se,se,se,ne,se,se,se,se,se,s,ne,nw,se,se,se,se,se,sw,se,ne,sw,se,ne,se,se,ne,se,ne,n,ne,se,se,ne,se,ne,nw,nw,se,se,se,se,se,se,se,se,se,se,sw,se,se,se,se,se,nw,se,n,se,se,ne,se,se,nw,se,se,se,se,se,ne,nw,nw,se,se,se,se,ne,se,se,se,ne,ne,se,se,se,se,se,se,se,se,se,se,se,sw,s,se,se,ne,nw,se,se,se,se,se,se,sw,sw,se,ne,sw,nw,se,se,se,se,se,n,se,se,ne,se,se,se,nw,se,ne,se,se,se,s,se,se,n,se,se,ne,se,s,se,ne,se,se,se,nw,se,se,n,se,s,n,se,se,se,nw,se,s,sw,se,ne,se,se,se,nw,se,sw,se,se,sw,s,se,n,ne,sw,se,n,nw,se,ne,se,se,se,se,se,ne,se,se,se,se,se,se,se,se,n,se,se,se,ne,se,se,se,sw,se,nw,se,se,ne,se,se,se,se,n,ne,se,se,se,n,se,se,se,se,se,se,se,s,s,se,se,se,s,ne,se,se,se,se,se,se,se,se,se,se,se,se,se,n,n,se,se,se,se,se,se,se,se,se,s,sw,se,se,se,n,nw,se,se,se,se,ne,ne,se,n,se,se,sw,ne,sw,se,se,ne,se,se,se,se,se,se,se,se,se,n,se,nw,se,se,se,sw,s,se,se,se,se,se,se,se,se,ne,s,se,se,se,nw,s,se,n,se,se,se,s,sw,se,se,se,se,nw,ne,se,se,se,ne,s,se,sw,se,se,se,se,se,se,s,se,s,se,sw,se,ne,s,se,se,nw,se,se,nw,n,se,se,se,nw,nw,se,se,se,se,se,nw,s,se,se,ne,se,se,se,se,se,se,se,sw,se,se,se,se,se,se,nw,se,se,s,se,se,se,se,s,s,se,se,se,s,se,se,se,s,s,n,se,se,se,se,n,n,se,sw,nw,se,s,se,nw,se,s,nw,nw,se,s,se,se,se,se,se,se,sw,nw,se,se,s,se,se,se,se,se,se,n,n,ne,se,s,s,se,se,se,se,se,se,s,se,se,s,se,se,n,se,se,s,se,s,se,s,se,sw,se,se,sw,se,ne,se,sw,se,se,se,s,nw,se,ne,n,se,se,nw,se,ne,se,se,se,s,se,se,nw,se,s,se,se,se,nw,se,se,sw,s,se,s,se,se,nw,s,se,se,s,se,se,s,se,se,se,se,se,sw,s,se,se,s,sw,nw,ne,nw,se,nw,se,s,se,se,se,se,se,s,se,se,se,se,sw,s,sw,se,se,se,s,sw,sw,s,n,se,s,se,nw,se,se,se,ne,se,se,se,se,s,se,se,s,nw,s,se,s,nw,se,se,se,se,se,n,s,se,ne,n,se,se,nw,se,s,se,n,se,nw,s,s,s,se,nw,s,s,se,s,se,se,nw,s,se,s,se,se,se,se,n,se,se,s,se,se,se,se,s,s,se,s,se,se,s,se,s,n,se,n,se,se,s,se,s,se,se,s,s,se,se,s,se,se,se,se,s,s,s,se,s,nw,s,se,se,se,ne,nw,se,se,se,se,se,se,n,se,se,se,se,se,se,nw,se,se,se,s,s,nw,se,ne,se,s,se,ne,se,se,nw,se,se,se,sw,n,se,sw,se,se,nw,ne,s,se,sw,se
,s,s,s,se,s,se,n,sw,sw,se,se,se,ne,se,s,se,sw,n,se,se,se,s,s,se,s,se,n,s,ne,se,se,s,se,se,s,sw,s,se,se,ne,s,n,se,se,se,s,s,s,se,se,s,s,ne,se,s,se,nw,se,s,se,se,s,s,s,se,n,se,se,ne,se,se,s,sw,se,s,ne,se,se,se,s,s,se,se,se,se,se,se,se,s,ne,se,s,se,s,s,nw,nw,s,s,nw,s,se,se,ne,se,se,se,n,s,s,s,s,se,se,s,s,s,nw,sw,se,s,s,n,se,s,s,s,s,n,s,se,s,s,se,sw,nw,nw,se,se,se,s,sw,se,se,se,s,se,se,s,s,s,se,ne,s,se,s,s,se,s,ne,se,se,se,se,se,se,ne,se,ne,s,se,se,se,se,se,s,s,s,n,se,nw,nw,s,se,sw,se,se,s,se,se,nw,s,s,s,ne,nw,se,se,se,n,ne,se,s,se,ne,se,ne,sw,ne,se,s,sw,se,se,se,s,s,ne,s,se,se,sw,s,s,s,s,se,se,s,s,se,s,se,s,s,nw,s,s,s,s,nw,nw,se,s,s,sw,s,se,nw,s,se,s,se,s,se,n,sw,n,ne,s,s,s,se,se,nw,s,n,se,s,s,s,se,s,s,s,sw,se,se,se,se,se,se,sw,s,nw,se,n,s,se,sw,nw,se,se,se,se,s,ne,se,ne,s,s,se,se,se,s,s,s,s,n,se,sw,n,s,nw,s,se,s,se,se,se,s,se,n,s,se,nw,se,s,s,se,se,se,s,s,n,s,se,s,s,s,se,se,se,s,s,s,s,se,se,s,se,s,s,s,s,s,s,s,s,nw,nw,se,n,sw,s,s,ne,s,nw,s,se,s,s,sw,s,se,nw,se,s,s,s,s,s,s,s,se,se,s,se,ne,s,se,se,se,s,s,s,se,ne,s,ne,s,s,se,s,s,ne,s,s,se,s,s,s,s,s,se,n,sw,n,s,se,se,s,s,nw,s,sw,se,n,s,se,s,s,sw,s,s,s,s,s,nw,s,nw,se,se,s,s,nw,se,s,s,sw,sw,s,se,se,s,s,s,se,s,se,s,s,s,se,s,ne,s,s,se,s,s,se,se,s,s,s,n,s,s,s,s,se,s,s,se,s,s,se,s,s,s,nw,se,s,s,se,se,se,nw,s,se,ne,s,se,s,n,nw,se,sw,se,se,s,se,s,se,s,sw,s,se,se,se,nw,s,s,s,s,sw,s,s,s,n,s,sw,s,s,se,se,se,s,se,s,s,s,se,s,se,se,nw,s,s,se,ne,s,se,s,se,se,se,s,s,s,s,se,s,s,s,s,se,s,s,s,s,se,se,se,sw,s,se,s,s,nw,s,s,se,s,se,s,se,s,sw,s,ne,s,s,ne,s,sw,s,s,ne,n,s,se,se,s,s,s,s,se,se,s,s,se,se,s,nw,s,s,n,s,ne,se,n,s,s,s,s,s,sw,s,n,n,s,s,sw,s,sw,n,se,s,s,s,s,nw,se,s,s,s,s,s,s,se,s,s,sw,s,s,s,se,se,ne,s,s,s,ne,se,se,s,s,se,n,n,se,n,sw,s,sw,se,nw,n,s,n,s,nw,sw,s,se,se,s,s,s,s,s,s,se,s,s,se,s,s,s,s,s,sw,se,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,nw,se,s,se,s,s,s,s,s,s,s,s,se,nw,se,s,s,s,se,nw,s,s,ne,s,s,se,se,sw,s,ne,n,s,s,se,n,ne,se,s,s,s,s,nw,s,ne,s,s,n,s,s,s,s,s,se,s,s,s,s,se,s,s,s,s,n,s,se,s,n,s,s,sw,s,s,s,s,s,se,s,ne,s,sw,n,s,n,se,nw,nw,s,sw,ne,
n,sw,n,sw,s,n,s,se,s,se,s,s,s,s,nw,se,s,s,s,se,sw,s,se,s,s,s,s,s,s,s,s,s,s,se,ne,nw,s,s,s,s,s,se,s,ne,sw,se,se,sw,sw,s,se,ne,s,nw,ne,n,s,s,s,s,s,ne,s,s,s,s,se,s,se,s,s,s,sw,s,s,se,s,s,s,s,n,se,s,ne,s,s,s,s,se,sw,se,s,s,s,s,s,se,s,n,n,s,s,sw,s,s,s,s,ne,s,sw,s,s,s,s,s,sw,s,s,n,s,n,s,s,s,n,se,s,s,s,s,s,s,s,s,s,s,s,s,ne,sw,s,s,ne,nw,se,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,sw,s,s,s,nw,s,s,se,se,s,n,s,se,s,s,s,se,s,nw,s,s,s,s,se,s,s,s,ne,s,s,s,se,s,s,s,s,s,s,s,n,s,s,s,s,s,ne,s,s,s,s,sw,s,s,s,s,s,s,s,s,s,s,ne,se,sw,s,sw,s,s,s,se,s,s,s,s,se,s,s,ne,s,s,s,s,se,ne,se,n,n,n,n,n,n,nw,nw,sw,sw,nw,ne,sw,ne,sw,nw,sw,ne,s,sw,sw,sw,sw,s,sw,sw,sw,sw,nw,s,s,s,nw,s,s,s,se,s,se,s,se,n,se,s,se,s,nw,sw,se,se,s,se,se,se,se,se,se,se,se,nw,se,se,se,se,se,ne,se,ne,s,se,se,se,se,se,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,s,ne,nw,ne,sw,ne,ne,ne,ne,ne,ne,ne,se,n,ne,ne,se,ne,ne,n,se,ne,ne,sw,ne,ne,n,ne,n,ne,n,n,n,sw,n,ne,n,ne,ne,se,ne,n,n,ne,n,n,s,n,n,ne,n,s,n,n,n,s,n,ne,n,n,n,n,n,se,nw,nw,n,n,nw,n,se,n,sw,s,n,nw,nw,se,nw,nw,nw,nw,n,n,se,n,n,n,nw,nw,n,nw,ne,n,nw,n,nw,sw,nw,n,n,n,ne,se,nw,n,nw,n,n,nw,n,nw,n,nw,n,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,se,sw,sw,nw,se,nw,s,nw,nw,nw,nw,nw,nw,nw,s,n,nw,nw,nw,sw,nw,se,nw,nw,nw,nw,sw,sw,nw,s,sw,nw,s,nw,nw,sw,nw,sw,sw,nw,sw,se,nw,nw,nw,nw,nw,sw,sw,nw,se,sw,sw,nw,sw,nw,se,sw,nw,nw,nw,n,nw,sw,nw,ne,nw,nw,s,nw,nw,nw,sw,nw,sw,nw,sw,sw,sw,sw,n,sw,nw,s,sw,nw,ne,sw,n,s,sw,sw,sw,sw,ne,sw,sw,sw,nw,sw,s,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,se,n,sw,s,sw,sw,n,sw,sw,s,s,sw,sw,sw,sw,sw,sw,sw,s,s,sw,sw,sw,ne,sw,sw,s,sw,s,nw,sw,sw,se,se,sw,sw,sw,s,s,s,sw,nw,sw,nw,s,sw,sw,sw,sw,sw,sw,s,sw,s,sw,sw,sw,s,sw,s,sw,s,sw,s,sw,s,sw,s,sw,sw,s,s,s,sw,n,sw,s,n,s,s,s,sw,s,n,s,sw,sw,sw,sw,s,s,s,s,s,s,s,s,sw,sw,s,sw,se,s,s,s,sw,s,s,sw,s,ne,s,s,s,s,s,s,s,s,s,n,se,s,s,nw,s,s,s,se,s,s,s,s,s,s,s,s,s,s,nw,se,s,s,s,sw,s,s,s,s,s,s,s,ne,n,s,se,s,s,s,s,s,s,se,se,nw,nw,s,s,s,s,s,se,sw,s,s,se,s,s,s,s,s,s,s,s,s,n,s,s,s,se,s,s,s,s,s,s,se,s,s,n,s,s,se,s,s,s,se,s,s,nw,s,s,s,n,s,s,se,s,se,s,n,ne,s,s,s,s,se,ne,n,s,s
e,s,se,s,s,se,s,s,s,s,s,se,s,sw,s,se,se,s,se,s,s,ne,s,ne,ne,se,s,ne,se,s,ne,s,se,ne,se,se,se,sw,s,se,se,se,se,se,n,s,s,se,s,se,s,s,se,se,s,s,se,se,nw,se,ne,s,s,se,se,se,se,se,s,se,ne,se,se,s,ne,se,sw,se,se,se,se,se,nw,se,se,se,se,se,se,se,s,ne,n,sw,ne,se,se,se,se,se,ne,se,se,se,se,se,se,se,se,se,n,se,s,s,se,se,sw,nw,s,se,se,ne,se,se,ne,s,se,se,se,se,se,se,se,se,n,se,se,se,se,se,se,ne,se,ne,nw,se,se,ne,nw,nw,se,se,se,se,sw,se,se,sw,n,se,se,sw,se,se,se,se,nw,se,se,se,se,se,nw,sw,ne,ne,ne,se,ne,ne,nw,se,se,nw,se,se,se,se,se,ne,se,s,se,se,s,ne,se,ne,se,se,se,ne,s,se,ne,ne,ne,ne,se,n,se,se,se,se,ne,se,n,ne,se,se,se,ne,sw,nw,nw,ne,se,ne,se,se,se,n,se,ne,se,se,se,se,ne,se,ne,n,se,se,ne,nw,ne,ne,se,se,n,se,se,s,se,sw,ne,ne,ne,se,se,se,ne,ne,se,n,ne,ne,se,ne,ne,ne,se,ne,sw,n,ne,s,se,se,se,sw,ne,se,se,se,sw,ne,se,ne,ne,ne,sw,ne,ne,s,ne,s,ne,se,ne,ne,se,se,se,nw,ne,ne,ne,se,ne,se,se,ne,ne,ne,se,sw,ne,ne,ne,ne,se,ne,sw,ne,sw,ne,n,nw,sw,ne,ne,ne,sw,ne,se,sw,n,ne,ne,se,n,se,ne,n,ne,se,ne,nw,ne,ne,s,n,se,ne,nw,ne,se,ne,ne,ne,se,ne,se,se,ne,ne,ne,se,se,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,se,nw,ne,ne,ne,ne,ne,ne,ne,se,se,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,ne,ne,nw,n,n,ne,nw,ne,ne,sw,se,ne,s,ne,nw,ne,nw,ne,ne,ne,ne,ne,ne,ne,n,n,ne,n,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,n,n,ne,s,ne,ne,ne,nw,se,ne,nw,nw,ne,n,n,ne,ne,nw,ne,n,ne,sw,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,n,ne,ne,ne,ne,ne,n,n,ne,ne,se,sw,ne,n,ne,n,ne,ne,sw,ne,ne,n,ne,ne,se,sw,ne,ne,ne,s,n,n,s,ne,n,se,ne,ne,sw,s,ne,nw,n,n,ne,nw,ne,n,n,s,s,n,ne,ne,n,n,nw,ne,n,ne,ne,ne,ne,n,s,n,se,sw,se,ne,ne,ne,n,ne,ne,ne,n,nw,ne,n,n,ne,nw,ne,ne,sw,n,n,se,ne,n,ne,nw,se,ne,ne,ne,n,ne,ne,n,ne,ne,ne,ne,n,ne,ne,s,n,s,nw,n,n,nw,ne,ne,ne,s,n,s,ne,ne,ne,n,se,ne,ne,nw,n,n,n,ne,s,sw,n,ne,n,n,sw,n,ne,n,ne,n,sw,ne,ne,ne,sw,n,ne,ne,sw,ne,ne,n,n,se,ne,ne,s,ne,ne,n,nw,s,n,n,ne,nw,sw,ne,n,ne,ne,n,ne,n,se,n,ne,sw,sw,n,n,n,ne,ne,n,ne,n,s,ne,se,ne,nw,n,n,n,n,se,nw,ne,n,n,n,n,nw,nw,ne,n,se,ne,n,n,n,ne,n,ne,ne,ne,sw,n,s,n,n,n,ne,se,n,n,ne,ne,ne,sw,ne,ne,n,n,n,n,n,n,n
e,ne,n,s,n,n,n,n,ne,ne,ne,n,n,se,nw,n,n,n,n,s,n,ne,n,n,n,n,n,n,n,n,sw,n,n,ne,n,n,n,s,ne,nw,n,nw,sw,n,n,n,n,s,n,n,ne,n,n,n,n,n,n,n,s,s,n,n,n,ne,n,s,ne,n,se,n,n,n,sw,n,n,n,ne,n,n,n,n,sw,se,n,sw,n,n,n,sw,ne,s,n,n,sw,s,se,n,n,n,n,ne,n,n,n,n,se,n,n,n,n,n,n,se,n,sw,n,n,n,nw,s,sw,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,sw,n,n,se,n,nw,se,n,sw,n,n,n,n,n,n,nw,n,n,n,n,n,n,n,n,n,sw,n,nw,ne,n,n,n,n,n,n,n,nw,n,n,sw,n,n,n,n,n,n,n,n,ne,n,s,n,n,n,n,ne,n,s,ne,n,n,n,nw,n,n,n,n,n,se,nw,n,nw,sw,n,n,ne,n,n,n,n,nw,n,n,n,nw,n,n,nw,n,n,n,sw,ne,n,n,s,n,n,n,nw,n,ne,n,nw,n,n,nw,nw,n,nw,nw,n,n,n,nw,sw,n,n,nw,sw,sw,n,nw,n,s,nw,n,s,nw,nw,nw,nw,n,se,n,n,nw,sw,nw,n,nw,n,ne,n,n,nw,nw,n,n,nw,nw,ne,nw,se,nw,nw,sw,n,n,n,sw,sw,n,n,n,n,nw,n,nw,nw,n,nw,n,nw,s,n,n,nw,nw,n,n,nw,n,sw,nw,nw,n,n,nw,nw,s,n,n,n,n,s,nw,n,n,n,n,n,nw,n,n,nw,n,nw,n,n,n,nw,n,n,nw,ne,nw,nw,n,nw,nw,n,n,nw,n,nw,ne,n,nw,n,n,nw,nw,nw,ne,nw,nw,n,n,n,se,ne,n,nw,n,n,n,n,nw,nw,n,nw,n,n,s,n,n,n,n,nw,n,nw,nw,se,n,nw,n,n,nw,nw,nw,nw,n,n,n,s,nw,nw,nw,ne,s,nw,nw,nw,n,nw,nw,sw,nw,n,nw,nw,n,nw,nw,n,nw,sw,n,n,n,n,nw,nw,nw,n,n,n,n,nw,n,n,ne,n,nw,nw,nw,ne,ne,n,n,nw,nw,ne,n,nw,nw,nw,nw,n,n,n,nw,nw,n,n,n,ne,nw,nw,nw,se,n,n,nw,n,nw,n,nw,nw,n,s,nw,n,nw,nw,nw,nw,nw,ne,nw,nw,n,nw,n,n,n,nw,n,n,ne,nw,n,n,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,s,nw,se,n,nw,nw,nw,nw,nw,nw,s,nw,n,nw,n,nw,nw,nw,nw,nw,se,s,nw,se,nw,n,ne,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,n,ne,nw,se,n,nw,nw,nw,nw,sw,n,nw,nw,n,nw,n,nw,nw,ne,n,nw,n,ne,se,n,sw,nw,ne,nw,sw,nw,ne,n,nw,ne,se,nw,nw,nw,nw,nw,ne,nw,n,nw,s,nw,nw,nw,ne,nw,nw,nw,nw,s,nw,n,sw,s,nw,se,nw,ne,nw,n,nw,sw,nw,n,nw,nw,sw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,s,s,nw,nw,nw,se,nw,nw,nw,nw,sw,nw,nw,n,nw,n,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,s,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,sw,nw,nw,nw,nw,nw,s,nw,sw,nw,nw,nw,se,nw,nw,sw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,n,nw,sw,n,nw,nw,se,nw,sw,se,sw,nw,sw,n,nw,nw,nw,ne,nw,nw,nw,se,nw,ne,ne,nw,nw,nw,nw,nw,s,nw,ne,n,nw,ne,nw,sw,nw,nw,sw,nw,se,nw,nw,ne,nw,ne,nw,sw,sw,nw,nw,nw,nw,nw,sw,nw
,nw,nw,se,sw,sw,nw,s,sw,nw,nw,nw,nw,ne,nw,ne,nw,nw,sw,nw,nw,nw,sw,nw,nw,nw,ne,sw,s,nw,ne,nw,nw,se,se,n,nw,nw,sw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,n,nw,sw,nw,s,nw,sw,nw,nw,nw,sw,nw,nw,se,nw,n,nw,nw,ne,sw,nw,ne,nw,se,s,nw,nw,n,n,nw,nw,nw,nw,ne,se,sw,nw,nw,nw,nw,se,nw,nw,nw,sw,nw,nw,s,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,sw,nw,sw,nw,nw,nw,ne,nw,sw,nw,sw,nw,sw,nw,nw,nw,sw,ne,sw,nw,sw,nw,se,nw,se,nw,nw,n,sw,nw,nw,nw,nw,sw,sw,nw,sw,nw,nw,nw,nw,nw,sw,sw,nw,nw,nw,sw,nw,nw,nw,nw,ne,nw,n,nw,sw,sw,nw,nw,nw,nw,nw,sw,nw,sw,nw,sw,sw,nw,nw,sw,sw,nw,nw,nw,ne,sw,ne,sw,sw,nw,nw,nw,nw,nw,nw,nw,sw,se,nw,nw,sw,sw,nw,nw,s,nw,nw,sw,nw,nw,nw,n,sw,nw,sw,sw,n,nw,nw,nw,sw,sw,nw,s,nw,nw,nw,sw,nw,n,nw,nw,nw,nw,n,nw,nw,nw,sw,nw,nw,nw,se,nw,sw,nw,sw,sw,sw,nw,ne,s,sw,ne,nw,nw,s,nw,sw,nw,s,nw,sw,sw,sw,s,nw,se,nw,nw,nw,sw,sw,sw,n,nw,sw,nw,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,s,nw,nw,nw,nw,sw,sw,sw,nw,nw,sw,sw,nw,nw,sw,nw,sw,nw,sw,sw,sw,sw,sw,nw,sw,s,nw,nw,sw,sw,sw,sw,nw,sw,sw,nw,sw,ne,nw,sw,nw,nw,sw,nw,se,nw,n,sw,nw,s,nw,ne,nw,se,sw,sw,sw,n,ne,sw,nw,sw,nw,sw,se,s,sw,sw,nw,sw,sw,nw,nw,sw,nw,sw,nw,sw,nw,sw,sw,sw,ne,sw,nw,sw,nw,nw,sw,nw,n,nw,sw,n,nw,nw,sw,sw,se,nw,nw,sw,nw,sw,sw,sw,sw,nw,se,sw,sw,nw,nw,sw,sw,sw,nw,sw,nw,nw,nw,sw,n,nw,sw,n,nw,s,nw,nw,sw,s,sw,sw,nw,sw,sw,sw,nw,ne,sw,ne,sw,s,sw,sw,nw,sw,nw,sw,sw,nw,nw,nw,sw,sw,ne,sw,n,sw,sw,sw,n,sw,sw,nw,sw,n,n,sw,sw,sw,nw,s,sw,sw,sw,sw,nw,nw,sw,nw,sw,sw,s,sw,nw,sw,sw,sw,s,sw,sw,n,nw,sw,n,sw,sw,s,nw,se,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,nw,sw,sw,sw,s,ne,se,nw,sw,sw,sw,nw,s,nw,nw,sw,sw,s,sw,sw,se,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,nw,nw,nw,nw,nw,nw,s,nw,sw,sw,sw,sw,sw,n,sw,se,sw,sw,s,sw,sw,sw,sw,sw,s,s,sw,s,ne,sw,sw,s,sw,sw,nw,sw,sw,sw,sw,ne,se,sw,sw,sw,sw,n,sw,sw,sw,sw,s,nw,se,nw,nw,sw,sw,nw,se,nw,se,sw,sw,sw,nw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,ne,se,nw,nw,sw,sw,sw,n,s,sw,nw,nw,se,sw,ne,sw,n,s,sw,sw,nw,sw,sw,sw,sw,sw,ne,sw,sw,se,sw,sw,sw,sw,nw,sw,sw,ne,sw,nw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,se,sw,se,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,se,sw,sw,se,sw,sw,ne"
""
print(solution_part_one(puzzle_input))
print(solution_part_two(puzzle_input))
| 1,697 | 124 | 68 |
0edbf4856f3f5fc8d8eca3650fd3de2cd8ef2372 | 3,729 | py | Python | Metric/logger.py | ZM-Zhou/MDE_Platform_Pytorch | d86efe061bf14a6eed3352cc45e1437e46c138b1 | [
"MIT"
] | null | null | null | Metric/logger.py | ZM-Zhou/MDE_Platform_Pytorch | d86efe061bf14a6eed3352cc45e1437e46c138b1 | [
"MIT"
] | null | null | null | Metric/logger.py | ZM-Zhou/MDE_Platform_Pytorch | d86efe061bf14a6eed3352cc45e1437e46c138b1 | [
"MIT"
] | null | null | null | import time
import os
import json
import shutil
import numpy as np
import cv2 as cv
import torch
import torch.nn as nn
import torch.nn.functional as F
from Utils.import_choice import import_module
from Utils.visualization import VisualImage, make_output_img
def sec_to_hm(t):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple.

    e.g. 10239 -> (2, 50, 39)
    """
    total_minutes, seconds = divmod(int(t), 60)
    hours, minutes = divmod(total_minutes, 60)
    return hours, minutes, seconds
def sec_to_hm_str(t):
    """Format a duration in seconds as a zero-padded 'HHhMMmSSs' string.

    e.g. 10239 -> '02h50m39s'
    """
    hours, minutes, seconds = sec_to_hm(t)
    return "{:02d}h{:02d}m{:02d}s".format(hours, minutes, seconds)
| 32.426087 | 84 | 0.540896 | import time
import os
import json
import shutil
import numpy as np
import cv2 as cv
import torch
import torch.nn as nn
import torch.nn.functional as F
from Utils.import_choice import import_module
from Utils.visualization import VisualImage, make_output_img
def sec_to_hm(t):
    """Convert a number of seconds into hours, minutes and seconds.

    e.g. 10239 -> (2, 50, 39)
    """
    total = int(t)
    return total // 3600, (total // 60) % 60, total % 60
def sec_to_hm_str(t):
    """Render a duration in seconds as 'HHhMMmSSs'.

    e.g. 10239 -> '02h50m39s'
    """
    parts = sec_to_hm(t)
    return "{:02d}h{:02d}m{:02d}s".format(*parts)
class MetricLog():
    """Collects output of a depth-metric evaluation run.

    Creates a log folder named ``<data_name>_depth_<weights_folder>`` next to
    the experiment directory, mirrors console output into ``output.txt`` and
    saves visualization images.
    """
    def __init__(self, weights_path, data_name):
        """Create the log folder and the visualization helper.

        :param weights_path: path to the model's weights folder
                             (expects '/'-separated path components).
        :param data_name: dataset name, used in the log-folder name.
        """
        self.weights_path = weights_path
        path_split = weights_path.split("/")
        log_dir = ""
        for name in path_split[:-2]:
            log_dir += name + '/'
        self.log_path = os.path.join(log_dir,
                                     "{}_depth_".format(data_name) + path_split[-1])
        if not os.path.isdir(self.log_path):
            os.makedirs(self.log_path)
        self.output_file = os.path.join(self.log_path, 'output.txt')
        self.visual_tool = VisualImage()
        self.device = "cpu"
        self.is_train = False
        # Running index used to auto-name visualization images.
        self.output_count = 0
    def log_print(self, arg, *args, **kargs):
        """Print `arg` to stdout and append it to the output file.

        Accepts an optional ``end`` keyword like the builtin ``print``;
        defaults to a newline. (Previously any other keyword argument
        raised KeyError; unknown keywords are now ignored.)
        """
        end = kargs.get("end", "\n")
        with open(self.output_file, "a+") as f:
            print(arg, end=end)
            f.write(arg + end)
    def make_logdir(self, dir_name):
        """Create the sub-directory `dir_name` inside the log folder (no-op if present)."""
        path = os.path.join(self.log_path, dir_name)
        if not os.path.isdir(path):
            os.makedirs(path)
    def do_visualizion(self, dir_name, imgs, visual_modes, size, name=""):
        """Visualize and save a group of image tensors as one combined image.

        :param dir_name: sub-directory of the log folder to save into.
        :param imgs: ordered dict-like mapping of label -> tensor (N, C, H, W);
                     all images are resized to the first entry's H x W.
        :param visual_modes: per-image visualization mode, aligned with `imgs`.
        :param size: grid layout passed to `make_output_img`.
        :param name: output file name; auto-numbered when empty.
        """
        if name == "":
            save_path = os.path.join(self.log_path, dir_name,
                                     str(self.output_count) + ".png")
            self.output_count += 1
        else:
            save_path = os.path.join(self.log_path, dir_name,
                                     name + ".png")
        # Target resolution is taken from the first image in the dict.
        tar_img = next(iter(imgs.values()))
        _, _, h, w = tar_img.shape
        for idx, (k, v) in enumerate(imgs.items()):
            v = F.interpolate(v, [h, w], mode="bilinear",
                              align_corners=False)
            # Take the first sample of the batch, HWC layout for OpenCV.
            v = v[0, ...].cpu().permute(1, 2, 0).numpy()
            imgs[k] = self.visual_tool.do_visualize(v, visual_modes[idx])
        img = make_output_img(imgs, size)
        cv.imwrite(save_path, img)
    # for trainer
    def do_log_all_test(self, use_time, total, data_num):
        """Log elapsed time and the per-sample average of each accumulated metric.

        :param use_time: elapsed time in seconds.
        :param total: dict of metric sums; averaged in place by `data_num`.
        :param data_num: number of evaluated samples.
        """
        self.log_print('Done in {} seconds!'.format(sec_to_hm_str(use_time)))
        for k, v, in total.items():
            total[k] = v / data_num
            self.log_print("-->{}: {}".format(k, total[k]))
    def load_models(self, networks):
        """Load pretrained weights for each network from the weights folder.

        Networks whose checkpoint is missing or incompatible keep their
        random initialization (logged as such).

        :param networks: dict of name -> torch module; updated in place.
        :return: the same dict with weights loaded where possible.
        """
        self.log_print("Loading pretrained opts")
        for n in networks:
            self.log_print("Loading {} weights...".format(n))
            path = os.path.join(self.weights_path, "{}.pth".format(n))
            model_dict = networks[n].state_dict()
            try:
                pretrained_dict = torch.load(path)
                # Keep only parameters the current architecture also has.
                pretrained_dict = {k: v for k, v in pretrained_dict.items()
                                   if k in model_dict}
                model_dict.update(pretrained_dict)
                networks[n].load_state_dict(model_dict)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                self.log_print("{} is randomly initialized".format(n))
        return networks
| 2,104 | 937 | 23 |
573a6fce8cc0764713b54343ab5e8d135b196d42 | 2,471 | py | Python | flask_app/models/recipe.py | Benton-Michael/recipes | 86e4a32b620a43830b300576be64e57b0fb24ad4 | [
"MIT"
] | null | null | null | flask_app/models/recipe.py | Benton-Michael/recipes | 86e4a32b620a43830b300576be64e57b0fb24ad4 | [
"MIT"
] | null | null | null | flask_app/models/recipe.py | Benton-Michael/recipes | 86e4a32b620a43830b300576be64e57b0fb24ad4 | [
"MIT"
] | null | null | null | from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
| 37.439394 | 192 | 0.622015 | from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
class Recipe:
    """Model for rows of the `recipes` table: CRUD helpers plus form validation."""
    # Database (schema) name used by every query helper.
    db_name = 'recipes'
    def __init__(self, db_data):
        """Populate one Recipe instance from a database row.

        :param db_data: dict holding the column values of one `recipes` row.
        """
        self.id = db_data['id']
        self.name = db_data['name']
        self.description = db_data['description']
        self.instructions = db_data['instructions']
        self.under30 = db_data['under30']
        self.date_made = db_data['date_made']
        self.user_id = db_data['user_id']
        self.created_at = db_data['created_at']
        self.updated_at = db_data['updated_at']
    @classmethod
    def save(cls, data):
        """Insert a new recipe row; returns the id of the inserted row."""
        query = "INSERT INTO recipes (name, description, instructions, under30, date_made, user_id) VALUES (%(name)s,%(description)s,%(instructions)s,%(under30)s,%(date_made)s,%(user_id)s);"
        return connectToMySQL(cls.db_name).query_db(query, data)
    @classmethod
    def get_all(cls):
        """Return every recipe as a list of Recipe instances."""
        query = "SELECT * FROM recipes;"
        results = connectToMySQL(cls.db_name).query_db(query)
        # Leftover debug print of row['date_made'] removed.
        return [cls(row) for row in results]
    @classmethod
    def get_one(cls, data):
        """Return the single Recipe whose id matches data['id']."""
        query = "SELECT * FROM recipes WHERE id = %(id)s;"
        results = connectToMySQL(cls.db_name).query_db(query, data)
        return cls(results[0])
    @classmethod
    def update(cls, data):
        """Update an existing recipe row; refreshes `updated_at` server-side."""
        query = "UPDATE recipes SET name=%(name)s, description=%(description)s, instructions=%(instructions)s, under30=%(under30)s, date_made=%(date_made)s,updated_at=NOW() WHERE id = %(id)s;"
        return connectToMySQL(cls.db_name).query_db(query, data)
    @classmethod
    def destroy(cls, data):
        """Delete the recipe whose id matches data['id']."""
        query = "DELETE FROM recipes WHERE id = %(id)s;"
        return connectToMySQL(cls.db_name).query_db(query, data)
    @staticmethod
    def validate_recipe(recipe):
        """Flash a message for every invalid form field; return True when all pass."""
        is_valid = True
        if len(recipe['name']) < 3:
            is_valid = False
            flash("Name must be at least 3 characters", "recipe")
        if len(recipe['instructions']) < 3:
            is_valid = False
            flash("Instructions must be at least 3 characters", "recipe")
        if len(recipe['description']) < 3:
            is_valid = False
            flash("Description must be at least 3 characters", "recipe")
        if recipe['date_made'] == "":
            is_valid = False
            flash("Please enter a date", "recipe")
        return is_valid
| 2,047 | 316 | 23 |
680f8d81c64768930a68b46fa87c28d4ef3f1453 | 15,200 | py | Python | Tests/Application.py | BigFriendly/tm1py | 03210d672cc3797025b8de80c42037e1e11f369f | [
"MIT"
] | null | null | null | Tests/Application.py | BigFriendly/tm1py | 03210d672cc3797025b8de80c42037e1e11f369f | [
"MIT"
] | null | null | null | Tests/Application.py | BigFriendly/tm1py | 03210d672cc3797025b8de80c42037e1e11f369f | [
"MIT"
] | null | null | null | import configparser
import os
import random
import unittest
from _datetime import datetime
from TM1py import TM1Service, Element, ElementAttribute, Hierarchy, Dimension, Cube, NativeView, AnonymousSubset, \
Subset, Process, Chore, ChoreStartTime, ChoreFrequency, ChoreTask
from TM1py.Objects.Application import CubeApplication, ApplicationTypes, ChoreApplication, DimensionApplication, \
FolderApplication, LinkApplication, ProcessApplication, SubsetApplication, ViewApplication, DocumentApplication
# Connection settings are read from config.ini next to this test file.
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini'))
# Hard coded stuff
# Names of every TM1 object this test module creates; the shared PREFIX makes
# them easy to identify on the server and safe to delete during teardown.
PREFIX = 'TM1py_Tests_Applications_'
TM1PY_APP_FOLDER = PREFIX + "RootFolder"
APPLICATION_NAME = PREFIX + "Application"
CUBE_NAME = PREFIX + "Cube"
VIEW_NAME = PREFIX + "View"
SUBSET_NAME = PREFIX + "Subset"
PROCESS_NAME = PREFIX + "Process"
CHORE_NAME = PREFIX + "Chore"
FOLDER_NAME = PREFIX + "Folder"
LINK_NAME = PREFIX + "Link"
DOCUMENT_NAME = PREFIX + "Document"
# Three test dimensions used to build the test cube.
DIMENSION_NAMES = [
    PREFIX + 'Dimension1',
    PREFIX + 'Dimension2',
    PREFIX + 'Dimension3']
| 44.705882 | 120 | 0.684013 | import configparser
import os
import random
import unittest
from _datetime import datetime
from TM1py import TM1Service, Element, ElementAttribute, Hierarchy, Dimension, Cube, NativeView, AnonymousSubset, \
Subset, Process, Chore, ChoreStartTime, ChoreFrequency, ChoreTask
from TM1py.Objects.Application import CubeApplication, ApplicationTypes, ChoreApplication, DimensionApplication, \
FolderApplication, LinkApplication, ProcessApplication, SubsetApplication, ViewApplication, DocumentApplication
# Connection settings are read from config.ini next to this test file.
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini'))
# Hard coded stuff
# Names of every TM1 object this test module creates; the shared PREFIX makes
# them easy to identify on the server and safe to delete during teardown.
PREFIX = 'TM1py_Tests_Applications_'
TM1PY_APP_FOLDER = PREFIX + "RootFolder"
APPLICATION_NAME = PREFIX + "Application"
CUBE_NAME = PREFIX + "Cube"
VIEW_NAME = PREFIX + "View"
SUBSET_NAME = PREFIX + "Subset"
PROCESS_NAME = PREFIX + "Process"
CHORE_NAME = PREFIX + "Chore"
FOLDER_NAME = PREFIX + "Folder"
LINK_NAME = PREFIX + "Link"
DOCUMENT_NAME = PREFIX + "Document"
# Three test dimensions used to build the test cube.
DIMENSION_NAMES = [
    PREFIX + 'Dimension1',
    PREFIX + 'Dimension2',
    PREFIX + 'Dimension3']
class TestDataMethods(unittest.TestCase):
tm1 = None
# Setup Cubes, Dimensions and Subsets
    @classmethod
    def setup_class(cls):
        # Connection to TM1
        # NOTE(review): nose-style hook; for unittest.TestCase subclasses the
        # unittest protocol uses setUpClass instead — confirm which runner
        # invokes this, otherwise cls.tm1 may stay None.
        cls.tm1 = TM1Service(**config['tm1srv01'])
@classmethod
def setUpClass(cls) -> None:
# Build Dimensions
for dimension_name in DIMENSION_NAMES:
elements = [Element('Element {}'.format(str(j)), 'Numeric') for j in range(1, 1001)]
element_attributes = [ElementAttribute("Attr1", "String"),
ElementAttribute("Attr2", "Numeric"),
ElementAttribute("Attr3", "Numeric")]
hierarchy = Hierarchy(dimension_name=dimension_name,
name=dimension_name,
elements=elements,
element_attributes=element_attributes)
dimension = Dimension(dimension_name, [hierarchy])
if cls.tm1.dimensions.exists(dimension.name):
cls.tm1.dimensions.update(dimension)
else:
cls.tm1.dimensions.create(dimension)
# Build Cube
cube = Cube(CUBE_NAME, DIMENSION_NAMES)
if not cls.tm1.cubes.exists(CUBE_NAME):
cls.tm1.cubes.create(cube)
# Build cube view
view = NativeView(
cube_name=CUBE_NAME,
view_name=VIEW_NAME,
suppress_empty_columns=True,
suppress_empty_rows=True)
view.add_row(
dimension_name=DIMENSION_NAMES[0],
subset=AnonymousSubset(
dimension_name=DIMENSION_NAMES[0],
expression='{[' + DIMENSION_NAMES[0] + '].Members}'))
view.add_row(
dimension_name=DIMENSION_NAMES[1],
subset=AnonymousSubset(
dimension_name=DIMENSION_NAMES[1],
expression='{[' + DIMENSION_NAMES[1] + '].Members}'))
view.add_column(
dimension_name=DIMENSION_NAMES[2],
subset=AnonymousSubset(
dimension_name=DIMENSION_NAMES[2],
expression='{[' + DIMENSION_NAMES[2] + '].Members}'))
if not cls.tm1.cubes.views.exists(CUBE_NAME, view.name, private=False):
cls.tm1.cubes.views.create(
view=view,
private=False)
# Build subset
subset = Subset(SUBSET_NAME, DIMENSION_NAMES[0], DIMENSION_NAMES[0], None, None, ["Element 1"])
if cls.tm1.dimensions.hierarchies.subsets.exists(
subset.name,
subset.dimension_name,
subset.hierarchy_name,
False):
cls.tm1.dimensions.hierarchies.subsets.delete(
subset.name,
subset.dimension_name,
subset.hierarchy_name,
False)
cls.tm1.dimensions.hierarchies.subsets.create(subset, False)
# Build process
p1 = Process(name=PROCESS_NAME)
p1.add_parameter('pRegion', 'pRegion (String)', value='US')
if cls.tm1.processes.exists(p1.name):
cls.tm1.processes.delete(p1.name)
cls.tm1.processes.create(p1)
# Build chore
c1 = Chore(
name=CHORE_NAME,
start_time=ChoreStartTime(datetime.now().year, datetime.now().month, datetime.now().day,
datetime.now().hour, datetime.now().minute, datetime.now().second),
dst_sensitivity=False,
active=True,
execution_mode=Chore.MULTIPLE_COMMIT,
frequency=ChoreFrequency(
days=int(random.uniform(0, 355)),
hours=int(random.uniform(0, 23)),
minutes=int(random.uniform(0, 59)),
seconds=int(random.uniform(0, 59))),
tasks=[ChoreTask(0, PROCESS_NAME, parameters=[{'Name': 'pRegion', 'Value': 'UK'}])])
cls.tm1.chores.create(c1)
# create Folder
app = FolderApplication("", TM1PY_APP_FOLDER)
cls.tm1.applications.create(application=app, private=False)
    @classmethod
    def tearDownClass(cls) -> None:
        """Delete every fixture created in setUpClass.

        Order matters: the view before its cube, the cube before its
        dimensions, and the chore before its process.
        """
        # delete view
        cls.tm1.cubes.views.delete(CUBE_NAME, VIEW_NAME, False)
        # delete cube
        cls.tm1.cubes.delete(CUBE_NAME)
        # delete dimensions
        for dimension_name in DIMENSION_NAMES:
            cls.tm1.dimensions.delete(dimension_name)
        # delete chore
        cls.tm1.chores.delete(CHORE_NAME)
        # delete process
        cls.tm1.processes.delete(PROCESS_NAME)
        # delete folder
        cls.tm1.applications.delete(
            path="",
            application_type=ApplicationTypes.FOLDER,
            application_name=TM1PY_APP_FOLDER,
            private=False)
def run_test_cube_application(self, private):
app = CubeApplication(TM1PY_APP_FOLDER, APPLICATION_NAME, CUBE_NAME)
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.CUBE, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.CUBE, private=private)
self.assertFalse(exists)
@unittest.skip
def test_cube_application_private(self):
self.run_test_cube_application(private=True)
def test_cube_application_public(self):
self.run_test_cube_application(private=False)
def run_test_chore_application(self, private):
app = ChoreApplication(TM1PY_APP_FOLDER, APPLICATION_NAME, CHORE_NAME)
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.CHORE, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.CHORE, private=private)
self.assertFalse(exists)
@unittest.skip
def test_chore_application_private(self):
self.run_test_chore_application(True)
def test_chore_application_public(self):
self.run_test_chore_application(False)
def run_test_dimension_application(self, private=False):
app = DimensionApplication(TM1PY_APP_FOLDER, APPLICATION_NAME, DIMENSION_NAMES[0])
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.DIMENSION, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.DIMENSION, private=private)
self.assertFalse(exists)
@unittest.skip
def test_dimension_application_private(self):
self.run_test_dimension_application(private=True)
def test_dimension_application_public(self):
self.run_test_dimension_application(private=False)
def run_test_document_application(self, private):
with open(r"resources\document.xlsx", "rb") as file:
app = DocumentApplication(path=TM1PY_APP_FOLDER, name=DOCUMENT_NAME, content=file.read())
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.DOCUMENT, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.DOCUMENT, private=private)
self.assertFalse(exists)
def test_document_application_private(self):
self.run_test_document_application(private=True)
def test_document_application_public(self):
self.run_test_document_application(private=False)
def run_test_folder_application(self, private):
app = FolderApplication(TM1PY_APP_FOLDER, "not_relevant")
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.FOLDER, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.FOLDER, private=private)
self.assertFalse(exists)
def test_run_folder_application_private(self):
self.run_test_folder_application(private=True)
def test_run_folder_application_public(self):
self.run_test_folder_application(private=False)
def run_test_link_application(self, private):
app = LinkApplication(TM1PY_APP_FOLDER, APPLICATION_NAME, LINK_NAME)
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.LINK, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.LINK, private=private)
self.assertFalse(exists)
def test_run_link_application_private(self):
self.run_test_link_application(True)
def test_run_link_application_public(self):
self.run_test_link_application(False)
def run_test_process_application(self, private):
app = ProcessApplication(TM1PY_APP_FOLDER, APPLICATION_NAME, PROCESS_NAME)
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.PROCESS, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.PROCESS, private=private)
self.assertFalse(exists)
@unittest.skip
def test_process_application_private(self):
self.run_test_process_application(True)
def test_process_application_public(self):
self.run_test_process_application(False)
def run_test_subset_application(self, private):
app = SubsetApplication(TM1PY_APP_FOLDER, APPLICATION_NAME, DIMENSION_NAMES[0], DIMENSION_NAMES[0], SUBSET_NAME)
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.SUBSET, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.SUBSET, private=private)
self.assertFalse(exists)
@unittest.skip
def test_subset_application_private(self):
self.run_test_subset_application(True)
def test_subset_application_public(self):
self.run_test_subset_application(False)
def run_test_view_application(self, private):
app = ViewApplication(TM1PY_APP_FOLDER, APPLICATION_NAME, CUBE_NAME, VIEW_NAME)
self.tm1.applications.create(application=app, private=private)
app_retrieved = self.tm1.applications.get(app.path, app.application_type, app.name, private=private)
self.assertEqual(app, app_retrieved)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.VIEW, private=private)
self.assertTrue(exists)
self.tm1.applications.delete(app.path, app.application_type, app.name, private=private)
exists = self.tm1.applications.exists(
app.path, name=app.name, application_type=ApplicationTypes.VIEW, private=private)
self.assertFalse(exists)
@unittest.skip
def test_view_application_private(self):
self.run_test_view_application(True)
def test_view_application_public(self):
self.run_test_view_application(False)
| 13,003 | 1,052 | 23 |
b233161373558bab9922ed14cc69db5f313c6bba | 141 | py | Python | odoo/custom/src/private/fieldservice_worker_geolocalize/models/__init__.py | mtelahun/odoo-geo | 1473dabfa8bc082a552cbb88981635d9eb358dce | [
"BSL-1.0"
] | null | null | null | odoo/custom/src/private/fieldservice_worker_geolocalize/models/__init__.py | mtelahun/odoo-geo | 1473dabfa8bc082a552cbb88981635d9eb358dce | [
"BSL-1.0"
] | null | null | null | odoo/custom/src/private/fieldservice_worker_geolocalize/models/__init__.py | mtelahun/odoo-geo | 1473dabfa8bc082a552cbb88981635d9eb358dce | [
"BSL-1.0"
] | null | null | null | # Copyright (C) 2021 TREVI Software
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# flake8: noqa
from . import fsm_person
| 23.5 | 63 | 0.723404 | # Copyright (C) 2021 TREVI Software
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# flake8: noqa
from . import fsm_person
| 0 | 0 | 0 |
54d628d4d3ddf9311d90625573627f6cc737d717 | 363 | py | Python | lecture/migrations/0009_auto_20191217_1120.py | JhoLee/django-lecture_manager | d74ab1d48c954583ffd509346d7cb30b9214f1dc | [
"MIT"
] | null | null | null | lecture/migrations/0009_auto_20191217_1120.py | JhoLee/django-lecture_manager | d74ab1d48c954583ffd509346d7cb30b9214f1dc | [
"MIT"
] | 7 | 2020-06-05T20:02:50.000Z | 2021-09-22T18:05:02.000Z | lecture/migrations/0009_auto_20191217_1120.py | JhoLee/django-lecture_manager | d74ab1d48c954583ffd509346d7cb30b9214f1dc | [
"MIT"
] | null | null | null | # Generated by Django 3.0 on 2019-12-17 02:20
from django.db import migrations
| 19.105263 | 47 | 0.586777 | # Generated by Django 3.0 on 2019-12-17 02:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lecture', '0008_auto_20191217_1115'),
]
operations = [
migrations.RenameField(
model_name='notice',
old_name='attachment',
new_name='file',
),
]
| 0 | 259 | 23 |
b4df9266c9f7897610aa8a7227dacf7d1148455c | 5,990 | py | Python | ownthinkdb/txtpro.py | sealhuang/KBQA-BERT | fa8c32da0625326a7209d053c3c6b1bd14a702fd | [
"MIT"
] | null | null | null | ownthinkdb/txtpro.py | sealhuang/KBQA-BERT | fa8c32da0625326a7209d053c3c6b1bd14a702fd | [
"MIT"
] | null | null | null | ownthinkdb/txtpro.py | sealhuang/KBQA-BERT | fa8c32da0625326a7209d053c3c6b1bd14a702fd | [
"MIT"
] | null | null | null | # vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import csv
def collect_relations(file_name='ownthink_v2.csv'):
"""Get all unique relations."""
rel_dict = {}
# output relation file
csvf_relation = open("relation_dict.csv", "w", newline='', encoding='utf-8')
w_relation = csv.writer(csvf_relation)
w_relation.writerow(("name", "count"))
# load data
with open(file_name, encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
print(tmp)
continue
rel = tmp[1]
if not rel in rel_dict:
rel_dict[rel] = 1
else:
rel_dict[rel] += 1
# save file
for r in rel_dict:
w_relation.writerow((r, rel_dict[r]))
csvf_relation.close()
def collect_mentions(file_name='ownthink_v2.csv'):
"""Get all unique mentions."""
mention_dict = {}
# output file
csvf = open("mention_dict.csv", "w", newline='', encoding='utf-8')
writer = csv.writer(csvf)
writer.writerow(("entity", "mention"))
# load data
with open(file_name, encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
print(tmp)
continue
rel = tmp[1]
if rel=='歧义关系':
if not tmp[2] in mention_dict:
mention_dict[tmp[2]] = [tmp[0]]
elif tmp[0] in mention_dict[tmp[2]]:
print('%s-%s has been detected'%(tmp[0], tmp[2]))
else:
mention_dict[tmp[2]].append(tmp[0])
# save file
for e in mention_dict:
s = [e]
for item in mention_dict[e]:
s.append(item)
writer.writerow(tuple(s))
csvf.close()
def check_entity_exists():
"""Check the existence of the entity in the mention-entity pairs."""
# get all entities which have mentions
entities = []
with open('mention_dict.csv', encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
entities.append(tmp[0])
# check the existence of the entity in triples
with open('./ownthink_v2.csv', encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
#print(tmp)
continue
if not tmp[1]=='歧义关系' and not tmp[0] in entities:
print(tmp)
def prepare4neo4j(file_name='ownthink_v2.csv'):
"""Encode each triple in text into neo4j-required format."""
# entity dict
entity_dict = {}
entity_count = 0
# mention dict
mention_dict = {}
mention_count = 0
# property dict
property_dict = {}
property_count = 0
# output entity file
csvf_entity = open("entity.csv", "w", newline='', encoding='utf-8')
w_entity = csv.writer(csvf_entity)
w_entity.writerow(("entity:ID", "name", ":LABEL"))
# output relation file
csvf_relation = open("relation.csv", "w", newline='', encoding='utf-8')
w_relation = csv.writer(csvf_relation)
w_relation.writerow((":START_ID", "name", ":END_ID", ":TYPE"))
# load data
with open(file_name, 'r', encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip().replace('\n', ' ') for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
print(tmp)
continue
n1 = tmp[0]
rel = tmp[1]
n2 = tmp[2]
if rel=='歧义关系':
if not n2 in entity_dict:
entity_count += 1
entity_dict[n2] = 'e'+str(entity_count)
if not n1 in mention_dict:
mention_count += 1
mention_dict[n1] = 'm'+str(mention_count)
w_relation.writerow((
mention_dict[n1],
rel,
entity_dict[n2],
"MENTION",
))
else:
if not n1 in entity_dict:
entity_count += 1
entity_dict[n1] = 'e'+str(entity_count)
if not n2 in property_dict:
property_count += 1
property_dict[n2] = 'p'+str(property_count)
w_relation.writerow((
entity_dict[n1],
rel,
property_dict[n2],
'RELATION',
))
# save relations
csvf_relation.close()
# save entities and mentions
for e in entity_dict:
w_entity.writerow((entity_dict[e], e, "ENTITY"))
for m in mention_dict:
w_entity.writerow((mention_dict[m], m, "MENTION"))
for p in property_dict:
w_entity.writerow((property_dict[p], p, "PROPERTY"))
csvf_entity.close()
if __name__ == '__main__':
#collect_relations()
#collect_mentions()
#check_entity_exists()
prepare4neo4j()
| 32.378378 | 80 | 0.534391 | # vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import csv
def collect_relations(file_name='ownthink_v2.csv'):
"""Get all unique relations."""
rel_dict = {}
# output relation file
csvf_relation = open("relation_dict.csv", "w", newline='', encoding='utf-8')
w_relation = csv.writer(csvf_relation)
w_relation.writerow(("name", "count"))
# load data
with open(file_name, encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
print(tmp)
continue
rel = tmp[1]
if not rel in rel_dict:
rel_dict[rel] = 1
else:
rel_dict[rel] += 1
# save file
for r in rel_dict:
w_relation.writerow((r, rel_dict[r]))
csvf_relation.close()
def collect_mentions(file_name='ownthink_v2.csv'):
"""Get all unique mentions."""
mention_dict = {}
# output file
csvf = open("mention_dict.csv", "w", newline='', encoding='utf-8')
writer = csv.writer(csvf)
writer.writerow(("entity", "mention"))
# load data
with open(file_name, encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
print(tmp)
continue
rel = tmp[1]
if rel=='歧义关系':
if not tmp[2] in mention_dict:
mention_dict[tmp[2]] = [tmp[0]]
elif tmp[0] in mention_dict[tmp[2]]:
print('%s-%s has been detected'%(tmp[0], tmp[2]))
else:
mention_dict[tmp[2]].append(tmp[0])
# save file
for e in mention_dict:
s = [e]
for item in mention_dict[e]:
s.append(item)
writer.writerow(tuple(s))
csvf.close()
def check_entity_exists():
"""Check the existence of the entity in the mention-entity pairs."""
# get all entities which have mentions
entities = []
with open('mention_dict.csv', encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
entities.append(tmp[0])
# check the existence of the entity in triples
with open('./ownthink_v2.csv', encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip() for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
#print(tmp)
continue
if not tmp[1]=='歧义关系' and not tmp[0] in entities:
print(tmp)
def prepare4neo4j(file_name='ownthink_v2.csv'):
"""Encode each triple in text into neo4j-required format."""
# entity dict
entity_dict = {}
entity_count = 0
# mention dict
mention_dict = {}
mention_count = 0
# property dict
property_dict = {}
property_count = 0
# output entity file
csvf_entity = open("entity.csv", "w", newline='', encoding='utf-8')
w_entity = csv.writer(csvf_entity)
w_entity.writerow(("entity:ID", "name", ":LABEL"))
# output relation file
csvf_relation = open("relation.csv", "w", newline='', encoding='utf-8')
w_relation = csv.writer(csvf_relation)
w_relation.writerow((":START_ID", "name", ":END_ID", ":TYPE"))
# load data
with open(file_name, 'r', encoding='utf-8') as csv_info:
# pop out the header row
csv_info.readline()
csv_reader = csv.reader(x.replace('\0', '') for x in csv_info)
for line in csv_reader:
tmp = [item.strip().replace('\n', ' ') for item in line]
_tmp = [item for item in tmp if item]
if not len(_tmp)==3:
print(tmp)
continue
n1 = tmp[0]
rel = tmp[1]
n2 = tmp[2]
if rel=='歧义关系':
if not n2 in entity_dict:
entity_count += 1
entity_dict[n2] = 'e'+str(entity_count)
if not n1 in mention_dict:
mention_count += 1
mention_dict[n1] = 'm'+str(mention_count)
w_relation.writerow((
mention_dict[n1],
rel,
entity_dict[n2],
"MENTION",
))
else:
if not n1 in entity_dict:
entity_count += 1
entity_dict[n1] = 'e'+str(entity_count)
if not n2 in property_dict:
property_count += 1
property_dict[n2] = 'p'+str(property_count)
w_relation.writerow((
entity_dict[n1],
rel,
property_dict[n2],
'RELATION',
))
# save relations
csvf_relation.close()
# save entities and mentions
for e in entity_dict:
w_entity.writerow((entity_dict[e], e, "ENTITY"))
for m in mention_dict:
w_entity.writerow((mention_dict[m], m, "MENTION"))
for p in property_dict:
w_entity.writerow((property_dict[p], p, "PROPERTY"))
csvf_entity.close()
if __name__ == '__main__':
#collect_relations()
#collect_mentions()
#check_entity_exists()
prepare4neo4j()
| 0 | 0 | 0 |
75caf6ad60c039475381420aed2f0d566e6b5908 | 4,522 | py | Python | photos/tests.py | kasparov-creat/Picture-Perfection | d48e517800eff64e8efe61b7157def0a89ae3978 | [
"MIT"
] | null | null | null | photos/tests.py | kasparov-creat/Picture-Perfection | d48e517800eff64e8efe61b7157def0a89ae3978 | [
"MIT"
] | null | null | null | photos/tests.py | kasparov-creat/Picture-Perfection | d48e517800eff64e8efe61b7157def0a89ae3978 | [
"MIT"
] | null | null | null | from django.test import TestCase
from .models import Category, Image, Location
# Set up method
# Testing instance
# Set up method
# Testing instance
# Set up method
# Testing instance
| 35.328125 | 131 | 0.66077 | from django.test import TestCase
from .models import Category, Image, Location
class CategoryTestClass(TestCase):
# Set up method
def setUp(self):
self.category = Category(name = 'travel')
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.category,Category))
def test_save_method(self):
self.category.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories) > 0)
def test_update_category(self):
self.category.save_category()
self.category.update_category(self.category.id,'fitness')
update=Category.objects.get(name='fitness')
self.assertEqual(update.name,'fitness')
def tearDown(self):
Category.objects.all().delete()
def test_delete_category(self):
self.category.save_category()
categories=Category.objects.all()
self.assertEqual(len(categories),1)
self.category.delete_category()
del_category=Category.objects.all()
self.assertEqual(len(del_category),0)
class LocationTestClass(TestCase):
# Set up method
def setUp(self):
self.location = Location(name = 'Dubai')
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.location,Location))
def test_save_method(self):
self.location.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations) > 0)
def test_update_location(self):
self.location.save_location()
self.location.update_location(self.location.id,'Dubai')
update=Location.objects.get(name='Dubai')
self.assertEqual(update.name,'Dubai')
def tearDown(self):
Location.objects.all().delete()
def test_delete_location(self):
self.location.save_location()
locations=Location.objects.all()
self.assertEqual(len(locations),1)
self.location.delete_location()
del_location=Location.objects.all()
self.assertEqual(len(del_location),0)
class ImageTestClass(TestCase):
# Set up method
def setUp(self):
self.category = Category(name = 'travel')
self.category.save()
self.location = Location(name = 'Dubai')
self.location.save()
self.image = Image(name = 'image_name', description = 'this is an image', category = self.category, location = self.location)
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.image,Image))
def test_save_method(self):
self.image.save()
images = Image.objects.all()
self.assertTrue(len(images) > 0)
def tearDown(self):
Image.objects.all().delete()
Category.objects.all().delete()
Location.objects.all().delete()
def test_delete_image(self):
self.image.save_image()
images=Image.objects.all()
self.assertEqual(len(images),1)
self.image.delete_image()
del_images=Image.objects.all()
self.assertEqual(len(del_images),0)
def test_search_category(self):
self.location = Location(name='Nairobi')
self.location.save_location()
self.category = Category(name='fitness')
self.category.save_category()
self.image=Image(image="photo.png",name='gym',description='workout',location=self.location,category=self.category)
self.image.save_image()
images=Image.search_by_category(self.category.name)
self.assertEqual(len(images),1)
def test_get_image_by_id(self):
self.location = Location(name='Nairobi')
self.location.save_location()
self.category = Category(name='fitness')
self.category.save_category()
self.image= Image(id=1,image="photo.png",name='gym',description='workout',location=self.location,category=self.category)
self.image.save_image()
images = Image.get_image_by_id(self.image.id)
self.assertEqual(images.name, self.image.name)
def test_search_location(self):
self.location = Location(name='Nairobi')
self.location.save_location()
self.category = Category(name='fitness')
self.category.save_category()
self.image=Image(id=1,image="photo.png",name='gym',description='workout',location=self.location,category=self.category)
self.image.save_image()
images = Image.search_by_location("Nairobi")
self.assertTrue(len(images) > 0)
| 3,500 | 36 | 742 |
86af3493021fc803c2875cc14631bdd8241d8319 | 1,535 | py | Python | Introducing_CircuitPlaygroundExpress/CircuitPlaygroundExpress_AudioSine/code.py | claycooper/Adafruit_Learning_System_Guides | 890431bd4b9df929bc601e5886c2a735d89814f9 | [
"MIT"
] | null | null | null | Introducing_CircuitPlaygroundExpress/CircuitPlaygroundExpress_AudioSine/code.py | claycooper/Adafruit_Learning_System_Guides | 890431bd4b9df929bc601e5886c2a735d89814f9 | [
"MIT"
] | null | null | null | Introducing_CircuitPlaygroundExpress/CircuitPlaygroundExpress_AudioSine/code.py | claycooper/Adafruit_Learning_System_Guides | 890431bd4b9df929bc601e5886c2a735d89814f9 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2017 Limor Fried for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import array
import math
import board
import digitalio
try:
from audiocore import RawSample
except ImportError:
from audioio import RawSample
try:
from audioio import AudioOut
except ImportError:
try:
from audiopwmio import PWMAudioOut as AudioOut
except ImportError:
pass # not always supported by every board!
FREQUENCY = 440 # 440 Hz middle 'A'
SAMPLERATE = 8000 # 8000 samples/second, recommended!
# Generate one period of sine wav.
length = SAMPLERATE // FREQUENCY
sine_wave = array.array("H", [0] * length)
for i in range(length):
sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15) + 2 ** 15)
# Enable the speaker
speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
speaker_enable.direction = digitalio.Direction.OUTPUT
speaker_enable.value = True
audio = AudioOut(board.SPEAKER)
sine_wave_sample = RawSample(sine_wave)
# A single sine wave sample is hundredths of a second long. If you set loop=False, it will play
# a single instance of the sample (a quick burst of sound) and then silence for the rest of the
# duration of the time.sleep(). If loop=True, it will play the single instance of the sample
# continuously for the duration of the time.sleep().
audio.play(sine_wave_sample, loop=True) # Play the single sine_wave sample continuously...
time.sleep(1) # for the duration of the sleep (in seconds)
audio.stop() # and then stop.
| 31.979167 | 95 | 0.744625 | # SPDX-FileCopyrightText: 2017 Limor Fried for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import array
import math
import board
import digitalio
try:
from audiocore import RawSample
except ImportError:
from audioio import RawSample
try:
from audioio import AudioOut
except ImportError:
try:
from audiopwmio import PWMAudioOut as AudioOut
except ImportError:
pass # not always supported by every board!
FREQUENCY = 440 # 440 Hz middle 'A'
SAMPLERATE = 8000 # 8000 samples/second, recommended!
# Generate one period of sine wav.
length = SAMPLERATE // FREQUENCY
sine_wave = array.array("H", [0] * length)
for i in range(length):
sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15) + 2 ** 15)
# Enable the speaker
speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
speaker_enable.direction = digitalio.Direction.OUTPUT
speaker_enable.value = True
audio = AudioOut(board.SPEAKER)
sine_wave_sample = RawSample(sine_wave)
# A single sine wave sample is hundredths of a second long. If you set loop=False, it will play
# a single instance of the sample (a quick burst of sound) and then silence for the rest of the
# duration of the time.sleep(). If loop=True, it will play the single instance of the sample
# continuously for the duration of the time.sleep().
audio.play(sine_wave_sample, loop=True) # Play the single sine_wave sample continuously...
time.sleep(1) # for the duration of the sleep (in seconds)
audio.stop() # and then stop.
| 0 | 0 | 0 |
c7247eec88133f20ed823babefa8db152456f367 | 17,666 | py | Python | src/checkrs/sim_cdf.py | timothyb0912/checkrs | 213ac39b5dccc0c38d984a66286174de070af00d | [
"BSD-3-Clause"
] | 2 | 2021-08-31T17:07:12.000Z | 2021-09-02T09:20:33.000Z | src/checkrs/sim_cdf.py | timothyb0912/checkrs | 213ac39b5dccc0c38d984a66286174de070af00d | [
"BSD-3-Clause"
] | 28 | 2020-09-26T01:38:14.000Z | 2020-12-03T00:47:22.000Z | src/checkrs/sim_cdf.py | timothyb0912/checkrs | 213ac39b5dccc0c38d984a66286174de070af00d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Functions for plotting simulated vs observed cumulative distribution functions.
"""
from __future__ import absolute_import
import os
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
import altair as alt
import attr
import checkrs.base as base
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine as p9
import seaborn as sbn
from checkrs.plot_utils import _choice_evaluator
from checkrs.plot_utils import _label_despine_save_and_show_plot
from checkrs.plot_utils import _plot_single_cdf_on_axis
from checkrs.plot_utils import _thin_rows
from checkrs.utils import progress
try:
# in Python 3 range returns an iterator instead of list
# to maintain backwards compatibility use "old" version of range
from past.builtins import range
except ImportError:
pass
# Set the plotting style
sbn.set_style("darkgrid")
def plot_simulated_cdfs(
sim_y,
orig_df,
filter_idx,
col_to_plot,
choice_col,
sim_color="#a6bddb",
orig_color="#045a8d",
choice_condition=1,
thin_pct=None,
fig_and_ax=None,
label="Simulated",
title=None,
bar_alpha=0.5,
bar_color="#fee391",
n_traces=None,
rseed=None,
show=True,
figsize=(10, 6),
fontsize=12,
xlim=None,
ylim=None,
output_file=None,
dpi=500,
**kwargs,
):
"""
Plots an observed cumulative density function (CDF) versus the simulated
versions of that same CDF.
Parameters
----------
sim_y : 2D ndarray.
The simulated outcomes. All elements should be zeros or ones. There
should be one column for every set of simulated outcomes. There should
be one row for every row of one's dataset.
orig_df : pandas DataFrame.
The dataframe containing the data used to estimate one's model. Should
have the same number of rows as `sim_y`.
filter_idx : 1D ndarray of booleans.
Should have the same number of rows as `orig_df`. Will denote the rows
that should be used to compute the CDF if their outcome is
`choice_condition`.
col_to_plot : str.
A column in `orig_df` whose data will be used to compute the KDEs.
choice_col : str.
The column in `orig_df` containing the data on the original outcomes.
sim_color, orig_color : valid 'color' argument for matplotlib, optional.
The colors that will be used to plot the simulated and observed CDFs,
respectively. Default is `sim_color == '#a6bddb'` and
`orig_color == '#045a8d'`.
choice_condition : `{0, 1}`, optional.
Denotes the outcome class that we wish to plot the CDFs for. If
`choice_condition == 1`, then we will plot the CDFs for those where
`sim_y == 1` and `filter_idx == True`. If `choice_condition == 0`, we
will plot the CDFs for those rows where `sim_y == 0` and
`filter_idx == True`. Default == 1.
fig_and_ax : list of matplotlib figure and axis, or `None`, optional.
Determines whether a new figure will be created for the plot or whether
the plot will be drawn on the passed Axes object. If None, a new figure
will be created. Default is `None`.
label : str or None, optional.
The label for the simulated CDFs. If None, no label will be displayed.
Default = 'Simulated'.
title : str or None, optional.
The plot title. If None, no title will be displayed. Default is None.
bar_alpha : float in (0.0, 1.0), optional.
Denotes the opacity of the bar used to denote the proportion of
simulations where no observations had `sim_y == choice_condition`.
Higher values lower the bar's transparency. `0` leads to an invisible
bar. Default == 0.5.
bar_color : valid 'color' argument for matplotlib, optional.
The color that will be used to plot the bar that shows the proportion
of simulations where no observations had `sim_y == choice_condition`.
Default is '#fee391'.
thin_pct : float in (0.0, 1.0) or None, optional.
Determines the percentage of the data (rows) to be used for plotting.
If None, the full dataset will be used. Default is None.
n_traces : int or None, optional.
Should be less than `sim_y.shape[1]`. Denotes the number of simulated
choices to randomly select for plotting. If None, all columns of
`sim_y` will be used for plotting. Default is None.
rseed : int or None, optional.
Denotes the random seed to be used when selecting `n_traces` columns
for plotting. This is useful for reproducing an exact plot when using
`n_traces`. If None, no random seed will be set. Default is None.
show : bool, optional.
Determines whether `fig.show()` will be called after the plots have
been drawn. Default is True.
figsize : 2-tuple of ints, optional.
If a new figure is created for this plot, this kwarg determines the
width and height of the figure that is created. Default is `(5, 3)`.
fontsize : int or None, optional.
The fontsize to be used in the plot. Default is 12.
xlim, ylim : 2-tuple of ints or None, optional.
Denotes the extent that will be set on the x-axis and y-axis,
respectively, of the matplotlib Axes instance that is drawn on. If
None, then the extent will not be manually altered. Default is None.
output_file : str, or None, optional.
Denotes the relative or absolute filepath (including the file format)
that is to be used to save the plot. If None, the plot will not be
saved to file. Default is None.
dpi : positive int, optional.
Denotes the number of 'dots per inch' for the saved figure. Will only
be used if `output_file is not None`. Default == 500.
kwargs : passed to `ax.plot` call in matplotlib.
Returns
-------
None.
"""
# Filter the data
filtered_sim_y = sim_y[filter_idx, :]
filtered_orig_df = orig_df.loc[filter_idx, :]
if rseed is not None:
np.random.seed(rseed)
if n_traces is not None:
selected_cols = np.random.choice(
filtered_sim_y.shape[1], size=n_traces, replace=False
)
filtered_sim_y = filtered_sim_y[:, selected_cols]
if thin_pct is not None:
# Randomly select rows to be retained for plotting
selected_rows = _thin_rows(filtered_sim_y, thin_pct)
# Filter the simulated-y, df, and filtering values
filtered_sim_y = filtered_sim_y[selected_rows, :]
filtered_orig_df = filtered_orig_df.iloc[selected_rows, :]
sample_iterator = progress(range(filtered_sim_y.shape[1]), desc="Calculating CDFs")
# Get the original values
orig_choices = filtered_orig_df[choice_col].values
orig_plotting_idx = _choice_evaluator(orig_choices, choice_condition)
orig_plotting_vals = filtered_orig_df.loc[orig_plotting_idx, col_to_plot].values
if fig_and_ax is None:
fig, axis = plt.subplots(1, figsize=figsize)
fig_and_ax = [fig, axis]
else:
fig, axis = fig_and_ax
# Count simulated data with no obs meeting the choice and filter conditions
num_null_choices = 0
# store the minimum and maximum x-values
min_x, max_x = orig_plotting_vals.min(), orig_plotting_vals.max()
for i in sample_iterator:
current_choices = filtered_sim_y[:, i]
# Determine the final rows to use for plotting
plotting_idx = _choice_evaluator(current_choices, choice_condition)
if plotting_idx.sum() == 0:
num_null_choices += 1
continue
# Get the values for plotting
current_plotting_vals = filtered_orig_df.loc[plotting_idx, col_to_plot].values
# Update the plot extents
min_x = min(current_plotting_vals.min(), min_x)
max_x = max(current_plotting_vals.max(), max_x)
_plot_single_cdf_on_axis(
current_plotting_vals, axis, color=sim_color, alpha=0.5, **kwargs
)
# Plot the originally observed relationship
_plot_single_cdf_on_axis(
orig_plotting_vals,
axis,
color=orig_color,
label="Observed",
alpha=1.0,
**kwargs,
)
if num_null_choices > 0:
num_null_pct = num_null_choices / float(filtered_sim_y.shape[1])
null_pct_density_equivalent = axis.get_ylim()[1] * num_null_pct
null_label = "'No Obs' Simulations: {:.2%}".format(num_null_pct)
axis.bar(
[0],
[null_pct_density_equivalent],
width=0.1 * np.ptp(orig_plotting_vals),
align="edge",
alpha=bar_alpha,
color=bar_color,
label=null_label,
)
if label is not None:
_patch = mpatches.Patch(color=sim_color, label=label)
current_handles, current_labels = axis.get_legend_handles_labels()
current_handles.append(_patch)
current_labels.append(label)
axis.legend(current_handles, current_labels, loc="best", fontsize=fontsize)
# set the plot extents
if xlim is None:
axis.set_xlim((min_x, max_x))
else:
axis.set_xlim(xlim)
if ylim is not None:
axis.set_ylim(ylim)
# Take care of boilerplate plotting necessities
_label_despine_save_and_show_plot(
x_label=col_to_plot,
y_label="Cumulative\nDensity\nFunction",
fig_and_ax=fig_and_ax,
fontsize=fontsize,
y_rot=0,
y_pad=40,
title=title,
output_file=output_file,
show=show,
dpi=dpi,
)
return None
@attr.s
| 35.191235 | 87 | 0.614627 | # -*- coding: utf-8 -*-
"""
Functions for plotting simulated vs observed cumulative distribution functions.
"""
from __future__ import absolute_import
import os
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
import altair as alt
import attr
import checkrs.base as base
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine as p9
import seaborn as sbn
from checkrs.plot_utils import _choice_evaluator
from checkrs.plot_utils import _label_despine_save_and_show_plot
from checkrs.plot_utils import _plot_single_cdf_on_axis
from checkrs.plot_utils import _thin_rows
from checkrs.utils import progress
try:
# in Python 3 range returns an iterator instead of list
# to maintain backwards compatibility use "old" version of range
from past.builtins import range
except ImportError:
pass
# Set the plotting style
sbn.set_style("darkgrid")
def plot_simulated_cdfs(
sim_y,
orig_df,
filter_idx,
col_to_plot,
choice_col,
sim_color="#a6bddb",
orig_color="#045a8d",
choice_condition=1,
thin_pct=None,
fig_and_ax=None,
label="Simulated",
title=None,
bar_alpha=0.5,
bar_color="#fee391",
n_traces=None,
rseed=None,
show=True,
figsize=(10, 6),
fontsize=12,
xlim=None,
ylim=None,
output_file=None,
dpi=500,
**kwargs,
):
"""
Plots an observed cumulative density function (CDF) versus the simulated
versions of that same CDF.
Parameters
----------
sim_y : 2D ndarray.
The simulated outcomes. All elements should be zeros or ones. There
should be one column for every set of simulated outcomes. There should
be one row for every row of one's dataset.
orig_df : pandas DataFrame.
The dataframe containing the data used to estimate one's model. Should
have the same number of rows as `sim_y`.
filter_idx : 1D ndarray of booleans.
Should have the same number of rows as `orig_df`. Will denote the rows
that should be used to compute the CDF if their outcome is
`choice_condition`.
col_to_plot : str.
A column in `orig_df` whose data will be used to compute the KDEs.
choice_col : str.
The column in `orig_df` containing the data on the original outcomes.
sim_color, orig_color : valid 'color' argument for matplotlib, optional.
The colors that will be used to plot the simulated and observed CDFs,
respectively. Default is `sim_color == '#a6bddb'` and
`orig_color == '#045a8d'`.
choice_condition : `{0, 1}`, optional.
Denotes the outcome class that we wish to plot the CDFs for. If
`choice_condition == 1`, then we will plot the CDFs for those where
`sim_y == 1` and `filter_idx == True`. If `choice_condition == 0`, we
will plot the CDFs for those rows where `sim_y == 0` and
`filter_idx == True`. Default == 1.
fig_and_ax : list of matplotlib figure and axis, or `None`, optional.
Determines whether a new figure will be created for the plot or whether
the plot will be drawn on the passed Axes object. If None, a new figure
will be created. Default is `None`.
label : str or None, optional.
The label for the simulated CDFs. If None, no label will be displayed.
Default = 'Simulated'.
title : str or None, optional.
The plot title. If None, no title will be displayed. Default is None.
bar_alpha : float in (0.0, 1.0), optional.
Denotes the opacity of the bar used to denote the proportion of
simulations where no observations had `sim_y == choice_condition`.
Higher values lower the bar's transparency. `0` leads to an invisible
bar. Default == 0.5.
bar_color : valid 'color' argument for matplotlib, optional.
The color that will be used to plot the bar that shows the proportion
of simulations where no observations had `sim_y == choice_condition`.
Default is '#fee391'.
thin_pct : float in (0.0, 1.0) or None, optional.
Determines the percentage of the data (rows) to be used for plotting.
If None, the full dataset will be used. Default is None.
n_traces : int or None, optional.
Should be less than `sim_y.shape[1]`. Denotes the number of simulated
choices to randomly select for plotting. If None, all columns of
`sim_y` will be used for plotting. Default is None.
rseed : int or None, optional.
Denotes the random seed to be used when selecting `n_traces` columns
for plotting. This is useful for reproducing an exact plot when using
`n_traces`. If None, no random seed will be set. Default is None.
show : bool, optional.
Determines whether `fig.show()` will be called after the plots have
been drawn. Default is True.
figsize : 2-tuple of ints, optional.
If a new figure is created for this plot, this kwarg determines the
width and height of the figure that is created. Default is `(5, 3)`.
fontsize : int or None, optional.
The fontsize to be used in the plot. Default is 12.
xlim, ylim : 2-tuple of ints or None, optional.
Denotes the extent that will be set on the x-axis and y-axis,
respectively, of the matplotlib Axes instance that is drawn on. If
None, then the extent will not be manually altered. Default is None.
output_file : str, or None, optional.
Denotes the relative or absolute filepath (including the file format)
that is to be used to save the plot. If None, the plot will not be
saved to file. Default is None.
dpi : positive int, optional.
Denotes the number of 'dots per inch' for the saved figure. Will only
be used if `output_file is not None`. Default == 500.
kwargs : passed to `ax.plot` call in matplotlib.
Returns
-------
None.
"""
# Filter the data
filtered_sim_y = sim_y[filter_idx, :]
filtered_orig_df = orig_df.loc[filter_idx, :]
if rseed is not None:
np.random.seed(rseed)
if n_traces is not None:
selected_cols = np.random.choice(
filtered_sim_y.shape[1], size=n_traces, replace=False
)
filtered_sim_y = filtered_sim_y[:, selected_cols]
if thin_pct is not None:
# Randomly select rows to be retained for plotting
selected_rows = _thin_rows(filtered_sim_y, thin_pct)
# Filter the simulated-y, df, and filtering values
filtered_sim_y = filtered_sim_y[selected_rows, :]
filtered_orig_df = filtered_orig_df.iloc[selected_rows, :]
sample_iterator = progress(range(filtered_sim_y.shape[1]), desc="Calculating CDFs")
# Get the original values
orig_choices = filtered_orig_df[choice_col].values
orig_plotting_idx = _choice_evaluator(orig_choices, choice_condition)
orig_plotting_vals = filtered_orig_df.loc[orig_plotting_idx, col_to_plot].values
if fig_and_ax is None:
fig, axis = plt.subplots(1, figsize=figsize)
fig_and_ax = [fig, axis]
else:
fig, axis = fig_and_ax
# Count simulated data with no obs meeting the choice and filter conditions
num_null_choices = 0
# store the minimum and maximum x-values
min_x, max_x = orig_plotting_vals.min(), orig_plotting_vals.max()
for i in sample_iterator:
current_choices = filtered_sim_y[:, i]
# Determine the final rows to use for plotting
plotting_idx = _choice_evaluator(current_choices, choice_condition)
if plotting_idx.sum() == 0:
num_null_choices += 1
continue
# Get the values for plotting
current_plotting_vals = filtered_orig_df.loc[plotting_idx, col_to_plot].values
# Update the plot extents
min_x = min(current_plotting_vals.min(), min_x)
max_x = max(current_plotting_vals.max(), max_x)
_plot_single_cdf_on_axis(
current_plotting_vals, axis, color=sim_color, alpha=0.5, **kwargs
)
# Plot the originally observed relationship
_plot_single_cdf_on_axis(
orig_plotting_vals,
axis,
color=orig_color,
label="Observed",
alpha=1.0,
**kwargs,
)
if num_null_choices > 0:
num_null_pct = num_null_choices / float(filtered_sim_y.shape[1])
null_pct_density_equivalent = axis.get_ylim()[1] * num_null_pct
null_label = "'No Obs' Simulations: {:.2%}".format(num_null_pct)
axis.bar(
[0],
[null_pct_density_equivalent],
width=0.1 * np.ptp(orig_plotting_vals),
align="edge",
alpha=bar_alpha,
color=bar_color,
label=null_label,
)
if label is not None:
_patch = mpatches.Patch(color=sim_color, label=label)
current_handles, current_labels = axis.get_legend_handles_labels()
current_handles.append(_patch)
current_labels.append(label)
axis.legend(current_handles, current_labels, loc="best", fontsize=fontsize)
# set the plot extents
if xlim is None:
axis.set_xlim((min_x, max_x))
else:
axis.set_xlim(xlim)
if ylim is not None:
axis.set_ylim(ylim)
# Take care of boilerplate plotting necessities
_label_despine_save_and_show_plot(
x_label=col_to_plot,
y_label="Cumulative\nDensity\nFunction",
fig_and_ax=fig_and_ax,
fontsize=fontsize,
y_rot=0,
y_pad=40,
title=title,
output_file=output_file,
show=show,
dpi=dpi,
)
return None
@attr.s
class ViewSimCDF(base.View):
data: pd.DataFrame = attr.ib()
_url: str = attr.ib()
_metadata: Dict[str, str] = attr.ib()
theme: base.PlotTheme = attr.ib()
def set_plotting_col(self, column: str) -> bool:
"""
Raises ValueError if `column` not in `data`.
"""
if not isinstance(column, str):
msg = "`column` MUST be a string."
raise TypeError(column)
if column not in self.data.columns:
msg = "`column` not in `data.columns`"
raise ValueError(msg)
self.theme.plotting_col = column
return True
@classmethod
def from_chart_data(cls, data: base.ChartData) -> "ViewSimCDF":
"""
Instantiates the simulated CDF chart from the given `ChartData`.
"""
return cls(
data=data.data,
url=data.url,
metadata=data.metadata,
theme=base.PlotTheme(
label_y="Cumulative\nDistribution\nFunction",
plotting_col=data.metadata["target"],
),
)
def draw(self, backend: str) -> base.ViewObject:
"""
Specifies the view of the data using `backend`.
"""
if backend == "plotnine":
return self.draw_plotnine()
elif backend == "altair":
return self.draw_altair()
else:
raise ValueError("`backend` MUST == 'plotnine'.")
def _get_sim_ids(self) -> List[int]:
# Note [::-1] puts id_sim = 1 on top (id_sim = 1 is last).
# Hopefully its the observed line
sim_ids = np.sort(
self.data[self._metadata["id_sim"]].unique()
).tolist()[::-1]
return sim_ids
def draw_plotnine(self) -> p9.ggplot:
"""
Specifies the plot using plotnine.
"""
sim_ids = self._get_sim_ids()
# Add the data to the plot
chart = p9.ggplot()
for idx in progress(sim_ids):
chart = chart + self.create_single_cdf_line_plotnine(idx)
# Format the plot
chart = self.format_view_plotnine(chart)
return chart
def create_single_cdf_line_plotnine(self, id_sim: int) -> p9.ggplot:
"""
Specifies a singe CDF line on the plot using plotnine.
"""
id_col_sim = self._metadata["id_sim"]
observed_col = self._metadata["observed"]
return p9.stat_ecdf(
mapping=p9.aes(
x=self.theme.plotting_col,
color=observed_col,
alpha=observed_col,
),
data=self.data.loc[self.data[id_col_sim] == id_sim],
)
def draw_altair(self) -> alt.TopLevelMixin:
"""
Specifies the plot through Altair.
"""
sim_ids = self._get_sim_ids()
# Add the data to the plot
chart = self.create_single_cdf_line_altair(sim_ids[0])
for idx in progress(sim_ids[1:]):
chart += self.create_single_cdf_line_altair(idx)
# Format the plot
chart = self.format_view_altair(chart)
return chart
def create_single_cdf_line_altair(self, id_sim: int) -> alt.TopLevelMixin:
"""
Specifies a singe CDF line on the plot using Altair.
"""
# Get data and metadata
current_data = self._url if self._url is not None else self.data
id_col_sim = self._metadata["id_sim"]
observed_col = self._metadata["observed"]
# Declare mappings of data to x-axes, y-axes, color and opacity
observed_domain = [True, False]
color_range = [self.theme.color_observed, self.theme.color_simulated]
opacity_range = [1, 0.5]
encoding_x = alt.X(
self.theme.plotting_col,
type="quantitative",
title=self.theme.label_x,
)
encoding_y = alt.Y(
"density",
type="quantitative",
title=self.theme.label_y,
)
encoding_color = alt.Color(
observed_col,
type="nominal",
scale=alt.Scale(
domain=observed_domain,
range=color_range,
),
)
encoding_opacity = alt.Opacity(
observed_col,
type="nominal",
scale=alt.Scale(
domain=observed_domain,
range=opacity_range,
),
)
# Create the single cdf chart by filtering, transforming, and encoding
# data to the lines on the plot.
chart = (
alt.Chart(current_data)
.transform_filter(alt.datum[id_col_sim] == id_sim)
.transform_density(
self.theme.plotting_col,
as_=[self.theme.plotting_col, "density"],
groupby=[observed_col],
cumulative=True,
steps=25,
)
.mark_line()
.encode(
encoding_x,
encoding_y,
encoding_color,
encoding_opacity,
)
)
return chart
def format_view_plotnine(self, chart: p9.ggplot) -> p9.ggplot:
"""
Apply chart formatting options from `self.theme`.
"""
figure_size = (self.theme.width_inches, self.theme.height_inches)
chart = (
chart
+ p9.theme(
axis_text=p9.element_text(size=self.theme.fontsize),
axis_title_y=p9.element_text(
rotation=self.theme.rotation_y,
margin={"r": self.theme.padding_y_plotnine, "units": "pt"},
),
figure_size=figure_size,
dpi=self.theme.dpi_print,
)
+ p9.xlab(self.theme.label_x)
+ p9.ylab(self.theme.label_y)
+ p9.scale_color_manual(
(self.theme.color_simulated, self.theme.color_observed),
labels=p9.utils.waiver(),
)
+ p9.scale_alpha_manual((0.5, 1), labels=p9.utils.waiver())
)
if self.theme.title is not None:
chart = chart + p9.ggtitle(self.theme.title)
return chart
def format_view_altair(self, chart: alt.TopLevelMixin) -> alt.TopLevelMixin:
"""
Apply chart formatting options from `self.theme`.
"""
chart = (
chart.configure_axisX(
labelFontSize=self.theme.fontsize,
labelAngle=self.theme.rotation_x_ticks,
titleFontSize=self.theme.fontsize,
)
.configure_axisY(
labelFontSize=self.theme.fontsize,
titleFontSize=self.theme.fontsize,
titleAngle=self.theme.rotation_y,
titlePadding=self.theme.padding_y_altair,
)
.properties(
width=self.theme.width_pixels,
height=self.theme.height_pixels,
)
)
if self.theme.title is not None:
chart = chart.properties(
width=self.theme.width_pixels,
height=self.theme.height_pixels,
title=self.theme.title,
).configure_title(fontSize=self.theme.fontsize)
return chart
def save(self, filename: str) -> bool:
"""
Saves the view of the data using the appropriate backend for the
filename's extension. Returns True if saving succeeded.
"""
ext = os.path.splitext(filename)[1]
if ext not in base.EXTENSIONS:
raise ValueError(f"Format MUST be in {base.EXTENSIONS}")
if ext in base.EXTENSIONS_PLOTNINE:
chart = self.draw_plotnine()
elif ext in base.EXTENSIONS_ALTAIR:
chart = self.draw_altair()
chart.save(filename)
return True
| 256 | 7,623 | 22 |