hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ba596998bb17250c636379d582718007fccc4be7 | 520 | py | Python | sell_staff/migrations/0002_auto_20201123_1455.py | Ishikashah2510/nirvaas_main | 5eaf92756d06261a7f555b10aad864a34c9e761b | [
"MIT"
] | null | null | null | sell_staff/migrations/0002_auto_20201123_1455.py | Ishikashah2510/nirvaas_main | 5eaf92756d06261a7f555b10aad864a34c9e761b | [
"MIT"
] | null | null | null | sell_staff/migrations/0002_auto_20201123_1455.py | Ishikashah2510/nirvaas_main | 5eaf92756d06261a7f555b10aad864a34c9e761b | [
"MIT"
] | 3 | 2020-12-30T11:35:22.000Z | 2021-01-07T13:10:26.000Z | # Generated by Django 3.1.3 on 2020-11-23 09:25
import django.core.files.storage
from django.db import migrations, models
| 26 | 156 | 0.655769 | # Generated by Django 3.1.3 on 2020-11-23 09:25
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sell_staff', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='items',
name='Item_photo',
field=models.ImageField(storage=django.core.files.storage.FileSystemStorage(location='D:/codes/python_codes/nirvaas_main/media'), upload_to=''),
),
]
| 0 | 373 | 23 |
42b02546d446dc752a8de017e36e151146bb611a | 1,008 | py | Python | apprentice_learner/urls.py | afcarl/apprentice_learner_api | c59f4e6d6f03eb807d61a4bcfa1e5771d8b969cc | [
"MIT"
] | 1 | 2019-04-22T16:38:57.000Z | 2019-04-22T16:38:57.000Z | apprentice_learner/urls.py | afcarl/apprentice_learner_api | c59f4e6d6f03eb807d61a4bcfa1e5771d8b969cc | [
"MIT"
] | null | null | null | apprentice_learner/urls.py | afcarl/apprentice_learner_api | c59f4e6d6f03eb807d61a4bcfa1e5771d8b969cc | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from apprentice_learner import views
app_name = 'apprentice_api'
urlpatterns = [
url(r'^create/$', views.create, name = "create"),
url(r'^request/(?P<agent_id>[0-9]+)/$', views.request, name="request"),
url(r'^train/(?P<agent_id>[0-9]+)/$', views.train, name="train"),
url(r'^check/(?P<agent_id>[0-9]+)/$', views.check, name="check"),
url(r'^report/(?P<agent_id>[0-9]+)/$', views.report, name="report"),
url(r'^request/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.request_by_name, name="request_by_name"),
url(r'^train/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.train_by_name, name="train_by_name"),
url(r'^check/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.check_by_name, name="check_by_name"),
url(r'^report/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.report_by_name, name="report_by_name"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 53.052632 | 107 | 0.676587 | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from apprentice_learner import views
app_name = 'apprentice_api'
urlpatterns = [
url(r'^create/$', views.create, name = "create"),
url(r'^request/(?P<agent_id>[0-9]+)/$', views.request, name="request"),
url(r'^train/(?P<agent_id>[0-9]+)/$', views.train, name="train"),
url(r'^check/(?P<agent_id>[0-9]+)/$', views.check, name="check"),
url(r'^report/(?P<agent_id>[0-9]+)/$', views.report, name="report"),
url(r'^request/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.request_by_name, name="request_by_name"),
url(r'^train/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.train_by_name, name="train_by_name"),
url(r'^check/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.check_by_name, name="check_by_name"),
url(r'^report/(?P<agent_name>[a-zA-Z0-9_-]{1,200})/$', views.report_by_name, name="report_by_name"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 0 | 0 | 0 |
149af82596d8471e480030d64ffea479f630a7ae | 1,288 | py | Python | config/LoggingConfiguration.py | ronnyfriedland/minioclient | b4a55ed62f1d61f39254a4003ee65b3a778f3526 | [
"MIT"
] | 1 | 2020-06-10T10:32:32.000Z | 2020-06-10T10:32:32.000Z | config/LoggingConfiguration.py | ronnyfriedland/minioclient | b4a55ed62f1d61f39254a4003ee65b3a778f3526 | [
"MIT"
] | 2 | 2019-11-11T19:16:52.000Z | 2019-11-12T07:05:56.000Z | config/LoggingConfiguration.py | ronnyfriedland/minioclient | b4a55ed62f1d61f39254a4003ee65b3a778f3526 | [
"MIT"
] | null | null | null | from config.Configuration import Configuration
import logging
class LoggingConfiguration(Configuration):
"""
Author: Ronny Friedland
Handles logging configuration
"""
CONFIG_DEFAULTS = {'loglevel':'INFO'}
def check_config(self):
"""
Check if config.ini contains logging settings
:return: true if configuration contains logging settings
"""
return super().check_config(section="logging")
def refresh_config(self):
"""
Refreshed logging configuration and (re-)-init logging
"""
super().refresh_config()
if self.check_config() is not False:
level = super().read_config("logging", "loglevel")
else:
level = "INFO"
if level == "DEBUG":
logging.basicConfig(filename='minioclient.log', level=logging.DEBUG)
elif level == "INFO":
logging.basicConfig(filename='minioclient.log', level=logging.INFO)
elif level == "WARN":
logging.basicConfig(filename='minioclient.log', level=logging.WARN)
else:
logging.basicConfig(filename='minioclient.log', level=logging.ERROR)
| 29.272727 | 80 | 0.631988 | from config.Configuration import Configuration
import logging
class LoggingConfiguration(Configuration):
"""
Author: Ronny Friedland
Handles logging configuration
"""
CONFIG_DEFAULTS = {'loglevel':'INFO'}
def __init__(self, config_file = Configuration.config_file):
super().__init__(config_file)
def check_config(self):
"""
Check if config.ini contains logging settings
:return: true if configuration contains logging settings
"""
return super().check_config(section="logging")
def refresh_config(self):
"""
Refreshed logging configuration and (re-)-init logging
"""
super().refresh_config()
if self.check_config() is not False:
level = super().read_config("logging", "loglevel")
else:
level = "INFO"
if level == "DEBUG":
logging.basicConfig(filename='minioclient.log', level=logging.DEBUG)
elif level == "INFO":
logging.basicConfig(filename='minioclient.log', level=logging.INFO)
elif level == "WARN":
logging.basicConfig(filename='minioclient.log', level=logging.WARN)
else:
logging.basicConfig(filename='minioclient.log', level=logging.ERROR)
| 77 | 0 | 27 |
b77ad8cc5db628e76e31d19b2fa47cd72b004797 | 1,670 | py | Python | cli/gardener_ci/_oci.py | mliepold/cc-utils | 3f8c4b0d11d6a52d1605026f478371411daab81e | [
"BSD-3-Clause"
] | null | null | null | cli/gardener_ci/_oci.py | mliepold/cc-utils | 3f8c4b0d11d6a52d1605026f478371411daab81e | [
"BSD-3-Clause"
] | null | null | null | cli/gardener_ci/_oci.py | mliepold/cc-utils | 3f8c4b0d11d6a52d1605026f478371411daab81e | [
"BSD-3-Clause"
] | null | null | null | import dataclasses
import pprint
import sys
import ccc.oci
import oci
__cmd_name__ = 'oci'
| 22.266667 | 85 | 0.646707 | import dataclasses
import pprint
import sys
import ccc.oci
import oci
__cmd_name__ = 'oci'
def cp(src:str, tgt:str):
oci_client = ccc.oci.oci_client()
oci.replicate_artifact(
src_image_reference=src,
tgt_image_reference=tgt,
oci_client=oci_client,
)
def ls(image: str):
oci_client = ccc.oci.oci_client()
print(oci_client.tags(image_reference=image))
def manifest(image_reference: str, pretty:bool=True):
oci_client = ccc.oci.oci_client()
if pretty:
manifest = oci_client.manifest(image_reference=image_reference)
pprint.pprint(dataclasses.asdict(manifest))
else:
manifest = oci_client.manifest_raw(image_reference=image_reference)
print(manifest.text)
def cfg(image_reference: str):
oci_client = ccc.oci.oci_client()
manifest = oci_client.manifest(image_reference=image_reference)
pprint.pprint(
oci_client.blob(
image_reference=image_reference,
digest=manifest.config.digest,
stream=False,
).json(),
)
def blob(image_reference: str, digest: str, outfile: str):
oci_client = ccc.oci.oci_client()
if outfile == '-':
if sys.stdout.isatty():
print('must not stream binary content to stdout (pipe to other process)')
exit(1)
outfh = sys.stdout
write = outfh.buffer.write
else:
outfh = open(outfile, 'wb')
write = outfh.write
blob = oci_client.blob(
image_reference=image_reference,
digest=digest,
stream=True,
)
for chunk in blob.iter_content():
write(chunk)
outfh.flush()
| 1,457 | 0 | 115 |
dd9bbf8526198f00fb8980811e2cc9ecc05c908e | 697 | py | Python | sch/migrations/0001_initial.py | nikhilxifer/bloodbankmanagement | a31ce090c33fa2499f26ec685220d207d9f08394 | [
"MIT"
] | null | null | null | sch/migrations/0001_initial.py | nikhilxifer/bloodbankmanagement | a31ce090c33fa2499f26ec685220d207d9f08394 | [
"MIT"
] | null | null | null | sch/migrations/0001_initial.py | nikhilxifer/bloodbankmanagement | a31ce090c33fa2499f26ec685220d207d9f08394 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2021-06-09 14:32
from django.db import migrations, models
| 24.892857 | 79 | 0.519369 | # Generated by Django 3.1 on 2021-06-09 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='search1',
fields=[
('ID', models.IntegerField(primary_key=True, serialize=False)),
('Name', models.CharField(max_length=120)),
('State', models.CharField(max_length=120)),
('City', models.CharField(max_length=120)),
('Contact', models.IntegerField()),
],
options={
'db_table': 'b1121',
},
),
]
| 0 | 585 | 23 |
ca2aafd31bdda323c2b2d51628c87574d4b747b8 | 1,720 | py | Python | src/abstract_structures/queues/linked_queue.py | mlatifi/Python-and-Algorithms-and-Data-Structures | 47fed555093b0797a5d8043573a3278028b26a2b | [
"MIT"
] | 5 | 2017-08-03T06:33:49.000Z | 2021-08-06T13:20:57.000Z | src/abstract_structures/queues/linked_queue.py | ritahu/Python-and-Algorithms-and-Data-Structures | d2c082d261a68b06f533703867ae8a90ac7f4df1 | [
"MIT"
] | null | null | null | src/abstract_structures/queues/linked_queue.py | ritahu/Python-and-Algorithms-and-Data-Structures | d2c082d261a68b06f533703867ae8a90ac7f4df1 | [
"MIT"
] | 6 | 2017-04-27T13:30:49.000Z | 2020-11-01T20:28:55.000Z | #!/usr/bin/env python
__author__ = "bt3"
''' Queue acts as a container for nodes (objects) that are inserted and removed according FIFO'''
if __name__ == '__main__':
queue = LinkedQueue()
print("Is the queue empty? ", queue.isEmpty())
print("Adding 0 to 10 in the queue...")
for i in range(10):
queue.enqueue(i)
print("Is the queue empty? ", queue.isEmpty())
queue._print()
print("Queue size: ", queue.size())
print("Queue peek : ", queue.peek())
print("Dequeue...", queue.dequeue())
print("Queue peek: ", queue.peek())
queue._print()
| 21.234568 | 97 | 0.547093 | #!/usr/bin/env python
__author__ = "bt3"
''' Queue acts as a container for nodes (objects) that are inserted and removed according FIFO'''
class Node(object):
def __init__(self, value=None, pointer=None):
self.value = value
self.pointer = None
class LinkedQueue(object):
def __init__(self):
self.head = None
self.tail = None
def isEmpty(self):
return not bool(self.head)
def dequeue(self):
if self.head:
value = self.head.value
self.head = self.head.pointer
return value
else:
print('Queue is empty, cannot dequeue.')
def enqueue(self, value):
node = Node(value)
if not self.head:
self.head = node
self.tail = node
else:
if self.tail:
self.tail.pointer = node
self.tail = node
def size(self):
node = self.head
num_nodes = 0
while node:
num_nodes += 1
node = node.pointer
return num_nodes
def peek(self):
return self.head.value
def _print(self):
node = self.head
while node:
print(node.value)
node = node.pointer
if __name__ == '__main__':
queue = LinkedQueue()
print("Is the queue empty? ", queue.isEmpty())
print("Adding 0 to 10 in the queue...")
for i in range(10):
queue.enqueue(i)
print("Is the queue empty? ", queue.isEmpty())
queue._print()
print("Queue size: ", queue.size())
print("Queue peek : ", queue.peek())
print("Dequeue...", queue.dequeue())
print("Queue peek: ", queue.peek())
queue._print()
| 854 | 3 | 260 |
132ae91e8cc24c13ceba95761dfe1777e9a63d5b | 814 | py | Python | Python/Python_Web_Development/FlaskMegaTutorial/microblog/migrations/versions/04f42852fee8_add_about_me_last_seen_fields_to_users_.py | nitin-cherian/LifeLongLearning | 84084792058358365162c645742c70064a2d5fd6 | [
"MIT"
] | 4 | 2018-01-19T17:15:06.000Z | 2018-01-24T00:06:42.000Z | Python/Python_Web_Development/FlaskMegaTutorial/microblog/migrations/versions/04f42852fee8_add_about_me_last_seen_fields_to_users_.py | nitin-cherian/LifeLongLearning | 84084792058358365162c645742c70064a2d5fd6 | [
"MIT"
] | null | null | null | Python/Python_Web_Development/FlaskMegaTutorial/microblog/migrations/versions/04f42852fee8_add_about_me_last_seen_fields_to_users_.py | nitin-cherian/LifeLongLearning | 84084792058358365162c645742c70064a2d5fd6 | [
"MIT"
] | null | null | null | """add about_me, last_seen fields to users table.
Revision ID: 04f42852fee8
Revises: 60ae9f5ea6d5
Create Date: 2018-03-27 06:43:46.542593
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '04f42852fee8'
down_revision = '60ae9f5ea6d5'
branch_labels = None
depends_on = None
| 26.258065 | 86 | 0.694103 | """add about_me, last_seen fields to users table.
Revision ID: 04f42852fee8
Revises: 60ae9f5ea6d5
Create Date: 2018-03-27 06:43:46.542593
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '04f42852fee8'
down_revision = '60ae9f5ea6d5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen')
op.drop_column('user', 'about_me')
# ### end Alembic commands ###
| 436 | 0 | 46 |
e2508963f16fb9876ae5bcfdfbf8321d39c4f391 | 8,125 | py | Python | src/cam.py | FredrikM97/Medical-ROI | 54246341460c04caeced2ef6dcab984f6c260c9d | [
"Apache-2.0"
] | null | null | null | src/cam.py | FredrikM97/Medical-ROI | 54246341460c04caeced2ef6dcab984f6c260c9d | [
"Apache-2.0"
] | null | null | null | src/cam.py | FredrikM97/Medical-ROI | 54246341460c04caeced2ef6dcab984f6c260c9d | [
"Apache-2.0"
] | null | null | null | """
This module contain various types of functions/classes to access and generate CAM.
"""
import warnings
from typing import List, Tuple, Union
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import torch
import torchcam
import torchvision
from src.display.cmap import parula_map
from src.files import preprocess
from src.files.preprocess import image2axial, to_grid
from src.types.string import split_custom_filename
class CAM:
""" """
def __init__(self, model, cam_type=torchcam.cams.GradCAMpp,target_layer:str="model.layer4", cam_kwargs:dict={}):
"""
Parameters
----------
model :
cam_type :
(Default value = torchcam.cams.GradCAMpp)
target_layer : str
(Default value = "model.layer4")
cam_kwargs : dict
(Default value = {})
Returns
-------
"""
self._CLASSES=[0,1,2]
self.CAM_TYPE = cam_type
self.TARGET_LAYER = target_layer
self.model = model
self.extractor = cam_type(model, target_layer=target_layer, **cam_kwargs)
def class_score(self, input_image:'np.ndarray', device='cuda', input_shape=(79,95,79)) -> 'Tuple[torch.Tensor, int]':
"""Calculate the class scores and the highest probability of the target class
Args:
input_image('np.ndarray'):
device: (Default value = 'cuda')
input_shape: (Default value = (79,95,79))
Returns:
'Tuple[torch.Tensor,int]'': All the probabilities and the best probability class
Raises:
"""
image = preprocess.preprocess_image(input_image)
image = preprocess.batchisize_to_5D(image)
image_tensor = torch.from_numpy(image).float()
model = self.model.to(device).eval()
image_tensor = image_tensor.to(device)
# Check that image have the correct shape
assert tuple(image_tensor.shape) == (1, 1, *input_shape), f"Got image shape: {image_tensor.shape} expected: {(1, 1, *input_shape)}"
assert model.device == image_tensor.device, f"Model and image are not on same device: Model: {model.device} Image: {image_tensor.device}"
class_scores = model(image_tensor)
return class_scores, class_scores.squeeze(0).argmax().item()
def activations(self, class_idx:int=None, class_scores:'torch.Tensor'=None) -> 'np.ndarray':
"""Retrieve the map based on the score from the model
Args:
class_idx(int, optional): (Default value = None)
class_scores('torch.Tensor', optional): (Default value = None)
Returns:
np.ndarray: Tensor with activations from image with shape tensor[D,H,W]
Raises:
"""
return self.extractor(class_idx, class_scores, normalized=False).detach().cpu()
@staticmethod
def plot(images:list=[], masks:list=[], labels=[],cmap:list=parula_map, alpha:float=0.7, class_label:str=None, predicted_override:bool=None, architecture:str=None) -> plt.Figure:
"""Create a plot from the given class activation map and input image. CAM is calculated from the models weights and the probability distribution of each class.
Args:
images(list, optional): (Default value = [])
masks(list, optional): (Default value = [])
labels: (Default value = [])
cmap(list, optional): Color object (Default value = parula_map)
alpha(float, optional): int (Default value = 0.7)
class_label(str, optional): str (Default value = None)
predicted_override(bool, optional): Define if the cam class is overwritten (Default value = None)
architecture(str, optional): Name of the architecture provided to add in plot title (Default value = None)
Returns:
type: output (Figure): Figure reference to plot
Raises:
"""
#class_idx = class_idx if isinstance(class_idx, list) else [class_idx]
if (max_length :=len(masks)) > len(images):
pass
else:
max_length = len(images)
if max_length == 0:
raise ValueError("Number of images/masks cant be zero!")
fig, axes = plt.subplots(ncols=max_length,nrows=1,figsize=(max_length*8,8))
if max_length > 1:
# Add images
for i, image in enumerate(images):
im = axes[i].imshow(image,cmap='Greys_r', vmin=image.min(), vmax=image.max())
# Add masks
for i, mask in enumerate(masks):
im = axes[i].imshow(mask,cmap=cmap, alpha=alpha,vmin=mask.min(), vmax=mask.max())
else:
for i, image in enumerate(images):
im = axes.imshow(image,cmap='Greys_r', vmin=image.min(), vmax=image.max())
# Add masks
for i, mask in enumerate(masks):
im = axes.imshow(mask,cmap=cmap, alpha=alpha,vmin=mask.min(), vmax=mask.max())
# Add labels
classes = {
0:'CN',
1:'MCI',
2:'AD'
}
for i, label in enumerate(labels):
title_list = [out for out, con in [
(f'{architecture}',architecture),
#(f'{type(self.extractor).__name__}',True),
(f'Patient: {class_label}',class_label),
(f'Predicted: {classes[label]}',label),
(f'Overrided',predicted_override)] if con != None
]
if max_length > 1:
axes[i].set_title(', '.join(title_list))
else:
axes.set_title(', '.join(title_list))
if max_length > 1:
for a in axes.flatten():
a.set_axis_off()
a.set_xticklabels([])
a.set_yticklabels([])
else:
axes.set_axis_off()
axes.set_xticklabels([])
axes.set_yticklabels([])
# Remove axis data to show colorbar more clean
ax = axes.ravel().tolist() if max_length > 1 else axes
plt.subplots_adjust(wspace=0.01, hspace=0)
cbar = fig.colorbar(im, ax=ax, shrink=1)
return fig
@staticmethod
def get_cam(model, cam_type:'torchcam.cams.gradcam._GradCAM', input_shape:Tuple=(79,95,79),target_layer:str=None,CAM_kwargs:dict={}) -> 'torchcam.cams.gradcam._GradCAM':
"""Generate CAM object
Args:
model:
cam_type('torchcam.cams.gradcam._GradCAM'):
input_shape(Tuple, optional): (Default value = (79,95,79))
target_layer(str, optional): (Default value = None)
CAM_kwargs(dict, optional): (Default value = {})
Returns:
Raises:
"""
extractor = cam_type(model, input_shape=(1,*input_shape), target_layer=target_layer, **CAM_kwargs)
return extractor
@staticmethod
def average_image(images:list) -> 'torch.Tensor':
"""Calculate average over multiple images
Args:
images(list):
Returns:
Raises:
"""
return torch.mean(torch.stack(images), axis=0)
@staticmethod
def repeat_stack(image:'torch.Tensor', repeat:int=1, grid_kwargs:dict={}) -> 'torch.Tensor':
"""Repeat am image in a grid N number of times.
Args:
image('torch.Tensor'):
repeat(int, optional): (Default value = 1)
grid_kwargs(dict, optional): (Default value = {})
Returns:
Raises:
"""
return torch.stack([to_grid(image, **grid_kwargs)]*repeat)
@staticmethod
def preprocess(filename:str) -> np.ndarray:
"""Preprocess image to a valid format
Args:
filename(str):
Returns:
Raises:
"""
class_label = split_custom_filename(filename,'/')[4]
image = image2axial(nib.load(filename).get_fdata())
image[image <= 0]=0
image = preprocess.preprocess_image(image)
return image | 33.29918 | 182 | 0.586954 | """
This module contain various types of functions/classes to access and generate CAM.
"""
import warnings
from typing import List, Tuple, Union
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import torch
import torchcam
import torchvision
from src.display.cmap import parula_map
from src.files import preprocess
from src.files.preprocess import image2axial, to_grid
from src.types.string import split_custom_filename
class CAM:
""" """
def __init__(self, model, cam_type=torchcam.cams.GradCAMpp,target_layer:str="model.layer4", cam_kwargs:dict={}):
"""
Parameters
----------
model :
cam_type :
(Default value = torchcam.cams.GradCAMpp)
target_layer : str
(Default value = "model.layer4")
cam_kwargs : dict
(Default value = {})
Returns
-------
"""
self._CLASSES=[0,1,2]
self.CAM_TYPE = cam_type
self.TARGET_LAYER = target_layer
self.model = model
self.extractor = cam_type(model, target_layer=target_layer, **cam_kwargs)
def class_score(self, input_image:'np.ndarray', device='cuda', input_shape=(79,95,79)) -> 'Tuple[torch.Tensor, int]':
"""Calculate the class scores and the highest probability of the target class
Args:
input_image('np.ndarray'):
device: (Default value = 'cuda')
input_shape: (Default value = (79,95,79))
Returns:
'Tuple[torch.Tensor,int]'': All the probabilities and the best probability class
Raises:
"""
image = preprocess.preprocess_image(input_image)
image = preprocess.batchisize_to_5D(image)
image_tensor = torch.from_numpy(image).float()
model = self.model.to(device).eval()
image_tensor = image_tensor.to(device)
# Check that image have the correct shape
assert tuple(image_tensor.shape) == (1, 1, *input_shape), f"Got image shape: {image_tensor.shape} expected: {(1, 1, *input_shape)}"
assert model.device == image_tensor.device, f"Model and image are not on same device: Model: {model.device} Image: {image_tensor.device}"
class_scores = model(image_tensor)
return class_scores, class_scores.squeeze(0).argmax().item()
def activations(self, class_idx:int=None, class_scores:'torch.Tensor'=None) -> 'np.ndarray':
"""Retrieve the map based on the score from the model
Args:
class_idx(int, optional): (Default value = None)
class_scores('torch.Tensor', optional): (Default value = None)
Returns:
np.ndarray: Tensor with activations from image with shape tensor[D,H,W]
Raises:
"""
return self.extractor(class_idx, class_scores, normalized=False).detach().cpu()
@staticmethod
def plot(images:list=[], masks:list=[], labels=[],cmap:list=parula_map, alpha:float=0.7, class_label:str=None, predicted_override:bool=None, architecture:str=None) -> plt.Figure:
"""Create a plot from the given class activation map and input image. CAM is calculated from the models weights and the probability distribution of each class.
Args:
images(list, optional): (Default value = [])
masks(list, optional): (Default value = [])
labels: (Default value = [])
cmap(list, optional): Color object (Default value = parula_map)
alpha(float, optional): int (Default value = 0.7)
class_label(str, optional): str (Default value = None)
predicted_override(bool, optional): Define if the cam class is overwritten (Default value = None)
architecture(str, optional): Name of the architecture provided to add in plot title (Default value = None)
Returns:
type: output (Figure): Figure reference to plot
Raises:
"""
#class_idx = class_idx if isinstance(class_idx, list) else [class_idx]
if (max_length :=len(masks)) > len(images):
pass
else:
max_length = len(images)
if max_length == 0:
raise ValueError("Number of images/masks cant be zero!")
fig, axes = plt.subplots(ncols=max_length,nrows=1,figsize=(max_length*8,8))
if max_length > 1:
# Add images
for i, image in enumerate(images):
im = axes[i].imshow(image,cmap='Greys_r', vmin=image.min(), vmax=image.max())
# Add masks
for i, mask in enumerate(masks):
im = axes[i].imshow(mask,cmap=cmap, alpha=alpha,vmin=mask.min(), vmax=mask.max())
else:
for i, image in enumerate(images):
im = axes.imshow(image,cmap='Greys_r', vmin=image.min(), vmax=image.max())
# Add masks
for i, mask in enumerate(masks):
im = axes.imshow(mask,cmap=cmap, alpha=alpha,vmin=mask.min(), vmax=mask.max())
# Add labels
classes = {
0:'CN',
1:'MCI',
2:'AD'
}
for i, label in enumerate(labels):
title_list = [out for out, con in [
(f'{architecture}',architecture),
#(f'{type(self.extractor).__name__}',True),
(f'Patient: {class_label}',class_label),
(f'Predicted: {classes[label]}',label),
(f'Overrided',predicted_override)] if con != None
]
if max_length > 1:
axes[i].set_title(', '.join(title_list))
else:
axes.set_title(', '.join(title_list))
if max_length > 1:
for a in axes.flatten():
a.set_axis_off()
a.set_xticklabels([])
a.set_yticklabels([])
else:
axes.set_axis_off()
axes.set_xticklabels([])
axes.set_yticklabels([])
# Remove axis data to show colorbar more clean
ax = axes.ravel().tolist() if max_length > 1 else axes
plt.subplots_adjust(wspace=0.01, hspace=0)
cbar = fig.colorbar(im, ax=ax, shrink=1)
return fig
@staticmethod
def get_cam(model, cam_type:'torchcam.cams.gradcam._GradCAM', input_shape:Tuple=(79,95,79),target_layer:str=None,CAM_kwargs:dict={}) -> 'torchcam.cams.gradcam._GradCAM':
"""Generate CAM object
Args:
model:
cam_type('torchcam.cams.gradcam._GradCAM'):
input_shape(Tuple, optional): (Default value = (79,95,79))
target_layer(str, optional): (Default value = None)
CAM_kwargs(dict, optional): (Default value = {})
Returns:
Raises:
"""
extractor = cam_type(model, input_shape=(1,*input_shape), target_layer=target_layer, **CAM_kwargs)
return extractor
@staticmethod
def average_image(images:list) -> 'torch.Tensor':
"""Calculate average over multiple images
Args:
images(list):
Returns:
Raises:
"""
return torch.mean(torch.stack(images), axis=0)
@staticmethod
def repeat_stack(image:'torch.Tensor', repeat:int=1, grid_kwargs:dict={}) -> 'torch.Tensor':
"""Repeat am image in a grid N number of times.
Args:
image('torch.Tensor'):
repeat(int, optional): (Default value = 1)
grid_kwargs(dict, optional): (Default value = {})
Returns:
Raises:
"""
return torch.stack([to_grid(image, **grid_kwargs)]*repeat)
@staticmethod
def preprocess(filename:str) -> np.ndarray:
"""Preprocess image to a valid format
Args:
filename(str):
Returns:
Raises:
"""
class_label = split_custom_filename(filename,'/')[4]
image = image2axial(nib.load(filename).get_fdata())
image[image <= 0]=0
image = preprocess.preprocess_image(image)
return image | 0 | 0 | 0 |
69ef23366f317ce1c6e8f4704475e1dca0a6acea | 74 | py | Python | charing-be/test/test.py | YinhaoHe/84-Charing-Cross-Road | 358f195792938e06c25b8b6ed526b087f6752f0e | [
"MIT"
] | null | null | null | charing-be/test/test.py | YinhaoHe/84-Charing-Cross-Road | 358f195792938e06c25b8b6ed526b087f6752f0e | [
"MIT"
] | null | null | null | charing-be/test/test.py | YinhaoHe/84-Charing-Cross-Road | 358f195792938e06c25b8b6ed526b087f6752f0e | [
"MIT"
] | null | null | null | print "SELECT pair_email FROM users where email = %s" , '236294386@qq.com' | 74 | 74 | 0.743243 | print "SELECT pair_email FROM users where email = %s" , '236294386@qq.com' | 0 | 0 | 0 |
dbde4d25c1933c6a398c3ec1379b9a6c559bdb6a | 28,529 | py | Python | SynapticPlasticityModel/plotting_utils.py | baiydaavi/RL_models_with_choice_selective_sequences | 94cf48fef53c4db6ee22eb96bad7f882fb182411 | [
"Apache-2.0"
] | null | null | null | SynapticPlasticityModel/plotting_utils.py | baiydaavi/RL_models_with_choice_selective_sequences | 94cf48fef53c4db6ee22eb96bad7f882fb182411 | [
"Apache-2.0"
] | null | null | null | SynapticPlasticityModel/plotting_utils.py | baiydaavi/RL_models_with_choice_selective_sequences | 94cf48fef53c4db6ee22eb96bad7f882fb182411 | [
"Apache-2.0"
] | 1 | 2022-03-03T19:42:13.000Z | 2022-03-03T19:42:13.000Z | import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import statsmodels.api as sm
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator
class GenerateTestPlots:
"""Generates plots using data collected during model run."""
def __init__(self, results):
"""Initialize the plot generator.
Args:
results (dict): Model run data.
"""
self.step_time = results["step_time"]
self.times = results["times"]
self.high_prob_blocks = results["high_prob_blocks"]
self.choices = results["choices"]
self.rewarded_sides = results["rewarded_sides"]
self.rewarded_trials = results["rewarded_trials"]
self.values = results["values"]
self.RPEs = results["RPEs"]
self.stimulated_trials = results["stimulated_trials"]
self.right_decision_value = results["right_decision_value"]
self.left_decision_value = results["left_decision_value"]
self.NAc_activity = results["NAc_activity"]
self.peak_reward_times = results["peak_reward_times"]
| 37.538158 | 90 | 0.547688 | import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import statsmodels.api as sm
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator
class GenerateTestPlots:
"""Generates plots using data collected during model run."""
def __init__(self, results):
"""Initialize the plot generator.
Args:
results (dict): Model run data.
"""
self.step_time = results["step_time"]
self.times = results["times"]
self.high_prob_blocks = results["high_prob_blocks"]
self.choices = results["choices"]
self.rewarded_sides = results["rewarded_sides"]
self.rewarded_trials = results["rewarded_trials"]
self.values = results["values"]
self.RPEs = results["RPEs"]
self.stimulated_trials = results["stimulated_trials"]
self.right_decision_value = results["right_decision_value"]
self.left_decision_value = results["left_decision_value"]
self.NAc_activity = results["NAc_activity"]
self.peak_reward_times = results["peak_reward_times"]
def behavior(self, start_num_trials=500, num_tr=200, save=None):
print(f"reward rate = " f"{np.mean(self.rewarded_trials)}")
block_switches = np.where(np.diff(self.high_prob_blocks) != 0)[0] + 1
print(
f"mean block length ="
f"{np.mean(np.diff(block_switches))}"
"\u00B1"
f"{np.std(np.diff(block_switches))}"
)
end_num_trials = start_num_trials + num_tr
rew_side = self.choices * self.rewarded_trials
decision_value = self.left_decision_value - self.right_decision_value
max_decision_value = np.max(
np.abs(decision_value[start_num_trials:end_num_trials])
)
plt.figure(figsize=(16, 5))
plt.scatter(
np.arange(start_num_trials, end_num_trials),
self.high_prob_blocks[start_num_trials:end_num_trials] * 1.5,
s=40,
color="black",
label="high probability side",
)
plt.scatter(
np.arange(start_num_trials, end_num_trials),
self.choices[start_num_trials:end_num_trials] * 1.3,
s=20,
color="blue",
label="chosen side",
)
plt.scatter(
np.arange(start_num_trials, end_num_trials),
rew_side[start_num_trials:end_num_trials] * 1.1,
s=30 * np.abs(rew_side[start_num_trials:end_num_trials]),
color="green",
label="rewarded side",
)
plt.plot(
np.arange(start_num_trials, end_num_trials),
decision_value[start_num_trials:end_num_trials] / max_decision_value,
color="aqua",
label="value averaged over time",
)
fontP = FontProperties()
fontP.set_size("xx-large")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
loc="lower left",
ncol=3,
mode="expand",
borderaxespad=0.0,
prop=fontP,
)
plt.xlabel("Trial number", fontsize=20)
if save:
plt.savefig(save, bbox_inches="tight")
def stay_probability(self, mode=None, save=None):
if mode == "optogenetic":
unrewarded_trials = np.abs(self.rewarded_trials - 1)
next_choice = np.roll(self.choices, -1)
next_id = np.roll(self.stimulated_trials, -1)
norm_chosen_side = self.choices[next_id == 0]
norm_next_choice = next_choice[next_id == 0]
norm_rewarded_trials = self.rewarded_trials[next_id == 0]
norm_unrewarded_trials = unrewarded_trials[next_id == 0]
norm_return_rew = norm_rewarded_trials * (
(norm_next_choice == norm_chosen_side) * 1
)
norm_prob_rew_return = np.sum(norm_return_rew) / np.sum(norm_rewarded_trials)
norm_return_unrew = norm_unrewarded_trials * (
(norm_next_choice == norm_chosen_side) * 1
)
norm_prob_unrew_return = np.sum(norm_return_unrew) / np.sum(
norm_unrewarded_trials
)
opto_chosen_side = self.choices[next_id == 1]
opto_next_choice = next_choice[next_id == 1]
opto_rewarded_trials = self.rewarded_trials[next_id == 1]
opto_unrewarded_trials = unrewarded_trials[next_id == 1]
opto_return_rew = opto_rewarded_trials * (
(opto_next_choice == opto_chosen_side) * 1
)
opto_prob_rew_return = np.sum(opto_return_rew) / np.sum(opto_rewarded_trials)
opto_return_unrew = opto_unrewarded_trials * (
(opto_next_choice == opto_chosen_side) * 1
)
opto_prob_unrew_return = np.sum(opto_return_unrew) / np.sum(
opto_unrewarded_trials
)
fig = plt.figure(figsize=(9, 3))
plt.subplot(1, 2, 1)
plt.bar(
[1, 2, 4, 5],
[
norm_prob_rew_return,
opto_prob_rew_return,
norm_prob_unrew_return,
opto_prob_unrew_return,
],
width=0.6,
color=["k", "aqua", "k", "aqua"],
)
plt.xticks(
[1.5, 4.5], ["current trial\n reward", "current trial\n no reward"]
)
plt.ylim([0.5, 1.0])
plt.ylabel("probability of return")
norm_prob_rew_return = 0.0
norm_prob_unrew_return = 0.0
opto_prob_rew_return = 0.0
opto_prob_unrew_return = 0.0
unrewarded_trials = np.abs(self.rewarded_trials - 1)
next_choice = np.roll(self.choices, -1)
next_id = self.stimulated_trials
norm_chosen_side = self.choices[next_id == 0]
norm_next_choice = next_choice[next_id == 0]
norm_rewarded_trials = self.rewarded_trials[next_id == 0]
norm_unrewarded_trials = unrewarded_trials[next_id == 0]
norm_return_rew = norm_rewarded_trials * (
(norm_next_choice == norm_chosen_side) * 1
)
norm_prob_rew_return += np.sum(norm_return_rew) / np.sum(norm_rewarded_trials)
norm_return_unrew = norm_unrewarded_trials * (
(norm_next_choice == norm_chosen_side) * 1
)
norm_prob_unrew_return += np.sum(norm_return_unrew) / np.sum(
norm_unrewarded_trials
)
opto_chosen_side = self.choices[next_id == 1]
opto_next_choice = next_choice[next_id == 1]
opto_rewarded_trials = self.rewarded_trials[next_id == 1]
opto_unrewarded_trials = unrewarded_trials[next_id == 1]
opto_return_rew = opto_rewarded_trials * (
(opto_next_choice == opto_chosen_side) * 1
)
opto_prob_rew_return += np.sum(opto_return_rew) / np.sum(opto_rewarded_trials)
opto_return_unrew = opto_unrewarded_trials * (
(opto_next_choice == opto_chosen_side) * 1
)
opto_prob_unrew_return += np.sum(opto_return_unrew) / np.sum(
opto_unrewarded_trials
)
# fig = plt.figure(figsize=(3,3))
plt.subplot(1, 2, 2)
plt.bar(
[1, 2, 4, 5],
[
norm_prob_rew_return,
opto_prob_rew_return,
norm_prob_unrew_return,
opto_prob_unrew_return,
],
width=0.6,
color=["k", "aqua", "k", "aqua"],
)
plt.xticks(
[1.5, 4.5], ["previous trial\n reward", "previous trial\n no reward"]
)
plt.ylim([0.5, 1.0])
plt.ylabel("probability of return")
else:
prob_rew_return = 0.0
prob_unrew_return = 0.0
unrewarded_trials = (self.rewarded_trials - 1) * -1
next_choice = np.roll(self.choices, -1)
return_rew = self.rewarded_trials * ((next_choice == self.choices) * 1)
prob_rew_return += np.sum(return_rew) / np.sum(self.rewarded_trials)
return_unrew = unrewarded_trials * ((next_choice == self.choices) * 1)
prob_unrew_return += np.sum(return_unrew) / np.sum(unrewarded_trials)
plt.subplot(1, 2, 1)
plt.bar(
[1, 1.5],
[prob_rew_return, prob_unrew_return],
width=0.2,
color=["green", "red"],
)
plt.xticks([1, 1.5], ["previously\n rewarded", "previously\n unrewarded"])
plt.ylabel("probability of return")
plt.ylim([0.0, 1.1])
if save:
plt.savefig(save, bbox_inches="tight")
def choice_regression(self, mode=None, trials_back=11, save=None):
if mode == "optogenetic":
reward_mat = np.zeros((trials_back - 1, len(self.rewarded_trials)))
reward_vect = self.choices * self.rewarded_trials
for i in np.arange(1, trials_back):
reward_mat[i - 1, :] = np.roll(reward_vect, i)
# makes unreward matrix
unrewarded_trials = np.abs(self.rewarded_trials - 1)
unreward_mat = np.zeros((trials_back - 1, len(unrewarded_trials)))
unreward_vec = self.choices * unrewarded_trials
for i in np.arange(1, trials_back):
unreward_mat[i - 1, :] = np.roll(unreward_vec, i)
# makes laser matrix
laser_mat = np.zeros((trials_back - 1, len(self.rewarded_trials)))
for i in np.arange(1, trials_back):
laser_mat[i - 1, :] = np.roll(self.stimulated_trials, i)
y = self.choices
x = np.concatenate(
(
np.ones([1, len(y)]),
reward_mat,
unreward_mat,
reward_mat * laser_mat,
unreward_mat * laser_mat,
laser_mat,
),
axis=0,
)
y_new = np.asarray((y + 1) / 2, dtype=int)
log_reg = sm.Logit(y_new, x.T).fit()
# Plots regression
reward_coefs = log_reg.params[1:trials_back]
unreward_coefs = log_reg.params[trials_back : int(trials_back * 2 - 1)]
rewlaser_coefs = log_reg.params[
int(trials_back * 2 - 1) : int(trials_back * 3 - 2)
]
norewlaser_coefs = log_reg.params[
int(trials_back * 3 - 2) : int(trials_back * 4 - 3)
]
fig = plt.figure(figsize=(12, 6))
plt.plot(reward_coefs, "b", label="rewarded trials no stimulation")
plt.plot(unreward_coefs, "r", label="unrewarded trials no stimulation")
plt.plot(
reward_coefs + rewlaser_coefs,
linestyle="dotted",
color="b",
label="rewarded trials with stimulation",
)
plt.plot(
unreward_coefs + norewlaser_coefs,
linestyle="dotted",
color="r",
label="unrewarded trials with stimulation",
)
# plt.plot(laser_coefs,'k')
plt.axhline(y=0, linestyle="dotted", color="gray")
plt.xticks(
np.arange(0, trials_back - 1, 2),
[str(i) for i in np.arange(-1, -trials_back, -2)],
)
plt.xlabel("trials back")
plt.ylabel("regression coefficients")
plt.legend()
else:
reward_mat = np.zeros((trials_back - 1, len(self.rewarded_trials)))
reward_vect = self.choices * self.rewarded_trials
for i in np.arange(1, trials_back):
reward_mat[i - 1, :] = np.roll(reward_vect, i)
# makes unreward matrix
unrewarded_trials = np.abs(self.rewarded_trials - 1)
unreward_mat = np.zeros((trials_back - 1, len(unrewarded_trials)))
unreward_vec = self.choices * unrewarded_trials
for i in np.arange(1, trials_back):
unreward_mat[i - 1, :] = np.roll(unreward_vec, i)
y = self.choices
x = np.concatenate((np.ones([1, len(y)]), reward_mat, unreward_mat), axis=0)
y_new = np.asarray((y + 1) / 2, dtype=int)
log_reg = sm.Logit(y_new, x.T).fit()
fig = plt.figure(figsize=(10, 3))
plt.plot(
np.arange(0, trials_back - 1),
log_reg.params[1:trials_back],
color="blue",
linewidth=1,
label="model - rewarded trials",
)
plt.plot(
np.arange(0, trials_back - 1),
log_reg.params[trials_back:None],
color="red",
linewidth=1,
label="model - unrewarded trials",
)
plt.axhline(y=0, linestyle="dotted", color="gray")
plt.legend()
plt.xticks(
np.arange(0, trials_back - 1, 2),
[str(i) for i in np.arange(-1, -trials_back, -2)],
)
plt.xlabel("trials back")
plt.ylabel("regression coefficients")
if save:
plt.savefig(save, bbox_inches="tight")
def block_value_plot(self, save=None):
block_switches = np.where(np.diff(self.high_prob_blocks) != 0)[0] + 1
early_trials = block_switches[0 : len(block_switches) - 1]
middle_trials = early_trials + 4
late_trials = early_trials + 14
# block identity of each trial
block_iden = self.high_prob_blocks[early_trials]
# indeces for block and actual choice
left_left_early = early_trials[
(block_iden == 1) & (self.choices[early_trials] == 1)
]
left_right_early = early_trials[
(block_iden == 1) & (self.choices[early_trials] == -1)
]
right_right_early = early_trials[
(block_iden == -1) & (self.choices[early_trials] == -1)
]
right_left_early = early_trials[
(block_iden == -1) & (self.choices[early_trials] == 1)
]
left_left_middle = middle_trials[
(block_iden == 1) & (self.choices[middle_trials] == 1)
]
left_right_middle = middle_trials[
(block_iden == 1) & (self.choices[middle_trials] == -1)
]
right_right_middle = middle_trials[
(block_iden == -1) & (self.choices[middle_trials] == -1)
]
right_left_middle = middle_trials[
(block_iden == -1) & (self.choices[middle_trials] == 1)
]
left_left_late = late_trials[(block_iden == 1) & (self.choices[late_trials] == 1)]
left_right_late = late_trials[
(block_iden == 1) & (self.choices[late_trials] == -1)
]
right_right_late = late_trials[
(block_iden == -1) & (self.choices[late_trials] == -1)
]
right_left_late = late_trials[
(block_iden == -1) & (self.choices[late_trials] == 1)
]
fig = plt.figure(figsize=(6, 6))
plt.subplot(3, 2, 1)
plt.title("Left block")
val_trace_l = np.mean(self.values[left_left_early, :], axis=0)
val_trace_r = np.mean(self.values[left_right_early, :], axis=0)
val_sem_l = stats.sem(self.values[left_left_early, :], axis=0)
val_sem_r = stats.sem(self.values[left_right_early, :], axis=0)
plt.errorbar(
self.times,
val_trace_l,
val_sem_l,
color="blue",
ecolor="skyblue",
linewidth=1.0,
)
plt.errorbar(
self.times,
val_trace_r,
val_sem_r,
color="green",
ecolor="lime",
linewidth=1.0,
)
plt.ylim([-0.03, 0.22])
plt.subplot(3, 2, 2)
plt.title("Right block")
val_trace_l = np.mean(self.values[right_left_early, :], axis=0)
val_trace_r = np.mean(self.values[right_right_early, :], axis=0)
val_sem_l = stats.sem(self.values[right_left_early, :], axis=0)
val_sem_r = stats.sem(self.values[right_right_early, :], axis=0)
plt.errorbar(
self.times,
val_trace_l,
val_sem_l,
color="blue",
ecolor="skyblue",
linewidth=1.0,
)
plt.errorbar(
self.times,
val_trace_r,
val_sem_r,
color="green",
ecolor="lime",
linewidth=1.0,
)
plt.ylim([-0.03, 0.22])
plt.text(4, 0.19, "Trial 1", fontsize=15, color="k")
plt.subplot(3, 2, 3)
val_trace_l = np.mean(self.values[left_left_middle, :], axis=0)
val_trace_r = np.mean(self.values[left_right_middle, :], axis=0)
val_sem_l = stats.sem(self.values[left_left_middle, :], axis=0)
val_sem_r = stats.sem(self.values[left_right_middle, :], axis=0)
plt.errorbar(
self.times,
val_trace_l,
val_sem_l,
color="blue",
ecolor="skyblue",
linewidth=1.0,
)
plt.errorbar(
self.times,
val_trace_r,
val_sem_r,
color="green",
ecolor="lime",
linewidth=1.0,
)
plt.ylim([-0.03, 0.22])
plt.ylabel("self.values", fontsize=15)
plt.subplot(3, 2, 4)
val_trace_l = np.mean(self.values[right_left_middle, :], axis=0)
val_trace_r = np.mean(self.values[right_right_middle, :], axis=0)
val_sem_l = stats.sem(self.values[right_left_middle, :], axis=0)
val_sem_r = stats.sem(self.values[right_right_middle, :], axis=0)
plt.errorbar(
self.times,
val_trace_l,
val_sem_l,
color="blue",
ecolor="skyblue",
linewidth=1.0,
)
plt.errorbar(
self.times,
val_trace_r,
val_sem_r,
color="green",
ecolor="lime",
linewidth=1.0,
)
plt.ylim([-0.03, 0.22])
plt.text(4, 0.19, "Trial 5", fontsize=15, color="k")
plt.text(8, 0.15, "Left Press", fontsize=15, color="blue")
plt.text(8, 0.05, "Right Press", fontsize=15, color="green")
plt.subplot(3, 2, 5)
val_trace_l = np.mean(self.values[left_left_late, :], axis=0)
val_trace_r = np.mean(self.values[left_right_late, :], axis=0)
val_sem_l = stats.sem(self.values[left_left_late, :], axis=0)
val_sem_r = stats.sem(self.values[left_right_late, :], axis=0)
plt.errorbar(
self.times,
val_trace_l,
val_sem_l,
color="blue",
ecolor="skyblue",
linewidth=1.0,
)
plt.errorbar(
self.times,
val_trace_r,
val_sem_r,
color="green",
ecolor="lime",
linewidth=1.0,
)
plt.ylim([-0.03, 0.22])
plt.subplot(3, 2, 6)
val_trace_l = np.mean(self.values[right_left_late, :], axis=0)
val_trace_r = np.mean(self.values[right_right_late, :], axis=0)
val_sem_l = stats.sem(self.values[right_left_late, :], axis=0)
val_sem_r = stats.sem(self.values[right_right_late, :], axis=0)
plt.errorbar(
self.times,
val_trace_l,
val_sem_l,
color="blue",
ecolor="skyblue",
linewidth=1.0,
)
plt.errorbar(
self.times,
val_trace_r,
val_sem_r,
color="green",
ecolor="lime",
linewidth=1.0,
)
plt.ylim([-0.03, 0.22])
plt.text(4, 0.19, "Trial 15", fontsize=15, color="k")
plt.text(-4.9, -0.12, "Time(s)", fontsize=15)
if save:
plt.savefig(save, bbox_inches="tight")
def dopamine_regression(self, trials_back=6, save=None):
y_vec_cs = np.zeros(len(self.RPEs))
for i in range(len(self.RPEs)):
y_vec_cs[i] = np.mean(
self.RPEs[
i,
int(self.peak_reward_times[i]) : int(
self.peak_reward_times[i] + 1 / self.step_time
),
]
)
# makes x matrix of reward identity on previous trials
x_mat = np.zeros((trials_back, len(self.rewarded_trials)))
for i in np.arange(0, trials_back):
x_mat[i, :] = np.roll(self.rewarded_trials, i)
y = np.reshape(y_vec_cs, [len(y_vec_cs), 1])
x = np.concatenate((np.ones([1, len(y_vec_cs)]), x_mat), axis=0)
regresion_results = sm.OLS(y, x.T).fit()
plt.figure(figsize=(11, 4))
plt.title("Regressing DA activity at reward time against outcome", fontsize=20)
plt.scatter(np.arange(trials_back), regresion_results.params[1:None])
plt.axhline(y=0, linestyle="dashed", color="k")
plt.xlabel("trials back", fontsize=15)
plt.ylabel("regression coefficients", fontsize=15)
print(regresion_results.params[1:None])
if save:
plt.savefig(save, bbox_inches="tight")
def block_switch(self, trial_back=10, save=None):
switch_high = np.where(np.diff(self.high_prob_blocks) != 0)[0] + 1
early_trials = switch_high[0 : len(switch_high) - 1]
block_iden = self.high_prob_blocks[early_trials]
# finds times of left to right
block_switch = early_trials[block_iden == -1] - 1
time_window = np.arange(-trial_back, trial_back + 1)
r_choice_mat = np.zeros([len(block_switch), len(time_window)])
for i in np.arange(1, len(block_switch)):
r_choice_mat[i, :] = self.choices[time_window + block_switch[i]]
r_choice_mat[i, :] = (r_choice_mat[i, :] + 1) / 2
# same except right to left
block_switch = early_trials[block_iden == 1] - 1
time_window = np.arange(-trial_back, trial_back + 1)
l_choice_mat = np.zeros([len(block_switch), len(time_window)])
for i in np.arange(1, len(block_switch)):
l_choice_mat[i, :] = self.choices[time_window + block_switch[i]] * -1
l_choice_mat[i, :] = (l_choice_mat[i, :] + 1) / 2
final_choice_mat = np.concatenate([l_choice_mat, r_choice_mat], axis=0)
plot_trace = np.mean(final_choice_mat, axis=0)
sem_trace = stats.sem(final_choice_mat, axis=0)
plot_trace2 = np.mean(-1 * (final_choice_mat - 1), axis=0)
sem_trace2 = stats.sem(-1 * (final_choice_mat - 1), axis=0)
ax = plt.figure(figsize=(5, 6)).gca()
ax.axvline(x=0, linestyle="dotted", color="gray")
ax.errorbar(time_window, plot_trace, sem_trace)
ax.errorbar(time_window, plot_trace2, sem_trace2)
ax.set_ylim(0, 1)
ax.xaxis.set_major_locator(MaxNLocator(integer=True, min_n_ticks=10))
ax.text(
-10,
1.05,
"Blue - pre-switch \nhigh probability\nchoice ",
fontsize=15,
color="dodgerblue",
)
ax.text(
1,
1.05,
"Orange - post-switch \nhigh probability\nchoice ",
fontsize=15,
color="orange",
)
plt.xlabel("Trials from block switch")
plt.ylabel("p(choice)")
# plt.axhline(y=plot_trace[time_window==1],color='k',linestyle='dotted')
print(plot_trace[time_window == 1])
if save:
plt.savefig(save, bbox_inches="tight")
def rpe_plot(self, save=None):
rpe_rewarded = np.mean(self.RPEs[self.rewarded_trials == 1, :], axis=0)
rpe_unrewarded = np.mean(self.RPEs[self.rewarded_trials == 0, :], axis=0)
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plt.title("rewarded trials", fontsize=15)
plt.plot(self.times, rpe_rewarded, color="green")
plt.ylim(-0.4, 1)
plt.xlabel("time", fontsize=20)
plt.ylabel("RPE", fontsize=20)
plt.subplot(1, 2, 2)
plt.title("unrewarded trials", fontsize=15)
plt.plot(self.times, rpe_unrewarded, color="grey")
plt.ylim(-0.4, 1)
plt.xlabel("time", fontsize=20)
if save:
plt.savefig(save, bbox_inches="tight")
def plot_NAc_activity(self, max_heatmap_val=0.005, save=None):
block_switches = np.where(np.diff(self.high_prob_blocks) != 0)[0] + 1
early_trials = block_switches[0 : len(block_switches) - 1]
middle_trials = early_trials + 4
late_trials = early_trials + 14
# block identity of each trial
block_iden = self.high_prob_blocks[early_trials]
# indeces for block and actual choice
left_left_early = early_trials[
(block_iden == 1) & (self.choices[early_trials] == 1)
]
left_right_early = early_trials[
(block_iden == 1) & (self.choices[early_trials] == -1)
]
right_right_early = early_trials[
(block_iden == -1) & (self.choices[early_trials] == -1)
]
right_left_early = early_trials[
(block_iden == -1) & (self.choices[early_trials] == 1)
]
left_left_middle = middle_trials[
(block_iden == 1) & (self.choices[middle_trials] == 1)
]
left_right_middle = middle_trials[
(block_iden == 1) & (self.choices[middle_trials] == -1)
]
right_right_middle = middle_trials[
(block_iden == -1) & (self.choices[middle_trials] == -1)
]
right_left_middle = middle_trials[
(block_iden == -1) & (self.choices[middle_trials] == 1)
]
left_left_late = late_trials[(block_iden == 1) & (self.choices[late_trials] == 1)]
left_right_late = late_trials[
(block_iden == 1) & (self.choices[late_trials] == -1)
]
right_right_late = late_trials[
(block_iden == -1) & (self.choices[late_trials] == -1)
]
right_left_late = late_trials[
(block_iden == -1) & (self.choices[late_trials] == 1)
]
NAc_heatmap = np.zeros(
(12, self.NAc_activity.shape[1], self.NAc_activity.shape[2])
)
NAc_heatmap[0, :, :] = np.mean(self.NAc_activity[left_left_early, :, :], axis=0)
NAc_heatmap[1, :, :] = np.mean(self.NAc_activity[left_right_early, :, :], axis=0)
NAc_heatmap[2, :, :] = np.mean(self.NAc_activity[right_left_early, :, :], axis=0)
NAc_heatmap[3, :, :] = np.mean(self.NAc_activity[right_right_early, :, :], axis=0)
NAc_heatmap[4, :, :] = np.mean(self.NAc_activity[left_left_middle, :, :], axis=0)
NAc_heatmap[5, :, :] = np.mean(self.NAc_activity[left_right_middle, :, :], axis=0)
NAc_heatmap[6, :, :] = np.mean(self.NAc_activity[right_left_middle, :, :], axis=0)
NAc_heatmap[7, :, :] = np.mean(
self.NAc_activity[right_right_middle, :, :], axis=0
)
NAc_heatmap[8, :, :] = np.mean(self.NAc_activity[left_left_late, :, :], axis=0)
NAc_heatmap[9, :, :] = np.mean(self.NAc_activity[left_right_late, :, :], axis=0)
NAc_heatmap[10, :, :] = np.mean(self.NAc_activity[right_left_late, :, :], axis=0)
NAc_heatmap[11, :, :] = np.mean(self.NAc_activity[right_right_late, :, :], axis=0)
fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(20, 15))
for i, ax in enumerate(axes.flat):
im = ax.imshow(
NAc_heatmap[i, :, :],
extent=[self.times[0], self.times[-1], self.NAc_activity.shape[1], 0],
aspect="auto",
cmap=cm.cividis,
vmin=0.0,
vmax=max_heatmap_val,
)
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.95, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
if save:
plt.savefig(save, bbox_inches="tight")
| 27,159 | 0 | 216 |
64580f2692073985264822164567bb614d8a4c4c | 1,769 | py | Python | programing/geeksforgeeks.py | meobach/Algorithms | 3e502132924ac9e647216bfef5b5f105e8c09b5b | [
"MIT"
] | null | null | null | programing/geeksforgeeks.py | meobach/Algorithms | 3e502132924ac9e647216bfef5b5f105e8c09b5b | [
"MIT"
] | null | null | null | programing/geeksforgeeks.py | meobach/Algorithms | 3e502132924ac9e647216bfef5b5f105e8c09b5b | [
"MIT"
] | null | null | null | # arr=Arr([2,1,3,4,5,6,1,1,6,2,4,5,3,1])
# print(arr.reverse())
arr=Arr([3, -5,-1])
print(arr.minimize_flip_k_length_subarr([1,2,3]))
| 29.483333 | 57 | 0.520068 | class Arr:
def __init__(self,item) -> None:
self.length=len(item)
self.arr=item
def reverse(self):
for i in range(int(self.length/2)):
temp=self.arr[i]
self.arr[i]=self.arr[self.length-i-1]
self.arr[self.length-i-1]=temp
return self.arr
def prefix_sum_circular_arr(self):
circular_arr=self.arr+self.arr
prefix_sum=[]
current_sum=0
prefix_residual=0
for i in range(self.length*2):
current_sum+=circular_arr[i]
prefix_sum.append(current_sum)
count=0
print(prefix_sum)
i=0
while(i<self.length):
for j in range(i,i+self.length):
if(count==self.length):
return i
else:
if(prefix_sum[j]-prefix_residual>=0):
count+=1
else:
prefix_residual=prefix_sum[j]
count=0
i=j+1
break
return -1
def maximum_product_length_string(self,str):
return str
def minimize_flip_k_length_subarr(self,arr):
return arr
def num_sub_arr_even_product(self,arr):
#for i in range(len(arr)):
return arr
#print(arr)
#return prefix_sum
# arr=Arr([2,1,3,4,5,6,1,1,6,2,4,5,3,1])
# print(arr.reverse())
class Number:
def __init__(self,number) -> None:
self.num=number
def sum_digit(self):
my_num=self.num
result=0
while(my_num!=0):
result+=my_num%10
my_num=int(my_num/10)
return result
arr=Arr([3, -5,-1])
print(arr.minimize_flip_k_length_subarr([1,2,3]))
| 1,402 | -19 | 252 |
1e67bd8da46926fc9941be5fbf374117f1a2b1eb | 1,159 | py | Python | old code/confusion.py | dll-ncai/AI-ForestWatch | 54c4090abfd98c5e6bc0e2a867f508f57656e698 | [
"MIT"
] | 2 | 2021-12-05T11:57:02.000Z | 2021-12-12T20:56:48.000Z | old code/confusion.py | dll-ncai/AI-ForestWatch | 54c4090abfd98c5e6bc0e2a867f508f57656e698 | [
"MIT"
] | null | null | null | old code/confusion.py | dll-ncai/AI-ForestWatch | 54c4090abfd98c5e6bc0e2a867f508f57656e698 | [
"MIT"
] | 2 | 2021-11-12T15:15:09.000Z | 2021-12-05T11:57:04.000Z | # Copyright (c) 2021, Technische Universität Kaiserslautern (TUK) & National University of Sciences and Technology (NUST).
# All rights reserved.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from __future__ import division
import sys
import pickle
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
if __name__ == '__main__':
confusion(file_path=sys.argv[1])
| 30.5 | 122 | 0.666954 | # Copyright (c) 2021, Technische Universität Kaiserslautern (TUK) & National University of Sciences and Technology (NUST).
# All rights reserved.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from __future__ import division
import sys
import pickle
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
def confusion(file_path):
# all_labels = ['{}'.format(x) for x in range(3)]
all_labels = ['noise', 'forest', 'not-forest']
with open(file_path, 'rb') as this:
matrix = pickle.load(this)
print(matrix)
# matrix = [[9.99424875e-01, 0.00000000e+00, 5.75103273e-04],
# [0.00000000e+00, 8.61850142e-01, 1.38149858e-01],
# [8.33658269e-05, 2.07877174e-01, 7.92039454e-01]]
# df_cm = pd.DataFrame(matrix, index=[i for i in all_labels],
# columns=[i for i in all_labels])
# plt.figure(figsize=(10,7))
# sn.heatmap(df_cm, cmap='BuPu', annot=True)
# plt.show()
if __name__ == '__main__':
confusion(file_path=sys.argv[1])
| 627 | 0 | 23 |
16e53805572b8113ae7560f4666939311a8abbfc | 2,218 | py | Python | app/chord.py | roi3363/music-pytheory | 53cc3c4f0025acef75411ff85e1ce365912267be | [
"MIT"
] | 2 | 2019-09-24T09:41:37.000Z | 2020-07-26T20:21:05.000Z | app/chord.py | roi3363/music-pytheory | 53cc3c4f0025acef75411ff85e1ce365912267be | [
"MIT"
] | null | null | null | app/chord.py | roi3363/music-pytheory | 53cc3c4f0025acef75411ff85e1ce365912267be | [
"MIT"
] | null | null | null | import data.music_theory as music_data
from app.note import Note
from app.base import Base
class Chord(Base):
"""
"""
def invert_chord(self, chord, inversion = 'root'):
"""
Args:
chord (list): The notes of the chord
inversion (str): The inversion name (root, first, second; third for 4-note chords)
Returns:
list: The notes of the inverted chord
"""
if inversion == 'root':
return chord
elif len(chord) == 3:
if inversion == 'first':
return chord[1], chord[2], chord[0]
elif inversion == 'second':
return chord[2], chord[0], chord[1]
elif len(chord) == 4:
if inversion == 'first':
return chord[1], chord[2], chord[3], chord[0]
elif inversion == 'second':
return chord[2], chord[3], chord[0], chord[1]
elif inversion == 'third':
return chord[3], chord[0], chord[1], chord[2]
return chord
| 34.65625 | 94 | 0.5789 | import data.music_theory as music_data
from app.note import Note
from app.base import Base
class Chord(Base):
"""
"""
def __init__(self, root, quality ='major', interval = None, inversion = None):
self.validate_arguments()
super().__init__()
self._root = Note(root)
self._quality = quality
self._interval = interval
self._inversion = inversion
quality_name = music_data.chord_qualities[self._quality]
self.name = f'{self._root}{quality_name}'
if self._interval:
self.name += f'{self._interval}'
chrom_scale = self.get_chromatic_scale(self._root.accidental) * 2
root_index = chrom_scale.index(self._root)
chrom_scale = chrom_scale[root_index:]
if self._interval:
chord_degrees = music_data.chords_types[f'{self._quality}{self._interval}']
else:
chord_degrees = music_data.chords_types[f'{self._quality}']
chord_notes = [chrom_scale[x] for x in chord_degrees]
if self._inversion:
self.chord_notes = self.invert_chord(chord_notes, self._inversion)
else:
self.chord_notes = chord_notes
def invert_chord(self, chord, inversion = 'root'):
"""
Args:
chord (list): The notes of the chord
inversion (str): The inversion name (root, first, second; third for 4-note chords)
Returns:
list: The notes of the inverted chord
"""
if inversion == 'root':
return chord
elif len(chord) == 3:
if inversion == 'first':
return chord[1], chord[2], chord[0]
elif inversion == 'second':
return chord[2], chord[0], chord[1]
elif len(chord) == 4:
if inversion == 'first':
return chord[1], chord[2], chord[3], chord[0]
elif inversion == 'second':
return chord[2], chord[3], chord[0], chord[1]
elif inversion == 'third':
return chord[3], chord[0], chord[1], chord[2]
return chord
def __repr__(self):
return self.name
def validate_arguments(self):
pass
| 1,082 | 0 | 80 |
27919122a1a37252cf66cbce135362572c292d99 | 1,341 | py | Python | lineflow/datasets/small_parallel_enja.py | sobamchan/lineflow | 708a875c090b7df48c9eca3f630915a9c6e5bbd6 | [
"MIT"
] | null | null | null | lineflow/datasets/small_parallel_enja.py | sobamchan/lineflow | 708a875c090b7df48c9eca3f630915a9c6e5bbd6 | [
"MIT"
] | null | null | null | lineflow/datasets/small_parallel_enja.py | sobamchan/lineflow | 708a875c090b7df48c9eca3f630915a9c6e5bbd6 | [
"MIT"
] | null | null | null | from lineflow.download import cached_download
from lineflow.datasets import Seq2SeqDataset
TRAIN_EN_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/train.en'
TRAIN_JA_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/train.ja'
DEV_EN_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/dev.en'
DEV_JA_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/dev.ja'
TEST_EN_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/test.en'
TEST_JA_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/test.ja'
| 44.7 | 110 | 0.722595 | from lineflow.download import cached_download
from lineflow.datasets import Seq2SeqDataset
TRAIN_EN_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/train.en'
TRAIN_JA_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/train.ja'
DEV_EN_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/dev.en'
DEV_JA_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/dev.ja'
TEST_EN_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/test.en'
TEST_JA_URL = 'https://raw.githubusercontent.com/odashi/small_parallel_enja/master/test.ja'
class SmallParallelEnJa(Seq2SeqDataset):
def __init__(self, split: str = 'train') -> None:
if split == 'train':
en_path = cached_download(TRAIN_EN_URL)
ja_path = cached_download(TRAIN_JA_URL)
elif split == 'dev':
en_path = cached_download(DEV_EN_URL)
ja_path = cached_download(DEV_JA_URL)
elif split == 'test':
en_path = cached_download(TEST_EN_URL)
ja_path = cached_download(TEST_JA_URL)
else:
raise ValueError(f"only 'train', 'dev' and 'test' are valid for 'split', but '{split}' is given.")
super().__init__(source_file_path=en_path, target_file_path=ja_path)
| 625 | 19 | 49 |
c76a98f0436678fb21037c006fad63fbc11d9a00 | 284 | py | Python | src/uykfe/sequence/simple/simple.py | Robbt/uykfe | c4b3d2a49ce14b2b6847a4b59bd55201c62ea8c3 | [
"BSD-2-Clause"
] | null | null | null | src/uykfe/sequence/simple/simple.py | Robbt/uykfe | c4b3d2a49ce14b2b6847a4b59bd55201c62ea8c3 | [
"BSD-2-Clause"
] | null | null | null | src/uykfe/sequence/simple/simple.py | Robbt/uykfe | c4b3d2a49ce14b2b6847a4b59bd55201c62ea8c3 | [
"BSD-2-Clause"
] | null | null | null |
from uykfe.sequence.db import DbControl
| 28.4 | 90 | 0.704225 |
from uykfe.sequence.db import DbControl
class SimpleControl(DbControl):
def weighted_artists(self, state, track):
for (weight, artist) in super(SimpleControl, self).weighted_artists(state, track):
yield (weight * self._unplayed(state, artist), artist)
| 178 | 10 | 54 |
1e128480da9319aae5d4a027db4f8bdc82c64ae1 | 251 | py | Python | tests/whoosh_tests/tests/__init__.py | speedplane/django-haystack | 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | [
"BSD-3-Clause"
] | 1 | 2017-10-12T14:25:06.000Z | 2017-10-12T14:25:06.000Z | tests/whoosh_tests/tests/__init__.py | speedplane/django-haystack | 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | [
"BSD-3-Clause"
] | 1 | 2016-08-03T18:01:43.000Z | 2016-08-03T18:03:00.000Z | tests/whoosh_tests/tests/__init__.py | speedplane/django-haystack | 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | [
"BSD-3-Clause"
] | null | null | null | import warnings
warnings.simplefilter('ignore', Warning)
from whoosh_tests.tests.test_forms import *
from whoosh_tests.tests.test_inputs import *
from whoosh_tests.tests.test_whoosh_query import *
from whoosh_tests.tests.test_whoosh_backend import *
| 31.375 | 52 | 0.844622 | import warnings
warnings.simplefilter('ignore', Warning)
from whoosh_tests.tests.test_forms import *
from whoosh_tests.tests.test_inputs import *
from whoosh_tests.tests.test_whoosh_query import *
from whoosh_tests.tests.test_whoosh_backend import *
| 0 | 0 | 0 |
6e542c21b275803ac419ea5e1f0ca19c451e8a0b | 407 | py | Python | clientmanager/migrations/0003_document_status.py | trebla64/django-app-ff | 4615e4cd23e9d9b6745533a8f60ffab8ff5215b6 | [
"CC0-1.0"
] | null | null | null | clientmanager/migrations/0003_document_status.py | trebla64/django-app-ff | 4615e4cd23e9d9b6745533a8f60ffab8ff5215b6 | [
"CC0-1.0"
] | null | null | null | clientmanager/migrations/0003_document_status.py | trebla64/django-app-ff | 4615e4cd23e9d9b6745533a8f60ffab8ff5215b6 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-06 08:15
from django.db import migrations, models
| 21.421053 | 69 | 0.60688 | # Generated by Django 3.2.8 on 2021-10-06 08:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clientmanager', '0002_client_manager'),
]
operations = [
migrations.AddField(
model_name='document',
name='status',
field=models.CharField(default='pending', max_length=64),
),
]
| 0 | 293 | 23 |
881799f5680f2888d83b164e8bc97741ebb51dec | 43 | py | Python | Chapter16/cf_rfem_hist_price/venv/lib/python3.6/locale.py | wtwong316/Mastering-Elasticsearch-7.0 | 8e88f938c9feb201649bd23c4d517bc6af93fbaa | [
"MIT"
] | 25 | 2019-03-08T01:03:03.000Z | 2022-02-14T17:38:32.000Z | Chapter16/cf_rfem_hist_price/venv/lib/python3.6/locale.py | wtwong316/Mastering-Elasticsearch-7.0 | 8e88f938c9feb201649bd23c4d517bc6af93fbaa | [
"MIT"
] | 2 | 2019-02-15T17:34:37.000Z | 2019-07-11T14:37:02.000Z | Chapter16/cf_rfem_hist_price/venv/lib/python3.6/locale.py | wtwong316/Mastering-Elasticsearch-7.0 | 8e88f938c9feb201649bd23c4d517bc6af93fbaa | [
"MIT"
] | 31 | 2019-01-15T20:16:50.000Z | 2022-03-01T05:47:38.000Z | /home/wai/anaconda3/lib/python3.6/locale.py | 43 | 43 | 0.813953 | /home/wai/anaconda3/lib/python3.6/locale.py | 0 | 0 | 0 |
e0b5c22d429a097b1237ab5e3fce5fb295657cd1 | 143 | py | Python | nose2/tests/functional/support/scenario/tests_in_package/setup.py | ltfish/nose2 | e47363dad10056cf906daf387613c21d74f37e56 | [
"BSD-2-Clause"
] | null | null | null | nose2/tests/functional/support/scenario/tests_in_package/setup.py | ltfish/nose2 | e47363dad10056cf906daf387613c21d74f37e56 | [
"BSD-2-Clause"
] | null | null | null | nose2/tests/functional/support/scenario/tests_in_package/setup.py | ltfish/nose2 | e47363dad10056cf906daf387613c21d74f37e56 | [
"BSD-2-Clause"
] | 1 | 2019-11-24T12:11:52.000Z | 2019-11-24T12:11:52.000Z | from setuptools import setup, find_packages
setup(name='pkg1',
packages=find_packages(),
test_suite='nose2.collector.collector')
| 20.428571 | 45 | 0.734266 | from setuptools import setup, find_packages
setup(name='pkg1',
packages=find_packages(),
test_suite='nose2.collector.collector')
| 0 | 0 | 0 |
62257ae4a782556aa3adeb2c6ac9c25afeee0495 | 2,575 | py | Python | 20211217/sort_contours.py | sgzqc/wechat | 6589915c46b8f51d28dba61c6da9702821f5b47c | [
"MIT"
] | 8 | 2021-12-02T10:01:55.000Z | 2022-03-18T13:00:18.000Z | 20211217/sort_contours.py | sgzqc/wechat | 6589915c46b8f51d28dba61c6da9702821f5b47c | [
"MIT"
] | null | null | null | 20211217/sort_contours.py | sgzqc/wechat | 6589915c46b8f51d28dba61c6da9702821f5b47c | [
"MIT"
] | 10 | 2021-10-01T23:38:15.000Z | 2022-03-21T05:04:07.000Z | import numpy as np
import argparse
import cv2
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=False, help='path to the image',default='./image/lego_blocks_1.png')
ap.add_argument('-m', '--method', required=False, help='Sorting method',default='left-to-right')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
accumEdged = np.zeros(image.shape[:2], dtype='uint8')
for chan in cv2.split(image):
chan = cv2.medianBlur(chan, 11)
edged = cv2.Canny(chan, 50, 200)
accumEdged = cv2.bitwise_or(accumEdged, edged)
# find contours and keep the largest ones
cnts = cv2.findContours(accumEdged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
orig = image.copy()
# unsorted
for (i, c) in enumerate(cnts):
orig = draw_contour(orig, c, i)
cv2.imshow('Unsorted', orig)
# sorted
(cnts, boundingboxes) = sort_contours(cnts, method=args['method'])
image2 = image.copy()
for (i, c) in enumerate(cnts):
image2 = draw_contour(image2, c, i)
cv2.imshow('Sorted', image2)
out = cv2.hconcat([image,orig,image2])
cv2.imwrite("./result/out1.jpg",out)
cv2.waitKey(0)
| 31.024096 | 114 | 0.632621 | import numpy as np
import argparse
import cv2
def grab_contours(cnts):
# if the length the contours tuple returned by cv2.findContours
# is '2' then we are using either OpenCV v2.4, v4-beta, or
# v4-official
if len(cnts) == 2:
cnts = cnts[0]
# if the length of the contours tuple is '3' then we are using
# either OpenCV v3, v4-pre, or v4-alpha
elif len(cnts) == 3:
cnts = cnts[1]
return cnts
def sort_contours(cnts, method='left-to-right'):
# initialize the reverse flag and sort index
reverse = False
i = 0
# handle if sort in reverse
if method == 'right-to-left' or method == 'bottom-to-top':
reverse = True
# handle if sort against y rather than x of the bounding box
if method == 'bottom-to-top' or method == 'top-to-bottom':
i = 1
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes), key=lambda b: b[1][i], reverse=reverse))
return (cnts, boundingBoxes)
def draw_contour(image, c, i):
M = cv2.moments(c)
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
cv2.putText(image, '#{}'.format(i + 1), (cX - 20, cY), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
return image
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=False, help='path to the image',default='./image/lego_blocks_1.png')
ap.add_argument('-m', '--method', required=False, help='Sorting method',default='left-to-right')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
accumEdged = np.zeros(image.shape[:2], dtype='uint8')
for chan in cv2.split(image):
chan = cv2.medianBlur(chan, 11)
edged = cv2.Canny(chan, 50, 200)
accumEdged = cv2.bitwise_or(accumEdged, edged)
# find contours and keep the largest ones
cnts = cv2.findContours(accumEdged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
orig = image.copy()
# unsorted
for (i, c) in enumerate(cnts):
orig = draw_contour(orig, c, i)
cv2.imshow('Unsorted', orig)
# sorted
(cnts, boundingboxes) = sort_contours(cnts, method=args['method'])
image2 = image.copy()
for (i, c) in enumerate(cnts):
image2 = draw_contour(image2, c, i)
cv2.imshow('Sorted', image2)
out = cv2.hconcat([image,orig,image2])
cv2.imwrite("./result/out1.jpg",out)
cv2.waitKey(0)
| 1,165 | 0 | 69 |
fe75f8645b99e6a9ba1b849178eb856864f1527c | 1,025 | py | Python | GUIforINFO.py | freshicet/Home_Depot | 91379144daeef7548bb7976379766df7ec90b383 | [
"Unlicense",
"MIT"
] | null | null | null | GUIforINFO.py | freshicet/Home_Depot | 91379144daeef7548bb7976379766df7ec90b383 | [
"Unlicense",
"MIT"
] | null | null | null | GUIforINFO.py | freshicet/Home_Depot | 91379144daeef7548bb7976379766df7ec90b383 | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Small GUI for getting all the info for the Home Depot survey.
from tkinter import *
master = Tk()
Label(master, text="First Name").grid(row=0)
Label(master, text="Last Name").grid(row=1)
Label(master, text="E-mail").grid(row=2)
Label(master, text="Zip Code").grid(row=3)
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
e3.grid(row=2, column=1)
e4.grid(row=3, column=1)
Button(master, text='Quit', command=master.quit).grid(
row=7, column=0, sticky=W, pady=4)
Button(master, text='Enter', command=show_entry_fields).grid(
row=8, column=1, sticky=W, pady=4)
mainloop()
| 22.777778 | 63 | 0.650732 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Small GUI for getting all the info for the Home Depot survey.
from tkinter import *
def show_entry_fields():
First_Name = e1.get()
Last_Name = e2.get()
email = e3.get()
zip_Code = e4.get()
file = open('File_INFO.txt', 'w')
file.write(First_Name + '\n')
file.write(Last_Name + '\n')
file.write(email + '\n')
file.write(zip_Code)
file.close()
master.destroy()
master = Tk()
Label(master, text="First Name").grid(row=0)
Label(master, text="Last Name").grid(row=1)
Label(master, text="E-mail").grid(row=2)
Label(master, text="Zip Code").grid(row=3)
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
e3.grid(row=2, column=1)
e4.grid(row=3, column=1)
Button(master, text='Quit', command=master.quit).grid(
row=7, column=0, sticky=W, pady=4)
Button(master, text='Enter', command=show_entry_fields).grid(
row=8, column=1, sticky=W, pady=4)
mainloop()
| 296 | 0 | 23 |
4c62d89701ad7d468174036c7f208033dd9bfb01 | 873 | py | Python | web-server/plugins/slycat-blob-parser.py | sandialabs/slycat | efbb91ce4ea4da08b58399eb820f6d6987408e44 | [
"BSD-3-Clause"
] | 66 | 2015-01-06T20:53:50.000Z | 2022-03-30T17:25:04.000Z | web-server/plugins/slycat-blob-parser.py | agentdavidjoseph/slycat | 690e1cb07a6fa990d7206265e18edb22ae3f62e7 | [
"BSD-3-Clause"
] | 782 | 2015-01-05T15:42:30.000Z | 2022-03-11T03:27:52.000Z | web-server/plugins/slycat-blob-parser.py | agentdavidjoseph/slycat | 690e1cb07a6fa990d7206265e18edb22ae3f62e7 | [
"BSD-3-Clause"
] | 23 | 2015-04-29T08:16:27.000Z | 2022-01-05T23:21:54.000Z | # Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
import numpy
import slycat.web.server
| 39.681818 | 117 | 0.749141 | # Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
import numpy
import slycat.web.server
def parse(database, model, input, files, aids, **kwargs):
if len(files) != len(aids):
cherrypy.log.error("slycat-blob-parser.py parse", "Number of files and artifact ids must match.")
raise Exception("Number of files and artifact ids must match.")
content_type = kwargs.get("content-type", "application/octet-stream")
for file, aid in zip(files, aids):
slycat.web.server.put_model_file(database, model, aid, file, content_type, input)
def register_slycat_plugin(context):
context.register_parser("slycat-blob-parser", "Binary Files", [], parse)
| 523 | 0 | 46 |
8c6d306387daa7e43907a9bed19723fe958470fa | 1,087 | py | Python | proteus/tests/MeshAdaptPUMI/test_meshLoad.py | burgreen/proteus | 033bbd3fd0ff11d53d8e85b2da1af49e10af9c5d | [
"MIT"
] | null | null | null | proteus/tests/MeshAdaptPUMI/test_meshLoad.py | burgreen/proteus | 033bbd3fd0ff11d53d8e85b2da1af49e10af9c5d | [
"MIT"
] | null | null | null | proteus/tests/MeshAdaptPUMI/test_meshLoad.py | burgreen/proteus | 033bbd3fd0ff11d53d8e85b2da1af49e10af9c5d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from ctypes import *
from proteus import MeshTools
from proteus import cmeshTools
from proteus.MeshAdaptPUMI import MeshAdaptPUMI
from nose.tools import eq_ as eq
from nose.tools import ok_ as ok
import os
def test_meshLoadPUMI(verbose=0):
"""Test to load serial PUMI model and mesh"""
testDir=os.path.dirname(os.path.abspath(__file__))
cubeMdl=testDir + '/cube.dmg'
cube670p1=testDir + '/cube.smb'
meshAdaptInstance = MeshAdaptPUMI.MeshAdaptPUMI()
meshAdaptInstance.loadModelAndMesh(cubeMdl, cube670p1)
mesh = MeshTools.TetrahedralMesh()
mesh.cmesh = cmeshTools.CMesh()
meshAdaptInstance.constructFromSerialPUMIMesh(mesh.cmesh)
cmeshTools.allocateGeometricInfo_tetrahedron(mesh.cmesh)
cmeshTools.computeGeometricInfo_tetrahedron(mesh.cmesh)
mesh.buildFromC(mesh.cmesh)
eq(mesh.nElements_global,670)
eq(mesh.nNodes_global,190)
eq(mesh.nEdges_global,977)
eq(mesh.nElementBoundaries_global,1458)
if __name__ == '__main__':
import nose
nose.main(defaultTest='test_meshLoad:test_meshLoadPUMI')
| 33.96875 | 61 | 0.769089 | #!/usr/bin/env python
from ctypes import *
from proteus import MeshTools
from proteus import cmeshTools
from proteus.MeshAdaptPUMI import MeshAdaptPUMI
from nose.tools import eq_ as eq
from nose.tools import ok_ as ok
import os
def test_meshLoadPUMI(verbose=0):
"""Test to load serial PUMI model and mesh"""
testDir=os.path.dirname(os.path.abspath(__file__))
cubeMdl=testDir + '/cube.dmg'
cube670p1=testDir + '/cube.smb'
meshAdaptInstance = MeshAdaptPUMI.MeshAdaptPUMI()
meshAdaptInstance.loadModelAndMesh(cubeMdl, cube670p1)
mesh = MeshTools.TetrahedralMesh()
mesh.cmesh = cmeshTools.CMesh()
meshAdaptInstance.constructFromSerialPUMIMesh(mesh.cmesh)
cmeshTools.allocateGeometricInfo_tetrahedron(mesh.cmesh)
cmeshTools.computeGeometricInfo_tetrahedron(mesh.cmesh)
mesh.buildFromC(mesh.cmesh)
eq(mesh.nElements_global,670)
eq(mesh.nNodes_global,190)
eq(mesh.nEdges_global,977)
eq(mesh.nElementBoundaries_global,1458)
if __name__ == '__main__':
import nose
nose.main(defaultTest='test_meshLoad:test_meshLoadPUMI')
| 0 | 0 | 0 |
caddaf8c2f2f5897b36a354154f9f52968e3d5cf | 458 | py | Python | clubapp/forms.py | thomaskise/pythonclub | 40361a945b9b95b17c3a8e628f18de4d062f77d1 | [
"Apache-2.0"
] | 2 | 2019-04-04T18:34:11.000Z | 2019-04-09T04:58:10.000Z | clubapp/forms.py | thomaskise/pythonclub | 40361a945b9b95b17c3a8e628f18de4d062f77d1 | [
"Apache-2.0"
] | null | null | null | clubapp/forms.py | thomaskise/pythonclub | 40361a945b9b95b17c3a8e628f18de4d062f77d1 | [
"Apache-2.0"
] | null | null | null | from django import forms
from .models import Meeting, Resource | 28.625 | 56 | 0.68559 | from django import forms
from .models import Meeting, Resource
class MeetingForm(forms.ModelForm):
class Meta:
model=Meeting
fields='__all__'
#can list individual fields seperated by a comma
#also definition of form fields
class ResourceForm(forms.ModelForm):
class Meta:
model=Resource
fields='__all__'
#can list individual fields seperated by a comma
#also definition of form fields | 0 | 350 | 46 |
48781dd4e4933f0deb04ff6e449eade807322d1a | 4,589 | py | Python | demo/cubemap_video_max.py | RyosukeHori/mmsegmentation | 5024918af521dce29ce2ea257aa378e305cef67f | [
"Apache-2.0"
] | null | null | null | demo/cubemap_video_max.py | RyosukeHori/mmsegmentation | 5024918af521dce29ce2ea257aa378e305cef67f | [
"Apache-2.0"
] | null | null | null | demo/cubemap_video_max.py | RyosukeHori/mmsegmentation | 5024918af521dce29ce2ea257aa378e305cef67f | [
"Apache-2.0"
] | null | null | null | from argparse import ArgumentParser
import numpy as np
import py360convert
import os
import cv2
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot, ret_result
from mmseg.core.evaluation import get_palette
from PIL import Image
import mmcv
if __name__ == '__main__':
main()
| 37.614754 | 110 | 0.593158 | from argparse import ArgumentParser
import numpy as np
import py360convert
import os
import cv2
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot, ret_result
from mmseg.core.evaluation import get_palette
from PIL import Image
import mmcv
def main():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='cityscapes',
help='Color palette used for segmentation map')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
model = init_segmentor(args.config, args.checkpoint, device=args.device)
res_width, res_height = 224, 224
for j in range(74, 9166):
# test a single image
img_path = './demo/1013_take_009/{:06d}.png'.format(j)
orig_img = np.array(Image.open(img_path))
if len(orig_img.shape) == 2:
orig_img = orig_img[..., None]
height, width, _ = orig_img.shape
# normal segmentation image as OR Image
im_gray = ret_result(model, img_path, inference_segmentor(model, img_path), get_palette(args.palette))
im_gray = im_gray.astype(np.uint8)
OR_im = np.zeros(im_gray.shape).astype(im_gray.dtype)
retval, im_th = cv2.threshold(im_gray, 127, 255, cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cv2.drawContours(OR_im, contours, max_index, 255, -1)
# scroll by 45 deg
for i in range(360 // 45):
# scroll image
im_scroll = np.roll(orig_img, width // 8 * i, axis=1)
tmp = Image.fromarray(im_scroll)
tmp.save("./demo/tmp/tmp0.png")
# e2c
img = py360convert.e2c(im_scroll, face_w=width // 4, mode='bilinear')
img = Image.fromarray(img)
im_path = os.getcwd() + "/demo/Image/tmp.png"
img.save(im_path)
img.save("./demo/tmp/tmp1.png")
# segmentation
result = inference_segmentor(model, im_path)
img = ret_result(model, args.img, result, get_palette(args.palette))
tmp = Image.fromarray(img)
tmp.save("./demo/tmp/tmp2.png")
if len(img.shape) == 2:
img = img[..., None]
# c2e
img_e = py360convert.c2e(img, h=height, w=width, mode='bilinear')
# scroll
img_e = np.roll(img_e, -width // 8 * i, axis=1).astype(np.uint8).squeeze(2)
tmp = Image.fromarray(img_e)
tmp.save("./demo/tmp/tmp3.png")
#cv2.imshow("img_e", img_e)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# extract contors
external = np.zeros(im_gray.shape).astype(im_gray.dtype)
retval, im_th = cv2.threshold(img_e, 127, 255, cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if not contours:
continue
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cv2.drawContours(external, contours, max_index, 255, -1)
tmp = Image.fromarray(external)
tmp.save("./demo/tmp/tmp4.png")
# OR operation
OR_im = cv2.bitwise_or(OR_im, external)
#cv2.imshow("and", OR_im)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
external = OR_im
if (any(OR_im[:, 0]) or any(OR_im[:, -1])) and not (all(OR_im[0]) or all(OR_im[-1])):
external2 = external.copy()
areas[max_index] = 0
max_index = np.argmax(areas)
cv2.drawContours(external2, contours, max_index, 255, -1)
if any(external2[:, 0]) and any(external2[:, -1]):
external = external2
# save result
out = Image.fromarray(external)
#out.show()
out2 = out.resize((res_width, res_height))
out2.save('./demo/1013_take_009_cont/{:06d}.png'.format(j))
#img2.save('./demo/1_e2c_col.png'.format(i))
print('{:06d}.png saved'.format(j))
if __name__ == '__main__':
main()
| 4,257 | 0 | 23 |
8415119534766c8a202d2b0fad8956d28398d2e9 | 932 | py | Python | netests/converters/vrf/nxos/ssh.py | Netests/netests | 1a48bda461761c4ec854d6fa0c38629049009a4a | [
"MIT"
] | 14 | 2020-06-08T07:34:59.000Z | 2022-03-14T08:52:03.000Z | netests/converters/vrf/nxos/ssh.py | Netests/netests | 1a48bda461761c4ec854d6fa0c38629049009a4a | [
"MIT"
] | null | null | null | netests/converters/vrf/nxos/ssh.py | Netests/netests | 1a48bda461761c4ec854d6fa0c38629049009a4a | [
"MIT"
] | 3 | 2020-06-19T03:57:05.000Z | 2020-06-22T22:46:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from netests.constants import NOT_SET
from netests.protocols.vrf import VRF, ListVRF
| 25.888889 | 72 | 0.553648 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from netests.constants import NOT_SET
from netests.protocols.vrf import VRF, ListVRF
def _nxos_vrf_ssh_converter(
hostname: str(),
cmd_output,
options={}
) -> ListVRF:
if not isinstance(cmd_output, dict):
cmd_output = json.loads(cmd_output)
vrf_list = ListVRF(list())
for vrf in cmd_output.get('TABLE_vrf', NOT_SET).get('ROW_vrf'):
vrf_list.vrf_lst.append(
VRF(
vrf_name=vrf.get('vrf_name', NOT_SET),
vrf_id=vrf.get('vrf_id', NOT_SET),
vrf_type=NOT_SET,
l3_vni=NOT_SET,
rd=vrf.get('rd') if vrf.get('rd') != '0:0' else NOT_SET,
rt_imp=NOT_SET,
rt_exp=NOT_SET,
imp_targ=NOT_SET,
exp_targ=NOT_SET,
options=options
)
)
return vrf_list
| 763 | 0 | 23 |
b420e19501b17b9d1f75e7151a97ea68cce75225 | 10,477 | py | Python | pomdp_problems/load_unload/load_unload.py | justinchiu/pomdp-py | 27fd8cc3b215b428289d89ec9ed44d88910fc4ea | [
"MIT"
] | 87 | 2020-02-16T03:12:10.000Z | 2022-03-31T08:38:10.000Z | pomdp_problems/load_unload/load_unload.py | justinchiu/pomdp-py | 27fd8cc3b215b428289d89ec9ed44d88910fc4ea | [
"MIT"
] | 15 | 2020-08-01T00:25:33.000Z | 2022-02-19T22:37:11.000Z | pomdp_problems/load_unload/load_unload.py | justinchiu/pomdp-py | 27fd8cc3b215b428289d89ec9ed44d88910fc4ea | [
"MIT"
] | 26 | 2020-02-20T01:15:33.000Z | 2022-03-30T16:21:37.000Z | """The load unload problem. An agent is placed on a one dimensional grid world
and is tasked with loading itself up on the right side of the world and
unloading on the left. The agent can observe whether or not it is in the load or
unload block but can not tell its exact location of whether it is loaded or
unloaded. Therefore the agent must maintain belief about it's location and load
status.
States are defined by the location of the agent and whether or not it is loaded
Actions: "move-left", "move-right"
Rewards:
+100 for moving into the unload block while loaded
-1 otherwise
"""
import pomdp_py
import random
import numpy as np
import sys
import copy
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
EPSILON = 1e-3
LOAD_LOCATION = 10
# Observation model
class LUObservationModel(pomdp_py.ObservationModel):
"""This problem is small enough for the probabilities to be directly given
externally"""
def argmax(self, next_state, action, normalized=False, **kwargs):
"""Returns the most likely observation"""
return self.sample(next_state, action)
# Transition Model
class LUTransitionModel(pomdp_py.TransitionModel):
"""This problem is small enough for the probabilities to be directly given
externally"""
def argmax(self, state, action, normalized=False, **kwargs):
"""Returns the most likely next state"""
return self.sample(state, action)
# Reward Model
# Policy Model
class LUPolicyModel(pomdp_py.RandomRollout):
"""This is an extremely dumb policy model; To keep consistent
with the framework."""
def argmax(self, state, normalized=False, **kwargs):
"""Returns the most likely reward"""
raise NotImplementedError
if __name__ == '__main__':
main()
| 36.890845 | 89 | 0.626802 | """The load unload problem. An agent is placed on a one dimensional grid world
and is tasked with loading itself up on the right side of the world and
unloading on the left. The agent can observe whether or not it is in the load or
unload block but can not tell its exact location of whether it is loaded or
unloaded. Therefore the agent must maintain belief about it's location and load
status.
States are defined by the location of the agent and whether or not it is loaded
Actions: "move-left", "move-right"
Rewards:
+100 for moving into the unload block while loaded
-1 otherwise
"""
import pomdp_py
import random
import numpy as np
import sys
import copy
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
EPSILON = 1e-3
LOAD_LOCATION = 10
class LUState(pomdp_py.State):
def __init__(self, x, loaded):
if type(x) != int or x < 0:
raise ValueError("Invalid state: {}\n".format((x, loaded)) +
"x must be an integer > 0")
if type(loaded) != bool:
raise ValueError("Invalid state: {}\n".format((x, loaded)) +
"loaded must be a boolean")
if x == 0 and loaded == True:
raise ValueError("Agent can not be loaded in the 0th position")
if x == LOAD_LOCATION and loaded == False:
raise ValueError("Agent can not be unloaded in the last position")
self.x = x
self.loaded = loaded
def __hash__(self):
return hash((self.x, self.loaded))
def __eq__(self, other):
if isinstance(other, LUState):
return self.x == other.x and self.loaded == self.loaded
elif type(other) == tuple:
return self.x == other[0] and self.loaded == other[1]
def __str__(self):
return str((self.x, self.loaded))
def __repr__(self):
return "State({})".format(self)
class LUAction(pomdp_py.Action):
def __init__(self, name):
if name not in ["move-left", "move-right"]:
raise ValueError("Invalid action: %s" % name)
self.name = name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if isinstance(other, LUAction):
return self.name == other.name
elif type(other) == str:
return self.name == other
def __str__(self):
return self.name
def __repr__(self):
return "Action(%s)" % self.name
class LUObservation(pomdp_py.Observation):
def __init__(self, obs):
if obs not in ["load", "unload", "middle"]:
raise ValueError("Invalid observation: {}\n".format(name) +
"Observation must be an integer > 0")
self.name = obs
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if isinstance(other, LUObservation):
return self.name == other.name
elif type(other) == str:
return self.name == other
def __str__(self):
return str(self.name)
def __repr__(self):
return "Observation(%s)" % str(self.x)
# Observation model
class LUObservationModel(pomdp_py.ObservationModel):
"""This problem is small enough for the probabilities to be directly given
externally"""
def probability(self, observation, next_state, action, normalized=False, **kwargs):
if observation != self.sample(next_state, action):
# return EPSILON to avoid degradation of particles
return EPSILON
else:
return 1 - EPSILON
def sample(self, next_state, action, normalized=False, **kwargs):
if next_state.x == 0:
return LUObservation("unload")
elif next_state.x == LOAD_LOCATION:
return LUObservation("load")
else:
return LUObservation("middle")
def argmax(self, next_state, action, normalized=False, **kwargs):
"""Returns the most likely observation"""
return self.sample(next_state, action)
# Transition Model
class LUTransitionModel(pomdp_py.TransitionModel):
"""This problem is small enough for the probabilities to be directly given
externally"""
def probability(self, next_state, state, action, normalized=False, **kwargs):
if next_state != self.sample(state, action):
return EPSILON
else:
return 1 - EPSILON
def sample(self, state, action, normalized=False, **kwargs):
if ((state.x == LOAD_LOCATION and action == "move-right") or
(state.x == 0 and action == "move-left")):
# trying to make invalid move, stay in the same place
return state
if action == "move-right":
# make sure we're always loaded in the far right cell
if state.x == LOAD_LOCATION - 1:
return LUState(state.x + 1, True)
return LUState(state.x + 1, state.loaded)
if action == "move-left":
# make sure we're always unloaded in the first cell
if state.x == 1:
return LUState(state.x - 1, False)
return LUState(state.x - 1, state.loaded)
def argmax(self, state, action, normalized=False, **kwargs):
"""Returns the most likely next state"""
return self.sample(state, action)
# Reward Model
class LURewardModel(pomdp_py.RewardModel):
def probability(self, reward, state, action, next_state, normalized=False, **kwargs):
if reward == self.sample(state, action):
return 1.0
else:
return 0.0
def sample(self, state, action, next_state, normalized=False, **kwargs):
# if we are unloaded things, give reward 100, otherwise give -1
if action == "move-left" and state.loaded == True and state.x == 1:
return 100
else:
return -1
def argmax(self, state, action, next_state, normalized=False, **kwargs):
"""Returns the most likely reward"""
return self.sample(state, action)
# Policy Model
class LUPolicyModel(pomdp_py.RandomRollout):
"""This is an extremely dumb policy model; To keep consistent
with the framework."""
def __init__(self):
self._all_actions = {LUAction("move-right"), LUAction("move-left")}
def probability(self, action, state, normalized=False, **kwargs):
raise NotImplementedError # Never used
def sample(self, state, normalized=False, **kwargs):
return self.get_all_actions().random()
def argmax(self, state, normalized=False, **kwargs):
"""Returns the most likely reward"""
raise NotImplementedError
def get_all_actions(self, **kwargs):
return self._all_actions
class LoadUnloadProblem(pomdp_py.POMDP):
def __init__(self, init_state, init_belief):
"""init_belief is a Distribution."""
agent = pomdp_py.Agent(init_belief,
LUPolicyModel(),
LUTransitionModel(),
LUObservationModel(),
LURewardModel())
env = pomdp_py.Environment(init_state,
LUTransitionModel(),
LURewardModel())
super().__init__(agent, env, name="LoadUnloadProblem")
def generate_random_state():
# Flip a coin to determine if we are loaded
loaded = np.random.rand() > 0.5
location = np.random.randint(0, LOAD_LOCATION + 1)
if location == 0:
loaded = False
if location == LOAD_LOCATION:
loaded = True
return LUState(location, loaded)
def generate_init_belief(num_particles):
particles = []
for _ in range(num_particles):
particles.append(generate_random_state())
return pomdp_py.Particles(particles)
def test_planner(load_unload_problem, planner, nsteps=3, discount=0.95):
gamma = 1.0
total_reward = 0
total_discounted_reward = 0
fig = plt.figure()
plt.title("Load/Unload problem (Red = empty, Blue = full)")
plt.xlabel("Position")
ax = fig.add_subplot(111)
ax.set_xlim(-1, LOAD_LOCATION+1)
ax.set_ylim(0, 2)
x, y = [], []
scat, = ax.plot(x, y, marker="x", markersize=20, ls=" ", color="black")
def update(t):
nonlocal gamma, total_reward, total_discounted_reward
print("==== Step %d ====" % (t+1))
action = planner.plan(load_unload_problem.agent)
env_reward = load_unload_problem.env.state_transition(action, execute=True)
true_state = copy.deepcopy(load_unload_problem.env.state)
real_observation = load_unload_problem.env.provide_observation(
load_unload_problem.agent.observation_model, action)
load_unload_problem.agent.update_history(action, real_observation)
planner.update(load_unload_problem.agent, action, real_observation)
total_reward += env_reward
total_discounted_reward += env_reward * gamma
gamma *= discount
print("True state: %s" % true_state)
print("Action: %s" % str(action))
print("Observation: %s" % str(real_observation))
print("Reward: %s" % str(env_reward))
print("Reward (Cumulative): %s" % str(total_reward))
print("Reward (Cumulative Discounted): %s" % str(total_discounted_reward))
print("Belief: %s" % str(load_unload_problem.agent.sample_belief()))
if isinstance(planner, pomdp_py.POUCT):
print("__num_sims__: %d" % planner.last_num_sims)
print("__plan_time__: %.5f" % planner.last_planning_time)
if isinstance(planner, pomdp_py.PORollout):
print("__best_reward__: %d" % planner.last_best_reward)
new_x, new_y = [true_state.x], [1]
scat.set_data(new_x, new_y)
scat.set_color("b" if true_state.loaded else "r")
return scat,
ani = FuncAnimation(fig, update, frames=nsteps, interval=500)
plt.show()
def main():
init_state = generate_random_state()
init_belief = generate_init_belief(num_particles=100)
load_unload_problem = LoadUnloadProblem(init_state, init_belief)
print("** Testing POMCP **")
pomcp = pomdp_py.POMCP(max_depth=100, discount_factor=0.95,
num_sims=100, exploration_const=110,
rollout_policy=load_unload_problem.agent.policy_model)
test_planner(load_unload_problem, pomcp, nsteps=100)
if __name__ == '__main__':
main()
| 7,003 | 859 | 809 |
f3a1df15ec24260cf5c5ef37c81224449331dcaf | 1,333 | py | Python | oops_fhir/r4/code_system/code_search_support.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/code_system/code_search_support.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/code_system/code_search_support.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["CodeSearchSupport"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class CodeSearchSupport:
"""
CodeSearchSupport
The degree to which the server supports the code search parameter on
ValueSet, if it is supported.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/code-search-support
"""
explicit = CodeSystemConcept(
{
"code": "explicit",
"definition": "The search for code on ValueSet only includes codes explicitly detailed on includes or expansions.",
"display": "Explicit Codes",
}
)
"""
Explicit Codes
The search for code on ValueSet only includes codes explicitly detailed on includes or expansions.
"""
all_ = CodeSystemConcept(
{
"code": "all",
"definition": "The search for code on ValueSet only includes all codes based on the expansion of the value set.",
"display": "Implicit Codes",
}
)
"""
Implicit Codes
The search for code on ValueSet only includes all codes based on the expansion of the value set.
"""
| 24.236364 | 127 | 0.651163 | from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["CodeSearchSupport"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class CodeSearchSupport:
"""
CodeSearchSupport
The degree to which the server supports the code search parameter on
ValueSet, if it is supported.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/code-search-support
"""
explicit = CodeSystemConcept(
{
"code": "explicit",
"definition": "The search for code on ValueSet only includes codes explicitly detailed on includes or expansions.",
"display": "Explicit Codes",
}
)
"""
Explicit Codes
The search for code on ValueSet only includes codes explicitly detailed on includes or expansions.
"""
all_ = CodeSystemConcept(
{
"code": "all",
"definition": "The search for code on ValueSet only includes all codes based on the expansion of the value set.",
"display": "Implicit Codes",
}
)
"""
Implicit Codes
The search for code on ValueSet only includes all codes based on the expansion of the value set.
"""
class Meta:
resource = _resource
| 0 | 19 | 27 |
71cdd1f0685eebf6a131858e768d45b3c37617a0 | 1,428 | py | Python | tests/c_pointer_test.py | jacobwilliams/flist | 6255b824d81b1f97aa48aaabe912b9af313dce49 | [
"BSD-3-Clause"
] | 26 | 2015-11-14T18:41:51.000Z | 2021-11-23T14:55:41.000Z | tests/c_pointer_test.py | jacobwilliams/flist | 6255b824d81b1f97aa48aaabe912b9af313dce49 | [
"BSD-3-Clause"
] | 3 | 2016-10-04T14:42:56.000Z | 2020-02-20T21:48:04.000Z | tests/c_pointer_test.py | jacobwilliams/flist | 6255b824d81b1f97aa48aaabe912b9af313dce49 | [
"BSD-3-Clause"
] | 6 | 2015-11-15T15:34:10.000Z | 2018-11-07T13:16:26.000Z |
""" Some experiments with python ctypes """
# build the shared library using something like:
#
# ifort -shared -fPIC key_module.f90 linked_list_module.f90 tests/blah_module.f90 tests/c_pointer_test_module.f90 -o test.so
from ctypes import *
# load the shared library:
test = CDLL('test.so')
# define the procedure interfaces:
initialize_list = test.initialize_list
create_model = test.create_model
access_model = test.access_model
destroy_model = test.destroy_model
destroy_list = test.destroy_list
initialize_list.restype = None
access_model.restype = None
destroy_model.restype = None
destroy_list.restype = None
create_model.argtypes = [c_int]
create_model.restype = POINTER(c_int) # `c_long` also seems to work ?
# not really sure what is correct for this.
# Fortran returns a `type(c_ptr)`, maybe
# in this context it doesn't matter
print('')
print( 'calling initialize_list...')
initialize_list()
print( 'calling create_model...')
i = c_int(989) # some input to the model
cp = create_model(i)
#cp = byref(cp) # - don't need this if subroutine argument has `value` attribute
print( 'calling access_model...')
for j in range(10):
access_model(cp)
print( 'calling destroy_model...')
destroy_model(cp)
print( 'calling destroy_list...')
destroy_list()
| 26.943396 | 124 | 0.672969 |
""" Some experiments with python ctypes """
# build the shared library using something like:
#
# ifort -shared -fPIC key_module.f90 linked_list_module.f90 tests/blah_module.f90 tests/c_pointer_test_module.f90 -o test.so
from ctypes import *
# load the shared library:
test = CDLL('test.so')
# define the procedure interfaces:
initialize_list = test.initialize_list
create_model = test.create_model
access_model = test.access_model
destroy_model = test.destroy_model
destroy_list = test.destroy_list
initialize_list.restype = None
access_model.restype = None
destroy_model.restype = None
destroy_list.restype = None
create_model.argtypes = [c_int]
create_model.restype = POINTER(c_int) # `c_long` also seems to work ?
# not really sure what is correct for this.
# Fortran returns a `type(c_ptr)`, maybe
# in this context it doesn't matter
print('')
print( 'calling initialize_list...')
initialize_list()
print( 'calling create_model...')
i = c_int(989) # some input to the model
cp = create_model(i)
#cp = byref(cp) # - don't need this if subroutine argument has `value` attribute
print( 'calling access_model...')
for j in range(10):
access_model(cp)
print( 'calling destroy_model...')
destroy_model(cp)
print( 'calling destroy_list...')
destroy_list()
| 0 | 0 | 0 |
d99e2c9e3d3cd7002f6fd3ed81916d4e62da0af9 | 931 | py | Python | src/txdocumint/error.py | fusionapp/documint-client-python | 87818629e5c1641c103d4e8837095c622e5180ce | [
"MIT"
] | null | null | null | src/txdocumint/error.py | fusionapp/documint-client-python | 87818629e5c1641c103d4e8837095c622e5180ce | [
"MIT"
] | 13 | 2016-04-30T09:00:07.000Z | 2018-07-02T13:22:28.000Z | src/txdocumint/error.py | fusionapp/documint-client-python | 87818629e5c1641c103d4e8837095c622e5180ce | [
"MIT"
] | 1 | 2018-07-05T07:42:43.000Z | 2018-07-05T07:42:43.000Z | class DocumintErrorCause(object):
"""
Specific error cause.
"""
class DocumintError(RuntimeError):
"""
Structured Documint error.
"""
class MalformedDocumintError(RuntimeError):
"""
An error, indicated by status code, was malformed.
:ivar bytes data: Error response data.
"""
__all__ = ['DocumintError', 'MalformedDocumintError']
class DocumintErrorCause(object):
    """One specific cause attached to a Documint error.

    Holds the machine-readable ``type``, the short ``reason``, and an
    optional longer ``description`` (``None`` when absent).
    """

    def __init__(self, type, reason, description=None):
        self.type = type
        self.reason = reason
        self.description = description

    def __repr__(self):
        # Angle-bracket debug form: <ClassName type=... reason=... description=...>
        fields = (self.__class__.__name__, self.type, self.reason,
                  self.description)
        return '<{} type={} reason={} description={}>'.format(*fields)
class DocumintError(RuntimeError):
    """Structured Documint error.

    ``causes`` is the sequence of :class:`DocumintErrorCause`-like objects
    reported by the service; it is also passed to ``RuntimeError`` so it
    shows up in ``args`` and the default string form.
    """

    def __init__(self, causes):
        super(DocumintError, self).__init__(causes)
        self.causes = causes
class MalformedDocumintError(RuntimeError):
    """
    An error, indicated by status code, was malformed.

    :ivar bytes data: Error response data.
    """

    def __init__(self, data):
        super(MalformedDocumintError, self).__init__(data)
        self.data = data
__all__ = ['DocumintError', 'MalformedDocumintError']
| 449 | 0 | 105 |
35b7cff2cc3b238cd4bfb994a7196a1afa8029b3 | 1,305 | py | Python | geokey/users/tests/test_templatetags.py | universityofsussex/geokey | 25e161dbc81841c57c148053dbe99facc81e84b8 | [
"Apache-2.0"
] | null | null | null | geokey/users/tests/test_templatetags.py | universityofsussex/geokey | 25e161dbc81841c57c148053dbe99facc81e84b8 | [
"Apache-2.0"
] | null | null | null | geokey/users/tests/test_templatetags.py | universityofsussex/geokey | 25e161dbc81841c57c148053dbe99facc81e84b8 | [
"Apache-2.0"
] | null | null | null | """Tests for template tags of users."""
from django.test import TestCase
from geokey.categories.tests.model_factories import CategoryFactory
from ..templatetags import filter_tags
| 29 | 72 | 0.603831 | """Tests for template tags of users."""
from django.test import TestCase
from geokey.categories.tests.model_factories import CategoryFactory
from ..templatetags import filter_tags
class TemplateTagsTest(TestCase):
    """Unit tests for the custom template filters in ``filter_tags``."""

    def test_show_restrict(self):
        """show_restrict emits the restrict link only when the category id is a key."""
        category = CategoryFactory.create()
        self.assertEqual(
            filter_tags.show_restrict({str(category.id): {}}, category),
            '<a href="#" class="text-danger activate-detailed">'
            'Restrict further</a>'
        )
        # Category id absent from the dict -> empty string.
        self.assertEqual(
            filter_tags.show_restrict({'2': {}}, category),
            ''
        )

    def test_is_selected(self):
        """is_selected returns 'selected' when str(value) is in the list, else ''."""
        # NOTE(review): local name shadows the ``dict`` builtin; harmless
        # inside this test but worth renaming if the test is ever touched.
        dict = ["1", "2", "3"]
        self.assertEqual(filter_tags.is_selected(1, dict), 'selected')
        self.assertEqual(filter_tags.is_selected(4, dict), '')

    def test_is_in(self):
        """is_in is true when str(key) is a key of the dict."""
        dict = {
            '1': {},
            '2': {}
        }
        self.assertTrue(filter_tags.is_in(dict, 1))
        self.assertFalse(filter_tags.is_in(dict, 4))

    def test_minval(self):
        """minval returns the 'minval' entry, or '' when missing."""
        self.assertEqual(filter_tags.minval({'minval': 5}), 5)
        self.assertEqual(filter_tags.minval({}), '')

    def test_maxval(self):
        """maxval returns the 'maxval' entry, or '' when missing."""
        self.assertEqual(filter_tags.maxval({'maxval': 5}), 5)
        self.assertEqual(filter_tags.maxval({}), '')
| 952 | 12 | 157 |
13a4e7e7126b95c3f73e3e4068d9d93479ca736a | 1,120 | py | Python | regression.py | xiaoJacky/machineLearning | 518f2e7c13ebf2674e6e5dde95fd305cbd4b99eb | [
"Apache-2.0"
] | 1 | 2017-08-15T01:36:12.000Z | 2017-08-15T01:36:12.000Z | regression.py | xiaoJacky/machineLearning | 518f2e7c13ebf2674e6e5dde95fd305cbd4b99eb | [
"Apache-2.0"
] | null | null | null | regression.py | xiaoJacky/machineLearning | 518f2e7c13ebf2674e6e5dde95fd305cbd4b99eb | [
"Apache-2.0"
] | null | null | null | #-*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
'''
最小二乘法回归
参考博客:http://blog.csdn.net/wangyangzhizhou/article/details/60133958
line 代数推导
matrixLine 矩阵推导
'''
if __name__ == '__main__':
# line()
matrixLine()
| 20.363636 | 66 | 0.5625 | #-*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
'''
最小二乘法回归
参考博客:http://blog.csdn.net/wangyangzhizhou/article/details/60133958
line 代数推导
matrixLine 矩阵推导
'''
def calcAB(x, y):
    """Ordinary least-squares fit of y = a*x + b; returns (a, b).

    Uses the closed-form normal-equation solution. Raises
    ZeroDivisionError when all x values coincide (degenerate fit),
    matching the original behaviour.
    """
    n = len(x)
    s_x = sum(x)
    s_y = sum(y)
    s_xx = sum(xi * xi for xi in x)
    s_xy = sum(xi * yi for xi, yi in zip(x, y))
    denom = n * s_xx - s_x * s_x
    slope = (n * s_xy - s_x * s_y) / denom
    intercept = (s_xx * s_y - s_x * s_xy) / denom
    return slope, intercept
def line():
    """Fit a line to the sample data via ``calcAB`` and plot it.

    Prints the fitted equation, then draws the fitted line over a
    scatter of the raw points (blocks on ``plt.show()``).
    """
    xi = [1,2,3,4,5,6,7,8,9,10]
    yi = [10,11.5,12,13,14.5,15.5,16.8,17.3,18,18.7]
    a, b = calcAB(xi,yi)
    print("y = %10.5fx + %10.5f" %(a,b))
    # Dense x grid for drawing the fitted line smoothly.
    x = np.linspace(0,10)
    y = a * x + b
    plt.plot(x,y)
    plt.scatter(xi,yi)
    plt.show()
def matrixLine():
    """Least-squares line fit using numpy's matrix solver, then plot.

    Builds the design matrix A = [x, 1] and solves min ||A @ [a, b] - y||
    with ``numpy.linalg.lstsq``; prints the fitted equation and shows the
    data points with the fitted line (blocks on ``plt.show()``).
    """
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    y = [10, 11.5, 12, 13, 14.5, 15.5, 16.8, 17.3, 18, 18.7]
    # Design matrix: column of x values plus a column of ones for the intercept.
    A = np.vstack([x, np.ones(len(x))]).T
    # rcond=None selects the modern machine-precision cutoff and avoids the
    # FutureWarning numpy raises when rcond is omitted.
    a, b = np.linalg.lstsq(A, y, rcond=None)[0]
    print("y = %10.5fx + %10.5f" % (a, b))
    x = np.array(x)
    y = np.array(y)
    plt.plot(x, y, 'o', label='data', markersize=10)
    plt.plot(x, a * x + b, 'r', label='line')
    plt.show()
if __name__ == '__main__':
# line()
matrixLine()
| 814 | 0 | 69 |
45ed68759dc66b9a7b5994d75aea2cdb1b6b467e | 9,405 | py | Python | pixiedust/utils/sampleData.py | jordangeorge/pixiedust | 00e49f4e8ba44cc248685146d3ad7e2d04ac6cd4 | [
"Apache-2.0"
] | null | null | null | pixiedust/utils/sampleData.py | jordangeorge/pixiedust | 00e49f4e8ba44cc248685146d3ad7e2d04ac6cd4 | [
"Apache-2.0"
] | null | null | null | pixiedust/utils/sampleData.py | jordangeorge/pixiedust | 00e49f4e8ba44cc248685146d3ad7e2d04ac6cd4 | [
"Apache-2.0"
] | null | null | null | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from six import iteritems
import pixiedust
from pixiedust.utils.shellAccess import ShellAccess
from pixiedust.utils.template import PixiedustTemplateEnvironment
from pixiedust.utils.environment import Environment,scalaGateway
import pandas as pd
import uuid
import tempfile
from collections import OrderedDict
from IPython.display import display, HTML, Javascript
try:
from urllib.request import Request, urlopen, URLError, HTTPError
except ImportError:
from urllib2 import Request, urlopen, URLError, HTTPError
dataDefs = OrderedDict([
("1", {
"displayName": "Car performance data",
"url": "https://github.com/ibm-watson-data-lab/open-data/raw/master/cars/cars.csv",
"topic": "transportation",
"publisher": "IBM",
"schema2": [('mpg','int'),('cylinders','int'),('engine','double'),('horsepower','int'),('weight','int'),
('acceleration','double'),('year','int'),('origin','string'),('name','string')]
}),
("2", {
"displayName": "Sample retail sales transactions, January 2009",
"url": "https://raw.githubusercontent.com/ibm-watson-data-lab/open-data/master/salesjan2009/salesjan2009.csv",
"topic": "Economy & Business",
"publisher": "IBM Cloud Data Services"
}),
("3", {
"displayName": "Total population by country",
"url": "https://apsportal.ibm.com/exchange-api/v1/entries/889ca053a19986a4445839358a91963e/data?accessKey=657b130d504ab539947e51b50f0e338e",
"topic": "Society",
"publisher": "IBM Cloud Data Services"
}),
("4", {
"displayName": "GoSales Transactions for Naive Bayes Model",
"url": "https://apsportal.ibm.com/exchange-api/v1/entries/8044492073eb964f46597b4be06ff5ea/data?accessKey=bec2ed69d9c84bed53826348cdc5690b",
"topic": "Leisure",
"publisher": "IBM"
}),
("5", {
"displayName": "Election results by County",
"url": "https://openobjectstore.mybluemix.net/Election/county_election_results.csv",
"topic": "Society",
"publisher": "IBM"
}),
("6", {
"displayName": "Million dollar home sales in NE Mass late 2016",
"url": "https://openobjectstore.mybluemix.net/misc/milliondollarhomes.csv",
"topic": "Economy & Business",
"publisher": "Redfin.com"
}),
("7", {
"displayName": "Boston Crime data, 2-week sample",
"url": "https://raw.githubusercontent.com/ibm-watson-data-lab/open-data/master/crime/boston_crime_sample.csv",
"topic": "Society",
"publisher": "City of Boston"
})
])
@scalaGateway
#Use of progress Monitor doesn't render correctly when previewed a saved notebook, turning it off until solution is found
useProgressMonitor = False | 44.785714 | 205 | 0.606273 | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from six import iteritems
import pixiedust
from pixiedust.utils.shellAccess import ShellAccess
from pixiedust.utils.template import PixiedustTemplateEnvironment
from pixiedust.utils.environment import Environment,scalaGateway
import pandas as pd
import uuid
import tempfile
from collections import OrderedDict
from IPython.display import display, HTML, Javascript
try:
from urllib.request import Request, urlopen, URLError, HTTPError
except ImportError:
from urllib2 import Request, urlopen, URLError, HTTPError
dataDefs = OrderedDict([
("1", {
"displayName": "Car performance data",
"url": "https://github.com/ibm-watson-data-lab/open-data/raw/master/cars/cars.csv",
"topic": "transportation",
"publisher": "IBM",
"schema2": [('mpg','int'),('cylinders','int'),('engine','double'),('horsepower','int'),('weight','int'),
('acceleration','double'),('year','int'),('origin','string'),('name','string')]
}),
("2", {
"displayName": "Sample retail sales transactions, January 2009",
"url": "https://raw.githubusercontent.com/ibm-watson-data-lab/open-data/master/salesjan2009/salesjan2009.csv",
"topic": "Economy & Business",
"publisher": "IBM Cloud Data Services"
}),
("3", {
"displayName": "Total population by country",
"url": "https://apsportal.ibm.com/exchange-api/v1/entries/889ca053a19986a4445839358a91963e/data?accessKey=657b130d504ab539947e51b50f0e338e",
"topic": "Society",
"publisher": "IBM Cloud Data Services"
}),
("4", {
"displayName": "GoSales Transactions for Naive Bayes Model",
"url": "https://apsportal.ibm.com/exchange-api/v1/entries/8044492073eb964f46597b4be06ff5ea/data?accessKey=bec2ed69d9c84bed53826348cdc5690b",
"topic": "Leisure",
"publisher": "IBM"
}),
("5", {
"displayName": "Election results by County",
"url": "https://openobjectstore.mybluemix.net/Election/county_election_results.csv",
"topic": "Society",
"publisher": "IBM"
}),
("6", {
"displayName": "Million dollar home sales in NE Mass late 2016",
"url": "https://openobjectstore.mybluemix.net/misc/milliondollarhomes.csv",
"topic": "Economy & Business",
"publisher": "Redfin.com"
}),
("7", {
"displayName": "Boston Crime data, 2-week sample",
"url": "https://raw.githubusercontent.com/ibm-watson-data-lab/open-data/master/crime/boston_crime_sample.csv",
"topic": "Society",
"publisher": "City of Boston"
})
])
@scalaGateway
def sampleData(dataId=None):
    """List or load one of PixieDust's sample datasets.

    With no argument, prints the catalog of available sample data.
    With a ``dataId`` (key into the module-level ``dataDefs``) or a
    http/https/file URL, downloads the data and returns the resulting
    DataFrame (Spark or pandas, depending on the environment).
    """
    global dataDefs
    return SampleData(dataDefs).sampleData(dataId)
class SampleData(object):
    """Resolves a sample-data id or URL into a DataFrame.

    Dispatches on the identifier, downloads via :class:`Downloader`, and
    loads the CSV with Spark (v1 or v2) or pandas depending on the
    detected environment.
    """

    # Shared template environment used to render the catalog HTML.
    env = PixiedustTemplateEnvironment()

    def __init__(self, dataDefs):
        self.dataDefs = dataDefs

    def sampleData(self, dataId = None):
        """Entry point: print the catalog, or load by id / URL."""
        if dataId is None:
            self.printSampleDataList()
        # NOTE(review): membership test uses the module-level ``dataDefs``
        # rather than ``self.dataDefs`` — identical today since the caller
        # passes the global, but inconsistent; verify before changing.
        elif str(dataId) in dataDefs:
            return self.loadSparkDataFrameFromSampleData(dataDefs[str(dataId)])
        elif "https://" in str(dataId) or "http://" in str(dataId) or "file://" in str(dataId):
            return self.loadSparkDataFrameFromUrl(str(dataId))
        else:
            print("Unknown sample data identifier. Please choose an id from the list below")
            self.printSampleDataList()

    def printSampleDataList(self):
        """Render the sample-data catalog as HTML in the notebook."""
        display( HTML( self.env.getTemplate("sampleData.html").render( dataDefs = iteritems(self.dataDefs) ) ))
        #for key, val in iteritems(self.dataDefs):
        #    print("{0}: {1}".format(key, val["displayName"]))

    def dataLoader(self, path, schema=None):
        """Load the CSV at ``path`` with the best available engine.

        Spark 1 uses the databricks csv package, Spark 2 uses the native
        reader, anything else falls back to pandas. ``schema`` is a list
        of (name, type) pairs with type in {'int', 'double', other=string}.
        """
        if schema is not None and Environment.hasSpark:
            from pyspark.sql.types import StructType,StructField,IntegerType,DoubleType,StringType
            def getType(t):
                # Map the simple type names used in dataDefs to Spark types.
                if t == 'int':
                    return IntegerType()
                elif t == 'double':
                    return DoubleType()
                else:
                    return StringType()
        if Environment.sparkVersion == 1:
            print("Loading file using 'com.databricks.spark.csv'")
            load = ShellAccess.sqlContext.read.format('com.databricks.spark.csv')
            if schema is not None:
                return load.options(header='true', mode="DROPMALFORMED").load(path, schema=StructType([StructField(item[0], getType(item[1]), True) for item in schema]))
            else:
                return load.options(header='true', mode="DROPMALFORMED", inferschema='true').load(path)
        elif Environment.sparkVersion == 2:
            print("Loading file using 'SparkSession'")
            if schema is not None:
                return ShellAccess.SparkSession.builder.getOrCreate().read.csv(path, header=True, mode="DROPMALFORMED", schema=StructType([StructField(item[0], getType(item[1]), True) for item in schema]))
            else:
                return ShellAccess.SparkSession.builder.getOrCreate().read.csv(path, header=True, mode="DROPMALFORMED", inferSchema='true')
        else:
            print("Loading file using 'pandas'")
            return pd.read_csv(path)

    def loadSparkDataFrameFromSampleData(self, dataDef):
        """Download a catalog entry and load it into a DataFrame."""
        return Downloader(dataDef).download(self.dataLoader)

    def loadSparkDataFrameFromUrl(self, dataUrl):
        """Build a synthetic dataDef for an arbitrary URL and load it."""
        i = dataUrl.rfind('/')
        dataName = dataUrl[(i+1):]  # NOTE(review): computed but unused
        dataDef = {
            "displayName": dataUrl,
            "url": dataUrl
        }
        return Downloader(dataDef).download(self.dataLoader)
#Use of progress Monitor doesn't render correctly when previewed a saved notebook, turning it off until solution is found
useProgressMonitor = False
class Downloader(object):
    """Streams a sample-data URL to a temp file and feeds it to a loader.

    Caches the downloaded path back into ``dataDef["path"]`` so repeated
    calls for the same dataDef skip the network. Progress is rendered in
    the notebook only when the module flag ``useProgressMonitor`` is on.
    """

    def __init__(self, dataDef):
        self.dataDef = dataDef
        self.headers = {"User-Agent": "PixieDust Sample Data Downloader/1.0"}
        # Short unique suffix so several progress bars can coexist on a page.
        self.prefix = str(uuid.uuid4())[:8]

    def download(self, dataLoader):
        """Ensure the data is on disk, then return dataLoader(path, schema)."""
        displayName = self.dataDef["displayName"]
        bytesDownloaded = 0
        if "path" in self.dataDef:
            # Already downloaded earlier in this session: reuse the file.
            path = self.dataDef["path"]
        else:
            url = self.dataDef["url"]
            req = Request(url, None, self.headers)
            print("Downloading '{0}' from {1}".format(displayName, url))
            # delete=False: the file must outlive this block so the loader
            # (and later calls) can read it; the path is cached in dataDef.
            with tempfile.NamedTemporaryFile(delete=False) as f:
                bytesDownloaded = self.write(urlopen(req), f)
                path = f.name
                self.dataDef["path"] = path = f.name
        if path:
            try:
                if bytesDownloaded > 0:
                    print("Downloaded {} bytes".format(bytesDownloaded))
                print("Creating {1} DataFrame for '{0}'. Please wait...".format(displayName, 'pySpark' if Environment.hasSpark else 'pandas'))
                return dataLoader(path, self.dataDef.get("schema", None))
            finally:
                # NOTE(review): ``finally`` runs even when dataLoader raises,
                # so this "Successfully created" message can print on failure.
                print("Successfully created {1} DataFrame for '{0}'".format(displayName, 'pySpark' if Environment.hasSpark else 'pandas'))

    def report(self, bytes_so_far, chunk_size, total_size):
        """Render/update the notebook progress bar (no-op unless enabled)."""
        if useProgressMonitor:
            if bytes_so_far == 0:
                # First call: emit the progress-bar HTML scaffold.
                display( HTML( """
                    <div>
                        <span id="pm_label{0}">Starting download...</span>
                        <progress id="pm_progress{0}" max="100" value="0" style="width:200px"></progress>
                    </div>""".format(self.prefix)
                    )
                )
            else:
                # Subsequent calls: update label and bar via Javascript.
                percent = float(bytes_so_far) / total_size
                percent = round(percent*100, 2)
                display(
                    Javascript("""
                        $("#pm_label{prefix}").text("{label}");
                        $("#pm_progress{prefix}").attr("value", {percent});
                    """.format(prefix=self.prefix, label="Downloaded {0} of {1} bytes".format(bytes_so_far, total_size), percent=percent))
                )

    def write(self, response, file, chunk_size=8192):
        """Copy ``response`` to ``file`` in chunks; return bytes written."""
        # Fall back to 100 when the server sends no Content-Length; the
        # total is then grown below as bytes actually arrive.
        total_size = response.headers['Content-Length'].strip() if 'Content-Length' in response.headers else 100
        total_size = int(total_size)
        bytes_so_far = 0
        self.report(bytes_so_far, chunk_size, total_size)
        while 1:
            chunk = response.read(chunk_size)
            bytes_so_far += len(chunk)
            if not chunk:
                break
            file.write(chunk)
            # Keep the reported total monotonically >= downloaded bytes.
            total_size = bytes_so_far if bytes_so_far > total_size else total_size
            self.report(bytes_so_far, chunk_size, total_size)
        return bytes_so_far
74c68dfa0c1af26f9531ec184b91550bd2fa0f44 | 568 | py | Python | plotting.py | Sumanshekhar17/3rd-year-project | b991311fe09fc7b181193a8d74d7f0ae98090331 | [
"MIT"
] | 2 | 2021-09-01T03:50:03.000Z | 2021-09-01T03:50:04.000Z | plotting.py | Sumanshekhar17/3rd-year-project | b991311fe09fc7b181193a8d74d7f0ae98090331 | [
"MIT"
] | null | null | null | plotting.py | Sumanshekhar17/3rd-year-project | b991311fe09fc7b181193a8d74d7f0ae98090331 | [
"MIT"
] | null | null | null | reset
set terminal postscript eps enhanced "Helvetica" 20 color
set output "velocity_field_cpu.eps"
unset logscale
set xrange [0 : 1]
set yrange [0 : 1]
set size square
set nokey
factor = 0.1
plot "velocity_cpu_128.dat" using 1:2:($3*factor):($4*factor) every 2 with vec lc 3
set output "velocity_field_gpu.eps"
unset logscale
set xrange [0 : 1]
set yrange [0 : 1]
set nokey
factor = 0.1
#plot "velocity_gpu.dat" using 1:2:($3*factor):($4*factor) every 2 with vec lc 3
plot "velocity_gpu_128_share.dat" using 1:2:($3*factor):($4*factor) every 2 with vec lc 3
| 20.285714 | 89 | 0.721831 | reset
# Gnuplot script: render CPU and GPU velocity fields as EPS vector plots.
# Each .dat file supplies x, y, vx, vy columns; arrows are scaled by
# `factor` and every 2nd point is drawn to keep the plot readable.
set terminal postscript eps enhanced "Helvetica" 20 color

# --- CPU result ---
set output "velocity_field_cpu.eps"
unset logscale
set xrange [0 : 1]
set yrange [0 : 1]
set size square
set nokey
factor = 0.1
plot "velocity_cpu_128.dat" using 1:2:($3*factor):($4*factor) every 2 with vec lc 3

# --- GPU result (shared-memory variant) ---
set output "velocity_field_gpu.eps"
unset logscale
set xrange [0 : 1]
set yrange [0 : 1]
set nokey
factor = 0.1
#plot "velocity_gpu.dat" using 1:2:($3*factor):($4*factor) every 2 with vec lc 3
plot "velocity_gpu_128_share.dat" using 1:2:($3*factor):($4*factor) every 2 with vec lc 3
55d07cc127592663522871df6aa0279d8f606d72 | 7,940 | py | Python | python/GtBurst/dataCollector.py | fermi-lat/pyBurstAnalysisGUI | add53fe77ef71cb64a27751f024fb914f7cc0863 | [
"BSD-3-Clause"
] | 2 | 2019-03-06T15:48:20.000Z | 2020-05-02T15:02:57.000Z | python/GtBurst/dataCollector.py | fermi-lat/pyBurstAnalysisGUI | add53fe77ef71cb64a27751f024fb914f7cc0863 | [
"BSD-3-Clause"
] | 5 | 2019-01-23T11:35:41.000Z | 2019-03-29T17:36:19.000Z | python/GtBurst/dataCollector.py | fermi-lat/pyBurstAnalysisGUI | add53fe77ef71cb64a27751f024fb914f7cc0863 | [
"BSD-3-Clause"
] | null | null | null | #This is a mother class to get GBM and LLE data files (lle, pha and rsp)
#Author: giacomov@slac.stanford.edu
import os,sys,glob,string,errno,shutil
from GtBurst.my_fits_io import pyfits
from GtBurst.GtBurstException import GtBurstException
import ftplib, socket
import time
try:
from tkinter import *
except:
#Silently accept when tkinter import fail (no X server?)
pass
from GtBurst import downloadCallback
try:
from GtBurst.lleProgressBar import Meter
except:
#Silently accept when tkinter import fail (no X server?)
pass
pass
| 33.221757 | 210 | 0.519144 | #This is a mother class to get GBM and LLE data files (lle, pha and rsp)
#Author: giacomov@slac.stanford.edu
import os,sys,glob,string,errno,shutil
from GtBurst.my_fits_io import pyfits
from GtBurst.GtBurstException import GtBurstException
import ftplib, socket
import time
try:
from tkinter import *
except:
#Silently accept when tkinter import fail (no X server?)
pass
from GtBurst import downloadCallback
try:
from GtBurst.lleProgressBar import Meter
except:
#Silently accept when tkinter import fail (no X server?)
pass
class dataCollector(object):
    """Downloads GBM/LLE trigger data products over FTPS.

    Given an instrument and a GRB/trigger name, mirrors the files for
    trigger ``bn<grbName>`` from the remote repository into a local
    per-trigger directory, optionally filtering by product type
    (TTE/CSPEC/RSP/CTIME) and showing a Tk progress window when a
    ``parent`` widget is supplied in kwargs.
    """

    def __init__(self,instrument,grbName,dataRepository=None,localRepository=None,
                 getTTE=True,getCSPEC=True,getRSP=True,getCTIME=True,**kwargs):
        # Optional Tk parent widget: enables the graphical progress window.
        self.parent = None
        for key in list(kwargs.keys()):
            if key.lower()=='parent' : self.parent = kwargs['parent']
        self.instrument = instrument
        # Accept either "bn080916009" or "080916009" and normalize.
        if(grbName.find("bn")==0):
            self.grbName = grbName[2:]
        else:
            self.grbName = grbName
        pass
        self.trigName = "bn%s" %(self.grbName)
        self.dataRepository = dataRepository
        # All files for this trigger go into <localRepository>/bn<grbName>.
        self.localRepository = os.path.join(localRepository,self.trigName)
        # Per-product-type download switches.
        self.getCTIME = getCTIME
        self.getCSPEC = getCSPEC
        self.getTTE = getTTE
        self.getRSP = getRSP
        pass

    def makeLocalDir(self):
        """Create the local destination directory (ok if it already exists)."""
        try:
            os.makedirs(self.localRepository)
            message = "just created"
        except OSError as e:
            if e.errno != errno.EEXIST:
                #Couldn't create the directory
                raise
            else:
                #Directory already existed
                message = "already existent"
            pass
        print(("Local data repository (destination): %s (%s)" %(self.localRepository,message)))
        pass

    def downloadDirectoryWithFTP(self,address,filenames=None,namefilter=None):
        """Mirror a remote FTPS directory into the local repository.

        ``address`` is "ftps://host/dir" or "host/dir". When ``filenames``
        is None the remote listing is used; ``namefilter`` keeps only
        matching names. Retries a file indefinitely on connection loss or
        size mismatch. Raises GtBurstException on connect/cwd failures.
        """
        #Split the address into server and directory parts
        if(address.find("ftps://")==0):
            serverAddress = address.split("/")[2]
            directory = "/"+"/".join(address.split("/")[3:])
        else:
            serverAddress = address.split("/")[0]
            directory = "/"+"/".join(address.split("/")[1:])
        pass
        #Open FTP session
        try:
            ftp = ftplib.FTP_TLS(serverAddress,"anonymous",'','',timeout=60)
        except socket.error as socketerror:
            raise GtBurstException(11,"Error when connecting: %s" % os.strerror(socketerror.errno))
        try:
            ftp.login()
            ftp.prot_p()
        except:
            #Maybe we are already logged in
            try:
                ftp.cwd('/')
            except:
                #nope! don't know what is happening
                raise
            pass
        pass
        print("done")
        self.makeLocalDir()
        try:
            ftp.cwd(directory)
        except:
            #Remove the empty directory just created
            try:
                os.rmdir(self.localRepository)
            except:
                pass
            raise GtBurstException(5,"The remote directory %s is not accessible. This kind of data is probably not available for trigger %s, or the server is offline." %(serverAddress+directory,self.trigName))
        pass
        if(filenames is None):
            # No explicit list given: take the remote directory listing.
            filenames = []
            ftp.retrlines('NLST', filenames.append)
        pass
        maxTrials = 10  # NOTE(review): unused; retries below are unbounded
        #Build the window for the progress
        if(self.parent is None):
            #Do not use any graphical output
            root = None
            m1 = None
        else:
            #make a transient window
            root = Toplevel()
            root.transient(self.parent)
            root.grab_set()
            l = Label(root,text='Downloading...')
            l.grid(row=0,column=0)
            m1 = Meter(root, 500,20,'grey','blue',0,None,None,'white',relief='ridge', bd=3)
            m1.grid(row=1,column=0)
            m1.set(0.0,'Download started...')
            l2 = Label(root,text='Total progress:')
            l2.grid(row=2,column=0)
            m2 = Meter(root, 500,20,'grey','blue',0,None,None,'white',relief='ridge', bd=3)
            m2.grid(row=3,column=0)
            m2.set(0.0,'Download started...')
        pass
        for i,filename in enumerate(filenames):
            if(namefilter is not None and filename.find(namefilter)<0):
                #Filename does not match, do not download it
                continue
            if(root is not None):
                m2.set((float(i))/len(filenames))
            # Decide whether this product type was requested.
            skip = False
            if(not self.getCSPEC):
                if(filename.find("cspec")>=0):
                    skip = True
            if(not self.getTTE):
                if(filename.find("tte")>=0):
                    skip = True
            if(not self.getRSP):
                if(filename.find("rsp")>=0):
                    skip = True
            if(not self.getCTIME):
                if(filename.find("ctime")>=0):
                    skip = True
            pass
            if(skip):
                print(("Skipping %s ..." %(filename)))
                continue
            else:
                print(("Retrieving %s ..." %(filename)), end=' ')
            if(root is not None):
                l['text'] = "Downloading %s..." % filename
            done = False
            local_filename = os.path.join(self.localRepository,filename)
            # Retry loop: repeats on connection loss or truncated download.
            while(done==False):
                try:
                    f = open(local_filename, 'wb')
                except:
                    raise IOError("Could not open file %s for writing. Do you have write permission on %s?" %(local_filename,self.localRepository))
                g = downloadCallback.get_size()
                try:
                    # Ask the server for the file size via a DIR listing.
                    ftp.dir(filename,g)
                    totalsize = g.size#*1024
                    printer = downloadCallback.Callback(totalsize, f,m1)
                    ftp.retrbinary('RETR '+ filename, printer)
                    localSize = f.tell()
                    f.close()
                    if(localSize!=totalsize):
                        #This will be catched by the next "except", which will retry
                        raise
                    done = True
                except:
                    print("\nConnection lost! Trying to reconnect...")
                    #Reconnect and retry the same file
                    f.close()
                    try:
                        ftp.close()
                    except:
                        pass
                    ftp = ftplib.FTP_TLS(serverAddress,"anonymous",'','',timeout=60)
                    try:
                        ftp.login()
                        ftp.prot_p()
                    except:
                        pass
                    ftp.cwd(directory)
                    done = False
                    continue
                pass
            print(" done")
        pass
        ftp.close()
        print("\nDownload files done!")
        if(root is not None):
            m2.set(1.0)
            root.destroy()
        pass
        pass

    def getFTP(self,errorCode=None,namefilter=None):
        """Download this trigger's files from the standard repository layout.

        Remote path: <dataRepository>/<instrument>/triggers/<year>/bn<grb>/current
        """
        #Path in the repository is [year]/bn[grbname]/current
        #Get the year (first two digits of the GRB name, prefixed with "20")
        year = "20%s" %(self.grbName[0:2])
        #trigger number
        triggerNumber = "bn%s" %(self.grbName)
        remotePath = "%s/%s/triggers/%s/%s/current" %(self.dataRepository,self.instrument,year,triggerNumber)
        self.downloadDirectoryWithFTP(remotePath,None,namefilter)
        pass
    pass
| 7,180 | 161 | 25 |
dcd3f1ee0dfd454993014e91127c0cc526852235 | 1,692 | py | Python | dwitter/feed/urls.py | PythEch/dwitter | 5b48ce2f6ebc6267dc942043efcbc1010079f6d4 | [
"Apache-2.0"
] | null | null | null | dwitter/feed/urls.py | PythEch/dwitter | 5b48ce2f6ebc6267dc942043efcbc1010079f6d4 | [
"Apache-2.0"
] | null | null | null | dwitter/feed/urls.py | PythEch/dwitter | 5b48ce2f6ebc6267dc942043efcbc1010079f6d4 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',
views.feed, {'page_nr': 1, 'sort': 'hot'}, name='root'),
url(r'^page/(?P<page_nr>\d+)$',
views.feed, {'sort': 'new'}, name='feed_page'),
url(r'^hot$',
views.feed, {'page_nr': 1, 'sort': 'hot'}, name='hot_feed'),
url(r'^hot/page/(?P<page_nr>\d+)$',
views.feed, {'sort': 'hot'}, name='hot_feed_page'),
url(r'^top$',
views.feed, {'page_nr': 1, 'sort': 'top'}, name='top_feed'),
url(r'^top/page/(?P<page_nr>\d+)$',
views.feed, {'sort': 'top'}, name='top_feed_page'),
url(r'^new$',
views.feed, {'page_nr': 1, 'sort': 'new'}, name='new_feed'),
url(r'^new/page/(?P<page_nr>\d+)$',
views.feed, {'sort': 'new'}, name='new_feed_page'),
url(r'^random$',
views.feed, {'page_nr': 1, 'sort': 'random'}, name='random_feed'),
url(r'^random/page/(?P<page_nr>\d+)$',
views.feed, {'sort': 'random'}, name='random_feed_page'),
url(r'^d/(?P<dweet_id>\d+)$',
views.dweet_show, name='dweet_show'),
url(r'^d/(?P<dweet_id>\d+)/reply$',
views.dweet_reply, name='dweet_reply'),
url(r'^d/(?P<dweet_id>\d+)/delete$',
views.dweet_delete, name='dweet_delete'),
url(r'^d/(?P<dweet_id>\d+)/like$', views.like, name='like'),
url(r'^e/(?P<dweet_id>\d+)$',
views.dweet_embed, name='dweet_embed'),
url(r'^h/(?P<hashtag_name>[\w._]+)$', views.view_hashtag, {'page_nr': 1}, name='view_hashtag'),
url(r'^h/(?P<hashtag_name>[\w._]+)/page/(?P<page_nr>\d+)$',
views.view_hashtag, name='view_hashtag_page'),
url(r'^dweet$', views.dweet, name='dweet'),
]
| 35.25 | 99 | 0.548463 | from django.conf.urls import url
from . import views
# URL routes for the dwitter feed app. Quirk kept from the original code:
# the bare paginated route ('feed_page') serves the "new" sort while the
# root page serves "hot".
urlpatterns = [
    # Feed pages: one landing route per sort order plus a paginated variant.
    url(r'^$',
        views.feed, {'page_nr': 1, 'sort': 'hot'}, name='root'),
    url(r'^page/(?P<page_nr>\d+)$',
        views.feed, {'sort': 'new'}, name='feed_page'),
    url(r'^hot$',
        views.feed, {'page_nr': 1, 'sort': 'hot'}, name='hot_feed'),
    url(r'^hot/page/(?P<page_nr>\d+)$',
        views.feed, {'sort': 'hot'}, name='hot_feed_page'),
    url(r'^top$',
        views.feed, {'page_nr': 1, 'sort': 'top'}, name='top_feed'),
    url(r'^top/page/(?P<page_nr>\d+)$',
        views.feed, {'sort': 'top'}, name='top_feed_page'),
    url(r'^new$',
        views.feed, {'page_nr': 1, 'sort': 'new'}, name='new_feed'),
    url(r'^new/page/(?P<page_nr>\d+)$',
        views.feed, {'sort': 'new'}, name='new_feed_page'),
    url(r'^random$',
        views.feed, {'page_nr': 1, 'sort': 'random'}, name='random_feed'),
    url(r'^random/page/(?P<page_nr>\d+)$',
        views.feed, {'sort': 'random'}, name='random_feed_page'),
    # Single-dweet routes: show, reply, delete, like, and embeddable view.
    url(r'^d/(?P<dweet_id>\d+)$',
        views.dweet_show, name='dweet_show'),
    url(r'^d/(?P<dweet_id>\d+)/reply$',
        views.dweet_reply, name='dweet_reply'),
    url(r'^d/(?P<dweet_id>\d+)/delete$',
        views.dweet_delete, name='dweet_delete'),
    url(r'^d/(?P<dweet_id>\d+)/like$', views.like, name='like'),
    url(r'^e/(?P<dweet_id>\d+)$',
        views.dweet_embed, name='dweet_embed'),
    # Hashtag listing (with pagination) and dweet submission.
    url(r'^h/(?P<hashtag_name>[\w._]+)$', views.view_hashtag, {'page_nr': 1}, name='view_hashtag'),
    url(r'^h/(?P<hashtag_name>[\w._]+)/page/(?P<page_nr>\d+)$',
        views.view_hashtag, name='view_hashtag_page'),
    url(r'^dweet$', views.dweet, name='dweet'),
]
| 0 | 0 | 0 |
7c87b5aa45a8066517718e04ad9e06d160f6d5f0 | 36 | py | Python | wsgi.py | svetlyak40wt/cony | bf8d52ab92782944d9ff40799b52298fd6f9bc8f | [
"BSD-3-Clause"
] | 5 | 2015-01-12T12:21:11.000Z | 2021-12-09T14:28:41.000Z | wsgi.py | svetlyak40wt/cony | bf8d52ab92782944d9ff40799b52298fd6f9bc8f | [
"BSD-3-Clause"
] | null | null | null | wsgi.py | svetlyak40wt/cony | bf8d52ab92782944d9ff40799b52298fd6f9bc8f | [
"BSD-3-Clause"
] | 1 | 2019-06-10T16:30:52.000Z | 2019-06-10T16:30:52.000Z | from cony import wsgi
app = wsgi()
| 9 | 21 | 0.694444 | from cony import wsgi
app = wsgi()
| 0 | 0 | 0 |
71af2d320f928f46da13c0c5cc7ef247457cafcf | 1,202 | py | Python | devon/makers/configuremake.py | joehewitt/devon | 5b11265e5eae3db7bfaeb49543a2a6293bd15557 | [
"BSD-3-Clause"
] | 3 | 2015-12-25T16:26:02.000Z | 2016-05-08T18:19:25.000Z | devon/makers/configuremake.py | joehewitt/devon | 5b11265e5eae3db7bfaeb49543a2a6293bd15557 | [
"BSD-3-Clause"
] | null | null | null | devon/makers/configuremake.py | joehewitt/devon | 5b11265e5eae3db7bfaeb49543a2a6293bd15557 | [
"BSD-3-Clause"
] | 1 | 2021-07-13T07:17:01.000Z | 2021-07-13T07:17:01.000Z |
import devon.maker, devon.make
from devon.tags import *
import re, os.path
# **************************************************************************************************
| 34.342857 | 100 | 0.543261 |
import devon.maker, devon.make
from devon.tags import *
import re, os.path
# **************************************************************************************************
class ConfigureMake(devon.maker.MakerManyToOne):
    """Maker that drives an autoconf-style build: ``make clean``, then
    ``./configure`` (or the project's own configure command), then ``make``,
    and finally copies the build product to the requested target path.
    """
    def getTarget(self, project):
        """Return the build product path declared by the project config."""
        return project.config.output
    def build(self, project, out, source, target):
        """Run clean/configure/make and copy the product to *target*.

        Returns the result of the final (copy) command.
        """
        status = devon.make.executeCommand(project, self, "make clean", out)
        # Fall back to the conventional ./configure when the project does
        # not supply a configure command of its own.
        configureCommand = project.config.configure or "./configure"
        status = devon.make.executeCommand(project, self, configureCommand, out)
        status = devon.make.executeCommand(project, self, "make", out)
        copyCommand = "cp %s %s" % (self.getTarget(project), target)
        status = devon.make.executeCommand(project, self, copyCommand, out)
        return status
    def printAction(self, project, out, target):
        """Emit the progress message shown while this maker runs."""
        stream = out << Block << "Configuring and making " << FileLink(path=target)
        stream << target << Close << "..." << Close
    def printResult(self, project, out, text):
        """Emit the captured build output as a code block."""
        out << CodeBlock << text << Close
| 839 | 27 | 158 |
3e65c79541e7c85c3c24698560ce761bee060730 | 956 | py | Python | Chapter 1/direct_surface.py | indrag49/Computational-Stat-Mech | 0877f54a0245fce815f03478f4fb219fd6314951 | [
"MIT"
] | 19 | 2018-06-29T12:22:47.000Z | 2022-03-10T03:18:18.000Z | Chapter 1/direct_surface.py | indrag49/Computational-Stat-Mech | 0877f54a0245fce815f03478f4fb219fd6314951 | [
"MIT"
] | null | null | null | Chapter 1/direct_surface.py | indrag49/Computational-Stat-Mech | 0877f54a0245fce815f03478f4fb219fd6314951 | [
"MIT"
] | 7 | 2018-11-30T01:56:36.000Z | 2021-12-23T15:29:56.000Z | import random
import math as m
def direct_surface(d):
""" d is the dimension of the sphere, sigma is the standard deviation of the gaussian"""
sigma=1./m.sqrt(d)
S=0
x=[0]*d
for k in range(d):
x[k]=gauss(sigma)[1]
S+=x[k]**2
for k in range(d):x[k]/=m.sqrt(S)
return x
X=[]
Y=[]
Z=[]
for i in range(5000):
l=direct_surface(3)
X+=[l[0], ]
Y+=[l[1], ]
Z+=[l[2], ]
fig=plt.figure()
ax=plt.axes(projection='3d')
ax.scatter3D(X, Y, Z, c=X, cmap="prism_r", s=10)
ax.set_title("direct_surface.png (d=3)")
ax.set_xlabel("X")
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
| 22.761905 | 96 | 0.528243 | import random
import math as m
def gauss(sigma):
    """Draw one 2-D Gaussian sample via the Box-Muller transform.

    Consumes exactly two uniform variates per call and returns the pair
    [x, y] of independent N(0, sigma^2) coordinates.
    """
    angle = random.uniform(0, 2 * m.pi)
    upsilon = -m.log(random.uniform(0, 1))
    radius = sigma * m.sqrt(2 * upsilon)
    return [radius * m.cos(angle), radius * m.sin(angle)]
def direct_surface(d):
    """ d is the dimension of the sphere, sigma is the standard deviation of the gaussian"""
    # Sample d independent Gaussians (using the y component of each pair),
    # then project the vector onto the unit sphere by dividing by its norm.
    sigma = 1. / m.sqrt(d)
    coords = [gauss(sigma)[1] for _ in range(d)]
    norm = m.sqrt(sum(c ** 2 for c in coords))
    return [c / norm for c in coords]
X=[]
Y=[]
Z=[]
for i in range(5000):
l=direct_surface(3)
X+=[l[0], ]
Y+=[l[1], ]
Z+=[l[2], ]
fig=plt.figure()
ax=plt.axes(projection='3d')
ax.scatter3D(X, Y, Z, c=X, cmap="prism_r", s=10)
ax.set_title("direct_surface.png (d=3)")
ax.set_xlabel("X")
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
| 181 | 0 | 23 |
8e18cce450eda2b2ae936f7b5a3c009fa87b2aae | 1,116 | py | Python | neon/ipc/rpc_client.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | 1 | 2018-07-17T16:54:58.000Z | 2018-07-17T16:54:58.000Z | neon/ipc/rpc_client.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | null | null | null | neon/ipc/rpc_client.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | 2 | 2016-06-09T13:05:00.000Z | 2021-02-18T14:18:15.000Z | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from rpc import RpcClient
import sys
if len(sys.argv) != 2:
print "Usage: python rpc_client <rpc_queue_name>"
sys.exit(1)
# declare an rpc client listening on queue specified by first arg
neon_rpc = RpcClient(sys.argv[1])
arg = int(raw_input("Give an integer to pow: "))
print " [x] Making request"
response = neon_rpc.call(arg)
print " [.] Got %r" % (response,)
| 37.2 | 78 | 0.634409 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# Python 2 demo script: sends one integer to an RPC server (see the sibling
# `rpc` module) over the queue named on the command line and prints the reply.
from rpc import RpcClient
import sys
if len(sys.argv) != 2:
    print "Usage: python rpc_client <rpc_queue_name>"
    sys.exit(1)
# declare an rpc client listening on queue specified by first arg
neon_rpc = RpcClient(sys.argv[1])
# Prompt for an integer; RpcClient.call blocks until the server responds.
arg = int(raw_input("Give an integer to pow: "))
print " [x] Making request"
response = neon_rpc.call(arg)
print " [.] Got %r" % (response,)
| 0 | 0 | 0 |
7593f06cc9b1c45fe4e4bc186067be86fcc1e347 | 963 | py | Python | tests/file_io_standard_gzip_test.py | RJMW/pymzML | fe45a41e7bd599929e7626a32d7ec26178fc475d | [
"MIT"
] | 117 | 2015-01-23T22:34:32.000Z | 2022-03-31T22:09:06.000Z | tests/file_io_standard_gzip_test.py | MKoesters/pymzML | ac9c73a24fd08b3e3f64aef7ba113e0f0adfc39a | [
"MIT"
] | 217 | 2015-02-12T05:33:24.000Z | 2022-03-30T20:34:59.000Z | tests/file_io_standard_gzip_test.py | MKoesters/pymzML | ac9c73a24fd08b3e3f64aef7ba113e0f0adfc39a | [
"MIT"
] | 73 | 2015-04-09T16:20:24.000Z | 2022-02-22T03:05:49.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Part of pymzml test cases
"""
import os
from pymzml.file_classes.standardGzip import StandardGzip
import unittest
import random
from pymzml.spec import Spectrum, Chromatogram
import re
import struct
import test_file_paths
class StandardGzipTest(unittest.TestCase):
""""
"""
def setUp(self):
"""
"""
paths = test_file_paths.paths
self.File = StandardGzip(paths[1], "latin-1")
def tearDown(self):
"""
"""
self.File.close()
def test_getitem_5(self):
"""
"""
ID = 5
spec = self.File[ID]
self.assertIsInstance(spec, Spectrum)
self.assertEqual(spec.ID, ID)
if __name__ == "__main__":
unittest.main(verbosity=3)
| 20.0625 | 57 | 0.607477 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Part of pymzml test cases
"""
import os
from pymzml.file_classes.standardGzip import StandardGzip
import unittest
import random
from pymzml.spec import Spectrum, Chromatogram
import re
import struct
import test_file_paths
class StandardGzipTest(unittest.TestCase):
    """Tests for pymzml's StandardGzip file wrapper: indexed access to
    spectra and chromatograms in a gzipped mzML file.
    """
    def setUp(self):
        """Open the gzipped test file (paths[1]) with latin-1 encoding."""
        paths = test_file_paths.paths
        self.File = StandardGzip(paths[1], "latin-1")
    def tearDown(self):
        """Close the file handle opened in setUp."""
        self.File.close()
    def test_getitem_5(self):
        """Integer indexing returns the Spectrum with the matching ID."""
        ID = 5
        spec = self.File[ID]
        self.assertIsInstance(spec, Spectrum)
        self.assertEqual(spec.ID, ID)
    def test_getitem_tic(self):
        # String key "TIC" returns the total-ion-current Chromatogram.
        ID = "TIC"
        chrom = self.File[ID]
        self.assertIsInstance(chrom, Chromatogram)
        self.assertEqual(chrom.ID, ID)
unittest.main(verbosity=3)
| 145 | 0 | 27 |
6b6f11960c3660e6819d245730828e01a7c206c6 | 2,542 | py | Python | src/source/sampler.py | thunlp/CrossET | d7a06d0325789ac4482c109fff0f0de9bd77c015 | [
"MIT"
] | null | null | null | src/source/sampler.py | thunlp/CrossET | d7a06d0325789ac4482c109fff0f0de9bd77c015 | [
"MIT"
] | null | null | null | src/source/sampler.py | thunlp/CrossET | d7a06d0325789ac4482c109fff0f0de9bd77c015 | [
"MIT"
] | null | null | null | import json
import time
from random import shuffle, randint
from tqdm import tqdm
| 28.244444 | 81 | 0.445712 | import json
import time
from random import shuffle, randint
from tqdm import tqdm
class sampler:
    """Batch sampler over JSON-line records with entity-typing labels.

    Each record in *data* is a JSON string whose 'y_str' field is a list of
    type labels. The class indexes records by label so that batches can pair
    each record with another record sharing its first label.

    NOTE: get_batches/n_way_k_shot depend on the module-level `shuffle` and
    `randint`; output is only reproducible if the caller seeds `random`.
    """
    def __init__(self, data, types, batch_size):
        # `data` is shuffled in place; `ind` maps label -> record indices.
        self.batch_size = batch_size
        self.data = data
        self.types = types
        self.n = len(data)
        self.ind = {}
        self.keys = []
        shuffle(self.data)
        for i in range(self.n):
            for ty in json.loads(self.data[i])['y_str']:
                self.ind.setdefault(ty, []).append(i)
        for k in self.ind:
            self.keys.append(k)
            self.ind[k] = list(set(self.ind[k])) # deduplicate indices per label
    def get_batches(self):
        """Yield batches of record pairs sharing their first 'y_str' label.

        Walks the data once; for each record it draws a random distinct
        partner with the same first label and appends both to the batch.
        A trailing batch smaller than batch_size is dropped.
        """
        j = 0
        lst_time = 0
        tmp = 1
        while j < self.n/tmp :
            samp = []
            while len(samp) < self.batch_size and j < self.n/tmp:
                i = j
                tyid = json.loads(self.data[i])['y_str'][0]
                if(len(self.ind[tyid]) > 1):
                    # Resample until the partner index differs from i.
                    i_sim = self.ind[tyid][randint(0, len(self.ind[tyid])-1)]
                    while i_sim == i:
                        i_sim = self.ind[tyid][randint(0, len(self.ind[tyid])-1)]
                    if(self.data[i] == self.data[i_sim]):
                        pass
                    else:
                        samp.append(self.data[i])
                        samp.append(self.data[i_sim])
                j += 1
            if len(samp) < self.batch_size:
                break
            yield samp
            # Progress print, throttled to at most once every 5 seconds.
            if time.time() - lst_time > 5 :
                print(str(i) + '/' + str(self.n/tmp))
                lst_time = time.time()
    def get_random_numbers(self,k,m):
        """Yield k distinct integers drawn uniformly from range(m)."""
        tmp = []
        for i in range(m):
            tmp.append(i)
        shuffle(tmp)
        for i in range(k):
            yield tmp[i]
    def n_way_k_shot(self):
        ''' fake

        Despite the name, this currently just yields consecutive
        batch_size slices of the (already shuffled) data; the commented
        block below is the unused per-label n-way-k-shot sampler, and
        `p` is only needed by that commented code.
        '''
        lst_time = 0
        m = int(self.n / self.batch_size)
        p = [0]*len(self.ind)
        for i in range(m):
            # samp = []
            # for jj in self.get_random_numbers(self.batch_size>>1,len(self.ind)):
            #     j = self.keys[jj]
            #     if len(self.ind[j]) == 0:
            #         continue
            #     for k in range(2):
            #         samp.append(self.data[self.ind[j][p[jj]%len(self.ind[j])]])
            #     p[jj] += 1
            samp=self.data[i*self.batch_size:(i+1)*self.batch_size]
            yield samp
            # Progress print, throttled to at most once every 3 seconds.
            if time.time() - lst_time > 3 :
                print(str(i) + '/' + str(m))
                lst_time = time.time()
| 1,602 | 838 | 23 |
c126561a59466caffcb2634c1a769879b9fe1447 | 1,347 | py | Python | examples/pca.py | SatyadevNtv/mctorch | ebc16e4437ad62858d95a8559262ee30191d36ff | [
"MIT"
] | 188 | 2018-09-22T14:18:17.000Z | 2022-03-10T01:20:25.000Z | examples/pca.py | SatyadevNtv/mctorch | ebc16e4437ad62858d95a8559262ee30191d36ff | [
"MIT"
] | 13 | 2019-02-07T02:16:58.000Z | 2022-02-24T15:00:19.000Z | examples/pca.py | SatyadevNtv/mctorch | ebc16e4437ad62858d95a8559262ee30191d36ff | [
"MIT"
] | 30 | 2018-10-02T18:54:50.000Z | 2022-03-02T01:56:59.000Z | import torch
import mctorch.nn as nn
import mctorch.optim as optim
import numpy as np
torch.manual_seed(0)
# Random data with high variance in first two dimension
X = torch.diag(torch.FloatTensor([3,2,1])).matmul(torch.randn(3,200))
X -= X.mean(axis=0)
# 1. Initialize Parameter
manifold_param = nn.Parameter(manifold=nn.Stiefel(3,2))
# 2. Define Cost - squared reconstruction error
# 3. Optimize
optimizer = optim.rAdagrad(params = [manifold_param], lr=1e-1)
# optimizer = optim.rSGD(params = [manifold_param], lr=1e-2)
cost_step = None
for epoch in range(1000):
cost_step = cost(X, manifold_param)
# print(cost_step)
cost_step.backward()
optimizer.step()
optimizer.zero_grad()
print(cost_step)
np_X = X.detach().numpy()
np_w = manifold_param.detach().numpy()
# 4. Test Results
estimated_projector = np_w @ np_w.T
eigenvalues, eigenvectors = np.linalg.eig(np_X @ np_X.T)
indices = np.argsort(eigenvalues)[::-1][:2]
span_matrix = eigenvectors[:, indices]
projector = span_matrix @ span_matrix.T
print("Frobenius norm error between estimated and closed-form projection "
"matrix:", np.linalg.norm(projector - estimated_projector)) | 28.0625 | 75 | 0.691166 | import torch
import mctorch.nn as nn
import mctorch.optim as optim
import numpy as np
torch.manual_seed(0)
# Random data with high variance in first two dimension
X = torch.diag(torch.FloatTensor([3,2,1])).matmul(torch.randn(3,200))
X -= X.mean(axis=0)
# 1. Initialize Parameter
manifold_param = nn.Parameter(manifold=nn.Stiefel(3,2))
# 2. Define Cost - squared reconstruction error
def cost(X, w):
    """Squared reconstruction error of projecting X onto the span of w.

    Computes ||(X - w w^T X)^2||, i.e. the Frobenius norm of the
    element-wise squared residual after rank-reduced reconstruction.
    """
    reconstruction = w @ (w.t() @ X)
    residual = X - reconstruction
    return torch.norm(residual ** 2)
# 3. Optimize: 1000 steps of mctorch's Riemannian Adagrad on the
# Stiefel-constrained parameter, minimizing the reconstruction error.
optimizer = optim.rAdagrad(params = [manifold_param], lr=1e-1)
# optimizer = optim.rSGD(params = [manifold_param], lr=1e-2)
cost_step = None
for epoch in range(1000):
    cost_step = cost(X, manifold_param)
    # print(cost_step)
    cost_step.backward()
    optimizer.step()
    optimizer.zero_grad()
print(cost_step)
np_X = X.detach().numpy()
np_w = manifold_param.detach().numpy()
# 4. Test Results: compare the learned projector w w^T against the
# closed-form PCA projector built from the top-2 eigenvectors of X X^T.
estimated_projector = np_w @ np_w.T
eigenvalues, eigenvectors = np.linalg.eig(np_X @ np_X.T)
# Indices of the two largest eigenvalues, in descending order.
indices = np.argsort(eigenvalues)[::-1][:2]
span_matrix = eigenvectors[:, indices]
projector = span_matrix @ span_matrix.T
print("Frobenius norm error between estimated and closed-form projection "
      "matrix:", np.linalg.norm(projector - estimated_projector))
fff190148a03e69459b59c42a7ffff8717696f9f | 147 | py | Python | level0/question81.py | kevin00000000/Python-programming-exercises | 87546906d817263ae7ddbd0276f0bb36e0d63c41 | [
"MIT"
] | null | null | null | level0/question81.py | kevin00000000/Python-programming-exercises | 87546906d817263ae7ddbd0276f0bb36e0d63c41 | [
"MIT"
] | null | null | null | level0/question81.py | kevin00000000/Python-programming-exercises | 87546906d817263ae7ddbd0276f0bb36e0d63c41 | [
"MIT"
] | null | null | null | import zlib
s = 'hello world!hello world!hello world!hello world!'
t = zlib.compress(s.encode())
print(t)
print(bytes(zlib.decompress(t)).decode()) | 29.4 | 54 | 0.734694 | import zlib
# Round-trip demo: deflate a repetitive string with zlib, print the
# compressed bytes, then inflate and print the recovered text.
s = 'hello world!hello world!hello world!hello world!'
t = zlib.compress(s.encode())
print(t)
restored = zlib.decompress(t)
print(restored.decode())
a6fca02226b0b52aa6be593066ec3394841fec73 | 883 | py | Python | Client_Android.py | Mandar-Sharma/Sockets | 687d61e89da29b208ca3cd4d8ca03d0b84193aa8 | [
"MIT"
] | null | null | null | Client_Android.py | Mandar-Sharma/Sockets | 687d61e89da29b208ca3cd4d8ca03d0b84193aa8 | [
"MIT"
] | null | null | null | Client_Android.py | Mandar-Sharma/Sockets | 687d61e89da29b208ca3cd4d8ca03d0b84193aa8 | [
"MIT"
] | null | null | null | import os
from socket import *
import time
os.chdir("/storage/emulated/0/Project")
s = socket(AF_INET,SOCK_STREAM)
host = "192.168.64.1"
port = 9010
s.connect((host,port))
filename = 'Text.txt'
clock_start = time.clock()
time_start = time.time()
#rb- readonly : Binary
f = open(filename, 'rb')
#1024 BufferSize
l = f.read(1024)
while (l):
s.send(l)
print('Sent ', repr(l))
l = f.read(1024)
f.close()
#s.close() vs shutdown - shutdown allows receiving pending data from sender
s.shutdown(SHUT_WR)
clock_end = time.clock()
time_end = time.time()
duration_clock = clock_end - clock_start
print 'clock: start = ',clock_start, ' end = ',clock_end
print 'clock: duration_clock = ', duration_clock
duration_time = time_end - time_start
print 'time: start = ',time_start, ' end = ',time_end
print 'time: duration_time = ', duration_time
# Python 2 file-transfer client: streams Text.txt over TCP to a hard-coded
# server address and reports CPU time and wall time taken for the transfer.
import os
from socket import *
import time
# Working directory on the Android device that holds the file to send.
os.chdir("/storage/emulated/0/Project")
s = socket(AF_INET,SOCK_STREAM)
host = "192.168.64.1"
port = 9010
s.connect((host,port))
filename = 'Text.txt'
# time.clock() measures processor time (removed in Python 3.8);
# time.time() measures wall-clock time.
clock_start = time.clock()
time_start = time.time()
#rb- readonly : Binary
f = open(filename, 'rb')
#1024 BufferSize
l = f.read(1024)
# Send the file in 1024-byte chunks until read() returns an empty string.
# NOTE(review): under Python 2 print('Sent ', repr(l)) prints a tuple;
# the parenthesized form suggests Python 3 was intended here.
while (l):
    s.send(l)
    print('Sent ', repr(l))
    l = f.read(1024)
f.close()
#s.close() vs shutdown - shutdown allows receiving pending data from sender
s.shutdown(SHUT_WR)
clock_end = time.clock()
time_end = time.time()
duration_clock = clock_end - clock_start
print 'clock: start = ',clock_start, ' end = ',clock_end
print 'clock: duration_clock = ', duration_clock
duration_time = time_end - time_start
print 'time: start = ',time_start, ' end = ',time_end
print 'time: duration_time = ', duration_time
| 0 | 0 | 0 |
b68441d9d8dd3c633e51ecf4a376cd1b7694255e | 2,771 | py | Python | scripts/ingests/2020Best_pmUKIRT_ingest.py | cfontanive/SIMPLE-db | 92dd86c043c5cf610e3097928936ed15c72f7697 | [
"BSD-3-Clause"
] | 6 | 2020-10-21T05:56:25.000Z | 2021-09-25T00:06:46.000Z | scripts/ingests/2020Best_pmUKIRT_ingest.py | cfontanive/SIMPLE-db | 92dd86c043c5cf610e3097928936ed15c72f7697 | [
"BSD-3-Clause"
] | 130 | 2020-10-27T20:25:22.000Z | 2022-03-15T21:23:23.000Z | scripts/ingests/2020Best_pmUKIRT_ingest.py | cfontanive/SIMPLE-db | 92dd86c043c5cf610e3097928936ed15c72f7697 | [
"BSD-3-Clause"
] | 8 | 2020-10-27T19:54:01.000Z | 2021-11-19T18:59:47.000Z | import sys
sys.path.append('.')
from astrodbkit2.astrodb import create_database
from astrodbkit2.astrodb import Database
from simple.schema import *
from astropy.table import Table
import numpy as np
from scripts.ingests.utils import ingest_proper_motions
from astropy.coordinates import SkyCoord
import astropy.units as u
from astroquery.simbad import Simbad
import warnings
warnings.filterwarnings("ignore", module='astroquery.simbad')
import re
import os
from pathlib import Path
import pandas as pd
SAVE_DB = True # save the data files in addition to modifying the .db file
RECREATE_DB = True # recreates the .db file from the data files
VERBOSE = False
verboseprint = print if VERBOSE else lambda *a, **k: None
db = load_db()
# load table
ingest_table = Table.read('scripts/ingests/UltracoolSheet-Main.csv', data_start=1)
#Defining variables
sources = ingest_table['name']
#ra_lit = ingest_table['pmra_lit']
#ra_lit_err = ingest_table['pmraerr_lit']
#dec_lit = ingest_table['pmdec_lit']
#dec_lit_err = ingest_table['pmdecerr_lit']
#ref_pm_lit = ingest_table['ref_pm_lit']
ra_UKIRT = ingest_table['pmra_UKIRT']
ra_UKIRT_err = ingest_table['pmraerr_UKIRT']
dec_UKIRT = ingest_table['pmdec_UKIRT']
dec_UKIRT_err = ingest_table['pmdecerr_UKIRT']
ref_pm_UKIRT = ingest_table['ref_plx_UKIRT']
#ingest_table_df = pd.DataFrame({'sources': sources, 'pm_ra' : ra_UKIRT, 'pm_ra_err' : ra_UKIRT_err, 'pm_dec' : dec_UKIRT, 'pm_dec_err' : dec_UKIRT_err, 'pm_ref' : ref_pm_UKIRT})
df = pd.read_csv('scripts/ingests/UltracoolSheet-Main.csv', usecols=['name' ,'pmra_UKIRT', 'pmraerr_UKIRT', 'pmdec_UKIRT', 'pmdecerr_UKIRT', 'ref_plx_UKIRT']) .dropna()
df.reset_index(inplace=True, drop=True)
print(df)
#Ingesting lit pm into db
#ingest_proper_motions(db, sources, ra_lit, ra_lit_err, dec_lit, dec_lit_err, ref_pm_lit, save_db=False, verbose=False)
#Ingesting UKIRT pm into db
ingest_proper_motions(db, df.name, df.pmra_UKIRT, df.pmraerr_UKIRT, df.pmdec_UKIRT, df.pmdecerr_UKIRT, df.ref_plx_UKIRT, save_db=True, verbose=False )
| 35.075949 | 178 | 0.760736 | import sys
sys.path.append('.')
from astrodbkit2.astrodb import create_database
from astrodbkit2.astrodb import Database
from simple.schema import *
from astropy.table import Table
import numpy as np
from scripts.ingests.utils import ingest_proper_motions
from astropy.coordinates import SkyCoord
import astropy.units as u
from astroquery.simbad import Simbad
import warnings
warnings.filterwarnings("ignore", module='astroquery.simbad')
import re
import os
from pathlib import Path
import pandas as pd
SAVE_DB = True # save the data files in addition to modifying the .db file
RECREATE_DB = True # recreates the .db file from the data files
VERBOSE = False
verboseprint = print if VERBOSE else lambda *a, **k: None
def load_db():
    """Connect to SIMPLE.db, optionally rebuilding it from the data files.

    When the module flag RECREATE_DB is set, any existing SIMPLE.db is
    deleted first so the database is regenerated from 'data/'; otherwise
    an existing .db file is simply reused.
    """
    connection_string = 'sqlite:///SIMPLE.db'  # SQLite browser
    db_path = Path('SIMPLE.db')
    if RECREATE_DB and db_path.exists():
        os.remove('SIMPLE.db')  # drop the stale file so it is rebuilt below
    if db_path.exists():
        # Database file already present: just connect to it.
        return Database(connection_string)
    # No .db file: create the empty schema, connect, and load the data files.
    create_database(connection_string)
    db = Database(connection_string)
    db.load_database('data/')
    return db
db = load_db()
# load table
# Read the full UltracoolSheet into an astropy Table (data_start=1).
ingest_table = Table.read('scripts/ingests/UltracoolSheet-Main.csv', data_start=1)
#Defining variables
sources = ingest_table['name']
#ra_lit = ingest_table['pmra_lit']
#ra_lit_err = ingest_table['pmraerr_lit']
#dec_lit = ingest_table['pmdec_lit']
#dec_lit_err = ingest_table['pmdecerr_lit']
#ref_pm_lit = ingest_table['ref_pm_lit']
ra_UKIRT = ingest_table['pmra_UKIRT']
ra_UKIRT_err = ingest_table['pmraerr_UKIRT']
dec_UKIRT = ingest_table['pmdec_UKIRT']
dec_UKIRT_err = ingest_table['pmdecerr_UKIRT']
# NOTE(review): the proper-motion reference is taken from the *parallax*
# reference column 'ref_plx_UKIRT' (here and in the pandas read below);
# confirm this is intentional and not a typo for a 'ref_pm_UKIRT' column.
ref_pm_UKIRT = ingest_table['ref_plx_UKIRT']
#ingest_table_df = pd.DataFrame({'sources': sources, 'pm_ra' : ra_UKIRT, 'pm_ra_err' : ra_UKIRT_err, 'pm_dec' : dec_UKIRT, 'pm_dec_err' : dec_UKIRT_err, 'pm_ref' : ref_pm_UKIRT})
# Re-read just the UKIRT proper-motion columns with pandas and drop rows
# that are missing any of them, then renumber the surviving rows.
df = pd.read_csv('scripts/ingests/UltracoolSheet-Main.csv', usecols=['name' ,'pmra_UKIRT', 'pmraerr_UKIRT', 'pmdec_UKIRT', 'pmdecerr_UKIRT', 'ref_plx_UKIRT']) .dropna()
df.reset_index(inplace=True, drop=True)
print(df)
#Ingesting lit pm into db
#ingest_proper_motions(db, sources, ra_lit, ra_lit_err, dec_lit, dec_lit_err, ref_pm_lit, save_db=False, verbose=False)
#Ingesting UKIRT pm into db
ingest_proper_motions(db, df.name, df.pmra_UKIRT, df.pmraerr_UKIRT, df.pmdec_UKIRT, df.pmdecerr_UKIRT, df.ref_plx_UKIRT, save_db=True, verbose=False )
| 715 | 0 | 23 |
8f64baeba42a571559bd700887dd83e29e5c5e72 | 294 | py | Python | client/labml/internal/logger/destinations/__init__.py | elgalu/labml | 511f0bbfcbeb4bc34bc6966a3973ff4e7e48eeee | [
"MIT"
] | 463 | 2021-05-28T03:21:14.000Z | 2022-03-28T06:28:21.000Z | client/labml/internal/logger/destinations/__init__.py | elgalu/labml | 511f0bbfcbeb4bc34bc6966a3973ff4e7e48eeee | [
"MIT"
] | 15 | 2021-06-22T10:02:36.000Z | 2021-12-20T06:14:12.000Z | client/labml/internal/logger/destinations/__init__.py | elgalu/labml | 511f0bbfcbeb4bc34bc6966a3973ff4e7e48eeee | [
"MIT"
] | 29 | 2020-06-03T07:13:31.000Z | 2021-05-23T18:20:34.000Z | from typing import List, Union, Tuple, Optional
from labml.internal.util.colors import StyleCode
| 26.727273 | 78 | 0.676871 | from typing import List, Union, Tuple, Optional
from labml.internal.util.colors import StyleCode
class Destination:
    """Abstract sink for logger output.

    A part is either a plain string or a (text, StyleCode) pair; the exact
    rendering semantics of the flags are defined by subclasses (not visible
    here).
    """
    def log(self, parts: List[Union[str, Tuple[str, Optional[StyleCode]]]], *,
            is_new_line: bool,
            is_reset: bool):
        """Render *parts*; subclasses must override (base always raises)."""
        raise NotImplementedError()
| 149 | -3 | 49 |
95adfcedeeceb1f8475fea951258d8cb1f33e0df | 2,503 | py | Python | blog/models.py | roofxixi/July | d02965c989adde4ad1aadaffdf9f7bd334810dcf | [
"MIT"
] | null | null | null | blog/models.py | roofxixi/July | d02965c989adde4ad1aadaffdf9f7bd334810dcf | [
"MIT"
] | null | null | null | blog/models.py | roofxixi/July | d02965c989adde4ad1aadaffdf9f7bd334810dcf | [
"MIT"
] | 1 | 2021-06-05T15:32:43.000Z | 2021-06-05T15:32:43.000Z | from django.db import models
# Create your models here.
__all__ = [
'Categories',
'Article',
'Links',
'Tag'
]
| 30.52439 | 101 | 0.679584 | from django.db import models
# Create your models here.
__all__ = [
'Categories',
'Article',
'Links',
'Tag'
]
class Categories(models.Model):
    """Blog post category with SEO fields (title/description/keywords).

    verbose_name strings are user-facing Chinese labels and are left as-is.
    """
    name = models.CharField(max_length=32, unique=True, verbose_name='分类名称')  # category name
    title = models.CharField(max_length=64, verbose_name='标题')  # page title
    description = models.CharField(max_length=32, verbose_name='描述')  # description
    keywords = models.CharField(max_length=255, verbose_name='关键词')  # keywords
    created_time = models.DateTimeField('创建时间', auto_now_add=True)  # created at
    last_modified_time = models.DateTimeField('修改时间', auto_now=True)  # modified at
    class Meta:
        verbose_name = '分类(Categories)'
        verbose_name_plural = verbose_name
        ordering = ['-created_time']  # newest first
    def __str__(self):
        return self.name
class Article(models.Model):
    """Blog article; belongs to one category and can carry many tags."""
    # Status stored as a single char: '0' = published, '1' = draft.
    STATUS_CHOICES = (
        ('0', '发布'),
        ('1', '草稿'),
    )
    title = models.CharField(max_length=32, unique=True, verbose_name='文章标题')  # article title
    url = models.CharField(max_length=255, verbose_name='链接', unique=True)  # unique slug/link
    abstract = models.TextField(verbose_name='摘要')  # abstract
    body = models.TextField(verbose_name='文章内容')  # article body
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')  # created at
    last_modified_time = models.DateTimeField('修改时间', auto_now=True)  # modified at
    status = models.CharField(default='0', max_length=1, choices=STATUS_CHOICES, verbose_name='文章状态')
    categories = models.ForeignKey("Categories", verbose_name='分类')  # parent category
    tag = models.ManyToManyField("Tag", verbose_name='标签')  # tags
    class Meta:
        verbose_name = '文章(Article)'
        verbose_name_plural = verbose_name
        ordering = ['-created_time']  # newest first
    def __str__(self):
        return self.title
class Links(models.Model):
    """Friendly/partner link shown on the blog."""
    name = models.CharField(max_length=32, unique=True, verbose_name='友情链接名字')  # link name
    url = models.URLField(unique=True, verbose_name='URL')
    add_time = models.DateTimeField(auto_now_add=True, verbose_name='添加日期')  # added at
    class Meta:
        verbose_name = '友情链接(Links)'
        verbose_name_plural = verbose_name
        ordering = ['id']
    def __str__(self):
        return self.name
class Tag(models.Model):
    """Free-form article tag (many-to-many with Article)."""
    name = models.CharField(max_length=32, unique=True, verbose_name='标签名称')  # tag name
    created_time = models.DateTimeField('创建时间', auto_now_add=True)  # created at
    last_modified_time = models.DateTimeField('修改时间', auto_now=True)  # modified at
    class Meta:
        verbose_name = '标签(Tag)'
        verbose_name_plural = verbose_name
        ordering = ['id']
    def __str__(self):
        return self.name
| 89 | 2,356 | 92 |
e13243a254e486667260acf8db8e332acc4a80b0 | 1,977 | py | Python | tartiflette_plugin_scalars/us_currency.py | AutumnalDream/tartiflette-plugin-scalars | 2c73b20eac93b364a97b2192956e5fd4034ec35a | [
"MIT"
] | 8 | 2019-10-02T12:47:15.000Z | 2021-12-15T14:29:37.000Z | tartiflette_plugin_scalars/us_currency.py | AutumnalDream/tartiflette-plugin-scalars | 2c73b20eac93b364a97b2192956e5fd4034ec35a | [
"MIT"
] | 109 | 2019-09-19T13:37:43.000Z | 2022-03-28T07:08:50.000Z | tartiflette_plugin_scalars/us_currency.py | AutumnalDream/tartiflette-plugin-scalars | 2c73b20eac93b364a97b2192956e5fd4034ec35a | [
"MIT"
] | 4 | 2019-10-26T19:57:20.000Z | 2021-06-24T14:32:37.000Z | from typing import Union # pylint: disable=unused-import
from tartiflette.constants import UNDEFINED_VALUE
from tartiflette.language.ast import StringValueNode
class USCurrency:
"""
Scalar which handles USD amounts (in format $XX.YY)
"""
@staticmethod
def parse_literal(ast: "ValueNode") -> Union[int, "UNDEFINED_VALUE"]:
"""
Loads the input value from an AST node
:param ast: ast node to coerce
:type ast: ValueNode
:return: the value in cents if it can be parsed, UNDEFINED_VALUE otherwise
:rtype: Union[int, UNDEFINED_VALUE]
"""
if isinstance(ast, StringValueNode):
try:
return _parse_us_currency(ast.value)
except (ValueError, TypeError):
return UNDEFINED_VALUE
return UNDEFINED_VALUE
@staticmethod
def coerce_input(value: str) -> int:
"""
Loads the input value
:param value: the value to coerce
:type value: str
:return: the value in cents if it can be parsed
:rtype: int
:raises TypeError: if the value isn't a string or int
:raises ValueError: if the value isn't convertible to an int
"""
return _parse_us_currency(value)
@staticmethod
def coerce_output(value: int) -> str:
"""
Dumps the output value
:param value: the value to coerce
:type value: int
:return: the value as a USD string if it can be parsed
:raises TypeError: if the value isn't an int
:rtype: str
"""
if isinstance(value, int):
return "$" + "{0:.2f}".format(value / 100.00)
raise TypeError(f"USCurrency cannot represent value: < {value} >")
| 31.887097 | 82 | 0.614568 | from typing import Union # pylint: disable=unused-import
from tartiflette.constants import UNDEFINED_VALUE
from tartiflette.language.ast import StringValueNode
def _parse_us_currency(value: str) -> int:
if isinstance(value, str):
return float(value[1:]) * 100
raise TypeError(
f"USCurrency cannot represent values other than strings: < {value} >"
)
class USCurrency:
    """
    Scalar which handles USD amounts (in format $XX.YY)
    """

    @staticmethod
    def parse_literal(ast: "ValueNode") -> Union[int, "UNDEFINED_VALUE"]:
        """
        Coerce an AST node into a cent amount.

        :param ast: ast node to coerce
        :type ast: ValueNode
        :return: the value in cents if it can be parsed, UNDEFINED_VALUE otherwise
        :rtype: Union[int, UNDEFINED_VALUE]
        """
        # Only string literals can carry a "$XX.YY" amount.
        if not isinstance(ast, StringValueNode):
            return UNDEFINED_VALUE
        try:
            return _parse_us_currency(ast.value)
        except (TypeError, ValueError):
            return UNDEFINED_VALUE

    @staticmethod
    def coerce_input(value: str) -> int:
        """
        Coerce an input value into a cent amount.

        :param value: the value to coerce
        :type value: str
        :return: the value in cents if it can be parsed
        :rtype: int
        :raises TypeError: if the value isn't a string
        :raises ValueError: if the value isn't convertible to a number
        """
        return _parse_us_currency(value)

    @staticmethod
    def coerce_output(value: int) -> str:
        """
        Render a cent amount as a "$XX.YY" string.

        :param value: the amount in cents
        :type value: int
        :return: the formatted USD string
        :raises TypeError: if the value isn't an int
        :rtype: str
        """
        if not isinstance(value, int):
            raise TypeError(f"USCurrency cannot represent value: < {value} >")
        dollars = value / 100.00
        return "$" + "{0:.2f}".format(dollars)
| 195 | 0 | 23 |
e8c3c51f619356e810aecfe09d711fb7f47243cc | 770 | py | Python | kintone/icon_kintone/actions/get_record_by_id/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2020-03-18T09:14:55.000Z | 2020-03-18T09:14:55.000Z | kintone/icon_kintone/actions/get_record_by_id/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | kintone/icon_kintone/actions/get_record_by_id/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | import komand
from .schema import GetRecordByIdInput, GetRecordByIdOutput, Input, Output, Component
# Custom imports below
from icon_kintone.util.kintone import get_record
| 32.083333 | 96 | 0.680519 | import komand
from .schema import GetRecordByIdInput, GetRecordByIdOutput, Input, Output, Component
# Custom imports below
from icon_kintone.util.kintone import get_record
class GetRecordById(komand.Action):
    """Action: fetch a single Kintone record by app ID and record ID."""

    def __init__(self):
        # Use the explicit class in super(): super(self.__class__, self)
        # recurses infinitely if this class is ever subclassed.
        super(GetRecordById, self).__init__(
            name='get_record_by_id',
            description=Component.DESCRIPTION,
            input=GetRecordByIdInput(),
            output=GetRecordByIdOutput())

    def run(self, params=None):
        """Run the action.

        :param params: action inputs (keys Input.APP_ID, Input.RECORD_ID)
        :return: dict with the fetched record under Output.RECORD
        """
        # Fix: avoid the mutable default argument ``params={}``; a fresh
        # dict is substituted per call instead.
        params = params if params is not None else {}
        app_id = params.get(Input.APP_ID)
        record_id = params.get(Input.RECORD_ID)
        verify_ssl = self.connection.verify_ssl
        output = get_record(self.logger, self.connection.api_key, app_id, record_id, verify_ssl)
        return {Output.RECORD: output}
| 506 | 14 | 77 |
6194050e1c395ec5c5e4b253d6977c10ebfc1e77 | 6,892 | py | Python | Assets/Tools/generate_scales.py | elkwolf/ear-training | 1d210dd4723e2edd50969fb693f0110b81e849ec | [
"MIT"
] | 6 | 2020-06-01T18:39:26.000Z | 2020-10-15T12:54:08.000Z | Assets/Tools/generate_scales.py | elkwolf/ear-training | 1d210dd4723e2edd50969fb693f0110b81e849ec | [
"MIT"
] | 11 | 2020-06-03T01:35:47.000Z | 2021-04-06T18:58:00.000Z | Assets/Tools/generate_scales.py | elkwolf/ear-training | 1d210dd4723e2edd50969fb693f0110b81e849ec | [
"MIT"
] | 2 | 2021-03-30T17:28:11.000Z | 2021-03-30T17:37:04.000Z | # -*- coding: utf-8 -*-
"""Generate all the scales in different notations.
This code has been written to test whether the user interface works.
The notations and names are approximated, as I am no expert of any other
way of spelling scales than spanish, and maybe english.
For the generation of these scales, an equal-tempered scale is assumed
and a reference frequency of A4 == 440.0Hz.
"""
import os
def correct_german_spellings(spelling):
    """Rewrite mechanical German note spellings into their idiomatic forms.

    The first group of rules fixes key names (e.g. Aeseses -> Aseses,
    Hes -> B); the second maps H-with-flats to the German B family
    (H♭♭♭ -> B𝄫, H𝄫 -> B♭, H♭ -> B). Replacements are applied in order,
    longest spellings first, so substrings are never rewritten early.
    """
    rules = (
        ("Aeseses", "Aseses"),
        ("Eeseses", "Eseses"),
        ("Heseses", "Beses"),
        ("Aeses", "Ases"),
        ("Eeses", "Eses"),
        ("Heses", "Bes"),
        ("Hes", "B"),
        ("Aes", "As"),
        ("Ees", "Es"),
        # The rules above handle key names; these handle note spellings.
        ("H♭♭♭", "B𝄫"),
        ("H𝄫", "B♭"),
        ("H♭", "B"),
    )
    fixed = spelling
    for wrong, right in rules:
        fixed = fixed.replace(wrong, right)
    return fixed
if __name__ == '__main__':
    # Reference pitch: A4 = 440 Hz, equal temperament.
    a4 = 440.0
    # Semitone offsets from A4 for the degrees of C major (C4..B4) and
    # c natural minor; each scale is later transposed up by
    # `chromatic_increase` semitones.
    major_scale_semitones_to_a4 = [-9, -7, -5, -4, -2, 0, 2]
    minor_scale_semitones_to_a4 = [-9, -7, -6, -4, -2, -1, 1]
    # Per-scale accidentals for each of the 7 letter names
    # (1 = sharp, -1 = flat, 0 = natural), indexed by scale_id.
    major_scale_alterations = [
        # C Major
        [0, 0, 0, 0, 0, 0, 0],
        # C# Major
        [1, 1, 1, 1, 1, 1, 1],
        # D Major
        [1, 0, 0, 1, 0, 0, 0],
        # Eb Major
        [0, 0, -1, 0, 0, -1, -1],
        # E Major
        [1, 1, 0, 1, 1, 0, 0],
        # F Major
        [0, 0, 0, 0, 0, 0, -1],
        # F# Major
        [1, 1, 1, 1, 1, 1, 0],
        # G Major
        [0, 0, 0, 1, 0, 0, 0],
        # Ab Major
        [0, -1, -1, 0, 0, -1, -1],
        # A Major
        [1, 0, 0, 1, 1, 0, 0],
        # Bb Major
        [0, 0, -1, 0, 0, 0, -1],
        # B Major
        [1, 1, 0, 1, 1, 1, 0],
    ]
    minor_scale_alterations = [
        # c minor
        [0, 0, -1, 0, 0, -1, -1],
        # c# minor
        [1, 1, 0, 1, 1, 0, 0],
        # d minor
        [0, 0, 0, 0, 0, 0, -1],
        # eb minor
        [-1, -1, -1, 0, -1, -1, -1],
        # e minor
        [0, 0, 0, 1, 0, 0, 0],
        # f minor
        [0, -1, -1, 0, 0, -1, -1],
        # f# minor
        [1, 0, 0, 1, 1, 0, 0],
        # g minor
        [0, 0, -1, 0, 0, 0, -1],
        # ab minor
        [-1, -1, -1, -1, -1, -1, -1],
        # a minor
        [0, 0, 0, 0, 0, 0, 0],
        # bb minor
        [0, -1, -1, 0, -1, -1, -1],
        # b minor
        [1, 0, 0, 1, 0, 0, 0],
    ]
    # Note names, mode names and accidental symbols per notation system.
    # NOTE: alteration values can be negative; a negative index picks the
    # flat symbols deliberately placed at the END of each alteration list
    # (-1 -> '♭', -2 -> '𝄫', -3 -> '♭♭♭') via Python negative indexing.
    notations = {
        'North America': {
            'notes': ['C', 'D', 'E', 'F', 'G', 'A', 'B'],
            'modes': ['Major', 'Minor'],
            'scale_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
            'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
        },
        'German': {
            'notes': ['C', 'D', 'E', 'F', 'G', 'A', 'H'],
            'modes': ['Dur', 'Moll'],
            'scale_alterations': ['', 'is', 'isis', 'isisis', 'eseses', "eses", "es"],
            'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
        },
        'Spanish': {
            'notes': ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si'],
            'modes': ['Mayor', 'Menor'],
            'scale_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
            'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
        },
        'French': {
            'notes': ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Ti'],
            'modes': ['Majeur', 'Mineur'],
            'scale_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
            'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
        }
    }
    # The 12 tonics in chromatic order as (letter-name index, accidental)
    # pairs, e.g. (0, 1) = C#, (2, -1) = Eb.
    scales = [
        # (note, alteration) pairs
        (0, 0), (0, 1), (1, 0), (2, -1),
        (2, 0), (3, 0), (3, 1), (4, 0),
        (5, -1), (5, 0), (6, -1), (6, 0)
    ]
    # For every notation x mode x tonic: write
    # Scales/<notation>/<scale name>/fundamental_frequencies.csv
    for notation, d in notations.items():
        print(notation)
        for mode_id, mode in enumerate(d['modes']):
            chromatic_increase = 0
            base_semitones_to_a4 = major_scale_semitones_to_a4 if mode_id == 0 else minor_scale_semitones_to_a4
            scale_alterations = major_scale_alterations if mode_id == 0 else minor_scale_alterations
            for scale_id, scale in enumerate(scales):
                note, alteration = scale
                scale_note = d['notes'][note]
                alteration_name = d['scale_alterations'][alteration]
                scale_name = '{}{} {}'.format(scale_note, alteration_name, mode)
                if notation == "German":
                    scale_name = correct_german_spellings(scale_name)
                # Transpose the C-rooted template up by scale_id semitones.
                scale_semitones_to_a4 = [s + chromatic_increase for s in base_semitones_to_a4]
                note_alterations = scale_alterations[scale_id]
                # Letter names starting at the tonic, wrapping after G/Si.
                note_indexes = [n % 7 for n in range(note, note + 7)]
                scale_dir = os.path.join('Scales', notation, scale_name)
                if not os.path.exists(scale_dir):
                    os.makedirs(scale_dir)
                csv_filepath = os.path.join(scale_dir, 'fundamental_frequencies.csv')
                with open(csv_filepath, encoding='utf-8', mode='w') as csv:
                    print(scale_name)
                    # Five name/frequency row pairs per scale: the scale as
                    # written, then shifted by +1, +2, -2 and -1 semitones
                    # (extra accidentals on every note).
                    for alt in [0, 1, 2, -2, -1]:
                        for idx, note_idx in enumerate(note_indexes):
                            note_name = d['notes'][note_idx]
                            # May go negative; negative indexes wrap to the
                            # flat symbols at the end of note_alterations.
                            note_alteration = note_alterations[note_idx] + alt
                            note_alteration_name = d['note_alterations'][note_alteration]
                            note_name = '{}{}'.format(note_name, note_alteration_name)
                            if notation == "German":
                                note_name = correct_german_spellings(note_name)
                            # print('{}, '.format(note_name), end='')
                            if idx < len(note_indexes) - 1:
                                csv.write('{}, '.format(note_name))
                            else:
                                csv.write('{}\n'.format(note_name))
                        for idx, note_idx in enumerate(note_indexes):
                            note_semitones_to_a4 = scale_semitones_to_a4[idx] + alt
                            # Equal temperament: f = 440 * 2^(semitones/12).
                            freq = a4 * 2.0 ** (note_semitones_to_a4 / 12.0)
                            # print('{:.2f}Hz, '.format(freq), end='')
                            if idx < len(note_indexes) - 1:
                                csv.write('{:.2f}, '.format(freq))
                            else:
                                csv.write('{:.2f}\n'.format(freq))
                chromatic_increase += 1
            print()
"""Generate all the scales in different notations.
This code has been written to test whether the user interface works.
The notations and names are approximated, as I am no expert of any other
way of spelling scales than spanish, and maybe english.
For the generation of these scales, an equal-tempered scale is assumed
and a reference frequency of A4 == 440.0Hz.
"""
import os
def correct_german_spellings(spelling):
    """Rewrite mechanical German note spellings into their idiomatic forms.

    The first group of rules fixes key names (e.g. Aeseses -> Aseses,
    Hes -> B); the second maps H-with-flats to the German B family
    (H♭♭♭ -> B𝄫, H𝄫 -> B♭, H♭ -> B). Replacements are applied in order,
    longest spellings first, so substrings are never rewritten early.
    """
    rules = (
        ("Aeseses", "Aseses"),
        ("Eeseses", "Eseses"),
        ("Heseses", "Beses"),
        ("Aeses", "Ases"),
        ("Eeses", "Eses"),
        ("Heses", "Bes"),
        ("Hes", "B"),
        ("Aes", "As"),
        ("Ees", "Es"),
        # The rules above handle key names; these handle note spellings.
        ("H♭♭♭", "B𝄫"),
        ("H𝄫", "B♭"),
        ("H♭", "B"),
    )
    fixed = spelling
    for wrong, right in rules:
        fixed = fixed.replace(wrong, right)
    return fixed
if __name__ == '__main__':
a4 = 440.0
major_scale_semitones_to_a4 = [-9, -7, -5, -4, -2, 0, 2]
minor_scale_semitones_to_a4 = [-9, -7, -6, -4, -2, -1, 1]
major_scale_alterations = [
# C Major
[0, 0, 0, 0, 0, 0, 0],
# C# Major
[1, 1, 1, 1, 1, 1, 1],
# D Major
[1, 0, 0, 1, 0, 0, 0],
# Eb Major
[0, 0, -1, 0, 0, -1, -1],
# E Major
[1, 1, 0, 1, 1, 0, 0],
# F Major
[0, 0, 0, 0, 0, 0, -1],
# F# Major
[1, 1, 1, 1, 1, 1, 0],
# G Major
[0, 0, 0, 1, 0, 0, 0],
# Ab Major
[0, -1, -1, 0, 0, -1, -1],
# A Major
[1, 0, 0, 1, 1, 0, 0],
# Bb Major
[0, 0, -1, 0, 0, 0, -1],
# B Major
[1, 1, 0, 1, 1, 1, 0],
]
minor_scale_alterations = [
# c minor
[0, 0, -1, 0, 0, -1, -1],
# c# minor
[1, 1, 0, 1, 1, 0, 0],
# d minor
[0, 0, 0, 0, 0, 0, -1],
# eb minor
[-1, -1, -1, 0, -1, -1, -1],
# e minor
[0, 0, 0, 1, 0, 0, 0],
# f minor
[0, -1, -1, 0, 0, -1, -1],
# f# minor
[1, 0, 0, 1, 1, 0, 0],
# g minor
[0, 0, -1, 0, 0, 0, -1],
# ab minor
[-1, -1, -1, -1, -1, -1, -1],
# a minor
[0, 0, 0, 0, 0, 0, 0],
# bb minor
[0, -1, -1, 0, -1, -1, -1],
# b minor
[1, 0, 0, 1, 0, 0, 0],
]
notations = {
'North America': {
'notes': ['C', 'D', 'E', 'F', 'G', 'A', 'B'],
'modes': ['Major', 'Minor'],
'scale_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
},
'German': {
'notes': ['C', 'D', 'E', 'F', 'G', 'A', 'H'],
'modes': ['Dur', 'Moll'],
'scale_alterations': ['', 'is', 'isis', 'isisis', 'eseses', "eses", "es"],
'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
},
'Spanish': {
'notes': ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si'],
'modes': ['Mayor', 'Menor'],
'scale_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
},
'French': {
'notes': ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Ti'],
'modes': ['Majeur', 'Mineur'],
'scale_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
'note_alterations': ['', '♯', '𝄪', '♯♯♯', '♭♭♭', '𝄫', '♭'],
}
}
scales = [
# (note, alteration) pairs
(0, 0), (0, 1), (1, 0), (2, -1),
(2, 0), (3, 0), (3, 1), (4, 0),
(5, -1), (5, 0), (6, -1), (6, 0)
]
for notation, d in notations.items():
print(notation)
for mode_id, mode in enumerate(d['modes']):
chromatic_increase = 0
base_semitones_to_a4 = major_scale_semitones_to_a4 if mode_id == 0 else minor_scale_semitones_to_a4
scale_alterations = major_scale_alterations if mode_id == 0 else minor_scale_alterations
for scale_id, scale in enumerate(scales):
note, alteration = scale
scale_note = d['notes'][note]
alteration_name = d['scale_alterations'][alteration]
scale_name = '{}{} {}'.format(scale_note, alteration_name, mode)
if notation == "German":
scale_name = correct_german_spellings(scale_name)
scale_semitones_to_a4 = [s + chromatic_increase for s in base_semitones_to_a4]
note_alterations = scale_alterations[scale_id]
note_indexes = [n % 7 for n in range(note, note + 7)]
scale_dir = os.path.join('Scales', notation, scale_name)
if not os.path.exists(scale_dir):
os.makedirs(scale_dir)
csv_filepath = os.path.join(scale_dir, 'fundamental_frequencies.csv')
with open(csv_filepath, encoding='utf-8', mode='w') as csv:
print(scale_name)
for alt in [0, 1, 2, -2, -1]:
for idx, note_idx in enumerate(note_indexes):
note_name = d['notes'][note_idx]
note_alteration = note_alterations[note_idx] + alt
note_alteration_name = d['note_alterations'][note_alteration]
note_name = '{}{}'.format(note_name, note_alteration_name)
if notation == "German":
note_name = correct_german_spellings(note_name)
# print('{}, '.format(note_name), end='')
if idx < len(note_indexes) - 1:
csv.write('{}, '.format(note_name))
else:
csv.write('{}\n'.format(note_name))
for idx, note_idx in enumerate(note_indexes):
note_semitones_to_a4 = scale_semitones_to_a4[idx] + alt
freq = a4 * 2.0 ** (note_semitones_to_a4 / 12.0)
# print('{:.2f}Hz, '.format(freq), end='')
if idx < len(note_indexes) - 1:
csv.write('{:.2f}, '.format(freq))
else:
csv.write('{:.2f}\n'.format(freq))
chromatic_increase += 1
print() | 0 | 0 | 0 |
93b4317c019f6e5107db5321f2babd1d5d74946e | 5,142 | py | Python | Poem_generator/codes/visualization.py | shenhao-stu/2021_computer_design | 8729c1962a0aeaa12888092dda5d00723f62aad2 | [
"MIT"
] | 3 | 2021-03-29T08:25:29.000Z | 2021-09-17T22:45:18.000Z | Poem_generator/codes/visualization.py | shenhao-stu/2021_computer_design | 8729c1962a0aeaa12888092dda5d00723f62aad2 | [
"MIT"
] | null | null | null | Poem_generator/codes/visualization.py | shenhao-stu/2021_computer_design | 8729c1962a0aeaa12888092dda5d00723f62aad2 | [
"MIT"
] | 1 | 2022-02-22T02:15:51.000Z | 2022-02-22T02:15:51.000Z | # -*- coding: utf-8 -*-
# @Author: Xiaoyuan Yi
# @Last Modified by: Xiaoyuan Yi
# @Last Modified time: 2020-06-11 22:04:36
# @Email: yi-xy16@mails.tsinghua.edu.cn
# @Description:
'''
Copyright 2020 THUNLP Lab. All Rights Reserved.
This code is part of the online Chinese poetry generation system, Jiuge.
System URL: https://jiuge.thunlp.cn/ and https://jiuge.thunlp.org/.
Github: https://github.com/THUNLP-AIPoet.
'''
from matplotlib import pyplot as plt
plt.rcParams['font.family'] = ['simhei']
from matplotlib.colors import from_levels_and_colors
import numpy as np
import copy
import torch
class Visualization(object):
"""docstring for LogInfo"""
| 32.961538 | 118 | 0.604434 | # -*- coding: utf-8 -*-
# @Author: Xiaoyuan Yi
# @Last Modified by: Xiaoyuan Yi
# @Last Modified time: 2020-06-11 22:04:36
# @Email: yi-xy16@mails.tsinghua.edu.cn
# @Description:
'''
Copyright 2020 THUNLP Lab. All Rights Reserved.
This code is part of the online Chinese poetry generation system, Jiuge.
System URL: https://jiuge.thunlp.cn/ and https://jiuge.thunlp.org/.
Github: https://github.com/THUNLP-AIPoet.
'''
from matplotlib import pyplot as plt
plt.rcParams['font.family'] = ['simhei']
from matplotlib.colors import from_levels_and_colors
import numpy as np
import copy
import torch
class Visualization(object):
    """Renders heat maps of memory read-attention during poem generation.

    Tracks the generated lines and a small "history memory" of characters;
    draw() plots a (generated char x memory slot) attention matrix and,
    when a write log is supplied, updates the history memory in place.
    """
    def __init__(self, topic_slots, history_slots, log_path):
        # Fix: the original called super(Visualization).__init__(), an
        # unbound super that never initialized the base class.
        super(Visualization, self).__init__()
        self._topic_slots = topic_slots
        self._history_slots = history_slots
        self._log_path = log_path
    def reset(self, keywords):
        """Start a new poem: remember keywords, clear memory and lines."""
        self._keywords = keywords
        self._history_mem = [' ']*self._history_slots
        self._gen_lines = []
    def add_gen_line(self, line):
        """Append a newly generated (whitespace-stripped) line."""
        self._gen_lines.append(line.strip())
    def normalization(self, ori_matrix):
        """Row-normalize a matrix so each row sums to 1."""
        new_matrix = ori_matrix / ori_matrix.sum(axis=1, keepdims=True)
        return new_matrix
    def draw(self, read_log, write_log, step, visual_mode):
        """Plot the read-attention heat map for the latest generated line.

        :param read_log: list of (1, 1, mem_slots) tensors, one per char
        :param write_log: (B, L_gen, mem_slots) tensor or None
        :param step: step index used in the saved image filename
        :param visual_mode: 0 = no output, 1 = save PNG, 2 = plt.show()
        """
        assert visual_mode in [0, 1, 2]
        # read_log: (1, 1, mem_slots) * L_gen
        # write_log: (B, L_gen, mem_slots)
        current_gen_chars = [c for c in self._gen_lines[-1]]
        gen_len = len(current_gen_chars)
        # The "local memory" row is the previously generated line (blank
        # placeholders when this is the first line).
        if len(self._gen_lines) >= 2:
            last_gen_chars = [c for c in self._gen_lines[-2]]
            last_gen_len = len(last_gen_chars)
        else:
            last_gen_chars = [''] * gen_len
            last_gen_len = gen_len
        # (L_gen, mem_slots)
        mem_slots = self._topic_slots+self._history_slots+last_gen_len
        read_matrix = torch.cat(read_log, dim=1)[0, 0:gen_len, 0:mem_slots].detach().cpu().numpy()
        read_matrix = self.normalization(read_matrix)
        plt.figure(figsize=(11, 5))
        # visualization of reading attention weights: build a diverging
        # colormap centered at 0 over the matrix's value range
        num_levels = 100
        vmin, vmax = read_matrix.min(), read_matrix.max()
        midpoint = 0
        levels = np.linspace(vmin, vmax, num_levels)
        midp = np.mean(np.c_[levels[:-1], levels[1:]], axis=1)
        vals = np.interp(midp, [vmin, midpoint, vmax], [0, 0.5, 1])
        colors = plt.cm.seismic(vals)
        cmap, norm = from_levels_and_colors(levels, colors)
        plt.imshow(read_matrix, cmap=cmap, interpolation='none')
        # print generated chars and chars in the memory
        fontsize = 14
        plt.text(0.2, gen_len+0.5, "Topic Memory", fontsize=fontsize)
        plt.text(self._topic_slots, gen_len+0.5, "History Memory", fontsize=fontsize)
        if last_gen_len == 5:
            shift = 5
        else:
            shift = 6
        plt.text(self._topic_slots+shift, gen_len+0.5, "Local Memory", fontsize=fontsize)
        # topic memory labels along the top
        for i in range(0, len(self._keywords)):
            key = self._keywords[i]
            if len(key) == 1:
                key = " " + key + " "
            key = key + "|"
            plt.text(i-0.4,-0.7, key, fontsize=fontsize)
        start_pos = self._topic_slots
        end_pos = self._topic_slots + self._history_slots
        # history memory labels
        for i in range(start_pos, end_pos):
            c = self._history_mem[i - start_pos]
            if i == end_pos - 1:
                c = c + " |"
            plt.text(i-0.2,-0.7, c, fontsize=fontsize)
        start_pos = end_pos
        end_pos = start_pos + last_gen_len
        # local memory labels (previous line's characters)
        for i in range(start_pos, end_pos):
            idx = i - start_pos
            plt.text(i-0.2,-0.7, last_gen_chars[idx], fontsize=fontsize)
        # generated line down the left side
        for i in range(0, len(current_gen_chars)):
            plt.text(-1.2, i+0.15, current_gen_chars[i], fontsize=fontsize)
        plt.colorbar()
        plt.tick_params(labelbottom=False, labelleft=False)
        x_major_locator = plt.MultipleLocator(1)
        y_major_locator = plt.MultipleLocator(1)
        ax = plt.gca()
        ax.xaxis.set_major_locator(x_major_locator)
        ax.yaxis.set_major_locator(y_major_locator)
        #plt.tight_layout()
        if visual_mode == 1:
            fig = plt.gcf()
            # NOTE(review): the `quality=` kwarg was removed in
            # Matplotlib 3.6 -- drop it when upgrading.
            fig.savefig(self._log_path + 'visual_step_{}.png'.format(step), dpi=300, quality=100, bbox_inches="tight")
        elif visual_mode == 2:
            plt.show()
        # update history memory: each previous-line char is written into
        # the slot its write attention selects (ignoring non-history slots)
        if write_log is not None:
            if len(last_gen_chars) == 0:
                print ("last generated line is empty!")
            write_log = write_log[0, :, :].detach().cpu().numpy()
            history_mem = copy.deepcopy(self._history_mem)
            for i, c in enumerate(last_gen_chars):
                selected_slot = np.argmax(write_log[i, :])
                if selected_slot >= self._history_slots:
                    continue
                history_mem[selected_slot] = c
            self._history_mem = history_mem
| 4,345 | 0 | 134 |
35944c066e9e73f69a07d3e3dc969e189308bc87 | 3,934 | py | Python | SERVER/buissness/UMS_services.py | PawaN-K-MishrA/FTP-server | 090040ed7b40a8507578cd124dd18f9911068a14 | [
"MIT"
] | null | null | null | SERVER/buissness/UMS_services.py | PawaN-K-MishrA/FTP-server | 090040ed7b40a8507578cd124dd18f9911068a14 | [
"MIT"
] | null | null | null | SERVER/buissness/UMS_services.py | PawaN-K-MishrA/FTP-server | 090040ed7b40a8507578cd124dd18f9911068a14 | [
"MIT"
] | null | null | null | import sys
sys.path.append('..')
from data.DBConnection import DBConnection
from data.user import User
| 21.855556 | 141 | 0.669039 | import sys
sys.path.append('..')
from data.DBConnection import DBConnection
from data.user import User
class UMS_services:
@staticmethod
def add(u):
result=False
conx=DBConnection.connect()
cur=conx.cursor()
query='insert into userMaster (userName,password,userType,userStatus,name,email,contact,address,gender) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)'
data=[]
data.append(u.getUserName())
data.append(u.getPassword())
data.append(u.getUserType())
data.append(u.getUserStatus())
data.append(u.getName())
data.append(u.getEmail())
data.append(u.getContact())
data.append(u.getAddress())
data.append(u.getGender())
try:
cur.execute(query,data)
except:
conx.commit()
cur.close()
conx.close()
return result
if (cur.rowcount==1):
result=True
conx.commit()
cur.close()
conx.close()
return result
@staticmethod
def view():
conx=DBConnection.connect()
cur=conx.cursor()
query='select * from userMaster'
y=[]
cur.execute(query)
x=cur.fetchall()
for i in x:
u=User()
u.setUserId(i[0])
u.setUserName(i[1])
u.setPassword(i[2])
u.setUserType(i[3])
u.setUserStatus(i[4])
u.setName(i[5])
u.setEmail(i[6])
u.setContact(i[7])
u.setAddress(i[8])
u.setGender(i[9])
y.append(u)
conx.commit()
conx.close()
cur.close()
return y
@staticmethod
def search(id):
conx=DBConnection.connect()
cur=conx.cursor()
query='select * from userMaster where userId=%s'
value=(id,)
cur.execute(query,value)
x=cur.fetchall()
u=User()
for i in x:
u.setUserId(i[0])
u.setUserName(i[1])
u.setPassword(i[2])
u.setUserType(i[3])
u.setUserStatus(i[4])
u.setName(i[5])
u.setEmail(i[6])
u.setContact(i[7])
u.setAddress(i[8])
u.setGender(i[9])
conx.commit()
cur.close()
conx.close()
return u
@staticmethod
def update(u):
result=False
conx=DBConnection.connect()
cur=conx.cursor()
data=[]
query='update userMaster set userType=%s,userStatus=%s,name=%s,email=%s,contact=%s,address=%s,gender=%s where userId=%s'
data.append(u.getUserType())
data.append(u.getUserStatus())
data.append(u.getName())
data.append(u.getEmail())
data.append(u.getContact())
data.append(u.getAddress())
data.append(u.getGender())
data.append(u.getUserId())
cur.execute(query,data)
if (cur.rowcount==1):
result=True
conx.commit()
cur.close()
conx.close()
return result
@staticmethod
def updateProfile(u,x):
result=False
conx=DBConnection.connect()
cur=conx.cursor()
data=[]
query='update userMaster set name=%s,email=%s,contact=%s,address=%s,gender=%s where userId=%s'
data.append(u.getName())
data.append(u.getEmail())
data.append(u.getContact())
data.append(u.getAddress())
data.append(u.getGender())
data.append(x)
cur.execute(query,data)
if (cur.rowcount==1):
result=True
conx.commit()
cur.close()
conx.close()
return result
@staticmethod
def insertfile(u,x,y):
result=False
conx=DBConnection.connect()
cur=conx.cursor()
query='insert into userfiles values(%s,%s,%s)'
values=(u,x,y)
cur.execute(query,values)
if (cur.rowcount!=0):
result=True
conx.commit()
cur.close()
conx.close()
return result
@staticmethod
def deletefile(i,u):
result=False
conx=DBConnection.connect()
cur=conx.cursor()
print(u)
print(i)
query='delete from userfiles where filename=%s and userid=%s'
value=(i,u)
cur.execute(query,value)
if (cur.rowcount==1):
result=True
conx.commit()
cur.close()
conx.close()
return result
@staticmethod
def change_file_name(n_fname,userid,o_fname):
result=False
conx=DBConnection.connect()
cur=conx.cursor()
query='update userfiles set filename=%s where userid=%s and filename=%s'
value=(n_fname,userid,o_fname)
cur.execute(query,value)
if (cur.rowcount==1):
result=True
conx.commit()
cur.close()
conx.close()
return result
| 3,470 | 303 | 22 |
16f381165f84836e0d5f3f01fed80f56a786bce1 | 16,514 | py | Python | tests/test_xlsx_to_arrow.py | CJWorkbench/arrow-tools | 1944e40853d82d7dad3d47a72958326cefff367a | [
"MIT"
] | 1 | 2021-11-23T03:57:03.000Z | 2021-11-23T03:57:03.000Z | tests/test_xlsx_to_arrow.py | CJWorkbench/arrow-tools | 1944e40853d82d7dad3d47a72958326cefff367a | [
"MIT"
] | null | null | null | tests/test_xlsx_to_arrow.py | CJWorkbench/arrow-tools | 1944e40853d82d7dad3d47a72958326cefff367a | [
"MIT"
] | 1 | 2021-11-23T03:57:06.000Z | 2021-11-23T03:57:06.000Z | import datetime
from pathlib import Path
import subprocess
import tempfile
from typing import Tuple, Union
import openpyxl as xl
import pyarrow
from .util import assert_table_equals
# This is hard to test, since it's really an invalid Excel file
# def test_no_sheets_is_error():
# # https://openpyxl.readthedocs.io/en/stable/optimized.html#write-only-mode
# # ... to create a workbook with no worksheets
# workbook = xl.Workbook()
# workbook.remove(workbook.active)
# workbook.get_active_sheet = lambda: None
# result, stdout = do_convert_data(workbook, include_stdout=True)
# assert_table_equals(result, pyarrow.table({}))
# assert stdout == b"Excel file has no worksheets\n"
# openpyxl doesn't write shared strings
# def test_shared_string_column():
# workbook = xl.Workbook()
# sheet = workbook.active
# sheet.append(["a"])
# sheet.append(["b"])
# assert_table_equals(
# do_convert_data(workbook, header_rows=""),
# pyarrow.table({"A": ["a", "b"]})
# )
| 32.128405 | 102 | 0.579206 | import datetime
from pathlib import Path
import subprocess
import tempfile
from typing import Tuple, Union
import openpyxl as xl
import pyarrow
from .util import assert_table_equals
def do_convert(
    xlsx_path: Path,
    *,
    max_rows: int = 99999,
    max_columns: int = 99998,
    max_bytes_per_value: int = 99997,
    max_bytes_total: int = 999999999,
    header_rows: str = "0-1",
    header_rows_file: str = "",
    include_stdout: bool = False
) -> Union[pyarrow.Table, Tuple[pyarrow.Table, bytes]]:
    """Run the xlsx-to-arrow binary on *xlsx_path* and load the result.

    The limit keyword arguments are forwarded as command-line flags. The
    binary's stderr must be empty; its stdout (warnings about truncation,
    skipped rows, etc.) is returned alongside the table when
    *include_stdout* is True, otherwise it is asserted empty.

    :raises RuntimeError: when the binary exits non-zero (its output is
        folded into the message for readable test failures)
    """
    with tempfile.NamedTemporaryFile(suffix=".arrow") as arrow_file:
        args = [
            "/usr/bin/xlsx-to-arrow",
            "--max-rows",
            str(max_rows),
            "--max-columns",
            str(max_columns),
            "--max-bytes-per-value",
            str(max_bytes_per_value),
            "--max-bytes-total",
            str(max_bytes_total),
            "--header-rows",
            header_rows,
            "--header-rows-file",
            header_rows_file,
            xlsx_path.as_posix(),
            Path(arrow_file.name).as_posix(),
        ]
        try:
            result = subprocess.run(
                args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
            )
        except subprocess.CalledProcessError as err:
            # Rewrite error so it's easy to read in test-result stack trace
            raise RuntimeError(
                "Process failed with code %d: %s"
                % (
                    err.returncode,
                    (
                        err.stdout.decode("utf-8", errors="replace")
                        + err.stderr.decode("utf-8", errors="replace")
                    ),
                )
            ) from None
        assert result.stderr == b""
        # Read the Arrow file back while the temp file still exists.
        with pyarrow.ipc.open_file(arrow_file.name) as result_reader:
            table = result_reader.read_all()
        if include_stdout:
            return table, result.stdout
        else:
            assert result.stdout == b""
            return table
def do_convert_data(
    workbook: xl.Workbook, **kwargs
) -> Union[pyarrow.Table, Tuple[pyarrow.Table, bytes]]:
    """Save *workbook* to a temporary .xlsx file and convert it.

    **kwargs are forwarded to do_convert(); with ``include_stdout=True``
    the return value is a (table, stdout_bytes) tuple.

    Fix: the return annotation previously read
    ``Union[pyarrow.Table, Union[pyarrow.Table, str]]`` which collapses to
    ``Union[pyarrow.Table, str]`` -- it now mirrors do_convert's signature.
    """
    with tempfile.NamedTemporaryFile(suffix=".xlsx") as xlsx_file:
        workbook.save(filename=xlsx_file.name)
        return do_convert(Path(xlsx_file.name), **kwargs)
# This is hard to test, since it's really an invalid Excel file
# def test_no_sheets_is_error():
# # https://openpyxl.readthedocs.io/en/stable/optimized.html#write-only-mode
# # ... to create a workbook with no worksheets
# workbook = xl.Workbook()
# workbook.remove(workbook.active)
# workbook.get_active_sheet = lambda: None
# result, stdout = do_convert_data(workbook, include_stdout=True)
# assert_table_equals(result, pyarrow.table({}))
# assert stdout == b"Excel file has no worksheets\n"
def test_empty_sheet():
    # A brand-new workbook (one blank worksheet) yields a zero-column table.
    assert_table_equals(do_convert_data(xl.Workbook(), header_rows=""), pyarrow.table({}))
def test_empty_sheet_no_header_row():
    # Asking for a header row on an empty sheet still yields an empty table.
    wb = xl.Workbook()
    result = do_convert_data(wb, header_rows="0-1")
    assert_table_equals(result, pyarrow.table({}))
def test_number_columns():
    # Integer and float cells are both read back as float64 columns.
    wb = xl.Workbook()
    for row in ([1, 1.1], [2, 2.2], [3, 3.3]):
        wb.active.append(row)
    expected = pyarrow.table({"A": [1.0, 2.0, 3.0], "B": [1.1, 2.2, 3.3]})
    assert_table_equals(do_convert_data(wb, header_rows=""), expected)
# openpyxl doesn't write shared strings
# def test_shared_string_column():
# workbook = xl.Workbook()
# sheet = workbook.active
# sheet.append(["a"])
# sheet.append(["b"])
# assert_table_equals(
# do_convert_data(workbook, header_rows=""),
# pyarrow.table({"A": ["a", "b"]})
# )
def test_inline_str_column():
    # Inline (non-shared) string cells become a utf8 column.
    wb = xl.Workbook()
    for value in ("a", "b"):
        wb.active.append([value])
    expected = pyarrow.table({"A": ["a", "b"]})
    assert_table_equals(do_convert_data(wb, header_rows=""), expected)
def test_date_and_datetime_columns():
    # Dates and datetimes both come back as timestamp("ns") columns.
    workbook = xl.Workbook()
    sheet = workbook.active
    # These dates are chosen specially -- double precision can't represent
    # every arbitrary number of microseconds accurately (let alone
    # nanoseconds), but the math happens to work for these datetimes.
    #
    # (We aren't testing rounding here.)
    sheet.append(
        [datetime.date(2020, 1, 25), datetime.datetime(2020, 1, 25, 17, 25, 30, 128000)]
    )
    sheet.append(
        [datetime.date(2020, 1, 26), datetime.datetime(2020, 1, 25, 17, 26, 27, 256)]
    )
    assert_table_equals(
        do_convert_data(workbook, header_rows=""),
        pyarrow.table(
            {
                "A": pyarrow.array(
                    [datetime.datetime(2020, 1, 25), datetime.datetime(2020, 1, 26)],
                    pyarrow.timestamp("ns"),
                ),
                "B": pyarrow.array(
                    [
                        datetime.datetime(2020, 1, 25, 17, 25, 30, 128000),
                        datetime.datetime(2020, 1, 25, 17, 26, 27, 256),
                    ],
                    pyarrow.timestamp("ns"),
                ),
            }
        ),
    )
def test_datetime_overflow():
    # Dates the converter reports as out of range (years 1100 and 3000
    # here; presumably the ns-timestamp representable range -- see the
    # expected stdout) are replaced with null, per cell, with a warning.
    workbook = xl.Workbook()
    sheet = workbook.active
    sheet.append([datetime.date(1100, 1, 1), datetime.date(1901, 1, 1)])
    sheet.append([datetime.date(1901, 1, 1), datetime.date(3000, 1, 1)])
    result, stdout = do_convert_data(workbook, include_stdout=True, header_rows="")
    assert_table_equals(
        result,
        pyarrow.table(
            {
                "A": pyarrow.array(
                    [None, datetime.datetime(1901, 1, 1)], pyarrow.timestamp("ns")
                ),
                "B": pyarrow.array(
                    [datetime.datetime(1901, 1, 1), None], pyarrow.timestamp("ns")
                ),
            }
        ),
    )
    assert (
        stdout
        == b"replaced out-of-range with null for 2 Timestamps; see row 0 column A\n"
    )
def test_convert_datetime_to_string_and_report():
    # A column mixing a date with text falls back to String: the date is
    # rendered as ISO text and a warning goes to stdout.
    wb = xl.Workbook()
    ws = wb.active
    ws["A1"] = datetime.date(1981, 1, 1)
    ws["A2"] = "hi"
    result, stdout = do_convert_data(wb, include_stdout=True, header_rows="")
    assert_table_equals(result, pyarrow.table({"A": ["1981-01-01", "hi"]}))
    assert stdout == b"interpreted 1 Timestamps as String; see row 0 column A\n"
def test_datetime_do_not_convert_to_string_when_value_is_whitespace():
    # A whitespace-only cell between dates is treated as null, not as text,
    # so the column keeps its Timestamp type.
    workbook = xl.Workbook()
    sheet = workbook.active
    sheet["A1"] = datetime.date(1981, 1, 1)
    sheet["A2"] = " "
    sheet["A3"] = datetime.date(1983, 1, 1)
    assert_table_equals(
        do_convert_data(workbook, header_rows=""),
        pyarrow.table(
            {
                "A": pyarrow.array(
                    [
                        datetime.datetime(1981, 1, 1),
                        None,
                        datetime.datetime(1983, 1, 1),
                    ],
                    pyarrow.timestamp("ns"),
                ),
            }
        ),
    )
def test_skip_null_values():
    # Gaps in a column come back as nulls at the matching row positions.
    wb = xl.Workbook()
    ws = wb.active
    ws["A2"] = 3.0
    ws["A4"] = 4.0
    expected = pyarrow.table({"A": [None, 3.0, None, 4.0]})
    assert_table_equals(do_convert_data(wb, header_rows=""), expected)
def test_skip_null_columns():
    # Entirely empty columns between populated ones are emitted as all-null
    # String columns, with a warning naming the first such column.
    workbook = xl.Workbook()
    sheet = workbook.active
    sheet["A1"] = 3.0
    sheet["A2"] = 3.0
    sheet["D1"] = 4.0
    sheet["D2"] = 4.0
    result, stdout = do_convert_data(workbook, header_rows="", include_stdout=True)
    assert_table_equals(
        result,
        pyarrow.table(
            {
                "A": [3.0, 3.0],
                "B": pyarrow.array([None, None], pyarrow.utf8()),
                "C": pyarrow.array([None, None], pyarrow.utf8()),
                "D": [4.0, 4.0],
            }
        ),
    )
    assert stdout == b"chose string type for null column B and more\n"
def test_backfill_column_at_end():
    # A column shorter than its neighbor is padded with trailing nulls.
    wb = xl.Workbook()
    ws = wb.active
    ws["A1"] = 3.0
    for cell in ("B1", "B2", "B3"):
        ws[cell] = 4.0
    expected = pyarrow.table({"A": [3.0, None, None], "B": [4.0, 4.0, 4.0]})
    assert_table_equals(do_convert_data(wb, header_rows=""), expected)
def test_bool_becomes_str():
    # Boolean cells are converted to "TRUE"/"FALSE" strings.
    workbook = xl.Workbook()
    sheet = workbook.active
    sheet.append([True])
    sheet.append([False])
    # Fix: the original converted the workbook twice and ignored the first
    # call's result and captured stdout; convert once and assert on it.
    # (do_convert_data without include_stdout asserts stdout is empty.)
    result = do_convert_data(workbook, header_rows="")
    assert_table_equals(result, pyarrow.table({"A": ["TRUE", "FALSE"]}))
def test_invalid_zipfile():
    # Garbage bytes produce an empty table plus a readable error message.
    with tempfile.NamedTemporaryFile(suffix=".xlsx") as tf:
        xlsx_path = Path(tf.name)
        xlsx_path.write_bytes(b"12345")
        result, stdout = do_convert(xlsx_path, include_stdout=True)
    assert_table_equals(result, pyarrow.table({}))
    assert stdout == b"Invalid XLSX file: xlnt::exception : failed to find zip header\n"
def test_xml_error():
    # A fixture with broken workbook XML yields an empty table plus the
    # parser's error message (file, line:column, missing attribute).
    result, stdout = do_convert(
        Path(__file__).parent / "files" / "xml-required-attribute-missing-in-rels.xlsx",
        include_stdout=True,
    )
    assert_table_equals(result, pyarrow.table({}))
    assert (
        stdout
        == b"Invalid XLSX file: xl/_rels/workbook.xml.rels:2:84: error: attribute 'Target' expected\n"
    )
def test_skip_rows():
    # Rows beyond max_rows are dropped and reported on stdout (the skipped
    # count includes the empty row 3).
    wb = xl.Workbook()
    ws = wb.active
    ws["A1"] = "Hi"
    ws["A2"] = 1.0
    ws["A4"] = 1.0
    result, stdout = do_convert_data(
        wb, max_rows=1, header_rows="0-1", include_stdout=True
    )
    assert_table_equals(result, pyarrow.table({"A": [1.0]}))
    assert stdout == b"skipped 2 rows (after row limit of 1)\n"
def test_skip_columns():
    # Columns beyond max_columns are dropped and reported on stdout.
    wb = xl.Workbook()
    wb.active.append(["a", "b", "c"])
    result, stdout = do_convert_data(
        wb, max_columns=1, header_rows="", include_stdout=True
    )
    assert_table_equals(result, pyarrow.table({"A": ["a"]}))
    assert stdout == b"skipped column B and more (after column limit of 1)\n"
def test_header_rows_convert_to_str():
    # Header cells are rendered to text using their Excel number formats
    # (custom date and decimal formats included); null stays null, empty
    # string stays "".
    workbook = xl.Workbook()
    sheet = workbook.active
    sheet.append([datetime.date(2020, 1, 25), 123.4213, 123.4213, None, ""])
    sheet["A1"].number_format = "dd-mmm-yyyy"
    sheet["C1"].number_format = "#.00"
    sheet.append(["a", "b", "c", "d", "e"])
    with tempfile.NamedTemporaryFile(suffix="-headers.arrow") as header_file:
        # ignore result
        do_convert_data(workbook, header_rows="0-1", header_rows_file=header_file.name)
        with pyarrow.ipc.open_file(header_file.name) as header_reader:
            header_table = header_reader.read_all()
    assert_table_equals(
        header_table,
        pyarrow.table(
            {
                "A": ["25-Jan-2020"],
                "B": ["123.4213"],
                "C": ["123.42"],
                "D": pyarrow.array([None], pyarrow.utf8()),
                "E": [""],
            }
        ),
    )
def test_header_rows():
    # Header row goes to the separate header_rows_file; the data table
    # contains only the remaining rows.
    workbook = xl.Workbook()
    sheet = workbook.active
    sheet.append(["ColA", "ColB"])
    sheet.append(["a", "b"])
    with tempfile.NamedTemporaryFile(suffix="-headers.arrow") as header_file:
        result = do_convert_data(
            workbook, header_rows="0-1", header_rows_file=header_file.name
        )
        with pyarrow.ipc.open_file(header_file.name) as header_reader:
            header_table = header_reader.read_all()
    assert_table_equals(result, pyarrow.table({"A": ["a"], "B": ["b"]}))
    assert_table_equals(header_table, pyarrow.table({"A": ["ColA"], "B": ["ColB"]}))
def test_header_truncated():
    """Header values obey the per-value byte limit, and the truncation is reported."""
    wb = xl.Workbook()
    ws = wb.active
    ws.append(["xy1", "xy2"])
    ws.append(["a", "b"])
    with tempfile.NamedTemporaryFile(suffix="-headers.arrow") as header_file:
        table, messages = do_convert_data(
            wb,
            max_bytes_per_value=2,
            header_rows="0-1",
            header_rows_file=header_file.name,
            include_stdout=True,
        )
        with pyarrow.ipc.open_file(header_file.name) as reader:
            headers = reader.read_all()
    assert_table_equals(table, pyarrow.table({"A": ["a"], "B": ["b"]}))
    assert_table_equals(headers, pyarrow.table({"A": ["xy"], "B": ["xy"]}))
    assert messages == b"truncated 2 values (value byte limit is 2; see row 0 column A)\n"
def test_values_truncated():
    """Data values longer than the per-value byte limit are cut and counted."""
    wb = xl.Workbook()
    wb.active.append(["abcde", "fghijklmn", "opq"])
    table, messages = do_convert_data(
        wb, max_bytes_per_value=3, header_rows="", include_stdout=True
    )
    expected = pyarrow.table({"A": ["abc"], "B": ["fgh"], "C": ["opq"]})
    assert_table_equals(table, expected)
    assert messages == b"truncated 2 values (value byte limit is 3; see row 0 column A)\n"
def test_truncate_do_not_cause_invalid_utf8():
    """Truncation never splits a multi-byte UTF-8 sequence in half.

    Examples from https://en.wikipedia.org/wiki/UTF-8; each pair is
    (input value, expected value after truncating to 4 bytes).
    """
    cases = [
        ("AAAA", "AAAA"),
        ("AA\u00A2", "AA\u00A2"),  # ¢ (2 bytes) -- keep
        ("AAA\u00A2", "AAA"),  # ¢ (2 bytes) -- drop both bytes
        ("A\u0939", "A\u0939"),  # ह (3 bytes) -- keep
        ("AA\u0939", "AA"),  # ह (3 bytes) -- drop all three bytes
        ("AAA\u0939", "AAA"),  # ह (3 bytes) -- drop all three bytes
        ("\U00010348", "\U00010348"),  # 𐍈 (4 bytes) -- keep
        ("A\U00010348", "A"),  # 𐍈 (4 bytes) -- drop all four bytes
        ("AA\U00010348", "AA"),  # 𐍈 (4 bytes) -- drop all four bytes
        ("AAA\U00010348", "AAA"),  # 𐍈 (4 bytes) -- drop all four bytes
    ]
    wb = xl.Workbook()
    for original, _ in cases:
        wb.active.append([original])
    table, messages = do_convert_data(
        wb,
        max_bytes_per_value=4,
        header_rows="",
        include_stdout=True,
    )
    assert_table_equals(
        table, pyarrow.table({"A": [truncated for _, truncated in cases]})
    )
    assert messages == b"truncated 6 values (value byte limit is 4; see row 2 column A)\n"
def test_convert_float_to_string_and_report():
    """A column mixing Numbers and Strings becomes String, with a report."""
    wb = xl.Workbook()
    ws = wb.active
    ws["A1"] = 3.4
    ws["A2"] = "s"
    ws["A3"] = -2.2
    table, messages = do_convert_data(wb, header_rows="", include_stdout=True)
    assert_table_equals(table, pyarrow.table({"A": ["3.4", "s", "-2.2"]}))
    assert messages == b"interpreted 2 Numbers as String; see row 0 column A\n"
def test_float_do_not_convert_to_string_when_value_is_whitespace():
    """A whitespace-only cell reads as null, keeping the column numeric."""
    wb = xl.Workbook()
    ws = wb.active
    ws["A1"] = 3.4
    ws["A2"] = " "
    ws["A3"] = -2.2
    result = do_convert_data(wb, header_rows="")
    assert_table_equals(result, pyarrow.table({"A": [3.4, None, -2.2]}))
def test_float_do_not_convert_to_string_when_first_cell_is_whitespace():
    """A leading whitespace-only cell reads as null and does not force String."""
    wb = xl.Workbook()
    ws = wb.active
    ws["A1"] = " "
    ws["A2"] = -2.2
    result = do_convert_data(wb, header_rows="")
    assert_table_equals(result, pyarrow.table({"A": [None, -2.2]}))
def test_float_convert_to_string_preserve_previously_ignored_whitespace():
    """When a column turns String, earlier whitespace cells keep their text."""
    wb = xl.Workbook()
    ws = wb.active
    ws["A1"] = 3.4
    ws["A2"] = " "
    ws["A3"] = "x"
    table, messages = do_convert_data(wb, header_rows="", include_stdout=True)
    assert_table_equals(table, pyarrow.table({"A": ["3.4", " ", "x"]}))
    # Whitespace isn't "counted" as Number, even though we treated it as a null Number
    assert messages == b"interpreted 1 Numbers as String; see row 0 column A\n"
def test_stop_after_byte_total_limit():
    """Conversion stops emitting rows once the total byte budget is exhausted."""
    wb = xl.Workbook()
    wb.active.append(["abcd", "efgh"])
    wb.active.append(["ijkl", "mnop"])
    table, messages = do_convert_data(
        wb, max_bytes_total=8, header_rows="", include_stdout=True
    )
    assert_table_equals(table, pyarrow.table({"A": ["abcd"], "B": ["efgh"]}))
    assert messages == b"stopped at limit of 8 bytes of data\n"
| 14,830 | 0 | 644 |
ff09700cf57e7d8296d2dca0aeb45a99125e1f13 | 31,256 | py | Python | src/fr/tagc/rainet/core/execution/processing/catrapid/ReadCatrapid.py | TAGC-Brun/RAINET-RNA | 4d5a6658c41d4ab28d7c3d168eed65fe79233b48 | [
"Linux-OpenIB"
] | null | null | null | src/fr/tagc/rainet/core/execution/processing/catrapid/ReadCatrapid.py | TAGC-Brun/RAINET-RNA | 4d5a6658c41d4ab28d7c3d168eed65fe79233b48 | [
"Linux-OpenIB"
] | null | null | null | src/fr/tagc/rainet/core/execution/processing/catrapid/ReadCatrapid.py | TAGC-Brun/RAINET-RNA | 4d5a6658c41d4ab28d7c3d168eed65fe79233b48 | [
"Linux-OpenIB"
] | null | null | null | import os
import argparse
import numpy as np
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
from fr.tagc.rainet.core.util.time.Timer import Timer
from fr.tagc.rainet.core.util.subprocess.SubprocessUtil import SubprocessUtil
#===============================================================================
# Started 20-May-2016
# Diogo Ribeiro
DESC_COMMENT = "Script to read, filter and process large catRAPID interaction files."
SCRIPT_NAME = "ReadCatrapid.py"
#===============================================================================
#===============================================================================
# General plan:
# 1) Parse catRAPID interaction file
# 2) apply interaction filters
# 3) write filtered interaction file, and other processed data files
#===============================================================================
#===============================================================================
# Processing notes:
# 1) To reduce memory consumption, the score values are rounded to 1 decimal.
# Thus, means are not precise
# 2) Filters are all applied on top of each other, first by score, then RNA, then protein, then interaction-based.
#===============================================================================
# Script entry point: parse command-line options, run the ReadCatrapid pipeline
# and report timing.  NOTE: Python 2 syntax (print statement).
if __name__ == "__main__":
    try:
        # Start chrono
        Timer.get_instance().start_chrono()
        print "STARTING " + SCRIPT_NAME
        #===============================================================================
        # Get input arguments, initialise class
        #===============================================================================
        parser = argparse.ArgumentParser(description= DESC_COMMENT)
        # positional args
        parser.add_argument('catRAPIDFile', metavar='catRAPIDFile', type=str,
                            help='Output file from catRAPID library all vs all.')
        parser.add_argument('outputFolder', metavar='outputFolder', type=str, help='Folder where to write output files.')
        # optional args (all filters default to "off"/empty)
        parser.add_argument('--interactionCutoff', metavar='interactionCutoff', type=str,
                            default = "OFF", help='Minimum catRAPID interaction propensity. Set as "OFF" if no filtering wanted.')
        parser.add_argument('--interactionFilterFile', metavar='interactionFilterFile', type=str,
                            default = "", help='TSV file with list of interacting pairs we want to keep, one pair per line. UniprotAC\tEnsemblTxID. No header.')
        parser.add_argument('--rnaFilterFile', metavar='rnaFilterFile', type=str,
                            default = "", help='File with list of RNAs we want to keep, one per line. No header.')
        parser.add_argument('--proteinFilterFile', metavar='proteinFilterFile', type=str,
                            default = "", help='File with list of Proteins we want to keep, one per line. No header.')
        parser.add_argument('--writeInteractions', metavar='writeInteractions', type=int,
                            default = 1, help='Whether to write interaction file after the filters.')
        parser.add_argument('--batchSize', metavar='batchSize', type=int,
                            default = 1000000, help='How many lines to process before writing to file (to avoid excessive memory consumption).')
        parser.add_argument('--writeNormalisedInteractions', metavar='writeNormalisedInteractions', type=int,
                            default = 0, help='Whether to write interaction file after the filters, normalised by max (unity-based normalisation) score for each RNA. --writeInteractions argument must also be 1.')
        parser.add_argument('--writeInteractionMatrix', metavar='writeInteractionMatrix', type=int,
                            default = 0, help='Whether to write interaction matrix file after the filters. --writeInteractions argument must also be 1.')
        parser.add_argument('--booleanInteraction', metavar='booleanInteraction', type=int,
                            default = 0, help='Whether to write interaction matrix file with 1 or 0 instead of score values. --writeInteractions and --writeInteractionMatrix argument must also be 1.')
        parser.add_argument('--sampleInteractions', metavar='sampleInteractions', type=int,
                            default = 0, help='Whether to write file with at least one interactions for each RNA and each protein. Output file can have more than X interactions for a protein/RNA since they are co-dependent. Applied after all other filters. Default = 0 (OFF).')
        #gets the arguments
        args = parser.parse_args( )
        # init
        readCatrapid = ReadCatrapid( args.catRAPIDFile, args.outputFolder, args.interactionCutoff, args.interactionFilterFile,
                                     args.rnaFilterFile, args.proteinFilterFile, args.writeInteractions, args.batchSize,
                                     args.writeNormalisedInteractions, args.writeInteractionMatrix, args.booleanInteraction, args.sampleInteractions)
        readCatrapid.run()
        # Stop the chrono
        Timer.get_instance().stop_chrono( "FINISHED " + SCRIPT_NAME )
    # Use RainetException to catch errors
    except RainetException as rainet:
        Logger.get_instance().error( "Error during execution of %s. Aborting :\n" % SCRIPT_NAME + rainet.to_string())
| 48.234568 | 281 | 0.559829 | import os
import argparse
import numpy as np
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
from fr.tagc.rainet.core.util.time.Timer import Timer
from fr.tagc.rainet.core.util.subprocess.SubprocessUtil import SubprocessUtil
#===============================================================================
# Started 20-May-2016
# Diogo Ribeiro
DESC_COMMENT = "Script to read, filter and process large catRAPID interaction files."
SCRIPT_NAME = "ReadCatrapid.py"
#===============================================================================
#===============================================================================
# General plan:
# 1) Parse catRAPID interaction file
# 2) apply interaction filters
# 3) write filtered interaction file, and other processed data files
#===============================================================================
#===============================================================================
# Processing notes:
# 1) To reduce memory consumption, the score values are rounded to 1 decimal.
# Thus, means are not precise
# 2) Filters are all applied on top of each other, first by score, then RNA, then protein, then interaction-based.
#===============================================================================
class ReadCatrapid(object):
    """Read, filter and post-process a (large) catRAPID all-vs-all interaction file.

    Filters are applied on top of each other, in order: minimum-score cutoff,
    RNA whitelist, protein whitelist, interacting-pair whitelist.  Outputs:
    the filtered interactions, per-RNA and per-protein score statistics, and
    optionally a min-max-normalised interaction file, an RNA x protein score
    matrix and a sample covering every RNA/protein at least once.

    Scores are rounded to 1 decimal before being aggregated (memory saving),
    so reported means/medians are approximate.
    """

    TEMP_STORED_INTERACTIONS_FILENAME = "/temp_storedInteractions_"
    STORED_INTERACTIONS_FILENAME = "/storedInteractions.tsv"
    SAMPLE_INTERACTIONS_FILENAME = "/sampleInteractions.tsv"
    PROTEIN_INTERACTIONS_FILENAME = "/proteinInteractions.tsv"
    RNA_INTERACTIONS_FILENAME = "/rnaInteractions.tsv"
    ALL_INTERACTIONS_FILTERED_TAG = "NA"  # value to give when an RNA or protein has all their interactions filtered with cutoff
    NORMALISED_STORED_INTERACTIONS_FILENAME = "/storedInteractionsNormalised.tsv"
    INTERACTIONS_SCORE_MATRIX = "/interaction_score_matrix.tsv"
    MAXIMUM_NUMBER_VIABLE_INTERACTIONS = 170000000  # maximum number of interactions writable for interaction matrix output #170M interactions = 23Gb

    def __init__(self, catrapid_file, output_folder, interaction_cutoff, interaction_filter_file, rna_filter_file, protein_filter_file,
                 write_interactions, batch_size, write_normalised_interactions, write_interaction_matrix, boolean_interaction, sample_interactions):
        """Store run options, validate option dependencies and create the output folder."""
        self.catRAPIDFile = catrapid_file
        self.outputFolder = output_folder
        self.interactionCutoff = interaction_cutoff
        self.interactionFilterFile = interaction_filter_file
        self.rnaFilterFile = rna_filter_file
        self.proteinFilterFile = protein_filter_file
        self.writeInteractions = write_interactions
        self.batchSize = batch_size
        self.writeNormalisedInteractions = write_normalised_interactions
        self.writeInteractionMatrix = write_interaction_matrix
        self.booleanInteraction = boolean_interaction
        self.sampleInteractions = sample_interactions
        # normalised / matrix / boolean outputs are all derived from the written interactions file
        if (write_normalised_interactions and write_interactions == 0) or (write_interaction_matrix and write_interactions == 0):
            raise RainetException( "ReadCatrapid.__init__ : --writeInteractions option must be on for --writeNormalisedInteractions to run.")
        if (boolean_interaction and write_interactions == 0) or (boolean_interaction and write_interaction_matrix == 0):
            raise RainetException( "ReadCatrapid.__init__ : --writeInteractions and --writeInteractionMatrix option must be on for --booleanInteractions to run.")
        # make output folder
        if not os.path.exists( self.outputFolder):
            os.mkdir( self.outputFolder)
        else:
            print( "__init__: Output folder already exists: %s" % self.outputFolder)

    # #
    # Read list of interacting pairs
    # @return set of interacting pairs we want to keep, empty if no file given
    def read_interaction_filter_file(self):
        """Return the set of wanted 'protID_txID' pairs, or an empty set if no file was given."""
        if self.interactionFilterFile != "":
            with open( self.interactionFilterFile, "r") as inFile:
                wantedPairs = { "_".join( line.strip().split("\t")) for line in inFile}
            print( "read_interaction_filter_file: read %s unique pairs interacting pairs." % len( wantedPairs))
            return wantedPairs
        else:
            return set()

    # #
    # Read list of wanted rnas
    # @return set of rnas we want to keep, empty if no file given
    def read_rna_filter_file(self):
        """Return the set of wanted RNA IDs, or an empty set if no file was given."""
        if self.rnaFilterFile != "":
            with open( self.rnaFilterFile, "r") as inFile:
                wantedList = { line.strip() for line in inFile }
            print( "read_interaction_filter_file: read %s unique wanted RNAs." % len( wantedList))
            return wantedList
        else:
            return set()

    # #
    # Read list of wanted proteins
    # @return set of proteins we want to keep, empty if no file given
    def read_protein_filter_file(self):
        """Return the set of wanted protein IDs, or an empty set if no file was given."""
        if self.proteinFilterFile != "":
            with open( self.proteinFilterFile, "r") as inFile:
                wantedList = { line.strip() for line in inFile }
            print( "read_interaction_filter_file: read %s unique wanted Proteins." % len( wantedList))
            return wantedList
        else:
            return set()

    # #
    # Read catrapid file, apply filters, and write processed output to files
    def read_catrapid_file( self, wanted_pairs, wanted_RNAs, wanted_proteins):
        """Stream the catRAPID file, apply all filters and write the main outputs.

        Writes the filtered interactions (in batches, then concatenated), the
        per-RNA and per-protein statistics files and optionally the sample file.
        Returns (proteinInteractionsMean, proteinInteractionsCounter).
        """
        #=======================================================================
        # Example file
        # sp|Q96DC8|ECHD3_HUMAN ENST00000579524   -12.33   0.10   0.00
        # sp|P10645|CMGA_HUMAN ENST00000516610   10.66   0.32   0.00
        # protein and rna separated by " ", other values separated by "\t"
        #
        # Protein is always on left side, RNA in the right side.
        # Assumption that there only one interaction between each Protein-RNA pair
        #=======================================================================
        #=======================================================================
        # initialising
        #=======================================================================
        # process interactionCutoff attribute
        if self.interactionCutoff == "OFF":
            self.interactionCutoff = float( "-inf")
        else:
            self.interactionCutoff = float( self.interactionCutoff)
        # check if we need to filter by wanted pairs, proteins, rnas
        if len( wanted_pairs) > 0: interactionFilterBool = 1
        else: interactionFilterBool = 0
        if len( wanted_RNAs) > 0: rnaFilterBool = 1
        else: rnaFilterBool = 0
        if len( wanted_proteins) > 0: proteinFilterBool = 1
        else: proteinFilterBool = 0
        ### Protein containers ####
        # # approach1: initialise protein, and sum scores throughout the file, and keep count of protein occurrences, then in the end calculate mean
        # proteinInteractionsSum = {} # key -> protein ID, value -> sum of scores
        proteinInteractionsCounter = {} # key -> protein ID, value -> number of times protein appears
        proteinInteractionsMean = {}
        # approach2: initialise protein, create a dictionary for each protein which contains the frequencies of each score instead of list of scores, in order to save memory
        proteinScoreFrequencies = {} # key -> protein ID, value -> dict. key -> score, value -> frequency of score
        allProtSet = set()
        ### RNA containers ####
        # approach: initialise RNA, create a dictionary for each RNA which contains the frequencies of each score instead of list of scores, in order to save memory
        rnaScoreFrequencies = {} # key -> RNA ID, value -> dict. key -> score, value -> frequency of score
        allRNASet = set()
        lineCount = 0
        outFileCount = 1
        # variable used for sampling interactions
        itemCount = {} # key -> protein or RNA ID, value -> frequency
        interactionSample = set()
        # variable which will store text to be written into files
        interactionText = ""
        #=======================================================================
        # read file
        #=======================================================================
        with open( self.catRAPIDFile, "r") as inFile:
            for line in inFile:
                # every X lines, write to file to liberate memory
                if lineCount % self.batchSize == 0 and lineCount != 0:
                    Timer.get_instance().step("read_catrapid_file: reading %s lines.." % lineCount)
                    # dump accumulated interaction text into a numbered temp file
                    if self.writeInteractions:
                        with open( self.outputFolder + ReadCatrapid.TEMP_STORED_INTERACTIONS_FILENAME + str( outFileCount) + ".tsv", "w") as outFile:
                            outFile.write( interactionText)
                        interactionText = ""
                        outFileCount += 1
                lineCount += 1 # this has to be before the filterings ( 'continue')
                spl = line.split(" ")
                protID = spl[0].split( "|")[1]
                spl2 = spl[1].split( "\t")
                rnaID = spl2[0]
                score = float( spl2[1])
                scoreRounded = round( score, 1)
                pair = "_".join( [protID, rnaID])
                # all-seen sets are filled BEFORE filtering, so filtered-out items
                # still appear (tagged NA) in the statistics files
                allRNASet.add( rnaID)
                allProtSet.add( protID)
                #### Apply filterings ####
                # filter by score
                if score < self.interactionCutoff:
                    continue
                # if filtering by wanted RNAs and it is not present
                if rnaFilterBool and rnaID not in wanted_RNAs:
                    continue
                # if filtering by wanted Proteins and it is not present
                if proteinFilterBool and protID not in wanted_proteins:
                    continue
                # if filtering by wanted pairs and it is not present
                if interactionFilterBool and pair not in wanted_pairs:
                    continue
                # if sample interaction filtering is on
                if self.sampleInteractions:
                    # initialise sample counter
                    if protID not in itemCount: itemCount[ protID] = 0
                    if rnaID not in itemCount: itemCount[ rnaID] = 0
                    # only add new sample interactions any of the items still need more samples
                    # this certifies that each item has at least one interaction, unless they are excluded after filter
                    if itemCount[ protID] < 1 or itemCount[ rnaID] < 1:
                        if line not in interactionSample:
                            interactionSample.add( line)
                            itemCount[ protID] += 1
                            itemCount[ rnaID] += 1
                #### Store interaction ####
                interactionText+= line
                ## Protein side
                # # for calculating average score per protein
                if protID not in proteinScoreFrequencies:
                    proteinScoreFrequencies[ protID] = {}
                # producing dictionary with score frequencies for a protein
                if scoreRounded not in proteinScoreFrequencies[ protID]:
                    proteinScoreFrequencies[ protID][ scoreRounded] = 0
                proteinScoreFrequencies[ protID][ scoreRounded] += 1
                ## RNA side
                if rnaID not in rnaScoreFrequencies:
                    rnaScoreFrequencies[ rnaID] = {}
                if scoreRounded not in rnaScoreFrequencies[ rnaID]:
                    rnaScoreFrequencies[ rnaID][ scoreRounded] = 0
                rnaScoreFrequencies[ rnaID][ scoreRounded] += 1
        # write remaining interactions into file
        if self.writeInteractions:
            with open( self.outputFolder + ReadCatrapid.TEMP_STORED_INTERACTIONS_FILENAME + str( outFileCount) + ".tsv", "w") as outFile:
                outFile.write( interactionText)
        print( "read_catrapid_file: read %s lines.." % lineCount)
        # join stored interaction files
        if self.writeInteractions:
            # cat files
            cmd = "cat %s* > %s" % ( self.outputFolder + ReadCatrapid.TEMP_STORED_INTERACTIONS_FILENAME, self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME )
            os.system( cmd)
            # remove temp files
            cmd = "rm %s*" % ( self.outputFolder + ReadCatrapid.TEMP_STORED_INTERACTIONS_FILENAME)
            os.system( cmd)
        #=======================================================================
        # Write output file
        #=======================================================================
        ### RNA file ###
        with open( self.outputFolder + ReadCatrapid.RNA_INTERACTIONS_FILENAME, "w") as outFile:
            # change header here
            outFile.write("ensembl_id\tmean_score\tmedian_score\tmin_score\tmax_score\tstd_score\tcount\n")
            for rna in allRNASet:
                if rna in rnaScoreFrequencies:
                    # recreate all original values for a rna
                    listOfScores = [ scoreVal for scoreVal in rnaScoreFrequencies[ rna] for i in range( rnaScoreFrequencies[ rna][ scoreVal])]
                    mean = np.mean( listOfScores)
                    median = np.median( listOfScores)
                    minimum = np.min( listOfScores)
                    maximum = np.max( listOfScores)
                    std = np.std( listOfScores)
                    # number of Proteins/interactions above filter
                    count = len(listOfScores)
                    outFile.write( "%s\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%s\n" % (rna, mean, median, minimum, maximum, std, count) )
                else:
                    # RNA had all its interactions filtered out: write NA row
                    outFile.write( "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( rna,
                                                                      ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG, ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG,
                                                                      ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG, ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG,
                                                                      ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG, ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG ) )
        ### Protein file ###
        with open( self.outputFolder + ReadCatrapid.PROTEIN_INTERACTIONS_FILENAME, "w") as outFile:
            # change header here
            outFile.write("uniprotac\tmean_score\tmedian_score\tmin_score\tmax_score\tstd_score\tcount\n")
            # calculate protein score metrics
            for prot in allProtSet:
                if prot in proteinScoreFrequencies:
                    # recreate all original values for a protein
                    listOfScores = [ scoreVal for scoreVal in proteinScoreFrequencies[ prot] for i in range( proteinScoreFrequencies[ prot][ scoreVal])]
                    mean = np.mean( listOfScores)
                    median = np.median( listOfScores)
                    minimum = np.min( listOfScores)
                    maximum = np.max( listOfScores)
                    std = np.std( listOfScores)
                    # number of RNAs above filter
                    count = len(listOfScores)
                    proteinInteractionsMean[ prot] = mean
                    proteinInteractionsCounter[ prot] = count
                    outFile.write( "%s\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%s\n" % (prot, mean, median, minimum, maximum, std, count) )
                else:
                    # protein had all its interactions filtered out: write NA row
                    outFile.write( "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( prot,
                                                                      ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG, ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG,
                                                                      ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG, ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG,
                                                                      ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG, ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG ) )
        if self.sampleInteractions:
            with open( self.outputFolder + ReadCatrapid.SAMPLE_INTERACTIONS_FILENAME, "w") as outFile:
                for interaction in interactionSample:
                    outFile.write( interaction)
        self.itemCount = itemCount
        self.interactionSample = interactionSample
        return proteinInteractionsMean, proteinInteractionsCounter

    # #
    # Function to write extra output file with interactions normalised by the max for each RNA,
    # (using unity-based normalisation, aka min-max normalisation)
    # This function runs after writing non-normalised interactions file and rna interactions file.
    def write_normalised_interactions( self):
        """Rewrite the stored interactions with scores min-max normalised per RNA."""
        if not os.path.exists( self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME):
            raise RainetException( "ReadCatrapid.write_normalised_interactions : output interactions file not found. %s" % self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME)
        if not os.path.exists( self.outputFolder + ReadCatrapid.RNA_INTERACTIONS_FILENAME):
            raise RainetException( "ReadCatrapid.write_normalised_interactions : output rna interactions file not found. %s" % self.outputFolder + ReadCatrapid.RNA_INTERACTIONS_FILENAME)
        #===============================================================================
        # Get maximum and minimum scores for each transcript
        #===============================================================================
        # e.g. format: ensembl_id mean_score median_score min_score max_score std_score count
        # ENST00000388090 -15.11 -16.10 -45.90 26.90 10.46 1978
        # column indexes
        txIDCol = 0
        minCol = 3
        maxCol = 4
        rnaMax = {} # key -> transcriptID, val -> max score among all interactions
        rnaMin = {} # key -> transcriptID, val -> min score among all interactions
        with open( self.outputFolder + ReadCatrapid.RNA_INTERACTIONS_FILENAME, "r") as inFile:
            inFile.readline() # skip header
            for line in inFile:
                line = line.strip()
                spl = line.split( "\t")
                transcriptID = spl[ txIDCol]
                minimum = spl[ minCol]
                maximum = spl[ maxCol]
                # if this RNA was filtered out, it will also not feature in the interactions file.
                if minimum == ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG or maximum == ReadCatrapid.ALL_INTERACTIONS_FILTERED_TAG:
                    continue
                minimum = float( minimum)
                maximum = float( maximum)
                if transcriptID not in rnaMax:
                    rnaMax[ transcriptID] = maximum
                else:
                    raise RainetException( "ReadCatrapid.write_normalised_interactions : duplicate transcript ID. %s" % transcriptID)
                if transcriptID not in rnaMin:
                    rnaMin[ transcriptID] = minimum
                else:
                    raise RainetException( "ReadCatrapid.write_normalised_interactions : duplicate transcript ID. %s" % transcriptID)
        #===============================================================================
        # Apply normalisation
        #===============================================================================
        idColumn = 0
        scoreColumn = 1
        # e.g. format: sp|Q7Z419|R144B_HUMAN ENST00000542804       20.56   0.54    0.00
        outFile = open( self.outputFolder + ReadCatrapid.NORMALISED_STORED_INTERACTIONS_FILENAME, "w")
        with open( self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME, "r") as inFile:
            for line in inFile:
                line = line.strip()
                spl = line.split( "\t")
                # scores were rounded to 1 decimal when stats were computed, round here too
                score = round( float( spl[ scoreColumn]), 1)
                transcriptID = spl[ idColumn].split( " ")[1]
                # min-max normalisation
                minimum = rnaMin[ transcriptID]
                maximum = rnaMax[ transcriptID]
                assert score >= minimum
                assert score <= maximum
                normalisedScore = self._min_max_normalisation( score, minimum, maximum)
                # rewrite output file
                text = "%s\t%.2f\t%s\n" % ( "\t".join( spl[ :scoreColumn]), normalisedScore, "\t".join( spl[ scoreColumn+1:]))
                outFile.write( text)
        outFile.close()

    # function to calculate min-max normalisation (unity-based normalisation)
    def _min_max_normalisation(self, x, minimum, maximum):
        """Return x rescaled to [0, 1] relative to the [minimum, maximum] range."""
        normVal = float( x - minimum) / float( maximum - minimum)
        assert normVal >= 0
        assert normVal <= 1
        return normVal

    # #
    # Function to write matrix output file for interactions after filtering.
    # This function runs after writing interactions file.
    def write_matrix_output( self):
        """Write the filtered interactions as an RNA (rows) x protein (columns) matrix."""
        #===================================================================
        # Initialising
        #===================================================================
        if not os.path.exists( self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME):
            raise RainetException( "ReadCatrapid.write_matrix_output : output interactions file not found. %s" % self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME)
        ## Check amount of interactions to not overload the system
        cmd = "wc -l %s" % ( self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME)
        stout = SubprocessUtil.run_command( cmd, verbose = 0, return_stdout = 1)
        try:
            nlines = int( stout.split( " ")[0])
        except Exception:
            # fixed: original message had no %s placeholder, so the '%' raised TypeError
            raise RainetException( "Could not calculate number of lines in filtered stored interactions: %s" % cmd)
        if nlines > ReadCatrapid.MAXIMUM_NUMBER_VIABLE_INTERACTIONS:
            raise RainetException( "ReadCatrapid.write_matrix_output : number of interactions to write matrix is too large to be computable: %s interactions" % nlines)
        print( "Writing matrix with %s interactions" % nlines)
        #===================================================================
        # Read filtered interactions file and store interactions into memory
        #===================================================================
        # create data structures with all proteins, RNAs and scores of pairs
        setInteractingRNAs = set()
        setInteractingProts = set()
        dictPairs = {}
        idColumn = 0
        scoreColumn = 1
        # e.g. format: sp|Q7Z419|R144B_HUMAN ENST00000542804       20.56   0.54    0.00
        with open( self.outputFolder + ReadCatrapid.STORED_INTERACTIONS_FILENAME, "r") as inFile:
            for line in inFile:
                line = line.strip()
                spl = line.split( "\t")
                score = float( spl[ scoreColumn])
                spl2 = spl[ idColumn].split( " ")
                txID = spl2[ 1]
                protID = spl2[ 0]
                pair = txID + "|" + protID
                if pair not in dictPairs:
                    dictPairs[pair] = score
                else:
                    raise RainetException("ReadCatrapid.write_matrix_output: duplicate interaction %s" % pair)
                setInteractingRNAs.add( txID)
                setInteractingProts.add( protID)
        #===================================================================
        # Write file with interaction scores for each protein-RNA pair, matrix format
        #===================================================================
        # E.g.
        # RNAs Prot1 Prot2
        # RNA1 10.4 0.3
        # RNA2 32.6 -34.5
        outHandler = open( self.outputFolder + ReadCatrapid.INTERACTIONS_SCORE_MATRIX, "w")
        # use sorting to keep headers in place
        sortedSetInteractingProts = sorted( setInteractingProts)
        sortedSetInteractingRNAs = sorted( setInteractingRNAs)
        # write header with protein IDs
        outHandler.write( "RNAs")
        for prot in sortedSetInteractingProts:
            outHandler.write( "\t%s" % prot )
        outHandler.write( "\n")
        # write bulk of file, one row per rna, one column per protein
        for rna in sortedSetInteractingRNAs:
            text = rna
            for prot in sortedSetInteractingProts:
                tag = rna + "|" + prot
                if tag in dictPairs:
                    if self.booleanInteraction:
                        score = "1"
                    else:
                        score = dictPairs[tag]
                else:
                    if self.booleanInteraction:
                        score = "0"
                    else:
                        score = "NA"
                text+= "\t%s" % score
            text+= "\n"
            outHandler.write( text)
        outHandler.close()

    # run functions in proper order
    def run(self):
        """Execute the pipeline: read filters, parse the file, then optional outputs."""
        Timer.get_instance().step( "reading filter files..")
        wantedPairs = self.read_interaction_filter_file( )
        wantedRNAs = self.read_rna_filter_file( )
        wantedProteins = self.read_protein_filter_file( )
        Timer.get_instance().step( "reading catrapid interaction file..")
        self.read_catrapid_file( wantedPairs, wantedRNAs, wantedProteins)
        if self.writeNormalisedInteractions:
            Timer.get_instance().step( "writing normalised interaction file..")
            self.write_normalised_interactions( )
        if self.writeInteractionMatrix:
            Timer.get_instance().step( "writing interaction matrix file..")
            self.write_matrix_output()
# Script entry point: parse command-line options, run the ReadCatrapid pipeline
# and report timing.  NOTE: Python 2 syntax (print statement).
if __name__ == "__main__":
    try:
        # Start chrono
        Timer.get_instance().start_chrono()
        print "STARTING " + SCRIPT_NAME
        #===============================================================================
        # Get input arguments, initialise class
        #===============================================================================
        parser = argparse.ArgumentParser(description= DESC_COMMENT)
        # positional args
        parser.add_argument('catRAPIDFile', metavar='catRAPIDFile', type=str,
                            help='Output file from catRAPID library all vs all.')
        parser.add_argument('outputFolder', metavar='outputFolder', type=str, help='Folder where to write output files.')
        # optional args (all filters default to "off"/empty)
        parser.add_argument('--interactionCutoff', metavar='interactionCutoff', type=str,
                            default = "OFF", help='Minimum catRAPID interaction propensity. Set as "OFF" if no filtering wanted.')
        parser.add_argument('--interactionFilterFile', metavar='interactionFilterFile', type=str,
                            default = "", help='TSV file with list of interacting pairs we want to keep, one pair per line. UniprotAC\tEnsemblTxID. No header.')
        parser.add_argument('--rnaFilterFile', metavar='rnaFilterFile', type=str,
                            default = "", help='File with list of RNAs we want to keep, one per line. No header.')
        parser.add_argument('--proteinFilterFile', metavar='proteinFilterFile', type=str,
                            default = "", help='File with list of Proteins we want to keep, one per line. No header.')
        parser.add_argument('--writeInteractions', metavar='writeInteractions', type=int,
                            default = 1, help='Whether to write interaction file after the filters.')
        parser.add_argument('--batchSize', metavar='batchSize', type=int,
                            default = 1000000, help='How many lines to process before writing to file (to avoid excessive memory consumption).')
        parser.add_argument('--writeNormalisedInteractions', metavar='writeNormalisedInteractions', type=int,
                            default = 0, help='Whether to write interaction file after the filters, normalised by max (unity-based normalisation) score for each RNA. --writeInteractions argument must also be 1.')
        parser.add_argument('--writeInteractionMatrix', metavar='writeInteractionMatrix', type=int,
                            default = 0, help='Whether to write interaction matrix file after the filters. --writeInteractions argument must also be 1.')
        parser.add_argument('--booleanInteraction', metavar='booleanInteraction', type=int,
                            default = 0, help='Whether to write interaction matrix file with 1 or 0 instead of score values. --writeInteractions and --writeInteractionMatrix argument must also be 1.')
        parser.add_argument('--sampleInteractions', metavar='sampleInteractions', type=int,
                            default = 0, help='Whether to write file with at least one interactions for each RNA and each protein. Output file can have more than X interactions for a protein/RNA since they are co-dependent. Applied after all other filters. Default = 0 (OFF).')
        #gets the arguments
        args = parser.parse_args( )
        # init
        readCatrapid = ReadCatrapid( args.catRAPIDFile, args.outputFolder, args.interactionCutoff, args.interactionFilterFile,
                                     args.rnaFilterFile, args.proteinFilterFile, args.writeInteractions, args.batchSize,
                                     args.writeNormalisedInteractions, args.writeInteractionMatrix, args.booleanInteraction, args.sampleInteractions)
        readCatrapid.run()
        # Stop the chrono
        Timer.get_instance().stop_chrono( "FINISHED " + SCRIPT_NAME )
    # Use RainetException to catch errors
    except RainetException as rainet:
        Logger.get_instance().error( "Error during execution of %s. Aborting :\n" % SCRIPT_NAME + rainet.to_string())
| 23,717 | 1,975 | 23 |
b6d0c601f0b3ca1259885a7dddccd7cb50af0917 | 2,047 | py | Python | watchdog_s3.py | giuliocalzolari/watchdog_s3 | 2578791fd82114afe79c77469e2d969e162b29a2 | [
"MIT"
] | 3 | 2018-02-28T16:19:20.000Z | 2021-11-05T06:47:51.000Z | watchdog_s3.py | giuliocalzolari/watchdog_s3 | 2578791fd82114afe79c77469e2d969e162b29a2 | [
"MIT"
] | null | null | null | watchdog_s3.py | giuliocalzolari/watchdog_s3 | 2578791fd82114afe79c77469e2d969e162b29a2 | [
"MIT"
] | 1 | 2020-08-15T02:19:53.000Z | 2020-08-15T02:19:53.000Z | #!/usr/bin/env python
import sys
import time
import logging
import boto3
from botocore.exceptions import ClientError
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
logging.basicConfig(level=logging.INFO,format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
s3 = boto3.client('s3')
S3_DST_BUCKET = "s3-sync-example"
if __name__ == "__main__":
path = sys.argv[1] if len(sys.argv) > 1 else './folder'
observer = Observer()
observer.schedule(S3Handler(), path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
| 27.293333 | 103 | 0.602345 | #!/usr/bin/env python
import sys
import time
import logging
import boto3
from botocore.exceptions import ClientError
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
# Timestamped root logger; the AWS SDK loggers are extremely chatty, so
# silence everything below CRITICAL for boto3/botocore.
logging.basicConfig(level=logging.INFO,format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
# Module-wide S3 client and destination bucket used by the event handler below.
# NOTE(review): credentials/region come from the default boto3 resolution chain.
s3 = boto3.client('s3')
S3_DST_BUCKET = "s3-sync-example"
class S3Handler(PatternMatchingEventHandler):
    """Mirror local filesystem events into S3_DST_BUCKET.

    Files are uploaded on create/modify and removed on delete; the S3 object
    key is the event path relative to the watched directory.
    """
    # patterns = ["*.xml", "*.lxml"]

    @staticmethod
    def _s3_key(path):
        """Map a local event path to its S3 object key.

        Only a literal leading "./" is removed. The previous implementation
        used str.lstrip("./"), which strips ANY leading run of '.' and '/'
        characters and therefore corrupted keys for paths such as
        "..cache/file" or absolute paths.
        """
        return path[2:] if path.startswith("./") else path

    def process(self, event):
        """
        event.event_type
            'modified' | 'created' | 'moved' | 'deleted'
        event.is_directory
            True | False
        event.src_path
            path/to/observed/file
        """
        # Only files carry content worth uploading; directory events are ignored.
        if not event.is_directory:
            s3_key = self._s3_key(event.src_path)
            try:
                with open(event.src_path, 'rb') as data:
                    s3.upload_fileobj(data, S3_DST_BUCKET, s3_key)
            except ClientError as e:
                logging.error(e)

    def on_any_event(self, event):
        """Log every file event for traceability."""
        if not event.is_directory:
            s3_key = self._s3_key(event.src_path)
            logging.info("[{}] {}".format(event.event_type, s3_key))

    def on_modified(self, event):
        """Re-upload a file when its content changes."""
        self.process(event)

    def on_created(self, event):
        """Upload a newly created file."""
        self.process(event)

    def on_deleted(self, event):
        """Remove the mirrored object (S3 delete is idempotent)."""
        try:
            s3.delete_object(
                Bucket=S3_DST_BUCKET,
                Key=self._s3_key(event.src_path),
            )
        except ClientError as e:
            logging.error(e)
if __name__ == "__main__":
    # Directory to watch comes from the first CLI argument (default: ./folder).
    path = sys.argv[1] if len(sys.argv) > 1 else './folder'
    observer = Observer()
    # Forward all filesystem events below `path` (recursively) to the S3 mirror.
    observer.schedule(S3Handler(), path, recursive=True)
    observer.start()
    try:
        # The observer runs on its own thread; idle here until Ctrl-C.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| 451 | 760 | 23 |
bf1f7972df9e38d244077c4f38d14e906f106268 | 177 | py | Python | tiger_pl/__main__.py | dmzobel/tiger | bc3b936bd596235360f3cd8f02c9746279af00ed | [
"MIT"
] | null | null | null | tiger_pl/__main__.py | dmzobel/tiger | bc3b936bd596235360f3cd8f02c9746279af00ed | [
"MIT"
] | null | null | null | tiger_pl/__main__.py | dmzobel/tiger | bc3b936bd596235360f3cd8f02c9746279af00ed | [
"MIT"
] | null | null | null | import argparse
from tiger_pl import Tiger
# Minimal CLI wrapper: parse a single positional argument (path to a Tiger
# source file), run it through the Tiger interpreter and print the result.
parser = argparse.ArgumentParser()
parser.add_argument("filename")
args = parser.parse_args()
print(Tiger(args.filename).execute())
| 19.666667 | 37 | 0.79096 | import argparse
from tiger_pl import Tiger
# Command-line entry point for the Tiger interpreter: takes exactly one
# positional argument (a Tiger source file), executes it and prints the result.
cli_parser = argparse.ArgumentParser()
cli_parser.add_argument("filename")
cli_args = cli_parser.parse_args()
print(Tiger(cli_args.filename).execute())
| 0 | 0 | 0 |
95d68597ab6f351f4854c1f350aad77f05ff8017 | 5,275 | py | Python | adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/bia_application_interface.py | ArrowElectronics/Vital-Signs-Monitoring | ba43fe9a116d94170561433910fd7bffba5726e7 | [
"Unlicense"
] | 5 | 2021-06-13T17:11:19.000Z | 2021-12-01T18:20:38.000Z | adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/bia_application_interface.py | ArrowElectronics/Vital-Signs-Monitoring | ba43fe9a116d94170561433910fd7bffba5726e7 | [
"Unlicense"
] | null | null | null | adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/bia_application_interface.py | ArrowElectronics/Vital-Signs-Monitoring | ba43fe9a116d94170561433910fd7bffba5726e7 | [
"Unlicense"
] | 1 | 2022-01-08T15:01:44.000Z | 2022-01-08T15:01:44.000Z | #!/usr/bin/env python3
from ctypes import *
import common_application_interface
import common_sensor_interface
| 33.386076 | 79 | 0.660474 | #!/usr/bin/env python3
from ctypes import *
import common_application_interface
import common_sensor_interface
class M2M2_BIA_APP_CMD_ENUM_t(c_uint8):
    """M2M2 command codes for the BIA application (request/response pairs)."""
    # NOTE(review): values presumably mirror the firmware's command enum
    # (REQ/RESP codes in adjacent pairs starting at 0x42) -- keep in sync.
    _M2M2_BIA_APP_CMD_LOWEST = 0x40
    M2M2_BIA_APP_CMD_SWEEP_FREQ_ENABLE_REQ = 0x42
    M2M2_BIA_APP_CMD_SWEEP_FREQ_ENABLE_RESP = 0x43
    M2M2_BIA_APP_CMD_SWEEP_FREQ_DISABLE_REQ = 0x44
    M2M2_BIA_APP_CMD_SWEEP_FREQ_DISABLE_RESP = 0x45
    M2M2_BIA_APP_CMD_SET_DFT_NUM_REQ = 0x46
    M2M2_BIA_APP_CMD_SET_DFT_NUM_RESP = 0x47
    M2M2_BIA_APP_CMD_SET_HS_RTIA_CAL_REQ = 0x48
    M2M2_BIA_APP_CMD_SET_HS_RTIA_CAL_RESP = 0x49
    M2M2_DCB_COMMAND_FDS_STATUS_REQ = 0x4A
    M2M2_DCB_COMMAND_FDS_STATUS_RESP = 0x4B
    M2M2_APP_COMMON_CMD_DCB_TIMING_INFO_REQ = 0x4C
    M2M2_APP_COMMON_CMD_DCB_TIMING_INFO_RESP = 0x4D
    M2M2_BCM_APP_CMD_ALGO_STREAM_RESP = 0x4E
    M2M2_BIA_APP_CMD_LOAD_DCFG_REQ = 0x4F
    M2M2_BIA_APP_CMD_LOAD_DCFG_RESP = 0x50
    M2M2_BIA_APP_COMMON_CMD_WRITE_DCFG_REQ = 0x51
    M2M2_BIA_APP_COMMON_CMD_WRITE_DCFG_RESP = 0x52
    M2M2_BIA_APP_COMMON_CMD_READ_DCFG_REQ = 0x53
    M2M2_BIA_APP_COMMON_CMD_READ_DCFG_RESP = 0x54
class M2M2_SENSOR_BIA_NSAMPLES_ENUM_t(c_uint8):
    """Number of BIA samples packed into one stream packet (matches bia_app_stream_t's 4-element array)."""
    M2M2_SENSOR_BIA_NSAMPLES = 0x4
class M2M2_SENSOR_BIA_RAW_DATA_TYPES_ENUM_t(c_uint8):
    """Payload data-type tag for BIA stream packets (single variant defined)."""
    M2M2_SENSOR_BIA_DATA = 0x0
class M2M2_SENSOR_BIA_SWEEP_FREQ_INDEX_ENUM_t(c_uint8):
    """Excitation-frequency indices for the BIA frequency sweep."""
    # NOTE(review): 0xFF is out of sequence with the 0x0-0x4 indices below --
    # presumably a sentinel selecting the default 50 kHz setting; confirm
    # against the firmware definition.
    M2M2_SENSOR_BIA_FREQ_50KHZ = 0xFF
    M2M2_SENSOR_BIA_FREQ_1000HZ = 0x0
    M2M2_SENSOR_BIA_FREQ_3760HZ = 0x1
    M2M2_SENSOR_BIA_FREQ_14140HZ = 0x2
    M2M2_SENSOR_BIA_FREQ_53180HZ = 0x3
    M2M2_SENSOR_BIA_FREQ_200KHZ = 0x4
class M2M2_SENSOR_BIA_APP_INFO_BITSET_ENUM_t(c_uint8):
    """Electrode leads-on/leads-off status reported with each BIA stream packet."""
    M2M2_SENSOR_BIA_APP_INFO_BITSET_LEADSOFF = 0x0
    M2M2_SENSOR_BIA_APP_INFO_BITSET_LEADSON = 0x1
class bia_app_set_dft_num_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
("dftnum", c_uint16),
]
class bia_app_lib_state_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
("states", c_uint8 * 10),
]
class bia_app_lcfg_op_t(Structure):
fields = [
("field", c_uint8),
("value", c_float),
]
class bia_app_dcb_lcfg_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
]
class bia_app_lcfg_op_hdr_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
("num_ops", c_uint8),
("ops", bia_app_lcfg_op_t * 0),
]
class bia_app_dcfg_op_t(Structure):
fields = [
("field", c_uint32),
("value", c_uint32),
]
class bia_app_dcfg_op_hdr_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
("num_ops", c_uint8),
("ops", bia_app_dcfg_op_t * 0),
]
class bia_data_set_t(Structure):
    """One BIA (bio-impedance) sample: complex impedance (real/imaginary
    parts) plus the excitation frequency used and a capture timestamp."""
    # NOTE(review): ctypes Structures normally declare `_fields_`; this table
    # is named `fields` (matching its siblings in this file) and so is not
    # registered with ctypes -- confirm against the original generator/header.
    fields = [
              ("timestamp", c_uint32),
              ("real", c_int32),
              ("img", c_int32),
              # Was `uint32_t` -- a C type name that is undefined in Python and
              # raised NameError at import time; c_uint32 is the ctypes spelling.
              ("excitation_freq", c_uint32),
              ]
class bia_app_stream_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_data_stream_hdr_t),
("datatype", c_uint8),
("bia_info", M2M2_SENSOR_BIA_APP_INFO_BITSET_ENUM_t),
("bia_data", bia_data_set_t * 4),
]
class m2m2_bia_app_sweep_freq_resp_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
]
class bia_app_hs_rtia_sel_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
("hsritasel", c_uint16),
]
class bcm_app_algo_out_stream_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_data_stream_hdr_t),
("ffm_estimated",c_float),
("bmi",c_float),
("fat_percent",c_float),
("time_stamp", c_ulong),
]
class m2m2_dcb_fds_status_info_req_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
]
class m2m2_dcb_fds_timing_info_req_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
]
class m2m2_dcb_fds_timing_info_resp_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
("adi_dcb_clear_entries_time", c_uint16),
("adi_dcb_check_entries_time", c_uint16),
("adi_dcb_delete_record_time", c_uint16),
("adi_dcb_read_entry_time", c_uint16),
("adi_dcb_update_entry_time", c_uint16),
]
class m2m2_dcb_fds_status_info_resp_t(Structure):
fields = [
(None, common_application_interface._m2m2_app_common_cmd_t),
("dirty_records", c_uint16),
("open_records", c_uint16),
("valid_records", c_uint16),
("pages_available", c_uint16),
("num_blocks", c_uint16),
("blocks_free", c_uint16),
]
| 0 | 4,678 | 483 |
2570e87ff7b6b4f4aaed4a328b7361c96668d859 | 1,409 | py | Python | setup.py | Adalyia/aiowowapi | 660297d00273468b336749e8a079f7292a6be4d8 | [
"MIT"
] | null | null | null | setup.py | Adalyia/aiowowapi | 660297d00273468b336749e8a079f7292a6be4d8 | [
"MIT"
] | null | null | null | setup.py | Adalyia/aiowowapi | 660297d00273468b336749e8a079f7292a6be4d8 | [
"MIT"
] | null | null | null | from setuptools import setup
readme = ''
with open('README.rst') as f:
readme = f.read()
requirements = [
'aiohttp>=3.7.0,<3.8.0'
]
extras_require = {
'docs': [
'sphinx>=4.1.2',
'sphinx_rtd_theme>=0.5.2',
]
}
packages = [
'aiowowapi',
'aiowowapi.retail',
]
setup(
name='aiowowapi',
author='Adalyia',
url='https://github.com/Adalyia/aiowowapi',
project_urls={
"Documentation": "https://docs.adalyia.com/wowapi",
"Issue tracker": "https://github.com/Adalyia/aiowowapi/issues",
},
version='1.0.3',
packages=packages,
license='MIT',
description='An async ready client library for the World of Warcraft APIs',
long_description=readme,
long_description_content_type="text/x-rst",
include_package_data=True,
install_requires=requirements,
extras_require=extras_require,
python_requires='>=3.8.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
) | 25.618182 | 79 | 0.631654 | from setuptools import setup
readme = ''
with open('README.rst') as f:
readme = f.read()
requirements = [
'aiohttp>=3.7.0,<3.8.0'
]
extras_require = {
'docs': [
'sphinx>=4.1.2',
'sphinx_rtd_theme>=0.5.2',
]
}
packages = [
'aiowowapi',
'aiowowapi.retail',
]
setup(
name='aiowowapi',
author='Adalyia',
url='https://github.com/Adalyia/aiowowapi',
project_urls={
"Documentation": "https://docs.adalyia.com/wowapi",
"Issue tracker": "https://github.com/Adalyia/aiowowapi/issues",
},
version='1.0.3',
packages=packages,
license='MIT',
description='An async ready client library for the World of Warcraft APIs',
long_description=readme,
long_description_content_type="text/x-rst",
include_package_data=True,
install_requires=requirements,
extras_require=extras_require,
python_requires='>=3.8.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
) | 0 | 0 | 0 |
948ced1d7fbe483c56ec4e04a79fdbbf44a8dcd3 | 5,329 | py | Python | redirect_demo/settings.py | mllrsohn/django-cms-redirects | 3398528e44594adb708aa090d5b7867f619db10e | [
"BSD-3-Clause"
] | 8 | 2015-02-10T20:30:26.000Z | 2020-05-31T20:20:51.000Z | redirect_demo/settings.py | mllrsohn/django-cms-redirects | 3398528e44594adb708aa090d5b7867f619db10e | [
"BSD-3-Clause"
] | 5 | 2017-04-10T07:41:45.000Z | 2021-12-20T08:49:35.000Z | redirect_demo/settings.py | mllrsohn/django-cms-redirects | 3398528e44594adb708aa090d5b7867f619db10e | [
"BSD-3-Clause"
] | 8 | 2015-04-16T21:25:55.000Z | 2018-09-27T11:15:12.000Z | # Django settings for redirect_demo project.
import os
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
gettext = lambda s: s
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'redirect_demo.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*^aor)8+d(lg_#ezg0&8sc&&pju^18#t=clw-2ief&q#+%(s*n'
LANGUAGE_CODE = 'en'
CMS_TEMPLATES = (
('home.html', gettext('Homepage')),
)
CMS_PLACEHOLDER_CONF = {
'footer-address-content': {
'plugins': ('TextPlugin',),
'name':gettext('Footer Link List'),
},
'footer-link-list': {
'plugins': ('FilerImagePlugin',),
'name':gettext('Footer Link List'),
},
'right-image': {
'plugins': ('FilerImagePlugin',),
'name':gettext('Right Image'),
},
}
LANGUAGES = (
('en', gettext('English')),
)
CMS_LANGUAGES = LANGUAGES
GOOGLE_MAPS_API_KEY = ""
CMS_SHOW_END_DATE = True
CMS_SHOW_START_DATE = True
CMS_PERMISSION = True
CMS_MODERATOR = False
CMS_URL_OVERWRITE = True
CMS_MENU_TITLE_OVERWRITE = True
CMS_SEO_FIELDS = True
CMS_REDIRECTS = True
CMS_SOFTROOT = True
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS" : False,
}
# Allowed IPs for the Django Debug Toolbar
INTERNAL_IPS = ('127.0.0.1',)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
'django.template.loaders.eggs.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
'django.core.context_processors.debug',
"django.core.context_processors.i18n",
"django.core.context_processors.request",
"django.core.context_processors.media",
"cms.context_processors.media",
'django.contrib.messages.context_processors.messages',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.media.PlaceholderMediaMiddleware',
'cms_redirects.middleware.RedirectFallbackMiddleware',
)
ROOT_URLCONF = 'redirect_demo.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR,'templates'),
)
FIXTURE_DIRS = (
os.path.join(PROJECT_DIR, "fixtures"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'cms',
'menus',
'cms.plugins.text',
'mptt',
'publisher',
'south',
'appmedia',
'cms_redirects',
)
SOUTH_TESTS_MIGRATE = False
try:
from settings_dev import *
except ImportError:
pass
| 29.605556 | 122 | 0.691687 | # Django settings for redirect_demo project.
import os
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
gettext = lambda s: s
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'redirect_demo.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*^aor)8+d(lg_#ezg0&8sc&&pju^18#t=clw-2ief&q#+%(s*n'
LANGUAGE_CODE = 'en'
CMS_TEMPLATES = (
('home.html', gettext('Homepage')),
)
CMS_PLACEHOLDER_CONF = {
'footer-address-content': {
'plugins': ('TextPlugin',),
'name':gettext('Footer Link List'),
},
'footer-link-list': {
'plugins': ('FilerImagePlugin',),
'name':gettext('Footer Link List'),
},
'right-image': {
'plugins': ('FilerImagePlugin',),
'name':gettext('Right Image'),
},
}
LANGUAGES = (
('en', gettext('English')),
)
CMS_LANGUAGES = LANGUAGES
GOOGLE_MAPS_API_KEY = ""
CMS_SHOW_END_DATE = True
CMS_SHOW_START_DATE = True
CMS_PERMISSION = True
CMS_MODERATOR = False
CMS_URL_OVERWRITE = True
CMS_MENU_TITLE_OVERWRITE = True
CMS_SEO_FIELDS = True
CMS_REDIRECTS = True
CMS_SOFTROOT = True
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS" : False,
}
# Allowed IPs for the Django Debug Toolbar
INTERNAL_IPS = ('127.0.0.1',)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
'django.template.loaders.eggs.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
'django.core.context_processors.debug',
"django.core.context_processors.i18n",
"django.core.context_processors.request",
"django.core.context_processors.media",
"cms.context_processors.media",
'django.contrib.messages.context_processors.messages',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.media.PlaceholderMediaMiddleware',
'cms_redirects.middleware.RedirectFallbackMiddleware',
)
ROOT_URLCONF = 'redirect_demo.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR,'templates'),
)
FIXTURE_DIRS = (
os.path.join(PROJECT_DIR, "fixtures"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'cms',
'menus',
'cms.plugins.text',
'mptt',
'publisher',
'south',
'appmedia',
'cms_redirects',
)
SOUTH_TESTS_MIGRATE = False
try:
from settings_dev import *
except ImportError:
pass
| 0 | 0 | 0 |
bd3809cf0caa404dcf1774a91b849c420fd6f92c | 2,615 | py | Python | src/m1_hangman.py | theneltj/21-FunctionalDecomposition | 53dd19436a3f33e65957e8db0216bcb056ce187e | [
"MIT"
] | null | null | null | src/m1_hangman.py | theneltj/21-FunctionalDecomposition | 53dd19436a3f33e65957e8db0216bcb056ce187e | [
"MIT"
] | null | null | null | src/m1_hangman.py | theneltj/21-FunctionalDecomposition | 53dd19436a3f33e65957e8db0216bcb056ce187e | [
"MIT"
] | null | null | null | """
Hangman.
Authors: Tyler Thenell and Zachary Zdanavicius.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# DONE: 2. Implement Hangman using your Iterative Enhancement Plan.
import random
####### Do NOT attempt this assignment before class! #######
main()
| 27.526316 | 67 | 0.56673 | """
Hangman.
Authors: Tyler Thenell and Zachary Zdanavicius.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# DONE: 2. Implement Hangman using your Iterative Enhancement Plan.
import random
####### Do NOT attempt this assignment before class! #######
def main():
    """Entry point: show the banner, ask for game settings, then run the game."""
    # Title banner.
    print('_________________________________')
    print('|            -HANGMAN-           |')
    print('|          Tyler & Zach          |')
    print('|________________________________|')
    # Game parameters come from the player; non-numeric input raises ValueError.
    min_length = int(input('Give a minimum length: '))
    num_guesses = int(input('How many chances do you want: '))
    secret_word = word_selector(min_length)
    guessing_runner(secret_word, num_guesses)
def word_selector(minimum):
    """Pick a uniformly random word with at least `minimum` letters from words.txt.

    The first line of words.txt is treated as a header and skipped, matching
    the original behavior.

    :param minimum: Minimum acceptable word length.
    :return: A randomly chosen word with len(word) >= minimum.
    :raises ValueError: If no word in the file is long enough. (The previous
        implementation drew a single random word and then spun in an infinite
        ``while True`` loop whenever that word happened to be too short.)
    """
    with open('words.txt') as f:
        f.readline()  # skip the header line
        words = f.read().split()
    candidates = [word for word in words if len(word) >= minimum]
    if not candidates:
        raise ValueError('no word with at least %d letters in words.txt' % minimum)
    return random.choice(candidates)
def print_known(secret_word, guessed):
    """Print the secret word with guessed letters revealed and the rest blanked.

    Output starts with a newline; revealed letters print with no separator and
    unknown letters print as '_ ' (underscore + space), matching the original
    layout. Fixes a crash in the original: with no guesses yet, the inner loop
    never ran and its loop variable ``j`` was read before assignment
    (NameError on the very first turn).

    :param secret_word: The word being guessed.
    :param guessed: String of all letters guessed so far (may be empty).
    """
    print()
    for letter in secret_word:
        if letter in guessed:
            print(letter, end='')
        else:
            print('_ ', end='')
def guessing_runner(secret_word, num_guesses):
    """Run the interactive guessing loop until the word is complete or the tries run out.

    :param secret_word: The word the player has to guess.
    :param num_guesses: Number of (wrong) guesses the player is allowed.
    """
    guessed = ''  # every recorded single-letter guess so far, in input order
    num_guessed = 0  # tries consumed so far
    while num_guessed < num_guesses:
        print_known(secret_word, guessed)
        print()
        print('You have', num_guesses - num_guessed, 'tries left!')
        print('And have guessed: ', guessed)
        new_guess = input('Enter your new guess: ')
        # Only single characters are recorded as guesses.
        if len(new_guess) == 1:
            guessed += new_guess
            for k in range(len(secret_word)):
                if secret_word[k] == new_guess:
                    # Correct letter: pre-decrement so the increment below nets to zero tries used.
                    num_guessed -= 1
                    break
        # NOTE(review): input that is not exactly one character still costs a
        # try without being recorded -- confirm that is intended.
        num_guessed += 1
        if win_check(secret_word, guessed) == True:
            results(True, secret_word)
            return
    results(False, secret_word)
def win_check(secret_word, guessed):
    """Return True when every letter of the secret word has been guessed.

    :param secret_word: The word being guessed.
    :param guessed: String of all letters guessed so far.
    :return: True if the word is fully revealed, False otherwise.
    """
    # Equivalent to rebuilding the word from guessed letters and comparing:
    # the reconstruction equals secret_word exactly when each of its letters
    # appears somewhere in `guessed`.
    return all(letter in guessed for letter in secret_word)
def results(result, secret_word):
    """Print the win/lose banner and reveal the secret word.

    :param result: True for a win, False for a loss.
    :param secret_word: The word that was being guessed.
    """
    # Push the previous game output off-screen before showing the banner.
    for k in range(10):
        print()
    print(' __________________')
    if result == True:
        print('|     YOU WIN!     |')
    if result == False:
        print('|     YOU LOSE     |')
    print('|__________________|')
    print('The word was: ', secret_word)
main()
| 2,208 | 0 | 138 |
cfac305f0f7ce458aca125a8380e57d97d04bc9b | 81,938 | py | Python | openstuder.py | OpenStuder/openstuder-client-python | ade667116afcd084faed93febfa4e267972f5250 | [
"MIT"
] | null | null | null | openstuder.py | OpenStuder/openstuder-client-python | ade667116afcd084faed93febfa4e267972f5250 | [
"MIT"
] | null | null | null | openstuder.py | OpenStuder/openstuder-client-python | ade667116afcd084faed93febfa4e267972f5250 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Callable, Optional, Tuple, List
from enum import Enum, Flag, auto
from threading import Thread
import datetime
import json
import websocket
class SIStatus(Enum):
    """
    Status of operations on the OpenStuder gateway.

    - **SIStatus.SUCCESS**: Operation was successfully completed.
    - **SIStatus.IN_PROGRESS**: Operation is already in progress or another operation is occupying the resource.
    - **SIStatus.ERROR**: General (unspecified) error.
    - **SIStatus.NO_PROPERTY**: The property does not exist or the user's access level does not allow to access the property.
    - **SIStatus.NO_DEVICE**: The device does not exist.
    - **SIStatus.NO_DEVICE_ACCESS**: The device access instance does not exist.
    - **SIStatus.TIMEOUT**: A timeout occurred when waiting for the completion of the operation.
    - **SIStatus.INVALID_VALUE**: An invalid value was passed.
    """

    SUCCESS = 0
    IN_PROGRESS = 1
    ERROR = -1
    NO_PROPERTY = -2
    NO_DEVICE = -3
    NO_DEVICE_ACCESS = -4
    TIMEOUT = -5
    INVALID_VALUE = -6

    @staticmethod
    def from_ordinal(ordinal: int) -> 'SIStatus':
        """
        Convert a numeric protocol status code into its SIStatus member.

        The class previously ended with a bare ``@staticmethod`` whose function
        body was missing, which made the decorator apply to the *next* class
        definition and broke it; this restores a complete static factory.

        :param ordinal: Numeric status code received from the gateway.
        :return: Matching SIStatus member, or SIStatus.ERROR for unknown codes.
        """
        try:
            return SIStatus(ordinal)
        except ValueError:
            return SIStatus.ERROR
class SIConnectionState(Enum):
    """
    State of the connection to the OpenStuder gateway.
    - **SIConnectionState.DISCONNECTED**: The client is not connected.
    - **SIConnectionState.CONNECTING**: The client is establishing the WebSocket connection to the gateway.
    - **SIConnectionState.AUTHORIZING**: The WebSocket connection to the gateway has been established and the client is authorizing.
    - **SIConnectionState.CONNECTED**: The WebSocket connection is established and the client is authorized, ready to use.
    """
    DISCONNECTED = auto()  # Initial state; connect() also falls back here when authorization is refused.
    CONNECTING = auto()  # WebSocket connection to the gateway is being established.
    AUTHORIZING = auto()  # Connection established, authorize frame sent, waiting for the gateway's answer.
    CONNECTED = auto()  # Authorized and ready for API calls.
class SIAccessLevel(Enum):
    """
    Level of access granted to a client from the OpenStuder gateway.

    - **NONE**: No access at all.
    - **BASIC**: Basic access to device information properties (configuration excluded).
    - **INSTALLER**: Basic access + additional access to most common configuration properties.
    - **EXPERT**: Installer + additional advanced configuration properties.
    - **QUALIFIED_SERVICE_PERSONNEL**: Expert and all configuration and service properties only for qualified service personnel.
    """

    NONE = 0
    BASIC = auto()
    INSTALLER = auto()
    EXPERT = auto()
    QUALIFIED_SERVICE_PERSONNEL = auto()

    @staticmethod
    def from_ordinal(ordinal: int) -> 'SIAccessLevel':
        """
        Convert a numeric access-level code into its SIAccessLevel member.

        Restores the body of the dangling ``@staticmethod`` that used to
        terminate this class (the bare decorator otherwise applied to the next
        class definition and broke it).

        :param ordinal: Numeric access level code (0..4).
        :return: Matching member, or SIAccessLevel.NONE for unknown codes.
        """
        try:
            return SIAccessLevel(ordinal)
        except ValueError:
            return SIAccessLevel.NONE
class SIDescriptionFlags(Flag):
    """
    Flags to control the format of the **DESCRIBE** functionality.
    - **SIDescriptionFlags.NONE**: No description flags.
    - **SIDescriptionFlags.INCLUDE_ACCESS_INFORMATION**: Includes device access instances information.
    - **SIDescriptionFlags.INCLUDE_DEVICE_INFORMATION**: Include device information.
    - **SIDescriptionFlags.INCLUDE_PROPERTY_INFORMATION**: Include device property information.
    - **SIDescriptionFlags.INCLUDE_DRIVER_INFORMATION**: Include device access driver information.
    """
    NONE = 0
    INCLUDE_ACCESS_INFORMATION = auto()
    INCLUDE_DEVICE_INFORMATION = auto()
    INCLUDE_PROPERTY_INFORMATION = auto()
    INCLUDE_DRIVER_INFORMATION = auto()
class SIWriteFlags(Flag):
    """
    Flags to control write property operation.
    - **SIWriteFlags.NONE**: No write flags.
    - **SIWriteFlags.PERMANENT**: Write the change to the persistent storage, eg the change lasts reboots.
    """
    NONE = 0
    PERMANENT = auto()  # Persist the written value so it survives gateway reboots.
class SIProtocolError(IOError):
    """
    Exception type used to report all OpenStuder protocol errors.
    """

    def reason(self) -> str:
        """
        Return the human-readable reason for this protocol error.

        :return: Reason for the error.
        """
        # The reason string is the first positional argument the exception was
        # constructed with (BaseException stores it in `args`).
        return self.args[0]
class SIDeviceMessage:
"""
The SIDeviceMessage class represents a message a device connected to the OpenStuder gateway has broadcast.
"""
@staticmethod
class SIPropertyReadResult:
"""
The SIDPropertyReadResult class represents the status of a property read result.
"""
@staticmethod
class SIPropertySubscriptionResult:
"""
The SIDPropertyReadResult class represents the status of a property subscription/unsubscription.
"""
@staticmethod
class SIGatewayClient(_SIAbstractGatewayClient):
"""
Simple, synchronous (blocking) OpenStuder gateway client.
This client uses a synchronous model which has the advantage to be much simpler to use than the asynchronous version SIAsyncGatewayClient. The drawback is that device message
indications are ignored by this client and subscriptions to property changes are not possible.
"""
    def connect(self, host: str, port: int = 1987, user: str = None, password: str = None) -> SIAccessLevel:
        """
        Establishes the WebSocket connection to the OpenStuder gateway and executes the user authorization process once the connection has been established. This method blocks the
        current thread until the operation (authorize) has been completed or an error occurred. The method returns the access level granted to the client during authorization on
        success or throws an **SIProtocolError** otherwise.
        :param host: Hostname or IP address of the OpenStuder gateway to connect to.
        :param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
        :param user: Username send to the gateway used for authorization.
        :param password: Password send to the gateway used for authorization.
        :return: Access Level granted to the client.
        :raises SIProtocolError: If the connection could not be established, or the authorization was refused.
        """
        # Ensure that the client is in the DISCONNECTED state.
        self.__ensure_in_state(SIConnectionState.DISCONNECTED)
        # Connect to WebSocket server.
        self.__state = SIConnectionState.CONNECTING
        # NOTE(review): failures of create_connection itself (e.g. TCP connect refused) propagate as
        # websocket/OS exceptions rather than SIProtocolError -- confirm this is the intended contract.
        self.__ws = websocket.create_connection('ws://{host}:{port}'.format(host=host, port=port))
        # Authorize client.
        self.__state = SIConnectionState.AUTHORIZING
        # Authorize either without credentials or with the supplied user/password pair.
        if user is None or password is None:
            self.__ws.send(super(SIGatewayClient, self).encode_authorize_frame_without_credentials())
        else:
            self.__ws.send(super(SIGatewayClient, self).encode_authorize_frame_with_credentials(user, password))
        try:
            self.__access_level, self.__gateway_version = super(SIGatewayClient, self).decode_authorized_frame(self.__ws.recv())
        except ConnectionRefusedError:
            # NOTE(review): decode_authorized_frame presumably raises ConnectionRefusedError when the
            # gateway rejects the authorization; the client drops back to DISCONNECTED before reporting.
            self.__state = SIConnectionState.DISCONNECTED
            raise SIProtocolError('WebSocket connection refused')
        # Change state to connected.
        self.__state = SIConnectionState.CONNECTED
        # Return access level.
        return self.__access_level
def state(self) -> SIConnectionState:
"""
Returns the current state of the client. See **SIConnectionState** for details.
:return: Current state of the client.
"""
return self.__state
def access_level(self) -> SIAccessLevel:
"""
Return the access level the client has gained on the gateway connected. See **SIAccessLevel** for details.
:return: Access level granted to client.
"""
return self.__access_level
def gateway_version(self) -> str:
"""
Returns the version of the OpenStuder gateway software running on the host the client is connected to.
:return: Version of the gateway software.
"""
return self.__gateway_version
def enumerate(self) -> Tuple[SIStatus, int]:
"""
Instructs the gateway to scan every configured and functional device access driver for new devices and remove devices that do not respond anymore. Returns the status of
the operation, and the number of devices present.
:return: Returns two values. 1: operation status, 2: the number of devices present.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send ENUMERATE message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_enumerate_frame())
# Wait for ENUMERATED message, decode it and return data.
return super(SIGatewayClient, self).decode_enumerated_frame(self.__receive_frame_until_commands(['ENUMERATED', 'ERROR']))
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> Tuple[SIStatus, Optional[str], object]:
"""
This method can be used to retrieve information about the available devices and their properties from the connected gateway. Using the optional device_access_id,
device_id and property_id parameters, the method can either request information about the whole topology, a particular device access instance, a device or a property.
The flags control the level of detail in the gateway's response.
:param device_access_id: Device access ID for which the description should be retrieved.
:param device_id: Device ID for which the description should be retrieved. Note that device_access_id must be present too.
:param property_id: Property ID for which the description should be retrieved. Note that device_access_id and device_id must be present too.
:param flags: Flags to control level of detail of the response.
:return: Returns three values. 1: Status of the operation, 2: the subject's id, 3: the description object.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send DESCRIBE message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags))
# Wait for DESCRIPTION message, decode it and return data.
return super(SIGatewayClient, self).decode_description_frame(self.__receive_frame_until_commands(['DESCRIPTION', 'ERROR']))
def find_properties(self, property_id: str) -> Tuple[SIStatus, str, int, List[str]]:
"""
This method is used to retrieve a list of existing properties that match the given property ID in the form "<device access ID>.<device ID>.<property ID>". The wildcard
character "*" is supported for <device access ID> and <device ID> fields.
For example "*.inv.3136" represents all properties with ID 3136 on the device with ID "inv" connected through any device access, "demo.*.3136" represents all properties
with ID 3136 on any device that disposes that property connected through the device access "demo" and finally "*.*.3136" represents all properties with ID 3136 on any
device that disposes that property connected through any device access.
:param property_id: The search wildcard ID.
:return: Returns four values: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
4: List of the property IDs.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send FIND PROPERTIES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_find_properties_frame(property_id))
# Wait for PROPERTIES FOUND message, decode it and return data.
return super(SIGatewayClient, self).decode_properties_found_frame(self.__receive_frame_until_commands(['PROPERTIES FOUND', 'ERROR']))
def read_property(self, property_id: str) -> Tuple[SIStatus, str, Optional[any]]:
"""
This method is used to retrieve the actual value of a given property from the connected gateway. The property is identified by the property_id parameter.
:param property_id: The ID of the property to read in the form '{device access ID}.{device ID}.{property ID}'.
:return: Returns three values: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTY message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_property_frame(property_id))
# Wait for PROPERTY READ message, decode it and return data.
return super(SIGatewayClient, self).decode_property_read_frame(self.__receive_frame_until_commands(['PROPERTY READ', 'ERROR'])).to_tuple()
def read_properties(self, property_ids: List[str]) -> List[SIPropertyReadResult]:
"""
This method is used to retrieve the actual value of multiple properties at the same time from the connected gateway. The properties are identified by the property_ids
parameter.
:param property_ids: The IDs of the properties to read in the form '{device access ID}.{device ID}.{property ID}'.
:return: Returns one value: 1: List of statuses and values of all read properties.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTIES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_properties_frame(property_ids))
# Wait for PROPERTIES READ message, decode it and return data.
return super(SIGatewayClient, self).decode_properties_read_frame(self.__receive_frame_until_commands(['PROPERTIES READ', 'ERROR']))
def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> Tuple[SIStatus, str]:
"""
The write_property method is used to change the actual value of a given property. The property is identified by the property_id parameter and the new value is passed by the
optional value parameter.
This value parameter is optional as it is possible to write to properties with the data type "Signal" where there is no actual value written, the write operation rather
triggers an action on the device.
:param property_id: The ID of the property to write in the form '{device access ID}.{<device ID}.{<property ID}'.
:param value: Optional value to write.
:param flags: Write flags, See SIWriteFlags for details, if not provided the flags are not send by the client, and the gateway uses the default flags
(SIWriteFlags.PERMANENT).
:return: Returns two values: 1: Status of the write operation, 2: the ID of the property written.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send WRITE PROPERTY message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_write_property_frame(property_id, value, flags))
# Wait for PROPERTY WRITTEN message, decode it and return data.
return super(SIGatewayClient, self).decode_property_written_frame(self.__receive_frame_until_commands(['PROPERTY WRITTEN', 'ERROR']))
def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> Tuple[SIStatus, List[str]]:
"""
This method is used to retrieve the list of IDs of all properties for whom data is logged on the gateway. If a time window is given using from and to, only data in this
time windows is considered.
:param from_: Optional date and time of the start of the time window to be considered.
:param to: Optional date and time of the end of the time window to be considered.
:return: Returns two values: 1: Status of the operation, 2: List of all properties for whom data is logged on the gateway in the optional time window.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_datalog_frame(None, from_, to, None))
# Wait for DATALOG READ message, decode it and return data.
status, _, _, parameters = super(SIGatewayClient, self).decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))
return status, parameters.splitlines()
def read_datalog_csv(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, str, int, str]:
"""
This method is used to retrieve all or a subset of logged data of a given property from the gateway.
:param property_id: Global ID of the property for which the logged data should be retrieved. It has to be in the form '{device access ID}.{device ID}.{property ID}'.
:param from_: Optional date and time from which the data has to be retrieved, defaults to the oldest value logged.
:param to: Optional date and time to which the data has to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of results retrieved in total.
:return: Returns four values: 1: Status of the operation, 2: id of the property, 3: number of entries, 4: Properties data in CSV format whereas the first column is the
date and time in ISO 8601 extended format, and the second column contains the actual values.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_datalog_frame(property_id, from_, to, limit))
# Wait for DATALOG READ message, decode it and return data.
return super(SIGatewayClient, self).decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))
def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, int, List[SIDeviceMessage]]:
"""
The read_messages() method can be used to retrieve all or a subset of stored messages send by devices on all buses in the past from the gateway.
:param from_: Optional date and time from which the messages have to be retrieved, defaults to the oldest message saved.
:param to: Optional date and time to which the messages have to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of messages retrieved in total.
:return: Returns three values. 1: the status of the operation, 2: the number of messages, 3: the list of retrieved messages.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ MESSAGES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_messages_frame(from_, to, limit))
# Wait for MESSAGES READ message, decode it and return data.
return super(SIGatewayClient, self).decode_messages_read_frame(self.__receive_frame_until_commands(['MESSAGES READ', 'ERROR']))
def disconnect(self) -> None:
"""
Disconnects the client from the gateway.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Change state to disconnected.
self.__state = SIConnectionState.DISCONNECTED
# Close the WebSocket
self.__ws.close()
class SIAsyncGatewayClientCallbacks:
    """
    Base class containing all callback methods that can be called by the SIAsyncGatewayClient. You can use this as your base class and register it using
    IAsyncGatewayClient.set_callbacks().

    All methods are no-op stubs: subclasses override only the callbacks they are interested in.
    """

    def on_connected(self, access_level: SIAccessLevel, gateway_version: str) -> None:
        """
        This method is called once the connection to the gateway could be established and the user has been successfully authorized.

        :param access_level: Access level that was granted to the user during authorization.
        :param gateway_version: Version of the OpenStuder software running on the gateway.
        """
        pass

    def on_disconnected(self) -> None:
        """
        Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.
        """
        pass

    def on_error(self, reason) -> None:
        """
        Called on severe errors.

        :param reason: Exception that caused the erroneous behavior.
        """
        pass

    def on_enumerated(self, status: SIStatus, device_count: int) -> None:
        """
        Called when the enumeration operation started using enumerate() has completed on the gateway.

        :param status: Operation status.
        :param device_count: Number of devices present.
        """
        pass

    def on_description(self, status: SIStatus, id_: Optional[str], description: object) -> None:
        """
        Called when the gateway returned the description requested using the describe() method.

        :param status: Status of the operation.
        :param id_: Subject's ID.
        :param description: Description object.
        """
        pass

    def on_properties_found(self, status: SIStatus, id_: str, count: int, properties: List[str]):
        """
        Called when the gateway returned the list of found properties requested using the find_properties() method.

        :param status: Status of the find operation.
        :param id_: The searched ID (including wildcard character).
        :param count: The number of properties found.
        :param properties: List of the property IDs.
        """
        pass

    def on_property_read(self, status: SIStatus, property_id: str, value: Optional[any]) -> None:
        """
        Called when the property read operation started using read_property() has completed on the gateway.

        :param status: Status of the read operation.
        :param property_id: ID of the property read.
        :param value: The value read.
        """
        pass

    def on_properties_read(self, results: List[SIPropertyReadResult]) -> None:
        """
        Called when the multiple properties read operation started using read_properties() has completed on the gateway.

        :param results: List of all results of the operation.
        """
        pass

    def on_property_written(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the property write operation started using write_property() has completed on the gateway.

        :param status: Status of the write operation.
        :param property_id: ID of the property written.
        """
        pass

    def on_property_subscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.

        :param status: The status of the subscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_subscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.

        :param statuses: The statuses of the individual subscriptions.
        """
        pass

    def on_property_unsubscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.

        :param status: The status of the unsubscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_unsubscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.

        :param statuses: The statuses of the individual unsubscriptions.
        """
        pass

    def on_property_updated(self, property_id: str, value: any) -> None:
        """
        This callback is called whenever the gateway send a property update.

        :param property_id: ID of the updated property.
        :param value: The current value of the property.
        """
        pass

    def on_datalog_properties_read(self, status: SIStatus, properties: List[str]) -> None:
        """
        Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.

        :param status: Status of the operation.
        :param properties: List of the IDs of the properties for whom data is available in the data log.
        """
        pass

    def on_datalog_read_csv(self, status: SIStatus, property_id: str, count: int, values: str) -> None:
        """
        Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the method returns the data in CSV format suitable to
        be written to a file.

        :param status: Status of the operation.
        :param property_id: ID of the property.
        :param count: Number of entries.
        :param values: Properties data in CSV format whereas the first column is the date and time in ISO 8601 extended format and the second column contains the actual values.
        """
        pass

    def on_device_message(self, message: SIDeviceMessage) -> None:
        """
        This callback is called whenever the gateway send a device message indication.

        :param message: The device message received.
        """
        pass

    def on_messages_read(self, status: SIStatus, count: int, messages: List[SIDeviceMessage]) -> None:
        """
        Called when the gateway returned the status of the read messages operation using the read_messages() method.

        :param status: The status of the operation.
        :param count: Number of messages retrieved.
        :param messages: List of retrieved messages.
        """
        pass
class SIAsyncGatewayClient(_SIAbstractGatewayClient):
"""
Complete, asynchronous (non-blocking) OpenStuder gateway client.
This client uses an asynchronous model which has the disadvantage to be a bit harder to use than the synchronous version. The advantages are that long operations do not block
the main thread as all results are reported using callbacks, device message indications are supported and subscriptions to property changes are possible.
"""
def connect(self, host: str, port: int = 1987, user: str = None, password: str = None, background: bool = True) -> None:
"""
Establishes the WebSocket connection to the OpenStuder gateway and executes the user authorization process once the connection has been established in the background. This
method returns immediately and does not block the current thread.
The status of the connection attempt is reported either by the on_connected() callback on success or the on_error() callback if the connection could not be established
or the authorisation for the given user was rejected by the gateway.
:param host: Hostname or IP address of the OpenStuder gateway to connect to.
:param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
:param user: Username send to the gateway used for authorization.
:param password: Password send to the gateway used for authorization.
:param background: If true, the handling of the WebSocket connection is done in the background, if false the current thread is took over.
:raises SIProtocolError: If there was an error initiating the WebSocket connection.
"""
# Ensure that the client is in the DISCONNECTED state.
self.__ensure_in_state(SIConnectionState.DISCONNECTED)
# Save parameter for later use.
self.__user = user
self.__password = password
# Connect to WebSocket server.
self.__state = SIConnectionState.CONNECTING
self.__ws = websocket.WebSocketApp('ws://{host}:{port}'.format(host=host, port=port),
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close
)
# TODO: Start connection timeout.
# If background mode is selected, start a daemon thread for the connection handling, otherwise take over current thread.
if background:
self.__thread = Thread(target=self.__ws.run_forever)
self.__thread.setDaemon(True)
self.__thread.start()
else:
self.__ws.run_forever()
def set_callbacks(self, callbacks: SIAsyncGatewayClientCallbacks) -> None:
"""
Configures the client to use all callbacks of the passed abstract client callback class. Using this you can set all callbacks to be called on the given object and avoid
having to set each callback individually.
:param callbacks: Object derived from SIAsyncGatewayClientCallbacks to be used for all callbacks.
"""
if isinstance(callbacks, SIAsyncGatewayClientCallbacks):
self.on_connected = callbacks.on_connected
self.on_disconnected = callbacks.on_disconnected
self.on_error = callbacks.on_error
self.on_enumerated = callbacks.on_enumerated
self.on_description = callbacks.on_description
self.on_properties_found = callbacks.on_properties_found
self.on_property_read = callbacks.on_property_read
self.on_properties_read = callbacks.on_properties_read
self.on_property_written = callbacks.on_property_written
self.on_property_subscribed = callbacks.on_property_subscribed
self.on_properties_subscribed = callbacks.on_properties_subscribed
self.on_property_unsubscribed = callbacks.on_property_unsubscribed
self.on_properties_unsubscribed = callbacks.on_properties_unsubscribed
self.on_property_updated = callbacks.on_property_updated
self.on_datalog_properties_read = callbacks.on_datalog_properties_read
self.on_datalog_read_csv = callbacks.on_datalog_read_csv
self.on_device_message = callbacks.on_device_message
self.on_messages_read = callbacks.on_messages_read
def state(self) -> SIConnectionState:
"""
Returns the current state of the client. See **SIConnectionState** for details.
:return: Current state of the client.
"""
return self.__state
def access_level(self) -> SIAccessLevel:
"""
Return the access level the client has gained on the gateway connected. See **SIAccessLevel** for details.
:return: Access level granted to client.
"""
return self.__access_level
def gateway_version(self) -> str:
"""
Returns the version of the OpenStuder gateway software running on the host the client is connected to.
:return: Version of the gateway software.
"""
return self.__gateway_version
def enumerate(self) -> None:
"""
Instructs the gateway to scan every configured and functional device access driver for new devices and remove devices that do not respond anymore.
The status of the operation and the number of devices present are reported using the on_enumerated() callback.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send ENUMERATE message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_enumerate_frame())
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> None:
"""
This method can be used to retrieve information about the available devices and their properties from the connected gateway. Using the optional device_access_id,
device_id and property_id parameters, the method can either request information about the whole topology, a particular device access instance, a device or a property.
The flags control the level of detail in the gateway's response.
The description is reported using the on_description() callback.
:param device_access_id: Device access ID for which the description should be retrieved.
:param device_id: Device ID for which the description should be retrieved. Note that device_access_id must be present too.
:param property_id: Property ID for which the description should be retrieved. Note that device_access_id and device_id must be present too.
:param flags: Flags to control level of detail of the response.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send DESCRIBE message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags))
def find_properties(self, property_id: str) -> None:
"""
This method is used to retrieve a list of existing properties that match the given property ID in the form "<device access ID>.<device ID>.<property ID>". The wildcard
character "*" is supported for <device access ID> and <device ID> fields.
For example "*.inv.3136" represents all properties with ID 3136 on the device with ID "inv" connected through any device access, "demo.*.3136" represents all properties
with ID 3136 on any device that disposes that property connected through the device access "demo" and finally "*.*.3136" represents all properties with ID 3136 on any
device that disposes that property connected through any device access.
The status of the read operation and the actual value of the property are reported using the on_properties_found() callback.
:param property_id: The search wildcard ID.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send FIND PROPERTIES message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_find_properties_frame(property_id))
def read_property(self, property_id: str) -> None:
"""
This method is used to retrieve the actual value of a given property from the connected gateway. The property is identified by the property_id parameter.
The status of the read operation and the actual value of the property are reported using the on_property_read() callback.
:param property_id: The ID of the property to read in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_read_property_frame(property_id))
def read_properties(self, property_ids: List[str]) -> None:
"""
This method is used to retrieve the actual value of multiple property at the same time from the connected gateway. The properties are identified by the property_ids
parameter.
The status of the multiple read operations and the actual value of the properties are reported using the on_properties_read() callback.
:param property_ids: The IDs of the properties to read in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTIES message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_read_properties_frame(property_ids))
def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> None:
"""
The write_property method is used to change the actual value of a given property. The property is identified by the property_id parameter and the new value is passed by the
optional value parameter.
This value parameter is optional as it is possible to write to properties with the data type "Signal" where there is no actual value written, the write operation rather
triggers an action on the device.
The status of the write operation is reported using the on_property_written() callback.
:param property_id: The ID of the property to write in the form '{device access ID}.{<device ID}.{<property ID}'.
:param value: Optional value to write.
:param flags: Write flags, See SIWriteFlags for details, if not provided the flags are not send by the client and the gateway uses the default flags
(SIWriteFlags.PERMANENT).
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send WRITE PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_write_property_frame(property_id, value, flags))
def subscribe_to_property(self, property_id: str) -> None:
"""
This method can be used to subscribe to a property on the connected gateway. The property is identified by the property_id parameter.
The status of the subscribe request is reported using the on_property_subscribed() callback.
:param property_id: The ID of the property to subscribe to in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send SUBSCRIBE PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_subscribe_property_frame(property_id))
def subscribe_to_properties(self, property_ids: List[str]) -> None:
"""
This method can be used to subscribe to multiple properties on the connected gateway. The properties are identified by the property_ids parameter.
The status of the subscribe request is reported using the on_properties_subscribed() callback.
:param property_ids: The list of IDs of the properties to subscribe to in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send SUBSCRIBE PROPERTIES message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_subscribe_properties_frame(property_ids))
def unsubscribe_from_property(self, property_id: str) -> None:
"""
This method can be used to unsubscribe from a property on the connected gateway. The property is identified by the property_id parameter.
The status of the unsubscribe request is reported using the on_property_unsubscribed() callback.
:param property_id: The ID of the property to unsubscribe from in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send UNSUBSCRIBE PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_unsubscribe_property_frame(property_id))
def unsubscribe_from_properties(self, property_ids: List[str]) -> None:
"""
This method can be used to unsubscribe from multiple properties on the connected gateway. The properties are identified by the property_ids parameter.
The status of the unsubscribe request is reported using the on_properties_unsubscribed() callback.
:param property_ids: The list of IDs of the properties to unsubscribe from in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send UNSUBSCRIBE PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_unsubscribe_properties_frame(property_ids))
def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> None:
"""
This method is used to retrieve the list of IDs of all properties for whom data is logged on the gateway. If a time window is given using from and to, only data in this
time windows is considered.
The status of the operation is the list of properties for whom logged data is available are reported using the on_datalog_properties_read() callback.
:param from_: Optional date and time of the start of the time window to be considered.
:param to: Optional date and time of the end of the time window to be considered.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_read_datalog_frame(None, from_, to, None))
def read_datalog(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
"""
This method is used to retrieve all or a subset of logged data of a given property from the gateway.
The status of this operation and the respective values are reported using the on_datalog_read_csv() callback.
:param property_id: Global ID of the property for which the logged data should be retrieved. It has to be in the form '{device access ID}.{device ID}.{property ID}'.
:param from_: Optional date and time from which the data has to be retrieved, defaults to the oldest value logged.
:param to: Optional date and time to which the data has to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of results retrieved in total.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_read_datalog_frame(property_id, from_, to, limit))
def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
"""
The read_messages method can be used to retrieve all or a subset of stored messages send by devices on all buses in the past from the gateway.
The status of this operation and the retrieved messages are reported using the on_messages_read() callback.
:param from_: Optional date and time from which the messages have to be retrieved, defaults to the oldest message saved.
:param to: Optional date and time to which the messages have to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of messages retrieved in total.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ MESSAGES message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_read_messages_frame(from_, to, limit))
def disconnect(self) -> None:
"""
Disconnects the client from the gateway.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Close the WebSocket
self.__ws.close()
| 48.541469 | 180 | 0.672435 | from __future__ import annotations
from typing import Callable, Optional, Tuple, List
from enum import Enum, Flag, auto
from threading import Thread
import datetime
import json
import websocket
class SIStatus(Enum):
    """
    Status of operations on the OpenStuder gateway.

    - **SIStatus.SUCCESS**: Operation was successfully completed.
    - **SIStatus.IN_PROGRESS**: Operation is already in progress or another operation is occupying the resource.
    - **SIStatus.ERROR**: General (unspecified) error.
    - **SIStatus.NO_PROPERTY**: The property does not exist or the user's access level does not allow to access the property.
    - **SIStatus.NO_DEVICE**: The device does not exist.
    - **SIStatus.NO_DEVICE_ACCESS**: The device access instance does not exist.
    - **SIStatus.TIMEOUT**: A timeout occurred when waiting for the completion of the operation.
    - **SIStatus.INVALID_VALUE**: A invalid value was passed.
    """
    SUCCESS = 0
    IN_PROGRESS = 1
    ERROR = -1
    NO_PROPERTY = -2
    NO_DEVICE = -3
    NO_DEVICE_ACCESS = -4
    TIMEOUT = -5
    INVALID_VALUE = -6

    @staticmethod
    def from_string(string: str) -> SIStatus:
        """
        Converts the wire representation of a status into the matching enum member.
        :param string: Status string as received from the gateway.
        :return: Matching SIStatus member, SIStatus.ERROR for unknown strings.
        """
        lookup = {
            'Success': SIStatus.SUCCESS,
            'InProgress': SIStatus.IN_PROGRESS,
            'Error': SIStatus.ERROR,
            'NoProperty': SIStatus.NO_PROPERTY,
            'NoDevice': SIStatus.NO_DEVICE,
            'NoDeviceAccess': SIStatus.NO_DEVICE_ACCESS,
            'Timeout': SIStatus.TIMEOUT,
            'InvalidValue': SIStatus.INVALID_VALUE,
        }
        return lookup.get(string, SIStatus.ERROR)
class SIConnectionState(Enum):
    """
    State of the connection to the OpenStuder gateway.

    - **SIConnectionState.DISCONNECTED**: The client is not connected.
    - **SIConnectionState.CONNECTING**: The client is establishing the WebSocket connection to the gateway.
    - **SIConnectionState.AUTHORIZING**: The WebSocket connection to the gateway has been established and the client is authorizing.
    - **SIConnectionState.CONNECTED**: The WebSocket connection is established and the client is authorized, ready to use.
    """
    # Explicit values match the numbering auto() produces (starting at 1).
    DISCONNECTED = 1
    CONNECTING = 2
    AUTHORIZING = 3
    CONNECTED = 4
class SIAccessLevel(Enum):
    """
    Level of access granted to a client from the OpenStuder gateway.

    - **NONE**: No access at all.
    - **BASIC**: Basic access to device information properties (configuration excluded).
    - **INSTALLER**: Basic access + additional access to most common configuration properties.
    - **EXPERT**: Installer + additional advanced configuration properties.
    - **QUALIFIED_SERVICE_PERSONNEL**: Expert and all configuration and service properties only for qualified service personnel.
    """
    # Explicit values match the numbering auto() produces after the explicit 0.
    NONE = 0
    BASIC = 1
    INSTALLER = 2
    EXPERT = 3
    QUALIFIED_SERVICE_PERSONNEL = 4

    @staticmethod
    def from_string(string: str) -> SIAccessLevel:
        """
        Converts the wire representation of an access level into the matching enum member.
        :param string: Access level string as received from the gateway.
        :return: Matching SIAccessLevel member, SIAccessLevel.NONE for unknown strings.
        """
        lookup = {
            'None': SIAccessLevel.NONE,
            'Basic': SIAccessLevel.BASIC,
            'Installer': SIAccessLevel.INSTALLER,
            'Expert': SIAccessLevel.EXPERT,
            'QSP': SIAccessLevel.QUALIFIED_SERVICE_PERSONNEL,
        }
        return lookup.get(string, SIAccessLevel.NONE)
class SIDescriptionFlags(Flag):
    """
    Flags to control the format of the **DESCRIBE** functionality.

    - **SIDescriptionFlags.NONE**: No description flags.
    - **SIDescriptionFlags.INCLUDE_ACCESS_INFORMATION**: Include device access instances information.
    - **SIDescriptionFlags.INCLUDE_DEVICE_INFORMATION**: Include device information.
    - **SIDescriptionFlags.INCLUDE_PROPERTY_INFORMATION**: Include device property information.
    - **SIDescriptionFlags.INCLUDE_DRIVER_INFORMATION**: Include device access driver information.
    """
    # Explicit bit values match the numbering auto() produces for Flag members.
    NONE = 0
    INCLUDE_ACCESS_INFORMATION = 1
    INCLUDE_DEVICE_INFORMATION = 2
    INCLUDE_PROPERTY_INFORMATION = 4
    INCLUDE_DRIVER_INFORMATION = 8
class SIWriteFlags(Flag):
    """
    Flags to control the write property operation.

    - **SIWriteFlags.NONE**: No write flags.
    - **SIWriteFlags.PERMANENT**: Write the change to the persistent storage, eg the change lasts reboots.
    """
    # Explicit bit value matches the numbering auto() produces for Flag members.
    NONE = 0
    PERMANENT = 1
class SIProtocolError(IOError):
    """
    Exception type used to report all OpenStuder protocol errors.
    """
    def __init__(self, message):
        super().__init__(message)

    def reason(self) -> str:
        """
        Returns the actual reason for the error.
        :return: Reason for the error.
        """
        # The message is the single positional argument passed to the constructor.
        return self.args[0]
class SIDeviceMessage:
    """
    Represents a message a device connected to the OpenStuder gateway has broadcast.
    """
    def __init__(self, access_id: str, device_id: str, message_id: str, message: str, timestamp: datetime.datetime):
        # Timestamp when the device message was received by the gateway.
        self.timestamp = timestamp
        # ID of the device access driver that received the message.
        self.access_id = access_id
        # ID of the device that broadcast the message.
        self.device_id = device_id
        # Message ID.
        self.message_id = message_id
        # String representation of the message.
        self.message = message

    @staticmethod
    def from_dict(d: dict) -> SIDeviceMessage:
        """
        Builds an SIDeviceMessage from a decoded JSON dictionary.
        :param d: Dictionary with 'access_id', 'device_id', 'message_id', 'message' and ISO-8601 'timestamp' entries.
        :return: The constructed message object.
        :raises SIProtocolError: If a required key is missing.
        """
        try:
            # 'Z' suffix is normalized to an explicit UTC offset for fromisoformat().
            received_at = datetime.datetime.fromisoformat(d['timestamp'].replace("Z", "+00:00"))
            return SIDeviceMessage(d['access_id'], d['device_id'], d['message_id'], d['message'], received_at)
        except KeyError:
            raise SIProtocolError('invalid json body')
class SIPropertyReadResult:
    """
    Holds the outcome of a single property read operation.
    """
    def __init__(self, status: SIStatus, id_: str, value: Optional[any]):
        # Status of the property read operation.
        self.status = status
        # ID of the property read.
        self.id = id_
        # Value that was read from the property, None when no value is available.
        self.value = value

    def to_tuple(self) -> Tuple[SIStatus, str, Optional[any]]:
        """Returns the result as a (status, id, value) tuple."""
        return self.status, self.id, self.value

    @staticmethod
    def from_dict(d: dict) -> SIPropertyReadResult:
        """
        Builds a result object from a decoded JSON dictionary. The raw string value is
        coerced to float where possible, then to bool for 'true'/'false', otherwise it
        is kept as a lowercase string.
        :param d: Dictionary with 'status', 'id' and optional 'value' entries.
        :return: The constructed result object.
        :raises SIProtocolError: If a required key is missing.
        """
        try:
            outcome = SIPropertyReadResult(SIStatus.from_string(d['status']), d['id'], None)
            raw = d.get('value')
            if raw is not None:
                try:
                    outcome.value = float(raw)
                except ValueError:
                    lowered = raw.lower()
                    if lowered == 'true':
                        outcome.value = True
                    elif lowered == 'false':
                        outcome.value = False
                    else:
                        outcome.value = lowered
            return outcome
        except KeyError:
            raise SIProtocolError('invalid json body')
class SIPropertySubscriptionResult:
    """
    Holds the outcome of a property subscription or unsubscription operation.
    """
    def __init__(self, status: SIStatus, id_: str):
        # Status of the property subscribe or unsubscribe operation.
        self.status = status
        # ID of the property.
        self.id = id_

    def to_tuple(self) -> Tuple[SIStatus, str]:
        """Returns the result as a (status, id) tuple."""
        return self.status, self.id

    @staticmethod
    def from_dict(d: dict) -> SIPropertySubscriptionResult:
        """
        Builds a result object from a decoded JSON dictionary.
        :param d: Dictionary with 'status' and 'id' entries.
        :return: The constructed result object.
        :raises SIProtocolError: If a required key is missing.
        """
        try:
            return SIPropertySubscriptionResult(SIStatus.from_string(d['status']), d['id'])
        except KeyError:
            raise SIProtocolError('invalid json body')
class _SIAbstractGatewayClient:
def __init__(self):
super(_SIAbstractGatewayClient, self).__init__()
@staticmethod
def encode_authorize_frame_without_credentials() -> str:
return 'AUTHORIZE\nprotocol_version:1\n\n'
@staticmethod
def encode_authorize_frame_with_credentials(user: str, password: str) -> str:
return 'AUTHORIZE\nuser:{user}\npassword:{password}\nprotocol_version:1\n\n'.format(user=user, password=password)
@staticmethod
def decode_authorized_frame(frame: str) -> Tuple[SIAccessLevel, str]:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'AUTHORIZED' and 'access_level' in headers and 'protocol_version' in headers and 'gateway_version' in headers:
if headers['protocol_version'] == '1':
return SIAccessLevel.from_string(headers['access_level']), headers['gateway_version']
else:
raise SIProtocolError('protocol version 1 not supported by server')
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during authorization')
@staticmethod
def encode_enumerate_frame() -> str:
return 'ENUMERATE\n\n'
@staticmethod
def decode_enumerated_frame(frame: str) -> Tuple[SIStatus, int]:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'ENUMERATED' and 'status' in headers and 'device_count' in headers:
return SIStatus.from_string(headers['status']), int(headers['device_count'])
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during device enumeration')
@staticmethod
def encode_describe_frame(device_access_id: Optional[str], device_id: Optional[str], property_id: Optional[int], flags: Optional[SIDescriptionFlags]) -> str:
frame = 'DESCRIBE\n'
if device_access_id is not None:
frame += 'id:{device_access_id}'.format(device_access_id=device_access_id)
if device_id is not None:
frame += '.{device_id}'.format(device_id=device_id)
if property_id is not None:
frame += '.{property_id}'.format(property_id=property_id)
frame += '\n'
if flags is not None and isinstance(flags, SIDescriptionFlags):
frame += 'flags:'
if flags & SIDescriptionFlags.INCLUDE_ACCESS_INFORMATION:
frame += 'IncludeAccessInformation,'
if flags & SIDescriptionFlags.INCLUDE_DEVICE_INFORMATION:
frame += 'IncludeDeviceInformation,'
if flags & SIDescriptionFlags.INCLUDE_PROPERTY_INFORMATION:
frame += 'IncludePropertyInformation,'
if flags & SIDescriptionFlags.INCLUDE_DRIVER_INFORMATION:
frame += 'IncludeDriverInformation,'
frame = frame[:-1]
frame += '\n'
frame += '\n'
return frame
@staticmethod
def decode_description_frame(frame: str) -> Tuple[SIStatus, Optional[str], object]:
command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'DESCRIPTION' and 'status' in headers:
status = SIStatus.from_string(headers['status'])
if status == SIStatus.SUCCESS:
description = json.loads(body)
return status, headers.get('id', None), description
else:
return status, headers.get('id', None), {}
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during description')
@staticmethod
def encode_find_properties_frame(property_id: str) -> str:
return 'FIND PROPERTIES\nid:{property_id}\n\n'.format(property_id=property_id)
@staticmethod
def decode_properties_found_frame(frame: str) -> (SIStatus, str, int, List[str]):
command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTIES FOUND' and 'status' in headers and 'id' in headers and 'count' in headers:
status = SIStatus.from_string(headers['status'])
if status == SIStatus.SUCCESS:
properties = json.loads(body)
return status, headers.get('id'), int(headers.get('count', 0)), properties
else:
return status, headers.get('id'), int(headers.get('count', 0)), []
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during finding properties')
@staticmethod
def encode_read_property_frame(property_id: str) -> str:
return 'READ PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)
@staticmethod
def decode_property_read_frame(frame: str) -> SIPropertyReadResult:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTY READ' and 'status' in headers and 'id' in headers:
status = SIStatus.from_string(headers['status'])
if status == SIStatus.SUCCESS and 'value' in headers:
try:
value = float(headers['value'])
except ValueError:
string = headers['value'].lower()
if string == 'true':
value = True
elif string == 'false':
value = False
else:
value = string
return SIPropertyReadResult(status, headers['id'], value)
else:
return SIPropertyReadResult(status, headers['id'], None)
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during property read')
@staticmethod
def encode_read_properties_frame(property_ids: List[str]) -> str:
return 'READ PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))
@staticmethod
def decode_properties_read_frame(frame: str) -> List[SIPropertyReadResult]:
command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTIES READ' and 'status' in headers:
status = SIStatus.from_string(headers['status'])
if status == SIStatus.SUCCESS:
return json.loads(body, object_hook=SIPropertyReadResult.from_dict)
else:
raise SIProtocolError(f'error during property read, status={headers["status"]}')
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during properties read')
@staticmethod
def encode_write_property_frame(property_id: str, value: Optional[any], flags: Optional[SIWriteFlags]) -> str:
frame = 'WRITE PROPERTY\nid:{property_id}\n'.format(property_id=property_id)
if flags is not None and isinstance(flags, SIWriteFlags):
frame += 'flags:'
if flags & SIWriteFlags.PERMANENT:
frame += 'Permanent'
frame += '\n'
if value is not None:
frame += 'value:{value}\n'.format(value=value)
frame += '\n'
return frame
@staticmethod
def decode_property_written_frame(frame: str) -> Tuple[SIStatus, str]:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTY WRITTEN' and 'status' in headers and 'id' in headers:
return SIStatus.from_string(headers['status']), headers['id']
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during property write')
@staticmethod
def encode_subscribe_property_frame(property_id: str) -> str:
return 'SUBSCRIBE PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)
@staticmethod
def decode_property_subscribed_frame(frame: str) -> Tuple[SIStatus, str]:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTY SUBSCRIBED' and 'status' in headers and 'id' in headers:
return SIStatus.from_string(headers['status']), headers['id']
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during property subscribe')
@staticmethod
def encode_subscribe_properties_frame(property_ids: List[str]) -> str:
return 'SUBSCRIBE PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))
@staticmethod
def decode_properties_subscribed_frame(frame: str) -> List[SIPropertySubscriptionResult]:
command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTIES SUBSCRIBED' and 'status' in headers:
status = SIStatus.from_string(headers['status'])
if status == SIStatus.SUCCESS:
return json.loads(body, object_hook=SIPropertySubscriptionResult.from_dict)
else:
raise SIProtocolError(f'error during properties read, status={headers["status"]}')
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during properties subscribe')
@staticmethod
def encode_unsubscribe_property_frame(property_id: str) -> str:
return 'UNSUBSCRIBE PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)
@staticmethod
def decode_property_unsubscribed_frame(frame: str) -> Tuple[SIStatus, str]:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTY UNSUBSCRIBED' and 'status' in headers and 'id' in headers:
return SIStatus.from_string(headers['status']), headers['id']
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during property unsubscribe')
@staticmethod
def encode_unsubscribe_properties_frame(property_ids: List[str]) -> str:
return 'UNSUBSCRIBE PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))
@staticmethod
def decode_properties_unsubscribed_frame(frame: str) -> List[SIPropertySubscriptionResult]:
command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTIES UNSUBSCRIBED' and 'status' in headers:
status = SIStatus.from_string(headers['status'])
if status == SIStatus.SUCCESS:
return json.loads(body, object_hook=SIPropertySubscriptionResult.from_dict)
else:
raise SIProtocolError(f'error during properties unsubscribe, status={headers["status"]}')
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during properties unsubscribe')
@staticmethod
def decode_property_update_frame(frame: str) -> Tuple[str, any]:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTY UPDATE' and 'id' in headers and 'value' in headers:
try:
value = float(headers['value'])
except ValueError:
string = headers['value'].lower()
if string == 'true':
value = True
elif string == 'false':
value = False
else:
value = string
return headers['id'], value
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error receiving property update')
@staticmethod
def encode_read_datalog_frame(property_id: Optional[str], from_: Optional[datetime.datetime], to: Optional[datetime.datetime], limit: Optional[int]) -> str:
frame = 'READ DATALOG\n'
if property_id is not None:
frame += 'id:{property_id}\n'.format(property_id=property_id)
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('from', from_)
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('to', to)
if limit is not None:
frame += 'limit:{limit}\n'.format(limit=limit)
frame += '\n'
return frame
@staticmethod
def decode_datalog_read_frame(frame: str) -> Tuple[SIStatus, Optional[str], int, str]:
command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'DATALOG READ' and 'status' in headers and 'count' in headers:
return SIStatus.from_string(headers['status']), headers.get('id'), int(headers['count']), body
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error receiving datalog read')
@staticmethod
def encode_read_messages_frame(from_: Optional[datetime.datetime], to: Optional[datetime.datetime], limit: Optional[int]) -> str:
frame = 'READ MESSAGES\n'
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('from', from_)
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('to', to)
if limit is not None:
frame += 'limit:{limit}\n'.format(limit=limit)
frame += '\n'
return frame
@staticmethod
def decode_messages_read_frame(frame: str) -> Tuple[SIStatus, int, List[SIDeviceMessage]]:
command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'MESSAGES READ' and 'status' in headers and 'count' in headers:
status = SIStatus.from_string(headers['status'])
if status == SIStatus.SUCCESS:
messages = json.loads(body, object_hook=SIDeviceMessage.from_dict)
return status, int(headers['count']), messages
else:
return status, int(headers['count']), []
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error during description')
@staticmethod
def decode_device_message_frame(frame: str) -> SIDeviceMessage:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'DEVICE MESSAGE' and 'access_id' in headers and 'device_id' in headers and 'message_id' in headers and 'message' in headers and 'timestamp' in headers:
return SIDeviceMessage.from_dict(headers)
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error receiving device message')
@staticmethod
def peek_frame_command(frame: str) -> str:
return frame[:frame.index('\n')]
@staticmethod
def decode_frame(frame: str) -> Tuple[str, dict, str]:
lines = frame.split('\n')
if len(lines) < 2:
raise SIProtocolError('invalid frame')
command = lines[0]
line = 1
headers = {}
while line < len(lines) and lines[line]:
components = lines[line].split(':')
if len(components) >= 2:
headers[components[0]] = ':'.join(components[1:])
line += 1
line += 1
if line >= len(lines):
raise SIProtocolError('invalid frame')
body = '\n'.join(lines[line:])
return command, headers, body
@staticmethod
def get_timestamp_header_if_present(key: str, timestamp: Optional[datetime.datetime]):
if timestamp is not None and isinstance(timestamp, datetime.datetime):
return '{key}:{timestamp}\n'.format(key=key, timestamp=timestamp.replace(microsecond=0).isoformat())
else:
return ''
class SIGatewayClient(_SIAbstractGatewayClient):
"""
Simple, synchronous (blocking) OpenStuder gateway client.
This client uses a synchronous model which has the advantage to be much simpler to use than the asynchronous version SIAsyncGatewayClient. The drawback is that device message
indications are ignored by this client and subscriptions to property changes are not possible.
"""
    def __init__(self):
        """
        Creates a new disconnected client; call connect() to establish the connection.
        """
        super(SIGatewayClient, self).__init__()
        # Connection state machine starts out disconnected.
        self.__state: SIConnectionState = SIConnectionState.DISCONNECTED
        # WebSocket to the gateway; created by connect().
        self.__ws: Optional[websocket.WebSocket] = None
        # Access level granted during authorization.
        self.__access_level: SIAccessLevel = SIAccessLevel.NONE
        # Software version reported by the gateway during authorization.
        self.__gateway_version: str = ''
    def connect(self, host: str, port: int = 1987, user: str = None, password: str = None) -> SIAccessLevel:
        """
        Establishes the WebSocket connection to the OpenStuder gateway and executes the user authorization process once the connection has been established. This method blocks the
        current thread until the operation (authorize) has been completed or an error occurred. The method returns the access level granted to the client during authorization on
        success or throws an **SIProtocolError** otherwise.
        :param host: Hostname or IP address of the OpenStuder gateway to connect to.
        :param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
        :param user: Username send to the gateway used for authorization.
        :param password: Password send to the gateway used for authorization.
        :return: Access Level granted to the client.
        :raises SIProtocolError: If the connection could not be established, or the authorization was refused.
        """
        # Ensure that the client is in the DISCONNECTED state.
        self.__ensure_in_state(SIConnectionState.DISCONNECTED)
        # Connect to WebSocket server.
        # NOTE(review): plain ws:// — the connection is unencrypted.
        self.__state = SIConnectionState.CONNECTING
        self.__ws = websocket.create_connection('ws://{host}:{port}'.format(host=host, port=port))
        # Authorize client.
        # If either credential is missing, guest (credential-less) authorization is attempted.
        self.__state = SIConnectionState.AUTHORIZING
        if user is None or password is None:
            self.__ws.send(super(SIGatewayClient, self).encode_authorize_frame_without_credentials())
        else:
            self.__ws.send(super(SIGatewayClient, self).encode_authorize_frame_with_credentials(user, password))
        try:
            self.__access_level, self.__gateway_version = super(SIGatewayClient, self).decode_authorized_frame(self.__ws.recv())
        except ConnectionRefusedError:
            self.__state = SIConnectionState.DISCONNECTED
            raise SIProtocolError('WebSocket connection refused')
        # NOTE(review): decode_authorized_frame() raises SIProtocolError on refused
        # authorization; that exception is not caught here, so the client is left in
        # the AUTHORIZING state rather than reset to DISCONNECTED — confirm intent.
        # Change state to connected.
        self.__state = SIConnectionState.CONNECTED
        # Return access level.
        return self.__access_level
    def state(self) -> SIConnectionState:
        """
        Returns the current state of the client. See **SIConnectionState** for details.
        :return: Current state of the client.
        """
        return self.__state
    def access_level(self) -> SIAccessLevel:
        """
        Return the access level the client has gained on the gateway connected. See **SIAccessLevel** for details.
        SIAccessLevel.NONE until a connection has been authorized.
        :return: Access level granted to client.
        """
        return self.__access_level
    def gateway_version(self) -> str:
        """
        Returns the version of the OpenStuder gateway software running on the host the client is connected to.
        Empty string until a connection has been authorized.
        :return: Version of the gateway software.
        """
        return self.__gateway_version
def enumerate(self) -> Tuple[SIStatus, int]:
"""
Instructs the gateway to scan every configured and functional device access driver for new devices and remove devices that do not respond anymore. Returns the status of
the operation, and the number of devices present.
:return: Returns two values. 1: operation status, 2: the number of devices present.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send ENUMERATE message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_enumerate_frame())
# Wait for ENUMERATED message, decode it and return data.
return super(SIGatewayClient, self).decode_enumerated_frame(self.__receive_frame_until_commands(['ENUMERATED', 'ERROR']))
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> Tuple[SIStatus, Optional[str], object]:
"""
This method can be used to retrieve information about the available devices and their properties from the connected gateway. Using the optional device_access_id,
device_id and property_id parameters, the method can either request information about the whole topology, a particular device access instance, a device or a property.
The flags control the level of detail in the gateway's response.
:param device_access_id: Device access ID for which the description should be retrieved.
:param device_id: Device ID for which the description should be retrieved. Note that device_access_id must be present too.
:param property_id: Property ID for which the description should be retrieved. Note that device_access_id and device_id must be present too.
:param flags: Flags to control level of detail of the response.
:return: Returns three values. 1: Status of the operation, 2: the subject's id, 3: the description object.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send DESCRIBE message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags))
# Wait for DESCRIPTION message, decode it and return data.
return super(SIGatewayClient, self).decode_description_frame(self.__receive_frame_until_commands(['DESCRIPTION', 'ERROR']))
def find_properties(self, property_id: str) -> Tuple[SIStatus, str, int, List[str]]:
"""
This method is used to retrieve a list of existing properties that match the given property ID in the form "<device access ID>.<device ID>.<property ID>". The wildcard
character "*" is supported for <device access ID> and <device ID> fields.
For example "*.inv.3136" represents all properties with ID 3136 on the device with ID "inv" connected through any device access, "demo.*.3136" represents all properties
with ID 3136 on any device that disposes that property connected through the device access "demo" and finally "*.*.3136" represents all properties with ID 3136 on any
device that disposes that property connected through any device access.
:param property_id: The search wildcard ID.
:return: Returns four values: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
4: List of the property IDs.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send FIND PROPERTIES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_find_properties_frame(property_id))
# Wait for PROPERTIES FOUND message, decode it and return data.
return super(SIGatewayClient, self).decode_properties_found_frame(self.__receive_frame_until_commands(['PROPERTIES FOUND', 'ERROR']))
def read_property(self, property_id: str) -> Tuple[SIStatus, str, Optional[any]]:
"""
This method is used to retrieve the actual value of a given property from the connected gateway. The property is identified by the property_id parameter.
:param property_id: The ID of the property to read in the form '{device access ID}.{device ID}.{property ID}'.
:return: Returns three values: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTY message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_property_frame(property_id))
# Wait for PROPERTY READ message, decode it and return data.
return super(SIGatewayClient, self).decode_property_read_frame(self.__receive_frame_until_commands(['PROPERTY READ', 'ERROR'])).to_tuple()
def read_properties(self, property_ids: List[str]) -> List[SIPropertyReadResult]:
"""
This method is used to retrieve the actual value of multiple properties at the same time from the connected gateway. The properties are identified by the property_ids
parameter.
:param property_ids: The IDs of the properties to read in the form '{device access ID}.{device ID}.{property ID}'.
:return: Returns one value: 1: List of statuses and values of all read properties.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTIES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_properties_frame(property_ids))
# Wait for PROPERTIES READ message, decode it and return data.
return super(SIGatewayClient, self).decode_properties_read_frame(self.__receive_frame_until_commands(['PROPERTIES READ', 'ERROR']))
def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> Tuple[SIStatus, str]:
"""
The write_property method is used to change the actual value of a given property. The property is identified by the property_id parameter and the new value is passed by the
optional value parameter.
This value parameter is optional as it is possible to write to properties with the data type "Signal" where there is no actual value written, the write operation rather
triggers an action on the device.
:param property_id: The ID of the property to write in the form '{device access ID}.{<device ID}.{<property ID}'.
:param value: Optional value to write.
:param flags: Write flags, See SIWriteFlags for details, if not provided the flags are not send by the client, and the gateway uses the default flags
(SIWriteFlags.PERMANENT).
:return: Returns two values: 1: Status of the write operation, 2: the ID of the property written.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send WRITE PROPERTY message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_write_property_frame(property_id, value, flags))
# Wait for PROPERTY WRITTEN message, decode it and return data.
return super(SIGatewayClient, self).decode_property_written_frame(self.__receive_frame_until_commands(['PROPERTY WRITTEN', 'ERROR']))
def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> Tuple[SIStatus, List[str]]:
"""
This method is used to retrieve the list of IDs of all properties for whom data is logged on the gateway. If a time window is given using from and to, only data in this
time windows is considered.
:param from_: Optional date and time of the start of the time window to be considered.
:param to: Optional date and time of the end of the time window to be considered.
:return: Returns two values: 1: Status of the operation, 2: List of all properties for whom data is logged on the gateway in the optional time window.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_datalog_frame(None, from_, to, None))
# Wait for DATALOG READ message, decode it and return data.
status, _, _, parameters = super(SIGatewayClient, self).decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))
return status, parameters.splitlines()
def read_datalog_csv(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, str, int, str]:
"""
This method is used to retrieve all or a subset of logged data of a given property from the gateway.
:param property_id: Global ID of the property for which the logged data should be retrieved. It has to be in the form '{device access ID}.{device ID}.{property ID}'.
:param from_: Optional date and time from which the data has to be retrieved, defaults to the oldest value logged.
:param to: Optional date and time to which the data has to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of results retrieved in total.
:return: Returns four values: 1: Status of the operation, 2: id of the property, 3: number of entries, 4: Properties data in CSV format whereas the first column is the
date and time in ISO 8601 extended format, and the second column contains the actual values.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_datalog_frame(property_id, from_, to, limit))
# Wait for DATALOG READ message, decode it and return data.
return super(SIGatewayClient, self).decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))
def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, int, List[SIDeviceMessage]]:
"""
The read_messages() method can be used to retrieve all or a subset of stored messages send by devices on all buses in the past from the gateway.
:param from_: Optional date and time from which the messages have to be retrieved, defaults to the oldest message saved.
:param to: Optional date and time to which the messages have to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of messages retrieved in total.
:return: Returns three values. 1: the status of the operation, 2: the number of messages, 3: the list of retrieved messages.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ MESSAGES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_messages_frame(from_, to, limit))
# Wait for MESSAGES READ message, decode it and return data.
return super(SIGatewayClient, self).decode_messages_read_frame(self.__receive_frame_until_commands(['MESSAGES READ', 'ERROR']))
def disconnect(self) -> None:
"""
Disconnects the client from the gateway.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Change state to disconnected.
self.__state = SIConnectionState.DISCONNECTED
# Close the WebSocket
self.__ws.close()
def __ensure_in_state(self, state: SIConnectionState) -> None:
if self.__state != state:
raise SIProtocolError("invalid client state")
def __receive_frame_until_commands(self, commands: list) -> str:
while True:
frame = self.__ws.recv()
if super(SIGatewayClient, self).peek_frame_command(frame) in commands:
return frame
class SIAsyncGatewayClientCallbacks:
    """
    Base class containing all callback methods that can be called by the SIAsyncGatewayClient. You can use this as your base class and register it using
    SIAsyncGatewayClient.set_callbacks().
    """
    def on_connected(self, access_level: SIAccessLevel, gateway_version: str) -> None:
        """
        This method is called once the connection to the gateway could be established and the user has been successfully authorized.

        :param access_level: Access level that was granted to the user during authorization.
        :param gateway_version: Version of the OpenStuder software running on the gateway.
        """
        pass
    def on_disconnected(self) -> None:
        """
        Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.
        """
        pass
    def on_error(self, reason: Exception) -> None:
        """
        Called on severe errors.

        :param reason: Exception that caused the erroneous behavior.
        """
        pass
    def on_enumerated(self, status: SIStatus, device_count: int) -> None:
        """
        Called when the enumeration operation started using enumerate() has completed on the gateway.

        :param status: Operation status.
        :param device_count: Number of devices present.
        """
        pass
    def on_description(self, status: SIStatus, id_: Optional[str], description: object) -> None:
        """
        Called when the gateway returned the description requested using the describe() method.

        :param status: Status of the operation.
        :param id_: Subject's ID.
        :param description: Description object.
        """
        pass
    def on_properties_found(self, status: SIStatus, id_: str, count: int, properties: List[str]) -> None:
        """
        Called when the gateway returned the list of found properties requested using the find_properties() method.

        :param status: Status of the find operation.
        :param id_: The searched ID (including wildcard character).
        :param count: The number of properties found.
        :param properties: List of the property IDs.
        """
        pass
    def on_property_read(self, status: SIStatus, property_id: str, value: Optional[any]) -> None:
        """
        Called when the property read operation started using read_property() has completed on the gateway.

        :param status: Status of the read operation.
        :param property_id: ID of the property read.
        :param value: The value read.
        """
        pass
    def on_properties_read(self, results: List[SIPropertyReadResult]) -> None:
        """
        Called when the multiple properties read operation started using read_properties() has completed on the gateway.

        :param results: List of all results of the operation.
        """
        pass
    def on_property_written(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the property write operation started using write_property() has completed on the gateway.

        :param status: Status of the write operation.
        :param property_id: ID of the property written.
        """
        pass
    def on_property_subscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.

        :param status: The status of the subscription.
        :param property_id: ID of the property.
        """
        pass
    def on_properties_subscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.

        :param statuses: The statuses of the individual subscriptions.
        """
        pass
    def on_property_unsubscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.

        :param status: The status of the unsubscription.
        :param property_id: ID of the property.
        """
        pass
    def on_properties_unsubscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.

        :param statuses: The statuses of the individual unsubscriptions.
        """
        pass
    def on_property_updated(self, property_id: str, value: any) -> None:
        """
        This callback is called whenever the gateway sends a property update.

        :param property_id: ID of the updated property.
        :param value: The current value of the property.
        """
        pass
    def on_datalog_properties_read(self, status: SIStatus, properties: List[str]) -> None:
        """
        Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.

        :param status: Status of the operation.
        :param properties: List of the IDs of the properties for whom data is available in the data log.
        """
        pass
    def on_datalog_read_csv(self, status: SIStatus, property_id: str, count: int, values: str) -> None:
        """
        Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the method returns the data in CSV format suitable to
        be written to a file.

        :param status: Status of the operation.
        :param property_id: ID of the property.
        :param count: Number of entries.
        :param values: Properties data in CSV format whereas the first column is the date and time in ISO 8601 extended format and the second column contains the actual values.
        """
        pass
    def on_device_message(self, message: SIDeviceMessage) -> None:
        """
        This callback is called whenever the gateway sends a device message indication.

        :param message: The device message received.
        """
        pass
    def on_messages_read(self, status: SIStatus, count: int, messages: List[SIDeviceMessage]) -> None:
        """
        Called when the gateway returned the status of the read messages operation using the read_messages() method.

        :param status: The status of the operation.
        :param count: Number of messages retrieved.
        :param messages: List of retrieved messages.
        """
        pass
class SIAsyncGatewayClient(_SIAbstractGatewayClient):
"""
Complete, asynchronous (non-blocking) OpenStuder gateway client.
This client uses an asynchronous model which has the disadvantage to be a bit harder to use than the synchronous version. The advantages are that long operations do not block
the main thread as all results are reported using callbacks, device message indications are supported and subscriptions to property changes are possible.
"""
    def __init__(self):
        super(SIAsyncGatewayClient, self).__init__()
        # Internal connection state machine, see SIConnectionState.
        self.__state: SIConnectionState = SIConnectionState.DISCONNECTED
        # WebSocket connection and, in background mode, the daemon thread driving it.
        self.__ws: Optional[websocket.WebSocketApp] = None
        self.__thread: Optional[Thread] = None
        # Access level granted by the gateway and the gateway's reported software version.
        self.__access_level: SIAccessLevel = SIAccessLevel.NONE
        self.__gateway_version: str = ''
        # Credentials kept until authorization can be performed once the socket is open.
        self.__user: Optional[str] = None
        self.__password: Optional[str] = None
        self.on_connected: Optional[Callable[[SIAccessLevel, str], None]] = None
        """
        This callback is called once the connection to the gateway could be established and the user has been successfully authorized.

        The callback takes two arguments. 1: the access level that was granted to the user during authorization, 2: the version of the OpenStuder software running on the gateway.
        """
        self.on_disconnected: Optional[Callable[[], None]] = None
        """
        Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.

        This callback has no parameters.
        """
        self.on_error: Optional[Callable[[Exception], None]] = None
        """
        Called on severe errors.

        The single parameter passed to the callback is the exception that caused the erroneous behavior.
        """
        self.on_enumerated: Optional[Callable[[SIStatus, int], None]] = None
        """
        Called when the enumeration operation started using enumerate() has completed on the gateway.

        The callback takes two arguments. 1: operation status, 2: the number of devices present.
        """
        self.on_description: Optional[Callable[[SIStatus, Optional[str], object], None]] = None
        """
        Called when the gateway returned the description requested using the describe() method.

        The callback takes three parameters: 1: Status of the operation, 2: the subject's ID, 3: the description object.
        """
        self.on_properties_found: Optional[Callable[[SIStatus, str, int, List[str]], None]] = None
        """
        Called when the gateway returned the list of found properties requested using the find_properties() method.

        The callback takes four parameters: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
        4: List of the property IDs.
        """
        self.on_property_read: Optional[Callable[[SIStatus, str, Optional[any]], None]] = None
        """
        Called when the property read operation started using read_property() has completed on the gateway.

        The callback takes three parameters: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
        """
        self.on_properties_read: Optional[Callable[[List[SIPropertyReadResult]], None]] = None
        """
        Called when the multiple properties read operation started using read_properties() has completed on the gateway.

        The callback takes one parameters: 1: List of all results of the operation.
        """
        self.on_property_written: Optional[Callable[[SIStatus, str], None]] = None
        """
        Called when the property write operation started using write_property() has completed on the gateway.

        The callback takes two parameters: 1: Status of the write operation, 2: the ID of the property written.
        """
        self.on_property_subscribed: Optional[Callable[[SIStatus, str], None]] = None
        """
        Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.

        The callback takes two parameters: 1: The status of the subscription, 2: The ID of the property.
        """
        self.on_properties_subscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
        """
        Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.

        The callback takes one parameter: 1: List of statuses of individual subscription requests.
        """
        self.on_property_unsubscribed: Optional[Callable[[SIStatus, str], None]] = None
        """
        Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.

        The callback takes two parameters: 1: The status of the unsubscription, 2: The ID of the property.
        """
        self.on_properties_unsubscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
        """
        Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.

        The callback takes one parameter: 1: List of statuses of individual unsubscription requests.
        """
        self.on_property_updated: Optional[Callable[[str, any], None]] = None
        """
        This callback is called whenever the gateway sends a property update.

        The callback takes two parameters: 1: the ID of the property that has updated, 2: the actual value.
        """
        self.on_datalog_properties_read: Optional[Callable[[SIStatus, List[str]], None]] = None
        """
        Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.

        The callback takes 2 parameters: 1: Status of the operation, 2: List of the IDs of the properties for whom data is available in the data log.
        """
        self.on_datalog_read_csv: Optional[Callable[[SIStatus, str, int, str], None]] = None
        """
        Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the callback returns the data in CSV format suitable to
        be written to a file.

        The callback takes four parameters: 1: Status of the operation, 2: ID of the property, 3: number of entries, 4: properties data in CSV format whereas the first column is
        the date and time in ISO 8601 extended format and the second column contains the actual values.
        """
        self.on_device_message: Optional[Callable[[SIDeviceMessage], None]] = None
        """
        This callback is called whenever the gateway sends a device message indication.

        The callback takes one parameter, the device message object.
        """
        self.on_messages_read: Optional[Callable[[SIStatus, Optional[int], List[SIDeviceMessage]], None]] = None
        """
        Called when the gateway returned the status of the read messages operation using the read_messages() method.

        The callback takes three parameters: 1: the status of the operation, 2: the number of messages retrieved, 3: the list of retrieved messages.
        """
def connect(self, host: str, port: int = 1987, user: str = None, password: str = None, background: bool = True) -> None:
"""
Establishes the WebSocket connection to the OpenStuder gateway and executes the user authorization process once the connection has been established in the background. This
method returns immediately and does not block the current thread.
The status of the connection attempt is reported either by the on_connected() callback on success or the on_error() callback if the connection could not be established
or the authorisation for the given user was rejected by the gateway.
:param host: Hostname or IP address of the OpenStuder gateway to connect to.
:param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
:param user: Username send to the gateway used for authorization.
:param password: Password send to the gateway used for authorization.
:param background: If true, the handling of the WebSocket connection is done in the background, if false the current thread is took over.
:raises SIProtocolError: If there was an error initiating the WebSocket connection.
"""
# Ensure that the client is in the DISCONNECTED state.
self.__ensure_in_state(SIConnectionState.DISCONNECTED)
# Save parameter for later use.
self.__user = user
self.__password = password
# Connect to WebSocket server.
self.__state = SIConnectionState.CONNECTING
self.__ws = websocket.WebSocketApp('ws://{host}:{port}'.format(host=host, port=port),
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close
)
# TODO: Start connection timeout.
# If background mode is selected, start a daemon thread for the connection handling, otherwise take over current thread.
if background:
self.__thread = Thread(target=self.__ws.run_forever)
self.__thread.setDaemon(True)
self.__thread.start()
else:
self.__ws.run_forever()
def set_callbacks(self, callbacks: SIAsyncGatewayClientCallbacks) -> None:
"""
Configures the client to use all callbacks of the passed abstract client callback class. Using this you can set all callbacks to be called on the given object and avoid
having to set each callback individually.
:param callbacks: Object derived from SIAsyncGatewayClientCallbacks to be used for all callbacks.
"""
if isinstance(callbacks, SIAsyncGatewayClientCallbacks):
self.on_connected = callbacks.on_connected
self.on_disconnected = callbacks.on_disconnected
self.on_error = callbacks.on_error
self.on_enumerated = callbacks.on_enumerated
self.on_description = callbacks.on_description
self.on_properties_found = callbacks.on_properties_found
self.on_property_read = callbacks.on_property_read
self.on_properties_read = callbacks.on_properties_read
self.on_property_written = callbacks.on_property_written
self.on_property_subscribed = callbacks.on_property_subscribed
self.on_properties_subscribed = callbacks.on_properties_subscribed
self.on_property_unsubscribed = callbacks.on_property_unsubscribed
self.on_properties_unsubscribed = callbacks.on_properties_unsubscribed
self.on_property_updated = callbacks.on_property_updated
self.on_datalog_properties_read = callbacks.on_datalog_properties_read
self.on_datalog_read_csv = callbacks.on_datalog_read_csv
self.on_device_message = callbacks.on_device_message
self.on_messages_read = callbacks.on_messages_read
def state(self) -> SIConnectionState:
"""
Returns the current state of the client. See **SIConnectionState** for details.
:return: Current state of the client.
"""
return self.__state
def access_level(self) -> SIAccessLevel:
"""
Return the access level the client has gained on the gateway connected. See **SIAccessLevel** for details.
:return: Access level granted to client.
"""
return self.__access_level
def gateway_version(self) -> str:
"""
Returns the version of the OpenStuder gateway software running on the host the client is connected to.
:return: Version of the gateway software.
"""
return self.__gateway_version
def enumerate(self) -> None:
"""
Instructs the gateway to scan every configured and functional device access driver for new devices and remove devices that do not respond anymore.
The status of the operation and the number of devices present are reported using the on_enumerated() callback.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send ENUMERATE message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_enumerate_frame())
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> None:
"""
This method can be used to retrieve information about the available devices and their properties from the connected gateway. Using the optional device_access_id,
device_id and property_id parameters, the method can either request information about the whole topology, a particular device access instance, a device or a property.
The flags control the level of detail in the gateway's response.
The description is reported using the on_description() callback.
:param device_access_id: Device access ID for which the description should be retrieved.
:param device_id: Device ID for which the description should be retrieved. Note that device_access_id must be present too.
:param property_id: Property ID for which the description should be retrieved. Note that device_access_id and device_id must be present too.
:param flags: Flags to control level of detail of the response.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send DESCRIBE message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags))
def find_properties(self, property_id: str) -> None:
"""
This method is used to retrieve a list of existing properties that match the given property ID in the form "<device access ID>.<device ID>.<property ID>". The wildcard
character "*" is supported for <device access ID> and <device ID> fields.
For example "*.inv.3136" represents all properties with ID 3136 on the device with ID "inv" connected through any device access, "demo.*.3136" represents all properties
with ID 3136 on any device that disposes that property connected through the device access "demo" and finally "*.*.3136" represents all properties with ID 3136 on any
device that disposes that property connected through any device access.
The status of the read operation and the actual value of the property are reported using the on_properties_found() callback.
:param property_id: The search wildcard ID.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send FIND PROPERTIES message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_find_properties_frame(property_id))
def read_property(self, property_id: str) -> None:
"""
This method is used to retrieve the actual value of a given property from the connected gateway. The property is identified by the property_id parameter.
The status of the read operation and the actual value of the property are reported using the on_property_read() callback.
:param property_id: The ID of the property to read in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_read_property_frame(property_id))
def read_properties(self, property_ids: List[str]) -> None:
"""
This method is used to retrieve the actual value of multiple property at the same time from the connected gateway. The properties are identified by the property_ids
parameter.
The status of the multiple read operations and the actual value of the properties are reported using the on_properties_read() callback.
:param property_ids: The IDs of the properties to read in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTIES message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_read_properties_frame(property_ids))
def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> None:
"""
The write_property method is used to change the actual value of a given property. The property is identified by the property_id parameter and the new value is passed by the
optional value parameter.
This value parameter is optional as it is possible to write to properties with the data type "Signal" where there is no actual value written, the write operation rather
triggers an action on the device.
The status of the write operation is reported using the on_property_written() callback.
:param property_id: The ID of the property to write in the form '{device access ID}.{<device ID}.{<property ID}'.
:param value: Optional value to write.
:param flags: Write flags, See SIWriteFlags for details, if not provided the flags are not send by the client and the gateway uses the default flags
(SIWriteFlags.PERMANENT).
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send WRITE PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_write_property_frame(property_id, value, flags))
def subscribe_to_property(self, property_id: str) -> None:
"""
This method can be used to subscribe to a property on the connected gateway. The property is identified by the property_id parameter.
The status of the subscribe request is reported using the on_property_subscribed() callback.
:param property_id: The ID of the property to subscribe to in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send SUBSCRIBE PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_subscribe_property_frame(property_id))
def subscribe_to_properties(self, property_ids: List[str]) -> None:
"""
This method can be used to subscribe to multiple properties on the connected gateway. The properties are identified by the property_ids parameter.
The status of the subscribe request is reported using the on_properties_subscribed() callback.
:param property_ids: The list of IDs of the properties to subscribe to in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send SUBSCRIBE PROPERTIES message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_subscribe_properties_frame(property_ids))
def unsubscribe_from_property(self, property_id: str) -> None:
"""
This method can be used to unsubscribe from a property on the connected gateway. The property is identified by the property_id parameter.
The status of the unsubscribe request is reported using the on_property_unsubscribed() callback.
:param property_id: The ID of the property to unsubscribe from in the form '{device access ID}.{device ID}.{property ID}'.
:raises SIProtocolError: If the client is not connected or not yet authorized.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send UNSUBSCRIBE PROPERTY message to gateway.
self.__ws.send(super(SIAsyncGatewayClient, self).encode_unsubscribe_property_frame(property_id))
def unsubscribe_from_properties(self, property_ids: List[str]) -> None:
    """
    Unsubscribe from several properties on the connected gateway in one request.
    The outcome of the request is reported via the on_properties_unsubscribed() callback.
    :param property_ids: The list of IDs of the properties to unsubscribe from in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Unsubscribing is only allowed once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    frame = super().encode_unsubscribe_properties_frame(property_ids)
    self.__ws.send(frame)
def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> None:
    """
    Retrieve the IDs of all properties for which data is logged on the gateway,
    optionally restricted to the time window [from_, to].
    The resulting property list is reported via the on_datalog_properties_read() callback.
    :param from_: Optional date and time of the start of the time window to be considered.
    :param to: Optional date and time of the end of the time window to be considered.
    :raises SIProtocolError: On a connection, protocol of framing error.
    """
    # Reading the datalog is only allowed once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # A None property id turns the READ DATALOG request into a property-list query.
    frame = super().encode_read_datalog_frame(None, from_, to, None)
    self.__ws.send(frame)
def read_datalog(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
    """
    Retrieve all or a subset of the logged data of one property from the gateway.
    The status and the CSV values are reported via the on_datalog_read_csv() callback.
    :param property_id: Global ID of the property for which the logged data should be retrieved. It has to be in the form '{device access ID}.{device ID}.{property ID}'.
    :param from_: Optional date and time from which the data has to be retrieved, defaults to the oldest value logged.
    :param to: Optional date and time to which the data has to be retrieved, defaults to the current time on the gateway.
    :param limit: Using this optional parameter you can limit the number of results retrieved in total.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Reading the datalog is only allowed once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    frame = super().encode_read_datalog_frame(property_id, from_, to, limit)
    self.__ws.send(frame)
def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
    """
    Retrieve all or a subset of the device messages stored on the gateway.
    The status and the retrieved messages are reported via the on_messages_read() callback.
    :param from_: Optional date and time from which the messages have to be retrieved, defaults to the oldest message saved.
    :param to: Optional date and time to which the messages have to be retrieved, defaults to the current time on the gateway.
    :param limit: Using this optional parameter you can limit the number of messages retrieved in total.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Reading messages is only allowed once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    frame = super().encode_read_messages_frame(from_, to, limit)
    self.__ws.send(frame)
def disconnect(self) -> None:
    """
    Disconnects the client from the gateway.

    Closing the WebSocket eventually triggers the on_disconnected() callback.
    """
    # Disconnecting only makes sense while connected.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    self.__ws.close()
def __ensure_in_state(self, state: SIConnectionState) -> None:
    # Guard helper: raise if the client is not in the expected connection
    # state (e.g. an operation was attempted before being connected/authorized).
    if self.__state != state:
        raise SIProtocolError("invalid client state")
def __on_open(self, ws) -> None:
    # Internal websocket open handler: start the authorization handshake.
    self.__state = SIConnectionState.AUTHORIZING
    # Authorize with credentials when both are available, otherwise request
    # guest access.
    if self.__user is not None and self.__password is not None:
        frame = super().encode_authorize_frame_with_credentials(self.__user, self.__password)
    else:
        frame = super().encode_authorize_frame_without_credentials()
    self.__ws.send(frame)
def __on_message(self, ws, frame: str) -> None:
    # Internal websocket message handler: decode the received frame and
    # dispatch it to the matching user callback based on the frame command.
    # Determine the actual command.
    command = super(SIAsyncGatewayClient, self).peek_frame_command(frame)
    try:
        # In AUTHORIZE state we only handle AUTHORIZED messages.
        if self.__state == SIConnectionState.AUTHORIZING:
            self.__access_level, self.__gateway_version = super(SIAsyncGatewayClient, self).decode_authorized_frame(frame)
            # Change state to CONNECTED.
            self.__state = SIConnectionState.CONNECTED
            # Call callback if present.
            if callable(self.on_connected):
                self.on_connected(self.__access_level, self.__gateway_version)
        # In CONNECTED state we handle all messages except the AUTHORIZED message.
        else:
            if command == 'ERROR':
                if callable(self.on_error):
                    _, headers, _ = super(SIAsyncGatewayClient, self).decode_frame(frame)
                    self.on_error(SIProtocolError(headers['reason']))
            elif command == 'ENUMERATED':
                status, device_count = super(SIAsyncGatewayClient, self).decode_enumerated_frame(frame)
                if callable(self.on_enumerated):
                    self.on_enumerated(status, device_count)
            elif command == 'DESCRIPTION':
                status, id_, description = super(SIAsyncGatewayClient, self).decode_description_frame(frame)
                if callable(self.on_description):
                    self.on_description(status, id_, description)
            elif command == 'PROPERTIES FOUND':
                # NOTE: 'list' shadows the builtin of the same name here.
                status, id_, count, list = super(SIAsyncGatewayClient, self).decode_properties_found_frame(frame)
                if callable(self.on_properties_found):
                    self.on_properties_found(status, id_, count, list)
            elif command == 'PROPERTY READ':
                result = super(SIAsyncGatewayClient, self).decode_property_read_frame(frame)
                if callable(self.on_property_read):
                    self.on_property_read(result.status, result.id, result.value)
            elif command == 'PROPERTIES READ':
                results = super(SIAsyncGatewayClient, self).decode_properties_read_frame(frame)
                if callable(self.on_properties_read):
                    self.on_properties_read(results)
            elif command == 'PROPERTY WRITTEN':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_written_frame(frame)
                if callable(self.on_property_written):
                    self.on_property_written(status, id_)
            elif command == 'PROPERTY SUBSCRIBED':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_subscribed_frame(frame)
                if callable(self.on_property_subscribed):
                    self.on_property_subscribed(status, id_)
            elif command == 'PROPERTIES SUBSCRIBED':
                statuses = super(SIAsyncGatewayClient, self).decode_properties_subscribed_frame(frame)
                if callable(self.on_properties_subscribed):
                    self.on_properties_subscribed(statuses)
            elif command == 'PROPERTY UNSUBSCRIBED':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_unsubscribed_frame(frame)
                if callable(self.on_property_unsubscribed):
                    self.on_property_unsubscribed(status, id_)
            elif command == 'PROPERTIES UNSUBSCRIBED':
                statuses = super(SIAsyncGatewayClient, self).decode_properties_unsubscribed_frame(frame)
                if callable(self.on_properties_unsubscribed):
                    self.on_properties_unsubscribed(statuses)
            elif command == 'PROPERTY UPDATE':
                id_, value = super(SIAsyncGatewayClient, self).decode_property_update_frame(frame)
                if callable(self.on_property_updated):
                    self.on_property_updated(id_, value)
            elif command == 'DATALOG READ':
                status, id_, count, values = super(SIAsyncGatewayClient, self).decode_datalog_read_frame(frame)
                # A missing id means this is the reply to a property-list
                # query rather than to a single-property data query.
                if id_ is None:
                    if callable(self.on_datalog_properties_read):
                        self.on_datalog_properties_read(status, values.splitlines())
                else:
                    if callable(self.on_datalog_read_csv):
                        self.on_datalog_read_csv(status, id_, count, values)
            elif command == 'DEVICE MESSAGE':
                message = super(SIAsyncGatewayClient, self).decode_device_message_frame(frame)
                if callable(self.on_device_message):
                    self.on_device_message(message)
            elif command == 'MESSAGES READ':
                status, count, messages = super(SIAsyncGatewayClient, self).decode_messages_read_frame(frame)
                if callable(self.on_messages_read):
                    self.on_messages_read(status, count, messages)
            else:
                if callable(self.on_error):
                    self.on_error(SIProtocolError('unsupported frame command: {command}'.format(command=command)))
    except SIProtocolError as error:
        if callable(self.on_error):
            self.on_error(error)
        # A protocol error during authorization is fatal: tear down the
        # connection instead of staying half-connected.
        if self.__state == SIConnectionState.AUTHORIZING:
            self.__ws.close()
            self.__state = SIConnectionState.DISCONNECTED
def __on_error(self, ws, error: Exception) -> None:
    # Internal websocket error handler: forward the error to the user's
    # on_error callback, wrapped in SIProtocolError.
    if callable(self.on_error):
        # Not every exception carries two args (plain socket errors may have
        # only one, or none); fall back to the string form so the error path
        # itself cannot raise an IndexError.
        detail = error.args[1] if len(error.args) > 1 else str(error)
        self.on_error(SIProtocolError(detail))
def __on_close(self, ws) -> None:
    # Internal websocket close handler: reset the client state and notify.
    # Drop back to no access and mark the connection as gone.
    self.__access_level = SIAccessLevel.NONE
    self.__state = SIConnectionState.DISCONNECTED
    # Call callback.
    if callable(self.on_disconnected):
        self.on_disconnected()
    # Wait for the receive thread to terminate.
    self.__thread.join()
| 33,241 | 1,476 | 558 |
19edf52f2ee673c122ec1d673f22633ce7ecfbaf | 1,675 | py | Python | py_neuromodulation/nm_fft.py | neuromodulation/py_neuromodulation | 1e8505d4324c9d2f37e5d56629a2ee418ea0b12b | [
"MIT"
] | 7 | 2021-05-12T02:13:12.000Z | 2022-02-28T13:14:23.000Z | py_neuromodulation/nm_fft.py | neuromodulation/py_neuromodulation | 1e8505d4324c9d2f37e5d56629a2ee418ea0b12b | [
"MIT"
] | 98 | 2021-03-26T19:04:20.000Z | 2022-03-15T09:07:29.000Z | py_neuromodulation/nm_fft.py | neuromodulation/py_neuromodulation | 1e8505d4324c9d2f37e5d56629a2ee418ea0b12b | [
"MIT"
] | 1 | 2021-07-16T10:39:01.000Z | 2021-07-16T10:39:01.000Z | from scipy import fft
import numpy as np
def get_fft_features(features_, s, fs, data, KF_dict, ch, f_ranges, f_band_names):
    """Get FFT features for different f_ranges. Data needs to be a batch of 1s length

    Parameters
    ----------
    features_ : dict
        feature dictionary; updated in place and also returned
    s : dict
        settings dict
    fs : int/float
        sampling frequency
    data : np.array
        data for single channel, assumed to be one second
    KF_dict : dict
        Kalmanfilter dictionaries, channel, bandpower and frequency
        band specific
    ch : string
        channel name
    f_ranges : list
        list of list with respective frequency band ranges
    f_band_names : list
        list of frequency band names

    Returns
    -------
    dict
        features_ with one '<ch>_fft_<band>' entry added per frequency band
    """
    # Keep only the trailing analysis window; windowlength is given in ms.
    data = data[-int(s["fft_settings"]["windowlength"] * fs / 1000):]
    Z = np.abs(fft.rfft(data))
    # NOTE(review): this frequency axis assumes one rfft bin per Hz, which
    # holds when the window spans exactly one second worth of bins — confirm
    # for fs != 1000 or non-1000 ms windows.
    f = np.arange(0, int(s["fft_settings"]["windowlength"] / 2) + 1, 1)
    for idx_fband, f_range in enumerate(f_ranges):
        fband = f_band_names[idx_fband]
        # Mean spectral amplitude over the band's bins.
        idx_range = np.where((f >= f_range[0]) & (f <= f_range[1]))[0]
        feature_calc = np.mean(Z[idx_range])
        if s["fft_settings"]["log_transform"]:
            feature_calc = np.log(feature_calc)
        # Optionally smooth the band power with the per-channel/band Kalman
        # filter (idiomatic truthiness instead of the former 'is True').
        if s["methods"]["kalman_filter"] and fband in s["kalman_filter_settings"]["frequency_bands"]:
            KF_name = '_'.join([ch, fband])
            KF_dict[KF_name].predict()
            KF_dict[KF_name].update(feature_calc)
            feature_calc = KF_dict[KF_name].x[0]  # filtered signal
        feature_name = '_'.join([ch, 'fft', fband])
        features_[feature_name] = feature_calc
    return features_
| 32.843137 | 85 | 0.616716 | from scipy import fft
import numpy as np
def get_fft_features(features_, s, fs, data, KF_dict, ch, f_ranges, f_band_names):
    """Get FFT features for different f_ranges. Data needs to be a batch of 1s length

    Parameters
    ----------
    features_ : dict
        feature dictionary
    s : dict
        settings dict
    fs : int/float
        sampling frequency
    data : np.array
        data for single channel, assumed to be one second
    KF_dict : dict
        Kalmanfilter dictionaries, channel, bandpower and frequency
        band specific
    ch : string
        channel name
    f_ranges : list
        list of list with respective frequency band ranges
    f_band_names : list
        list of frequency band names
    """
    # Keep only the trailing analysis window; windowlength is given in ms.
    data = data[-int(s["fft_settings"]["windowlength"]*fs/1000):]
    Z = np.abs(fft.rfft(data))
    # NOTE(review): assumes one rfft bin per Hz — confirm for fs != 1000.
    f = np.arange(0, int(s["fft_settings"]["windowlength"]/2)+1, 1)
    for idx_fband, f_range in enumerate(f_ranges):
        fband = f_band_names[idx_fband]
        # Mean spectral amplitude over the band's bins.
        idx_range = np.where((f >= f_range[0]) & (f <= f_range[1]))[0]
        feature_calc = np.mean(Z[idx_range])
        if s["fft_settings"]["log_transform"]:
            feature_calc = np.log(feature_calc)
        # Optionally smooth the band power with a per-channel/band Kalman filter.
        if s["methods"]["kalman_filter"] is True:
            if fband in s["kalman_filter_settings"]["frequency_bands"]:
                KF_name = '_'.join([ch, fband])
                KF_dict[KF_name].predict()
                KF_dict[KF_name].update(feature_calc)
                feature_calc = KF_dict[KF_name].x[0]  # filtered signal
        feature_name = '_'.join([ch, 'fft', fband])
        features_[feature_name] = feature_calc
    return features_
| 0 | 0 | 0 |
0086c54dad5b6fdac6f2eec722f377e833b2a519 | 2,099 | py | Python | kubeasy_sdk/service.py | dylanturn/kubeasy | 7c9fd62e22ecd89632f5aa7a7a17fda24ebe7490 | [
"Apache-2.0"
] | null | null | null | kubeasy_sdk/service.py | dylanturn/kubeasy | 7c9fd62e22ecd89632f5aa7a7a17fda24ebe7490 | [
"Apache-2.0"
] | null | null | null | kubeasy_sdk/service.py | dylanturn/kubeasy | 7c9fd62e22ecd89632f5aa7a7a17fda24ebe7490 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from enum import Enum
from imports import k8s
from cdk8s import Chart
from kubeasy_sdk.deployment import Deployment
from kubeasy_sdk.utils.resource import Rendered
from kubeasy_sdk.utils.networking.service_port import ServicePort
from kubeasy_sdk.utils.collections.service_ports import ServicePorts
# Service Labels
# Service Selectors
| 28.364865 | 110 | 0.738923 | from __future__ import annotations
from enum import Enum
from imports import k8s
from cdk8s import Chart
from kubeasy_sdk.deployment import Deployment
from kubeasy_sdk.utils.resource import Rendered
from kubeasy_sdk.utils.networking.service_port import ServicePort
from kubeasy_sdk.utils.collections.service_ports import ServicePorts
class ServiceType(Enum):
    """Kubernetes Service exposure types supported by kubeasy."""

    CLUSTERIP = "ClusterIP"
    LOADBALANCER = "LoadBalancer"
    NODEPORT = "NodePort"

    def k8s_name(self) -> str:
        """Return the exact string Kubernetes expects for this service type."""
        return str(self.value)
class Service(Rendered):
    """Kubeasy model of a Kubernetes Service fronting a Deployment.

    Mutator methods return ``self`` (or the added object) so calls can be
    chained; ``render_k8s_resource`` produces the final cdk8s object.
    """

    def __init__(self, name: str, deployment: Deployment):
        # Rendered keeps the constructor arguments (minus self) for rendering.
        func_locals = dict(locals())
        del func_locals['self']
        super().__init__(**func_locals)
        self.name = name
        self.deployment = deployment
        self.environment = deployment.environment
        self.namespace = deployment.namespace
        self.labels = {}
        self.selector = {}
        # Default: expose the service inside the cluster only.
        self.service_type = ServiceType.CLUSTERIP
        self.ports = ServicePorts()

    def set_type(self, service_type: ServiceType) -> Service:
        """Set how the service is exposed (ClusterIP/LoadBalancer/NodePort)."""
        self.service_type = service_type
        return self

    def add_port(self, service_port: ServicePort) -> ServicePort:
        """Register a port on this service and bind it to the service name."""
        return self.ports.add_port(service_port).set_service_name(self.name)

    # Service Labels
    # Fixed annotations: dict requires both key and value type arguments.

    def set_labels(self, labels: dict[str, str]) -> Service:
        """Replace all metadata labels."""
        self.labels = labels
        return self

    def add_label(self, key: str, value: str) -> Service:
        """Add (or overwrite) a single metadata label."""
        self.labels[key] = value
        return self

    # Service Selectors

    def set_selectors(self, selectors: dict[str, str]) -> Service:
        """Replace the pod selector mapping."""
        self.selector = selectors
        return self

    def add_selector(self, selector_key: str, selector_value: str) -> Service:
        """Add (or overwrite) a single pod selector entry."""
        self.selector[selector_key] = selector_value
        return self

    def render_k8s_resource(self, chart: Chart) -> k8s.Service:
        """Render this model into a cdk8s ``k8s.Service`` within ``chart``."""
        service_ports = ServicePort.render_port_list(self.ports)
        svc_spec = k8s.ServiceSpec(type=self.service_type.k8s_name(), ports=service_ports, selector=self.selector)
        object_meta = k8s.ObjectMeta(name=self.name, labels=self.labels)
        return k8s.Service(scope=chart, name=self.name, metadata=object_meta, spec=svc_spec)
| 1,356 | 113 | 245 |
6176f54a26a8cd4a12b47289cd60ad078137a050 | 1,602 | py | Python | day14/solution.py | Mark-Simulacrum/advent-of-code-2015 | 8b842eba07a68fd4c2baad60f4ea204429d62f35 | [
"MIT"
] | 1 | 2015-12-20T23:59:34.000Z | 2015-12-20T23:59:34.000Z | day14/solution.py | Mark-Simulacrum/advent-of-code-2015 | 8b842eba07a68fd4c2baad60f4ea204429d62f35 | [
"MIT"
] | null | null | null | day14/solution.py | Mark-Simulacrum/advent-of-code-2015 | 8b842eba07a68fd4c2baad60f4ea204429d62f35 | [
"MIT"
] | null | null | null | import operator
import re
data = open("data", "r").read()
seconds = 2503
reindeers = map(parseDataLine, data.split("\n"))
reindeerNames = {}
for reindeer in reindeers:
reindeerNames[reindeer[0]] = 0
for second in range(seconds + 1):
inLeads = inLeadAtSecond(reindeers, second)
for inLead in inLeads:
reindeerNames[inLead[1][0]] += 1
a = inLeadAtSecond(reindeers, seconds)[0]
print "Part 1, in lead:", (a[1][0], a[0])
print "Part 2, in lead:", list(reversed(sorted(reindeerNames.items(), key=operator.itemgetter(1))))[0]
| 23.910448 | 102 | 0.708489 | import operator
import re
data = open("data", "r").read()
def parseDataLine(line):
    # Pull the reindeer's name, speed (km/s), flight duration and rest
    # duration out of one puzzle-input line.
    match = re.search(r'(\w+) .* (\d+) km/s for (\d+) .* (\d+)', line)
    name, kmPerSec, speedTime, restTime = match.groups()
    return (name, int(kmPerSec), int(speedTime), int(restTime))
def getDistanceAtTime(time, reindeerObject):
    """Simulate a reindeer second-by-second; return (distance, reindeerObject)."""
    name, kmPerSec, speedTime, restTime = reindeerObject
    distanceTraveled = 0
    timeSpeeding = 0
    timeResting = 0
    isResting = False
    for _ in range(time):
        # Phase transition happens at the start of a second: a reindeer that
        # just finished flying (resting) switches to resting (flying).
        if timeSpeeding == speedTime:
            isResting = True
            timeSpeeding = 0
        elif timeResting == restTime:
            isResting = False
            timeResting = 0
        if isResting:
            timeResting += 1
        else:
            distanceTraveled += kmPerSec
            timeSpeeding += 1
    return (distanceTraveled, reindeerObject)

def inLeadAtSecond(reindeers, second):
    """Return the [(distance, reindeer), ...] entries in the lead at `second`.

    Fixes over the original:
    - A fleet with a single reindeer no longer raises IndexError.
    - A tie is detected by comparing distances; the old code compared whole
      (distance, reindeer) tuples, which are never equal for distinct
      reindeer, so the tie branch was unreachable.
    - On a tie the full (distance, reindeer) tuples are returned, matching
      what callers index with inLead[1][0]; the old branch returned bare
      distances, which would have crashed the caller.
    """
    distanceObjects = [getDistanceAtTime(second, reindeerObj) for reindeerObj in reindeers]
    sortedDistObj = sorted(distanceObjects, reverse=True)
    if len(sortedDistObj) > 1 and sortedDistObj[0][0] == sortedDistObj[1][0]:
        return sortedDistObj[:2]
    return [sortedDistObj[0]]
# Driver script (Python 2: bare print statements).
seconds = 2503
reindeers = map(parseDataLine, data.split("\n"))
# Part 2 scoring: one point per second to each reindeer in the lead.
reindeerNames = {}
for reindeer in reindeers:
    reindeerNames[reindeer[0]] = 0
for second in range(seconds + 1):
    inLeads = inLeadAtSecond(reindeers, second)
    for inLead in inLeads:
        reindeerNames[inLead[1][0]] += 1
# Part 1: the reindeer leading at the final second, with its distance.
a = inLeadAtSecond(reindeers, seconds)[0]
print "Part 1, in lead:", (a[1][0], a[0])
print "Part 2, in lead:", list(reversed(sorted(reindeerNames.items(), key=operator.itemgetter(1))))[0]
| 1,001 | 0 | 69 |
8a523dc01e2cfb12326fe5012d648b8bc8959329 | 1,834 | py | Python | samples/quandl_sample.py | daxlab/pyalgotrade | 5517c2644da97e7ef143d344d813232d6845a29f | [
"Apache-2.0"
] | 1,000 | 2016-01-26T12:10:11.000Z | 2022-03-01T23:59:50.000Z | samples/quandl_sample.py | leeong05/pyalgotrade | 5578596f2442aeb3f1a777a79f82e041c1609f5f | [
"Apache-2.0"
] | 22 | 2016-01-26T15:14:09.000Z | 2019-01-30T02:36:38.000Z | samples/quandl_sample.py | leeong05/pyalgotrade | 5578596f2442aeb3f1a777a79f82e041c1609f5f | [
"Apache-2.0"
] | 613 | 2016-01-27T01:02:30.000Z | 2022-03-21T01:38:58.000Z | from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.tools import quandl
from pyalgotrade.feed import csvfeed
import datetime
if __name__ == "__main__":
main(True)
| 33.345455 | 134 | 0.703381 | from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.tools import quandl
from pyalgotrade.feed import csvfeed
import datetime
class MyStrategy(strategy.BacktestingStrategy):
    # Demo strategy: drives a backtest from a bar feed while also consuming
    # an auxiliary Quandl CSV feed through the shared event dispatcher.
    def __init__(self, feed, quandlFeed, instrument):
        strategy.BacktestingStrategy.__init__(self, feed)
        self.setUseAdjustedValues(True)
        self.__instrument = instrument
        # It is VERY important to add the extra feed to the event dispatch loop before
        # running the strategy.
        self.getDispatcher().addSubject(quandlFeed)
        # Subscribe to events from the Quandl feed.
        quandlFeed.getNewValuesEvent().subscribe(self.onQuandlData)

    def onQuandlData(self, dateTime, values):
        # Log every row delivered by the auxiliary Quandl CSV feed.
        self.info(values)

    def onBars(self, bars):
        # Log the adjusted close of the traded instrument for each new bar.
        self.info(bars[self.__instrument].getAdjClose())
def main(plot):
    # Run the demo: download GORO daily bars, attach the gold-price CSV feed,
    # and optionally plot the extra currency series alongside the strategy.
    instruments = ["GORO"]
    # Download GORO bars using WIKI source code.
    feed = quandl.build_feed("WIKI", instruments, 2006, 2012, ".")
    # Load Quandl CSV downloaded from http://www.quandl.com/OFDP-Open-Financial-Data-Project/GOLD_2-LBMA-Gold-Price-London-Fixings-P-M
    quandlFeed = csvfeed.Feed("Date", "%Y-%m-%d")
    quandlFeed.setDateRange(datetime.datetime(2006, 1, 1), datetime.datetime(2012, 12, 31))
    quandlFeed.addValuesFromCSV("quandl_gold_2.csv")
    myStrategy = MyStrategy(feed, quandlFeed, instruments[0])
    if plot:
        # One subplot holding the three LBMA gold fixing currencies.
        plt = plotter.StrategyPlotter(myStrategy, True, False, False)
        plt.getOrCreateSubplot("quandl").addDataSeries("USD", quandlFeed["USD"])
        plt.getOrCreateSubplot("quandl").addDataSeries("EUR", quandlFeed["EUR"])
        plt.getOrCreateSubplot("quandl").addDataSeries("GBP", quandlFeed["GBP"])
    myStrategy.run()
    if plot:
        plt.plot()
if __name__ == "__main__":
main(True)
| 1,481 | 26 | 126 |
ef7deba50f2dc714d92981f52b24ec7718b9a073 | 1,629 | py | Python | tests/unit/test_notebooks.py | Saransh-cpp/liionpack | 82ab00ad257ccb2bc8dbcb71bc08baa30fa9ed43 | [
"MIT"
] | 23 | 2021-09-28T15:48:48.000Z | 2022-03-15T10:34:35.000Z | tests/unit/test_notebooks.py | Saransh-cpp/liionpack | 82ab00ad257ccb2bc8dbcb71bc08baa30fa9ed43 | [
"MIT"
] | 131 | 2021-09-29T09:18:50.000Z | 2022-03-03T06:09:52.000Z | tests/unit/test_notebooks.py | Saransh-cpp/liionpack | 82ab00ad257ccb2bc8dbcb71bc08baa30fa9ed43 | [
"MIT"
] | 17 | 2021-09-29T13:14:00.000Z | 2022-03-24T11:01:19.000Z | #
# Tests jupyter notebooks
#
import os
import subprocess
import unittest
import nbconvert
import liionpack as lp
if __name__ == "__main__":
unittest.main()
| 31.941176 | 151 | 0.544506 | #
# Tests jupyter notebooks
#
import os
import subprocess
import unittest
import nbconvert
import liionpack as lp
class TestNotebooks(unittest.TestCase):
    """Smoke tests: convert each example notebook to a script and execute it."""

    def test_notebooks(self):
        examples_folder = os.path.join(lp.ROOT_DIR, "docs", "examples")
        for filename in os.listdir(examples_folder):
            if os.path.splitext(filename)[1] == ".ipynb":
                print("-" * 80)
                print("Testing notebook:", filename)
                print("-" * 80)
                # Load notebook, convert to python
                path = os.path.join(examples_folder, filename)
                e = nbconvert.exporters.PythonExporter()
                code, __ = e.from_filename(path)
                # Make sure the notebook has pip install command, for using Google Colab
                self.assertIn(
                    "pip install -q git+https://github.com/pybamm-team/liionpack.git@main",  # noqa: E501
                    code,
                    "Installation command '!pip install -q git+https://github.com/pybamm-team/liionpack.git@main' not found in notebook",  # noqa: E501
                )
                # Comment out the pip install command to avoid reinstalling
                code = code.replace("get_ipython().system('pip", "#")
                # Run in subprocess
                cmd = ["python", "-c", code]
                p = subprocess.run(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                # Improvement: surface which notebook failed and its stderr
                # instead of a bare non-zero return code assertion.
                self.assertEqual(
                    p.returncode,
                    0,
                    "{} failed:\n{}".format(filename, p.stderr.decode(errors="replace")),
                )
if __name__ == "__main__":
unittest.main()
| 1,396 | 18 | 49 |
b9da83eaab411be02a255e3869cef457be65eecc | 169 | py | Python | frappe/patches/v7_0/update_send_after_in_bulk_email.py | Nxweb-in/frappe | 56b3eb52bf56dd71bee29fde3ed28ed9c6d15947 | [
"MIT"
] | 1 | 2021-06-03T07:04:48.000Z | 2021-06-03T07:04:48.000Z | frappe/patches/v7_0/update_send_after_in_bulk_email.py | Nxweb-in/frappe | 56b3eb52bf56dd71bee29fde3ed28ed9c6d15947 | [
"MIT"
] | null | null | null | frappe/patches/v7_0/update_send_after_in_bulk_email.py | Nxweb-in/frappe | 56b3eb52bf56dd71bee29fde3ed28ed9c6d15947 | [
"MIT"
] | null | null | null |
import frappe
from frappe.utils import now_datetime | 28.166667 | 100 | 0.781065 |
import frappe
from frappe.utils import now_datetime
def execute():
    # Patch: backfill queued emails that have no scheduled send time with the
    # current timestamp so the email queue processor picks them up.
    frappe.db.sql('update `tabEmail Queue` set send_after=%s where send_after is null', now_datetime())
8284ceab076e729a53283aca651148ca390bb5ce | 2,485 | py | Python | app.py | andresmvidaurre/Project | a3c1193f8c6aac13218715a8ad98479c60f0168a | [
"MIT"
] | 1 | 2022-03-04T22:21:54.000Z | 2022-03-04T22:21:54.000Z | app.py | andresmvidaurre/Project | a3c1193f8c6aac13218715a8ad98479c60f0168a | [
"MIT"
] | null | null | null | app.py | andresmvidaurre/Project | a3c1193f8c6aac13218715a8ad98479c60f0168a | [
"MIT"
] | 2 | 2022-02-02T19:24:12.000Z | 2022-02-09T02:33:15.000Z | from flask import Flask, render_template, request
import pickle
import pandas as pd
# app instantiation
APP = Flask(__name__)
# Load model
with open("xgb_class_1.pkl", "rb") as f:
model = pickle.load(f)
@APP.route('/')
def Home_page():
    '''Landing page to the Kickstarter Prediction project'''
    # Static landing template; no model interaction here.
    return render_template('landing.html', title='Home')
@APP.route('/prediction', methods= ["POST"])
| 36.544118 | 125 | 0.651911 | from flask import Flask, render_template, request
import pickle
import pandas as pd
# app instantiation
APP = Flask(__name__)
# Load model
with open("xgb_class_1.pkl", "rb") as f:
model = pickle.load(f)
def create_project_df(name, blurb, goal, category, length):
    """Build a single-row feature DataFrame matching the model's input schema.

    Parameters
    ----------
    name, blurb : str
        Project title and short description; only their word counts are used.
    goal : str or int
        Funding goal.
    category : str
        Kickstarter category; one-hot encoded against the known columns
        (unknown categories leave all category columns at 0).
    length : str or int
        Campaign length in days.
    """
    # list all columns needed for model
    cols = ['goal', 'name_len', 'blurb_len', 'category_academic',
            'category_apps', 'category_blues', 'category_comedy',
            'category_experimental', 'category_festivals', 'category_flight',
            'category_gadgets', 'category_hardware', 'category_immersive',
            'category_makerspaces', 'category_musical', 'category_places',
            'category_plays', 'category_restaurants', 'category_robots',
            'category_shorts', 'category_software', 'category_sound',
            'category_spaces', 'category_thrillers', 'category_wearables',
            'category_web', 'category_webseries', 'campaign_length_days']
    # The model only sees word counts, not the raw text.
    name_words = len(name.split())
    blurb_words = len(blurb.split())
    category_col = "category_" + category.lower()
    # Single all-zero row so every unselected category defaults to 0.
    ks = pd.DataFrame(columns=cols)
    ks.loc[len(ks.index)] = 0
    # Fill in the numeric features.
    ks['goal'] = int(goal)
    ks['name_len'] = int(name_words)
    ks['blurb_len'] = int(blurb_words)
    ks['campaign_length_days'] = int(length)
    # One-hot encode the category directly instead of scanning every column.
    if category_col in ks.columns:
        ks[category_col] = 1
    return ks
@APP.route('/')
def Home_page():
    '''Landing page to the Kickstarter Prediction project'''
    # Static landing template; no model interaction here.
    return render_template('landing.html', title='Home')
@APP.route('/prediction', methods= ["POST"])
def prediction():
    # Handle the submitted form: turn the fields into a model-ready row and
    # render the success/failure verdict.
    prj_name = request.form['prj']
    prj_desc = request.form['blurb']
    prj_goal = request.form['goal']
    prj_category = request.form['category']
    prj_length = request.form['length']
    ks = create_project_df(prj_name, prj_desc, prj_goal, prj_category, prj_length)
    predify = model.predict(ks)
    # NOTE(review): if predict returns neither [0] nor [1], pred_result is
    # left unbound and the render below raises — confirm the model is binary.
    if predify == [0]:
        pred_result = 'an utter failure. Re-think your life, and may God have mercy on your soul.'
    if predify == [1]:
        pred_result = 'a successful individual. Revel in your glory, and be kind as you stare down on those less fortunate.'
    return render_template('prediction.html',
                           title="Prediction",
                           prediction=pred_result)
| 2,033 | 0 | 45 |
2cdf216954cfff6bf8963150ed97dfed0f7362b0 | 1,125 | py | Python | leetcode/implement_queue_using_stacks.py | zhangao0086/Python-Algorithm | 981c875b2e0f30619bd3d44e1f2bd0c47d1464a2 | [
"MIT"
] | 3 | 2021-05-21T12:55:14.000Z | 2022-02-01T16:21:30.000Z | leetcode/implement_queue_using_stacks.py | zhangao0086/Python-Algorithm | 981c875b2e0f30619bd3d44e1f2bd0c47d1464a2 | [
"MIT"
] | null | null | null | leetcode/implement_queue_using_stacks.py | zhangao0086/Python-Algorithm | 981c875b2e0f30619bd3d44e1f2bd0c47d1464a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
if __name__ == '__main__':
obj = MyQueue()
obj.push(1)
obj.push(2)
assert obj.peek() == 1
assert obj.pop() == 1
assert obj.empty() == False | 22.5 | 76 | 0.524444 | #!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
class MyQueue:
    """FIFO queue built from two LIFO stacks.

    New elements land on ``stack``; ``reverse_stack`` holds elements in
    dequeue order. Each element moves between the stacks at most once, so
    every operation is amortized O(1).
    """

    def __init__(self):
        """Initialize your data structure here."""
        self.stack = []
        self.reverse_stack = []

    def push(self, x: int) -> None:
        """Push element x to the back of queue."""
        self.stack.append(x)

    def pop(self) -> int:
        """Removes the element from in front of queue and returns that element."""
        # peek() guarantees the front element sits on top of reverse_stack.
        self.peek()
        return self.reverse_stack.pop()

    def peek(self) -> int:
        """Get the front element."""
        if not self.reverse_stack:
            # Transfer pending elements so the oldest one ends up on top.
            while self.stack:
                self.reverse_stack.append(self.stack.pop())
        return self.reverse_stack[-1]

    def empty(self) -> bool:
        """Returns whether the queue is empty."""
        return not self.stack and not self.reverse_stack
if __name__ == '__main__':
obj = MyQueue()
obj.push(1)
obj.push(2)
assert obj.peek() == 1
assert obj.pop() == 1
assert obj.empty() == False | 0 | 873 | 23 |
53f979a770bf36c7d39eb7c9093dddfa222161db | 20,464 | py | Python | Tests/interop/net/field/test_static_fields.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | null | null | null | Tests/interop/net/field/test_static_fields.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | null | null | null | Tests/interop/net/field/test_static_fields.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | 1 | 2019-09-18T05:37:46.000Z | 2019-09-18T05:37:46.000Z | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
'''
#------------------------------------------------------------------------------
from iptest import *
from iptest.assert_util import *
skiptest("silverlight")
add_clr_assemblies("fieldtests", "typesamples")
if options.RUN_TESTS: #TODO - bug when generating Pydoc
from Merlin.Testing.FieldTest import *
from Merlin.Testing.TypeSample import *
types = [
Struct,
GenericStruct[int],
GenericStruct[SimpleClass],
Class,
GenericClass[SimpleStruct],
GenericClass[SimpleClass],
]
for i in range(len(types)):
exec("def test_%s_get_by_instance(): _test_get_by_instance(types[%s])" % (i, i))
exec("def test_%s_get_by_type(): _test_get_by_type(types[%s])" % (i, i))
exec("def test_%s_get_by_descriptor(): _test_get_by_descriptor(types[%s])" % (i, i))
exec("def test_%s_set_by_instance(): _test_set_by_instance(types[%s])" % (i, i))
exec("def test_%s_set_by_type(): _test_set_by_type(types[%s])" % (i, i))
exec("def test_%s_set_by_descriptor(): _test_set_by_descriptor(types[%s])" % (i, i))
exec("def test_%s_delete_by_type(): _test_delete_by_type(types[%s])" % (i, i))
exec("def test_%s_delete_by_instance(): _test_delete_by_instance(types[%s])" % (i, i))
exec("def test_%s_delete_by_descriptor(): _test_delete_by_descriptor(types[%s])" % (i, i))
@skip("multiple_execute")
run_test(__name__)
| 44.008602 | 115 | 0.722097 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
'''
#------------------------------------------------------------------------------
from iptest import *
from iptest.assert_util import *
skiptest("silverlight")
add_clr_assemblies("fieldtests", "typesamples")
if options.RUN_TESTS: #TODO - bug when generating Pydoc
from Merlin.Testing.FieldTest import *
from Merlin.Testing.TypeSample import *
def _test_get_by_instance(current_type):
    """Read every static .NET field through an *instance* reference and check
    the values written by SetStaticFields."""
    instance = current_type()
    instance.SetStaticFields()
    # (field name, expected value, compare through the .Flag member?)
    expectations = [
        ('StaticByteField', 0, False),
        ('StaticSByteField', 1, False),
        ('StaticUInt16Field', 2, False),
        ('StaticInt16Field', 3, False),
        ('StaticUInt32Field', 4, False),
        ('StaticInt32Field', 5, False),
        ('StaticUInt64Field', 6, False),
        ('StaticInt64Field', 7, False),
        ('StaticDoubleField', 8, False),
        ('StaticSingleField', 9, False),
        ('StaticDecimalField', 10, False),
        ('StaticCharField', 'a', False),
        ('StaticBooleanField', True, False),
        ('StaticStringField', 'testing', False),
        ('StaticObjectField', 1111, True),
        ('StaticEnumField', EnumInt64.B, False),
        ('StaticDateTimeField', System.DateTime(50000), False),
        ('StaticSimpleStructField', 1234, True),
        ('StaticSimpleGenericStructField', 32, True),
        ('StaticNullableStructNotNullField', 56, True),
        ('StaticNullableStructNullField', None, False),
        ('StaticSimpleClassField', 54, True),
        ('StaticSimpleGenericClassField', "string", True),
        ('StaticSimpleInterfaceField', 87, True),
    ]
    for name, expected, use_flag in expectations:
        actual = getattr(instance, name)
        if use_flag:
            actual = actual.Flag
        AreEqual(actual, expected)
def _test_get_by_type(current_type):
    """Read every static .NET field through the *type* object and check the
    values written by SetStaticFields."""
    current_type.SetStaticFields()
    # (field name, expected value, compare through the .Flag member?)
    expectations = [
        ('StaticByteField', 0, False),
        ('StaticSByteField', 1, False),
        ('StaticUInt16Field', 2, False),
        ('StaticInt16Field', 3, False),
        ('StaticUInt32Field', 4, False),
        ('StaticInt32Field', 5, False),
        ('StaticUInt64Field', 6, False),
        ('StaticInt64Field', 7, False),
        ('StaticDoubleField', 8, False),
        ('StaticSingleField', 9, False),
        ('StaticDecimalField', 10, False),
        ('StaticCharField', 'a', False),
        ('StaticBooleanField', True, False),
        ('StaticStringField', 'testing', False),
        ('StaticObjectField', 1111, True),
        ('StaticEnumField', EnumInt64.B, False),
        ('StaticDateTimeField', System.DateTime(50000), False),
        ('StaticSimpleStructField', 1234, True),
        ('StaticSimpleGenericStructField', 32, True),
        ('StaticNullableStructNotNullField', 56, True),
        ('StaticNullableStructNullField', None, False),
        ('StaticSimpleClassField', 54, True),
        ('StaticSimpleGenericClassField', "string", True),
        ('StaticSimpleInterfaceField', 87, True),
    ]
    for name, expected, use_flag in expectations:
        actual = getattr(current_type, name)
        if use_flag:
            actual = actual.Flag
        AreEqual(actual, expected)
def _test_get_by_descriptor(current_type):
    """Read every static field via its descriptor's __get__, alternating the
    instance argument between None and a live instance (both must work for
    static fields)."""
    current_type.SetStaticFields()
    o = current_type()
    # (field name, expected value, compare through the .Flag member?)
    expectations = [
        ('StaticByteField', 0, False),
        ('StaticSByteField', 1, False),
        ('StaticUInt16Field', 2, False),
        ('StaticInt16Field', 3, False),
        ('StaticUInt32Field', 4, False),
        ('StaticInt32Field', 5, False),
        ('StaticUInt64Field', 6, False),
        ('StaticInt64Field', 7, False),
        ('StaticDoubleField', 8, False),
        ('StaticSingleField', 9, False),
        ('StaticDecimalField', 10, False),
        ('StaticCharField', 'a', False),
        ('StaticBooleanField', True, False),
        ('StaticStringField', 'testing', False),
        ('StaticObjectField', 1111, True),
        ('StaticEnumField', EnumInt64.B, False),
        ('StaticDateTimeField', System.DateTime(50000), False),
        ('StaticSimpleStructField', 1234, True),
        ('StaticSimpleGenericStructField', 32, True),
        ('StaticNullableStructNotNullField', 56, True),
        ('StaticNullableStructNullField', None, False),
        ('StaticSimpleClassField', 54, True),
        ('StaticSimpleGenericClassField', "string", True),
        ('StaticSimpleInterfaceField', 87, True),
    ]
    for index, (name, expected, use_flag) in enumerate(expectations):
        # even entries pass None, odd entries pass the instance
        target = None if index % 2 == 0 else o
        actual = current_type.__dict__[name].__get__(target, current_type)
        if use_flag:
            actual = actual.Flag
        AreEqual(actual, expected)
    # TODO (pass in other values to __get__)
def _test_verify(current_type):
    """Check that every static field holds the value written by the
    _test_set_by_* helpers."""
    # (field name, expected value, compare through the .Flag member?)
    expectations = [
        ('StaticByteField', 5, False),
        ('StaticSByteField', 10, False),
        ('StaticUInt16Field', 20, False),
        ('StaticInt16Field', 30, False),
        ('StaticUInt32Field', 40, False),
        ('StaticInt32Field', 50, False),
        ('StaticUInt64Field', 60, False),
        ('StaticInt64Field', 70, False),
        ('StaticDoubleField', 80, False),
        ('StaticSingleField', 90, False),
        ('StaticDecimalField', 100, False),
        ('StaticCharField', 'd', False),
        ('StaticBooleanField', False, False),
        ('StaticStringField', 'TESTING', False),
        ('StaticObjectField', "number_to_string", False),
        ('StaticEnumField', EnumInt64.C, False),
        ('StaticDateTimeField', System.DateTime(500000), False),
        ('StaticSimpleStructField', 12340, True),
        ('StaticSimpleGenericStructField', 320, True),
        ('StaticNullableStructNotNullField', None, False),
        ('StaticNullableStructNullField', 650, True),
        ('StaticSimpleClassField', 540, True),
        ('StaticSimpleGenericClassField', "STRING", True),
        ('StaticSimpleInterfaceField', 78, True),
    ]
    for name, expected, use_flag in expectations:
        actual = getattr(current_type, name)
        if use_flag:
            actual = actual.Flag
        AreEqual(actual, expected)
def _test_set_by_instance(current_type):
current_type.SetStaticFields()
o = current_type()
# pass correct values
def f1(): o.StaticByteField = 5
def f2(): o.StaticSByteField = 10
def f3(): o.StaticUInt16Field = 20
def f4(): o.StaticInt16Field = 30
def f5(): o.StaticUInt32Field = 40
def f6(): o.StaticInt32Field = 50
def f7(): o.StaticUInt64Field = 60
def f8(): o.StaticInt64Field = 70
def f9(): o.StaticDoubleField = 80
def f10(): o.StaticSingleField = 90
def f11(): o.StaticDecimalField = 100
def f12(): o.StaticCharField = 'd'
def f13(): o.StaticBooleanField = False
def f14(): o.StaticStringField = 'testing'.upper()
def f15(): o.StaticObjectField = "number_to_string"
def f16(): o.StaticEnumField = EnumInt64.C
def f17(): o.StaticDateTimeField = System.DateTime(500000)
def f18(): o.StaticSimpleStructField = SimpleStruct(12340)
def f19(): o.StaticSimpleGenericStructField = SimpleGenericStruct[System.UInt16](320)
def f20(): o.StaticNullableStructNotNullField = None
def f21(): o.StaticNullableStructNullField = SimpleStruct(650)
def f22(): o.StaticSimpleClassField = SimpleClass(540)
def f23(): o.StaticSimpleGenericClassField = SimpleGenericClass[str]("string".upper())
def f24(): o.StaticSimpleInterfaceField = ClassImplementSimpleInterface(78)
funcs = [ eval("f%s" % i) for i in range(1, 25) ]
for f in funcs: f()
_test_verify(current_type)
# set values which need conversion.
o.StaticInt32Field = 100L
AreEqual(current_type.StaticInt32Field, 100)
o.StaticInt32Field = 10.01
AreEqual(current_type.StaticInt32Field, 10)
# set bad values
def f1(): o.StaticInt32Field = "abc"
def f2(): o.StaticEnumField = 3
for f in [f1, f2]: AssertError(TypeError, f)
def _test_set_by_type(current_type):
current_type.SetStaticFields()
# pass correct values
current_type.StaticByteField = 5
current_type.StaticSByteField = 10
current_type.StaticUInt16Field = 20
current_type.StaticInt16Field = 30
current_type.StaticUInt32Field = 40
current_type.StaticInt32Field = 50
current_type.StaticUInt64Field = 60
current_type.StaticInt64Field = 70
current_type.StaticDoubleField = 80
current_type.StaticSingleField = 90
current_type.StaticDecimalField = 100
current_type.StaticCharField = 'd'
current_type.StaticBooleanField = False
current_type.StaticStringField = 'testing'.upper()
current_type.StaticObjectField = "number_to_string"
current_type.StaticEnumField = EnumInt64.C
current_type.StaticDateTimeField = System.DateTime(500000)
current_type.StaticSimpleStructField = SimpleStruct(12340)
current_type.StaticSimpleGenericStructField = SimpleGenericStruct[System.UInt16](320)
current_type.StaticNullableStructNotNullField = None
current_type.StaticNullableStructNullField = SimpleStruct(650)
current_type.StaticSimpleClassField = SimpleClass(540)
current_type.StaticSimpleGenericClassField = SimpleGenericClass[str]("string".upper())
current_type.StaticSimpleInterfaceField = ClassImplementSimpleInterface(78)
# verify
_test_verify(current_type)
# set values which need conversion.
current_type.StaticInt16Field = 100L
AreEqual(current_type.StaticInt16Field, 100)
current_type.StaticBooleanField = 0
AreEqual(current_type.StaticBooleanField, False)
# set bad values
def f1(): current_type.StaticInt16Field = "abc"
def f2(): current_type.StaticCharField = "abc"
def f3(): current_type.StaticEnumField = EnumInt32.B
for f in [f1, f2, f3]: AssertError(TypeError, f)
def _test_set_by_descriptor(current_type):
    """Assign every static field via its descriptor's __set__ (instance
    argument None) and verify the stored values."""
    current_type.SetStaticFields()
    o = current_type()
    # valid values, written through the raw descriptors
    assignments = [
        ('StaticByteField', 5),
        ('StaticSByteField', 10),
        ('StaticUInt16Field', 20),
        ('StaticInt16Field', 30),
        ('StaticUInt32Field', 40),
        ('StaticInt32Field', 50),
        ('StaticUInt64Field', 60),
        ('StaticInt64Field', 70),
        ('StaticDoubleField', 80),
        ('StaticSingleField', 90),
        ('StaticDecimalField', 100),
        ('StaticCharField', 'd'),
        ('StaticBooleanField', False),
        ('StaticStringField', 'TESTING'),
        ('StaticObjectField', "number_to_string"),
        ('StaticEnumField', EnumInt64.C),
        ('StaticDateTimeField', System.DateTime(500000)),
        ('StaticSimpleStructField', SimpleStruct(12340)),
        ('StaticSimpleGenericStructField', SimpleGenericStruct[System.UInt16](320)),
        ('StaticNullableStructNotNullField', None),
        ('StaticNullableStructNullField', SimpleStruct(650)),
        ('StaticSimpleClassField', SimpleClass(540)),
        ('StaticSimpleGenericClassField', SimpleGenericClass[str]("STRING")),
        ('StaticSimpleInterfaceField', ClassImplementSimpleInterface(78)),
    ]
    for name, value in assignments:
        current_type.__dict__[name].__set__(None, value)
    # verify
    _test_verify(current_type)
    # set with bad values (TODO)
def _test_delete_by_type(current_type):
    """Deleting any static .NET field through the type object must raise
    AttributeError."""
    shorts = [
        'Byte', 'SByte', 'UInt16', 'Int16', 'UInt32', 'Int32',
        'UInt64', 'Int64', 'Double', 'Single', 'Decimal', 'Char',
        'Boolean', 'String', 'Object', 'Enum', 'DateTime',
        'SimpleStruct', 'SimpleGenericStruct',
        'NullableStructNotNull', 'NullableStructNull',
        'SimpleClass', 'SimpleGenericClass', 'SimpleInterface',
    ]
    for short in shorts:
        field = 'Static%sField' % short
        AssertError(AttributeError, lambda field=field: delattr(current_type, field))  # ???
def _test_delete_by_instance(current_type):
    """Deleting any static .NET field through an instance must raise
    AttributeError."""
    o = current_type()
    shorts = [
        'Byte', 'SByte', 'UInt16', 'Int16', 'UInt32', 'Int32',
        'UInt64', 'Int64', 'Double', 'Single', 'Decimal', 'Char',
        'Boolean', 'String', 'Object', 'Enum', 'DateTime',
        'SimpleStruct', 'SimpleGenericStruct',
        'NullableStructNotNull', 'NullableStructNull',
        'SimpleClass', 'SimpleGenericClass', 'SimpleInterface',
    ]
    for short in shorts:
        field = 'Static%sField' % short
        AssertError(AttributeError, lambda field=field: delattr(o, field))  # ???
def _test_delete_by_descriptor(current_type):
    """Calling __delete__ on a static field descriptor must raise
    AttributeError regardless of the target passed in."""
    shorts = [
        'Byte', 'SByte', 'UInt16', 'Int16', 'UInt32', 'Int32',
        'UInt64', 'Int64', 'Double', 'Single', 'Decimal', 'Char',
        'Boolean', 'String', 'Object', 'Enum', 'DateTime',
        'SimpleStruct', 'SimpleGenericStruct',
        'NullableStructNotNull', 'NullableStructNull',
        'SimpleClass', 'SimpleGenericClass', 'SimpleInterface',
    ]
    for short in shorts:
        descriptor = current_type.__dict__['Static%sField' % short]
        # None, the type object and a live instance must all be rejected
        for target in [None, current_type, current_type()]:
            AssertError(AttributeError, lambda: descriptor.__delete__(target))
# Concrete .NET types (value and reference, generic and non-generic) that the
# static-field test helpers above are exercised against.
types = [
    Struct,
    GenericStruct[int],
    GenericStruct[SimpleClass],
    Class,
    GenericClass[SimpleStruct],
    GenericClass[SimpleClass],
]
# Generate one module-level test function per (type, scenario) pair so the
# test runner discovers each combination as a separate test; exec is needed
# because every generated function must have a distinct name in the module
# namespace.
for i in range(len(types)):
    exec("def test_%s_get_by_instance(): _test_get_by_instance(types[%s])" % (i, i))
    exec("def test_%s_get_by_type(): _test_get_by_type(types[%s])" % (i, i))
    exec("def test_%s_get_by_descriptor(): _test_get_by_descriptor(types[%s])" % (i, i))
    exec("def test_%s_set_by_instance(): _test_set_by_instance(types[%s])" % (i, i))
    exec("def test_%s_set_by_type(): _test_set_by_type(types[%s])" % (i, i))
    exec("def test_%s_set_by_descriptor(): _test_set_by_descriptor(types[%s])" % (i, i))
    exec("def test_%s_delete_by_type(): _test_delete_by_type(types[%s])" % (i, i))
    exec("def test_%s_delete_by_instance(): _test_delete_by_instance(types[%s])" % (i, i))
    exec("def test_%s_delete_by_descriptor(): _test_delete_by_descriptor(types[%s])" % (i, i))
@skip("multiple_execute")
def test_nested():
    """Static fields that hold another instance of the declaring type:
    chained StaticNextField access must resolve against the same shared
    static storage, for structs and classes alike."""
    for s in [ Struct2, GenericStruct2[int], GenericStruct2[str] ]:
        AreEqual(s.StaticNextField.StaticNextField.StaticNextField.StaticField, 10)
        s.StaticNextField.StaticNextField.StaticNextField.StaticField = -10
        AreEqual(s.StaticNextField.StaticNextField.StaticNextField.StaticField, -10)
    for c in [ Class2, GenericClass2[System.Byte], GenericClass2[object] ]:
        AreEqual(c.StaticNextField, None)
        c.StaticNextField = c()
        AreEqual(c.StaticNextField.StaticNextField.StaticNextField.StaticField, 10)
        c.StaticNextField.StaticNextField.StaticNextField.StaticField = 20
        # a write through the chain lands in the single static slot
        AreEqual(c.StaticField, 20)
def test_generic_fields():
    """Static fields typed on the generic parameter T: each closed generic
    type (gt[int] vs gt[str]) must keep its own independent static storage.
    Note the two halves are intentionally asymmetric: the int half re-assigns
    the struct-typed field from a nested scope, the str half the class-typed
    one."""
    for gt in [GenericStruct2, GenericClass2]:
        current_type = gt[int]
        o = current_type()
        current_type.StaticTField = 30
        current_type.StaticClassTField = SimpleGenericClass[int](40)
        current_type.StaticStructTField = SimpleGenericStruct[int](50)
        AreEqual(o.StaticTField, 30)
        AreEqual(current_type.StaticClassTField.Flag, 40)
        AreEqual(o.StaticStructTField.Flag, 50)
        # assignment from inside a nested function must hit the same slot
        def f(): o.StaticStructTField = SimpleGenericStruct[int](60)
        f()
        AreEqual(current_type.StaticStructTField.Flag, 60)
        current_type = gt[str]
        o = current_type()
        current_type.StaticTField = "30"
        current_type.StaticClassTField = SimpleGenericClass[str]("40")
        current_type.StaticStructTField = SimpleGenericStruct[str]("50")
        AreEqual(o.StaticTField, '30')
        AreEqual(current_type.StaticClassTField.Flag, '40')
        AreEqual(o.StaticStructTField.Flag, '50')
        def f(): o.StaticClassTField = SimpleGenericClass[str]("60")
        f()
        AreEqual(current_type.StaticClassTField.Flag, "60")
def test_access_from_derived_types():
    """Static fields are readable through derived types, but writing through
    a derived type creates no slot there: assignment raises AttributeError
    and the name never appears in the derived type's __dict__."""
    for current_type in [
        DerivedClass,
        DerivedOpenGenericClass[int],
        DerivedOpenGenericClass[str],
        DerivedGenericClassOfInt32,
        DerivedGenericClassOfObject,
    ]:
        _test_get_by_instance(current_type)
        _test_get_by_type(current_type)
        #
        # the behavior for derived type is different from that for the base type.
        # I have to write separate tests as below, instead of using the 2 lines
        #
        #_test_set_by_instance(current_type)
        #_test_set_by_type(current_type)
        o = current_type()
        def f1(): o.StaticByteField = 1
        def f2(): current_type.StaticByteField = 1
        AssertErrorWithMatch(AttributeError, "'.*' object has no attribute 'StaticByteField'", f1)
        AssertErrorWithMatch(AttributeError, "'.*' object has no attribute 'StaticByteField'", f2)
        Assert('StaticByteField' not in current_type.__dict__)
run_test(__name__)
| 18,035 | 0 | 306 |
4e71ac5ceee753ffebe093fcf274c014b19c977c | 4,299 | py | Python | RKD/model/backbone/inception/google.py | pantheon5100/fmcw-finetune-RKD | 1e606432ca3006e8c5d48030301988d5febb4c1b | [
"Apache-2.0"
] | 2 | 2020-08-24T07:57:16.000Z | 2022-01-16T02:06:40.000Z | RKD/model/backbone/inception/google.py | pantheon5100/fmcw-finetune-RKD | 1e606432ca3006e8c5d48030301988d5febb4c1b | [
"Apache-2.0"
] | null | null | null | RKD/model/backbone/inception/google.py | pantheon5100/fmcw-finetune-RKD | 1e606432ca3006e8c5d48030301988d5febb4c1b | [
"Apache-2.0"
] | null | null | null | import os
import torch
import torch.nn as nn
import h5py
from collections import OrderedDict
from torchvision.datasets.utils import download_url
__all__ = ["GoogleNet"]
| 40.556604 | 120 | 0.551058 | import os
import torch
import torch.nn as nn
import h5py
from collections import OrderedDict
from torchvision.datasets.utils import download_url
__all__ = ["GoogleNet"]
class GoogleNet(nn.Sequential):
    """GoogLeNet (Inception v1) feature backbone built as an nn.Sequential.

    Ends in a 7x7 average pool, producing 1024-dim features; pretrained
    weights are downloaded as an h5 file and loaded by name.
    """

    output_size = 1024
    input_side = 227
    rescale = 255.0
    rgb_mean = [122.7717, 115.9465, 102.9801]
    rgb_std = [1, 1, 1]
    url = "https://github.com/vadimkantorov/metriclearningbench/releases/download/data/googlenet.h5"
    md5hash = 'c7d7856bd1ab5cb02618b3f7f564e3c6'
    model_filename = 'googlenet.h5'

    def __init__(self, pretrained=True, root='data'):
        """Build the network; optionally download and load pretrained weights.

        :param pretrained: if True, fetch and load the h5 checkpoint
        :param root: directory where the checkpoint is cached
        """
        super(GoogleNet, self).__init__(OrderedDict([
            ('conv1', nn.Sequential(OrderedDict([
                ('7x7_s2', nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3))),
                ('relu1', nn.ReLU(True)),
                ('pool1', nn.MaxPool2d((3, 3), (2, 2), ceil_mode=True)),
                ('lrn1', nn.CrossMapLRN2d(5, 0.0001, 0.75, 1))
            ]))),

            ('conv2', nn.Sequential(OrderedDict([
                ('3x3_reduce', nn.Conv2d(64, 64, (1, 1), (1, 1), (0, 0))),
                ('relu1', nn.ReLU(True)),
                ('3x3', nn.Conv2d(64, 192, (3, 3), (1, 1), (1, 1))),
                ('relu2', nn.ReLU(True)),
                ('lrn2', nn.CrossMapLRN2d(5, 0.0001, 0.75, 1)),
                ('pool2', nn.MaxPool2d((3, 3), (2, 2), ceil_mode=True))
            ]))),

            ('inception_3a', InceptionModule(192, 64, 96, 128, 16, 32, 32)),
            ('inception_3b', InceptionModule(256, 128, 128, 192, 32, 96, 64)),

            ('pool3', nn.MaxPool2d((3, 3), (2, 2), ceil_mode=True)),

            ('inception_4a', InceptionModule(480, 192, 96, 208, 16, 48, 64)),
            ('inception_4b', InceptionModule(512, 160, 112, 224, 24, 64, 64)),
            ('inception_4c', InceptionModule(512, 128, 128, 256, 24, 64, 64)),
            ('inception_4d', InceptionModule(512, 112, 144, 288, 32, 64, 64)),
            ('inception_4e', InceptionModule(528, 256, 160, 320, 32, 128, 128)),

            ('pool4', nn.MaxPool2d((3, 3), (2, 2), ceil_mode=True)),

            ('inception_5a', InceptionModule(832, 256, 160, 320, 32, 128, 128)),
            ('inception_5b', InceptionModule(832, 384, 192, 384, 48, 128, 128)),

            ('pool5', nn.AvgPool2d((7, 7), (1, 1), ceil_mode=True)),
        ]))
        if pretrained:
            self.load(root)

    def load(self, root):
        """Download the checkpoint if needed and load it into this module.

        :param root: directory where the checkpoint is cached
        """
        download_url(self.url, root, self.model_filename, self.md5hash)
        # use a context manager so the h5 file handle is closed instead of
        # being leaked (the original left the file open)
        with h5py.File(os.path.join(root, self.model_filename), 'r') as h5_file:
            group_key = list(h5_file.keys())[0]
            self.load_state_dict(
                {k: torch.from_numpy(v[group_key][()]) for k, v in h5_file[group_key].items()}
            )
class InceptionModule(nn.Module):
    """One GoogLeNet inception block: four parallel branches (1x1 conv,
    1x1->3x3 convs, 1x1->5x5 convs, and max-pool->1x1 projection) whose
    outputs are concatenated along the channel dimension in forward()."""
    def __init__(self, inplane, outplane_a1x1, outplane_b3x3_reduce, outplane_b3x3, outplane_c5x5_reduce, outplane_c5x5,
                 outplane_pool_proj):
        super(InceptionModule, self).__init__()
        a = nn.Sequential(OrderedDict([
            ('1x1', nn.Conv2d(inplane, outplane_a1x1, (1, 1), (1, 1), (0, 0))),
            ('1x1_relu', nn.ReLU(True))
        ]))

        b = nn.Sequential(OrderedDict([
            ('3x3_reduce', nn.Conv2d(inplane, outplane_b3x3_reduce, (1, 1), (1, 1), (0, 0))),
            ('3x3_relu1', nn.ReLU(True)),
            ('3x3', nn.Conv2d(outplane_b3x3_reduce, outplane_b3x3, (3, 3), (1, 1), (1, 1))),
            ('3x3_relu2', nn.ReLU(True))
        ]))

        c = nn.Sequential(OrderedDict([
            ('5x5_reduce', nn.Conv2d(inplane, outplane_c5x5_reduce, (1, 1), (1, 1), (0, 0))),
            ('5x5_relu1', nn.ReLU(True)),
            ('5x5', nn.Conv2d(outplane_c5x5_reduce, outplane_c5x5, (5, 5), (1, 1), (2, 2))),
            ('5x5_relu2', nn.ReLU(True))
        ]))

        d = nn.Sequential(OrderedDict([
            ('pool_pool', nn.MaxPool2d((3, 3), (1, 1), (1, 1))),
            ('pool_proj', nn.Conv2d(inplane, outplane_pool_proj, (1, 1), (1, 1), (0, 0))),
            ('pool_relu', nn.ReLU(True))
        ]))

        # Register every layer directly on this module (flat names like
        # '3x3_reduce') so state_dict keys match the pretrained h5 checkpoint
        # layout used by GoogleNet.load — do not change this scheme.
        for container in [a, b, c, d]:
            for name, module in container.named_children():
                self.add_module(name, module)
        # Plain Python list (not nn.ModuleList): parameters are already
        # tracked via add_module above; this list only drives forward().
        self.branches = [a, b, c, d]

    def forward(self, input):
        # run the four branches on the same input, concatenate on channels
        return torch.cat([branch(input) for branch in self.branches], 1)
| 3,631 | 396 | 99 |
0f6cbf7880a269cc7ee9969661d51e032171160e | 1,448 | py | Python | string_constants.py | PyPals/pywebsis | 01d9dd9751fa0296c11354322b70c6efef4a9671 | [
"MIT"
] | null | null | null | string_constants.py | PyPals/pywebsis | 01d9dd9751fa0296c11354322b70c6efef4a9671 | [
"MIT"
] | 7 | 2016-04-18T17:04:58.000Z | 2016-08-11T13:05:31.000Z | string_constants.py | PyPals/pywebsis | 01d9dd9751fa0296c11354322b70c6efef4a9671 | [
"MIT"
] | null | null | null | import constants
# Endpoints of the WebSIS portal that the scraper targets.
base_url = 'http://websismit.manipal.edu/websis/control/StudentAcademicProfile'
url = base_url + '?productCategoryId=0905-TERM-'
base_url_details = 'http://websismit.manipal.edu/websis/control/'
url_details = base_url_details + 'ListCTPEnrollment?customTimePeriodId='

# HTML element IDs scraped from the academic-profile page (first URL above)
form_id = 'ProgramAdmissionItemDetail'
credits_id = 'ProgramAdmissionItemDetail_pcredits_title'
gpa_id = 'ProgramAdmissionItemDetail_ptermResultScore_title'
course_code_id = 'cc_TermGradeBookSummary_internalName_'
course_id = 'cc_TermGradeBookSummary_productName_'
course_credit_id = 'cc_TermGradeBookSummary_credit_'
course_grade_id = 'cc_TermGradeBookSummary_pfinalResult_'
course_session_id = 'cc_TermGradeBookSummary_customTimePeriodId_'

# HTML element IDs scraped from the enrollment-details page (second URL above)
attendance_id = 'cc_ListAttendanceSummary_'
attendance_code_id = attendance_id + 'productId_'
attendance_name_id = attendance_id + 'productName_'
attendance_classes_id = attendance_id + 'attendanceTaken_'
attendance_attended_id = attendance_id + 'classesAttended_'
attendance_absent_id = attendance_id + 'classesAbsent_'
attendance_percent_id = attendance_id + 'attendancePercentage_'
attendance_last_updated = attendance_id + 'lastUpdatedStamp_'

# Internal-assessment score element ID prefixes
internal_id = 'cc_ListAssessmentScores_'
internal_code_id = internal_id + 'internalName_'
internal_subject_name = internal_id + 'productName_'
internal_marks_id = internal_id + 'obtainedMarks_'
| 46.709677 | 79 | 0.839088 | import constants
base_url = 'http://websismit.manipal.edu/websis/control/StudentAcademicProfile'
url = base_url + '?productCategoryId=0905-TERM-'
base_url_details = 'http://websismit.manipal.edu/websis/control/'
url_details = base_url_details + 'ListCTPEnrollment?customTimePeriodId='
#HTML IDs to be used in first url
form_id = 'ProgramAdmissionItemDetail'
credits_id = 'ProgramAdmissionItemDetail_pcredits_title'
gpa_id = 'ProgramAdmissionItemDetail_ptermResultScore_title'
course_code_id = 'cc_TermGradeBookSummary_internalName_'
course_id = 'cc_TermGradeBookSummary_productName_'
course_credit_id = 'cc_TermGradeBookSummary_credit_'
course_grade_id = 'cc_TermGradeBookSummary_pfinalResult_'
course_session_id = 'cc_TermGradeBookSummary_customTimePeriodId_'
#HTML IDs to be used in second url
attendance_id = 'cc_ListAttendanceSummary_'
attendance_code_id = attendance_id + 'productId_'
attendance_name_id = attendance_id + 'productName_'
attendance_classes_id = attendance_id + 'attendanceTaken_'
attendance_attended_id = attendance_id + 'classesAttended_'
attendance_absent_id = attendance_id + 'classesAbsent_'
attendance_percent_id = attendance_id + 'attendancePercentage_'
attendance_last_updated = attendance_id + 'lastUpdatedStamp_'
internal_id = 'cc_ListAssessmentScores_'
internal_code_id = internal_id + 'internalName_'
internal_subject_name = internal_id + 'productName_'
internal_marks_id = internal_id + 'obtainedMarks_'
| 0 | 0 | 0 |
ad7f3ce39bee40ba0b8f15a52a9d6a03823d342e | 282 | py | Python | sample_problems/problems_with_solution101.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | sample_problems/problems_with_solution101.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | sample_problems/problems_with_solution101.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | # Write a Python program to access and print a URL's content to the console.
from http.client import HTTPConnection

# Fetch http://example.com/ and print the raw response body.
conn = HTTPConnection("example.com")
try:
    conn.request("GET", "/")
    result = conn.getresponse()
    # retrieves the entire contents (as bytes).
    contents = result.read()
finally:
    # always release the socket, even if the request fails
    conn.close()
print(contents)
| 28.2 | 76 | 0.755319 | # Write a Python program to access and print a URL's content to the console.
from http.client import HTTPConnection
conn = HTTPConnection("example.com")
conn.request("GET", "/")
result = conn.getresponse()
# retrieves the entire contents.
contents = result.read()
print(contents)
| 0 | 0 | 0 |
e9c0bafa2df3e1bf122110edaf6d11c7a8a1d0ae | 143 | py | Python | blog_content/custom_context_processor.py | paulootavio343/Blog | 06e4814688770580b191129d5db60cc90a1fef6d | [
"MIT"
] | null | null | null | blog_content/custom_context_processor.py | paulootavio343/Blog | 06e4814688770580b191129d5db60cc90a1fef6d | [
"MIT"
] | null | null | null | blog_content/custom_context_processor.py | paulootavio343/Blog | 06e4814688770580b191129d5db60cc90a1fef6d | [
"MIT"
] | null | null | null | from .models import Category
| 17.875 | 61 | 0.657343 | from .models import Category
def subject_renderer(request):
    """Context processor: expose all categories, newest first, to templates."""
    categories = Category.objects.all().order_by('-id')
    return {'categories': categories}
| 90 | 0 | 23 |
a7f198e1d8cff8833d4ae6d9f3264eb305093d82 | 549 | py | Python | answers/vjha21/Day8/question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/vjha21/Day8/question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/vjha21/Day8/question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | ##Return sum of all unique elements of an array
if __name__ == "__main__":
nums = [1, 2, 3, 2]
print(unique_element(nums))
| 24.954545 | 65 | 0.586521 | ##Return sum of all unique elements of an array
def unique_element(array):
    """Return the sum of the elements that occur exactly once in *array*.

    Fixes two defects of the original: it no longer mutates the caller's
    list (the original called array.sort() in place), and it handles
    arrays with fewer than two elements (the original raised IndexError).
    """
    from collections import Counter
    counts = Counter(array)
    return sum(value for value, count in counts.items() if count == 1)
nums = [1, 2, 3, 2]
print(unique_element(nums))
| 392 | 0 | 23 |
422a4cf2546ac8427c4d0f426776bc704bc59852 | 1,119 | py | Python | 7/13/solve.py | juancroldan/tuenti-challenge | 4b0b233f457366dd78e80c011ade138cd162e297 | [
"Unlicense"
] | null | null | null | 7/13/solve.py | juancroldan/tuenti-challenge | 4b0b233f457366dd78e80c011ade138cd162e297 | [
"Unlicense"
] | null | null | null | 7/13/solve.py | juancroldan/tuenti-challenge | 4b0b233f457366dd78e80c011ade138cd162e297 | [
"Unlicense"
] | null | null | null | from numpy import uint64 as ui, arange
START = 1050
ONE = ui(1)
n = START
while n < 2**32 -1:
print(n)
with open("fromto.txt", "a") as f:
f.write("%s\t%s\n" % (n, carvedToWritten(n)))
n += 1 | 31.083333 | 62 | 0.460232 | from numpy import uint64 as ui, arange
START = 1050
ONE = ui(1)
def carvedToWritten(n):
	"""Map a 'carved' number to its 'written' form using pure uint64 bitwise
	arithmetic (no arithmetic operators), as required by the challenge.

	NOTE(review): the inner loops appear to implement a bit-serial
	subtraction/accumulation over j = n..0 with b acting as a borrow/carry
	word — presumably building each output bit i of the result; confirm
	against the challenge statement before relying on this description.
	Runtime grows very quickly with n (triple nested loop, ~64*(n+1)*64
	iterations).
	"""
	n = ui(n)
	r = ui(0)
	# i selects the output bit being produced
	for i in arange(64, dtype = ui):
		a = ui(0)
		# j walks from n down to 0; a accumulates, b carries between k-steps
		for j in reversed(arange(n + 1, dtype = ui)):
			b = ui(0)
			for k in arange(i+1, dtype = ui):
				#print("N: {0:064b}".format(n).replace("0"," ")+"|%s" % n)
				#print("I: {0:064b}".format(i).replace("0"," ")+"|%s" % i)
				#print("J: {0:064b}".format(j).replace("0"," ")+"|%s" % j)
				#print("K: {0:064b}".format(k).replace("0"," ")+"|%s" % k)
				c = a ^ (((i&n&~j)|(i&~n&j) & ONE) << k)
				#print("C: {0:064b}".format(c).replace("0"," ")+"|%s" % c)
				a ^= (j & (ONE << k)) ^ b
				#print("A: {0:064b}".format(a).replace("0"," ")+"|%s" % a)
				# majority-of-three masked to bit k, shifted up: carry word
				b = (((c&j) | (c&b) | (j&b)) & (ONE << k)) << ONE
				#print("B: {0:064b}".format(b).replace("0"," ")+"|%s" % b)
				#print("-"*67+"+")
			#print("-"*67+"+")
		# keep only bit i of the accumulator in the result
		r |= (a & (ONE << i))
		#print("R: {0:064b}".format(r).replace("0"," ")+"|%s" % r)
		#print("-"*67+"+")
	return r
n = START
while n < 2**32 -1:
print(n)
with open("fromto.txt", "a") as f:
f.write("%s\t%s\n" % (n, carvedToWritten(n)))
n += 1 | 899 | 0 | 22 |
1cbcd39bc17ace2409f7b87b96e0cffb046b876c | 3,893 | py | Python | jina/peapods/peas/helper.py | anuragdw710/jina | b2d3577f2d5b86399f0b4a8e4529df4929dd18ff | [
"Apache-2.0"
] | 3 | 2021-09-02T04:55:20.000Z | 2021-11-15T09:41:50.000Z | jina/peapods/peas/helper.py | sheetal01761/jina | 520fc0794fb43d96e1fc85534e9df3cf9c89c42e | [
"Apache-2.0"
] | null | null | null | jina/peapods/peas/helper.py | sheetal01761/jina | 520fc0794fb43d96e1fc85534e9df3cf9c89c42e | [
"Apache-2.0"
] | null | null | null | import multiprocessing
import threading
from functools import partial
from typing import Union, TYPE_CHECKING
from copy import deepcopy
from ... import __default_host__
from ...hubble.hubio import HubIO
from ...hubble.helper import is_valid_huburi
from ...enums import GatewayProtocolType, RuntimeBackendType
from ...parsers.hubble import set_hub_pull_parser
if TYPE_CHECKING:
from argparse import Namespace
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
that gets triggered when any of the events provided in input is triggered (OR logic)
:param backend_runtime: The runtime type to decide which type of Event to instantiate
:param events_list: The list of events that compose this composable event
"""
def update_runtime_cls(args, copy=False) -> 'Namespace':
    """Resolve and set ``args.runtime_cls`` based on host, protocol and ``uses``.

    Note: despite the historical docstring, this returns the (possibly
    copied and mutated) Namespace itself, not a string.

    :param args: pea/pod namespace args
    :param copy: True if args shouldn't be modified in-place
    :return: the namespace with ``runtime_cls`` (and possibly ``uses`` /
        ``timeout_ready``) updated
    """
    _args = deepcopy(args) if copy else args
    # mapping from gateway protocol to the runtime class that serves it
    gateway_runtime_dict = {
        GatewayProtocolType.GRPC: 'GRPCRuntime',
        GatewayProtocolType.WEBSOCKET: 'WebSocketRuntime',
        GatewayProtocolType.HTTP: 'HTTPRuntime',
    }
    # non-gateway runtime on a remote host -> run it through JinaD
    if (
        _args.runtime_cls not in gateway_runtime_dict.values()
        and _args.host != __default_host__
        and not _args.disable_remote
    ):
        _args.runtime_cls = 'JinadRuntime'
        # NOTE: remote pea would also create a remote workspace which might take a lot of time.
        # setting it to -1 so that wait_start_success doesn't fail
        _args.timeout_ready = -1
    # docker:// executor -> container runtime
    if _args.runtime_cls == 'ZEDRuntime' and _args.uses.startswith('docker://'):
        _args.runtime_cls = 'ContainerRuntime'
    # hub URI -> pull first; the pulled artifact may itself be a docker image
    if _args.runtime_cls == 'ZEDRuntime' and is_valid_huburi(_args.uses):
        _args.uses = HubIO(
            set_hub_pull_parser().parse_args([_args.uses, '--no-usage'])
        ).pull()
        if _args.uses.startswith('docker://'):
            _args.runtime_cls = 'ContainerRuntime'
    # a 'protocol' attribute marks a gateway: protocol wins over everything above
    if hasattr(_args, 'protocol'):
        _args.runtime_cls = gateway_runtime_dict[_args.protocol]
    return _args
| 34.451327 | 106 | 0.670948 | import multiprocessing
import threading
from functools import partial
from typing import Union, TYPE_CHECKING
from copy import deepcopy
from ... import __default_host__
from ...hubble.hubio import HubIO
from ...hubble.helper import is_valid_huburi
from ...enums import GatewayProtocolType, RuntimeBackendType
from ...parsers.hubble import set_hub_pull_parser
if TYPE_CHECKING:
from argparse import Namespace
def _get_event(obj) -> Union[multiprocessing.Event, threading.Event]:
    """Return a fresh event matching the concurrency backend of ``obj``.

    A ``threading.Thread`` gets a ``threading.Event``; default/fork processes
    get a ``multiprocessing.Event``; spawn-based processes get an event from
    the ``'spawn'`` context so the child process can inherit it.

    :raises TypeError: if ``obj`` is neither a thread nor a process
    """
    if isinstance(obj, threading.Thread):
        return threading.Event()
    if isinstance(obj, (multiprocessing.Process, multiprocessing.context.ForkProcess)):
        return multiprocessing.Event()
    if isinstance(obj, multiprocessing.context.SpawnProcess):
        return multiprocessing.get_context('spawn').Event()
    raise TypeError(
        f'{obj} is not an instance of "threading.Thread" nor "multiprocessing.Process"'
    )
class ConditionalEvent:
    """
    :class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
    that gets triggered when any of the events provided in input is triggered (OR logic)

    :param backend_runtime: The runtime type to decide which type of Event to instantiate
    :param events_list: The list of events that compose this composable event
    """

    def __init__(self, backend_runtime: RuntimeBackendType, events_list):
        super().__init__()
        # Pick the event flavour matching the backend of the monitored runtimes
        if backend_runtime == RuntimeBackendType.THREAD:
            self.event = threading.Event()
        else:
            self.event = multiprocessing.synchronize.Event(
                ctx=multiprocessing.get_context()
            )
        self.event_list = events_list
        # Hook every source event so toggling it re-evaluates this one
        for source in events_list:
            self._setup(source, self._state_changed)
        self._state_changed()

    def _state_changed(self):
        # OR semantics: set if and only if at least one source event is set
        if any(src.is_set() for src in self.event_list):
            self.event.set()
        else:
            self.event.clear()

    def _custom_set(self, e):
        e._set()
        e._state_changed()

    def _custom_clear(self, e):
        e._clear()
        e._state_changed()

    def _setup(self, e, changed_callback):
        # Monkey-patch set()/clear() on the source event so that every state
        # transition also notifies this composite event.
        e._set = e.set
        e._clear = e.clear
        e._state_changed = changed_callback
        e.set = partial(self._custom_set, e)
        e.clear = partial(self._custom_clear, e)
def update_runtime_cls(args, copy=False) -> 'Namespace':
    """Resolve and store the runtime class name for a pea/pod namespace.

    :param args: pea/pod namespace args
    :param copy: True if args shouldn't be modified in-place
    :return: the namespace with ``runtime_cls`` resolved
    """
    ns = deepcopy(args) if copy else args

    # Mapping from gateway protocol to the runtime implementing it
    protocol_to_runtime = {
        GatewayProtocolType.GRPC: 'GRPCRuntime',
        GatewayProtocolType.WEBSOCKET: 'WebSocketRuntime',
        GatewayProtocolType.HTTP: 'HTTPRuntime',
    }

    is_gateway = ns.runtime_cls in protocol_to_runtime.values()
    if not is_gateway and ns.host != __default_host__ and not ns.disable_remote:
        ns.runtime_cls = 'JinadRuntime'
        # A remote pea also creates a remote workspace, which might take a lot
        # of time; -1 keeps wait_start_success from failing while that happens.
        ns.timeout_ready = -1

    if ns.runtime_cls == 'ZEDRuntime':
        if ns.uses.startswith('docker://'):
            ns.runtime_cls = 'ContainerRuntime'
        elif is_valid_huburi(ns.uses):
            # Pull the executor from the Hub; the pull may resolve to a docker
            # image, in which case it must run in a container as well.
            ns.uses = HubIO(
                set_hub_pull_parser().parse_args([ns.uses, '--no-usage'])
            ).pull()
            if ns.uses.startswith('docker://'):
                ns.runtime_cls = 'ContainerRuntime'

    if hasattr(ns, 'protocol'):
        # Gateway namespaces carry ``protocol``; it decides the final runtime.
        ns.runtime_cls = protocol_to_runtime[ns.protocol]
    return ns
| 1,490 | 0 | 158 |
f0028266ec1bf2c346a778c1687f14b66af3ebf8 | 400 | py | Python | tests/test_ukw_intelli_store.py | Maddonix/ukw-intelli-store | a6ba088e70d5ee2e9499a9c72477833dfccafb0b | [
"MIT"
] | null | null | null | tests/test_ukw_intelli_store.py | Maddonix/ukw-intelli-store | a6ba088e70d5ee2e9499a9c72477833dfccafb0b | [
"MIT"
] | null | null | null | tests/test_ukw_intelli_store.py | Maddonix/ukw-intelli-store | a6ba088e70d5ee2e9499a9c72477833dfccafb0b | [
"MIT"
] | null | null | null |
from ukw_intelli_store.cli import main
from ukw_intelli_store.endomaterial import EndoMaterial
# Sample workbooks used as fixtures by the tests below.
path_test_imd = "tests/data/imd.xlsx"
path_test_mat = "tests/data/mat.xlsx"
from ukw_intelli_store.cli import main
from ukw_intelli_store.endomaterial import EndoMaterial
path_test_imd = "tests/data/imd.xlsx"
path_test_mat = "tests/data/mat.xlsx"
def test_main():
    """The CLI entry point should exit cleanly when given no arguments."""
    exit_code = main([])
    assert exit_code == 0
def test_init():
    """Constructing ``EndoMaterial`` from the sample workbooks must not raise."""
    # Smoke test: the previous version bound the result to an unused local
    # (flake8 F841); success here is simply the constructor completing.
    EndoMaterial(path_test_imd, path_test_mat)
def test_dgvs_keys():
    """The sample data set is expected to contain exactly three DGVS keys."""
    material = EndoMaterial(path_test_imd, path_test_mat)
    keys = material.get_dgvs_keys()
    assert len(keys) == 3
c58165e956f88791ce62265295e7af9606f33f81 | 16 | py | Python | main_1.py | jayz0417/second_project | 44f0912d58330e84654277b51268b30f5599ddc1 | [
"MIT"
] | null | null | null | main_1.py | jayz0417/second_project | 44f0912d58330e84654277b51268b30f5599ddc1 | [
"MIT"
] | null | null | null | main_1.py | jayz0417/second_project | 44f0912d58330e84654277b51268b30f5599ddc1 | [
"MIT"
] | null | null | null | print("user 1")
| 8 | 15 | 0.625 | print("user 1")
| 0 | 0 | 0 |
ae4c3233b0ceea19ef2685aab78a94e4c2bee8b1 | 3,471 | py | Python | Code/AntialiasingManager.py | kergalym/RenderPipeline-version_1_release | 3c78b79d44c33781374e870dd8aad03aa6ce1268 | [
"WTFPL"
] | null | null | null | Code/AntialiasingManager.py | kergalym/RenderPipeline-version_1_release | 3c78b79d44c33781374e870dd8aad03aa6ce1268 | [
"WTFPL"
] | null | null | null | Code/AntialiasingManager.py | kergalym/RenderPipeline-version_1_release | 3c78b79d44c33781374e870dd8aad03aa6ce1268 | [
"WTFPL"
] | null | null | null | from panda3d.core import PTAVecBase2f, Vec2
from Code.DebugObject import DebugObject
from Code.Globals import Globals
from Code.RenderPasses.AntialiasingFXAAPass import AntialiasingFXAAPass
from Code.RenderPasses.AntialiasingSMAAPass import AntialiasingSMAAPass
class AntialiasingManager(DebugObject):
    """Sets up and drives the antialiasing render passes.

    The pass to create is chosen from the pipeline settings. For temporal
    techniques (SMAA T2) the manager additionally jitters the camera by half
    a pixel in alternating directions every frame, so consecutive frames can
    be merged into a better antialiased result -- conceptually MSAA spread
    over several frames.
    """

    # Techniques accepted in the pipeline settings
    availableTechniques = ["FXAA", "SMAA", "None"]

    def __init__(self, pipeline):
        """Stores the pipeline reference and immediately creates the passes."""
        DebugObject.__init__(self, "AntialiasingManager")
        self.pipeline = pipeline
        self.jitter = False
        self.jitterIndex = 0
        self.jitterOffsets = []
        self.jitterPTA = PTAVecBase2f.emptyArray(1)
        self.create()

    def create(self):
        """Instantiates the configured antialiasing pass and precomputes jitter."""
        technique = self.pipeline.settings.antialiasingTechnique

        if technique not in self.availableTechniques:
            self.error("Unrecognized antialiasing technique: " + technique)
            return

        if technique == "None":
            # Antialiasing disabled -- nothing to register
            return

        if technique == "FXAA":
            # FXAA 3.11 by nvidia
            self.antialiasingPass = AntialiasingFXAAPass()
        elif technique == "SMAA":
            # SMAA T2 is temporal, so enable the per-frame camera jitter
            self.antialiasingPass = AntialiasingSMAAPass()
            self.jitter = True

            # Forward the SMAA quality preset to the shaders
            quality = self.pipeline.settings.smaaQuality.upper()
            if quality not in ["LOW", "MEDIUM", "HIGH", "ULTRA"]:
                self.error("Unrecognized SMAA quality:", quality)
                return
            self.pipeline.getRenderPassManager().registerDefine(
                "SMAA_PRESET_" + quality, 1)

        if self.jitter:
            # Size of half a pixel in film coordinates, scaled by the
            # configured jitter amount
            aspect = float(Globals.resolution.x) / float(Globals.resolution.y)
            onePixelShift = Vec2(
                0.5 / float(Globals.resolution.x),
                0.5 / float(Globals.resolution.y) / aspect) * self.pipeline.settings.jitterAmount

            def componentMul(a, b):
                # Vec2 has no component-wise multiply for Vec2 operands
                return Vec2(a.x * b.x, a.y * b.y)

            # Two offsets, a quarter pixel apart, alternated every frame
            self.jitterOffsets = [
                componentMul(onePixelShift, Vec2(-0.25, 0.25)),
                componentMul(onePixelShift, Vec2(0.25, -0.25)),
            ]

        # Make the render pass manager aware of the new pass
        self.pipeline.getRenderPassManager().registerPass(self.antialiasingPass)

    def update(self):
        """Applies the next jitter offset to the camera, if jittering is on."""
        if not self.jitter:
            return
        offset = self.jitterOffsets[self.jitterIndex]
        self.jitterIndex = 1 - self.jitterIndex
        Globals.base.camLens.setFilmOffset(offset.x, offset.y)
| 39 | 97 | 0.649957 | from panda3d.core import PTAVecBase2f, Vec2
from Code.DebugObject import DebugObject
from Code.Globals import Globals
from Code.RenderPasses.AntialiasingFXAAPass import AntialiasingFXAAPass
from Code.RenderPasses.AntialiasingSMAAPass import AntialiasingSMAAPass
class AntialiasingManager(DebugObject):
    """Sets up and drives the antialiasing render passes.

    The pass to create is chosen from the pipeline settings. For temporal
    techniques (SMAA T2) the manager additionally jitters the camera by half
    a pixel in alternating directions every frame, so consecutive frames can
    be merged into a better antialiased result -- conceptually MSAA spread
    over several frames.
    """

    # Techniques accepted in the pipeline settings
    availableTechniques = ["FXAA", "SMAA", "None"]

    def __init__(self, pipeline):
        """Stores the pipeline reference and immediately creates the passes."""
        DebugObject.__init__(self, "AntialiasingManager")
        self.pipeline = pipeline
        self.jitter = False
        self.jitterIndex = 0
        self.jitterOffsets = []
        self.jitterPTA = PTAVecBase2f.emptyArray(1)
        self.create()

    def create(self):
        """Instantiates the configured antialiasing pass and precomputes jitter."""
        technique = self.pipeline.settings.antialiasingTechnique

        if technique not in self.availableTechniques:
            self.error("Unrecognized antialiasing technique: " + technique)
            return

        if technique == "None":
            # Antialiasing disabled -- nothing to register
            return

        if technique == "FXAA":
            # FXAA 3.11 by nvidia
            self.antialiasingPass = AntialiasingFXAAPass()
        elif technique == "SMAA":
            # SMAA T2 is temporal, so enable the per-frame camera jitter
            self.antialiasingPass = AntialiasingSMAAPass()
            self.jitter = True

            # Forward the SMAA quality preset to the shaders
            quality = self.pipeline.settings.smaaQuality.upper()
            if quality not in ["LOW", "MEDIUM", "HIGH", "ULTRA"]:
                self.error("Unrecognized SMAA quality:", quality)
                return
            self.pipeline.getRenderPassManager().registerDefine(
                "SMAA_PRESET_" + quality, 1)

        if self.jitter:
            # Size of half a pixel in film coordinates, scaled by the
            # configured jitter amount
            aspect = float(Globals.resolution.x) / float(Globals.resolution.y)
            onePixelShift = Vec2(
                0.5 / float(Globals.resolution.x),
                0.5 / float(Globals.resolution.y) / aspect) * self.pipeline.settings.jitterAmount

            def componentMul(a, b):
                # Vec2 has no component-wise multiply for Vec2 operands
                return Vec2(a.x * b.x, a.y * b.y)

            # Two offsets, a quarter pixel apart, alternated every frame
            self.jitterOffsets = [
                componentMul(onePixelShift, Vec2(-0.25, 0.25)),
                componentMul(onePixelShift, Vec2(0.25, -0.25)),
            ]

        # Make the render pass manager aware of the new pass
        self.pipeline.getRenderPassManager().registerPass(self.antialiasingPass)

    def update(self):
        """Applies the next jitter offset to the camera, if jittering is on."""
        if not self.jitter:
            return
        offset = self.jitterOffsets[self.jitterIndex]
        self.jitterIndex = 1 - self.jitterIndex
        Globals.base.camLens.setFilmOffset(offset.x, offset.y)
| 0 | 0 | 0 |
04070cd4f46b9eb0eadbd6ad622f2b8d021be30e | 11,573 | py | Python | pysnmp-with-texts/CISCO-MGX82XX-RPM-RSRC-PART-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCO-MGX82XX-RPM-RSRC-PART-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCO-MGX82XX-RPM-RSRC-PART-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-MGX82XX-RPM-RSRC-PART-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MGX82XX-RPM-RSRC-PART-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:07:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): this file is machine-generated by pysmi from the ASN.1 MIB
# source above; prefer regenerating over hand-editing. `mibBuilder` is
# injected into the execution namespace by pysnmp's MIB loader.
# The "imports" below are resolved through pysnmp's MIB builder, not the
# regular Python import system.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
rpmInterface, = mibBuilder.importSymbols("BASIS-MIB", "rpmInterface")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, Counter64, iso, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter32, Bits, IpAddress, TimeTicks, ObjectIdentity, NotificationType, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter64", "iso", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter32", "Bits", "IpAddress", "TimeTicks", "ObjectIdentity", "NotificationType", "Integer32", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity and revision metadata (the getattr guard skips APIs that
# older pysnmp builds lack).
ciscoMgx82xxRpmRsrcPartMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 61))
ciscoMgx82xxRpmRsrcPartMIB.setRevisions(('2002-09-17 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setRevisionsDescriptions(('Initial version of the MIB. The content of this MIB was originally available in CISCO-WAN-AXIPOP-MIB defined using SMIv1. The applicable objects from CISCO-WAN-AXIPOP-MIB are defined using SMIv2 in this MIB. Also the descriptions of some of the objects have been modified.',))
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setLastUpdated('200209170000Z')
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-wanatm@cisco.com')
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setDescription('The MIB module to manage resource partition objects. A resource partition is configured on a RPM subinterface. An administrator can partition connection related resources like VPI, VCI ranges, bandwidth and total amount of available connection entries in the switch through these MIB tables.')
# Managed objects: the RPM interface resource-partition configuration table,
# indexed by (slot, interface, controller).
rpmIfCnfResPart = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2))
rpmIfCnfRscPartTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1), )
if mibBuilder.loadTexts: rpmIfCnfRscPartTable.setStatus('current')
if mibBuilder.loadTexts: rpmIfCnfRscPartTable.setDescription('The table is for RPM interface resource partition.')
rpmIfCnfRscPartEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1), ).setIndexNames((0, "CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscSlotNum"), (0, "CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartIfNum"), (0, "CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartCtrlrNum"))
if mibBuilder.loadTexts: rpmIfCnfRscPartEntry.setStatus('current')
if mibBuilder.loadTexts: rpmIfCnfRscPartEntry.setDescription('An entry for resource partition configuration on a logical interface. A resource partition need to be configured before connections(or connection endpoints) can be added to the interface.')
rpmIfRscSlotNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscSlotNum.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscSlotNum.setDescription('This object identifies the slot number of the RPM card in MGX shelf.')
rpmIfRscPartIfNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPartIfNum.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPartIfNum.setDescription('This is backplane interface number. Currently there is only one interface and value 1 is the only value supported.')
rpmIfRscPartCtrlrNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("par", 1), ("pnni", 2), ("tag", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPartCtrlrNum.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPartCtrlrNum.setDescription('This object identifies the type of the controller which owns the resource partition. par(1) : Portable Auto Route Controller. This is a controller software implementing Cisco Proprietary protocol for network routing and topology. pnni(2): Private Network-to-Network Interface(PNNI) controller. This is a controller implementing ATM Forum PNNI protocol for routing. tag(3) : Tag Switching(MPLS) controller. This is a controller supporting MPLS protocol.')
rpmIfRscPrtRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("add", 1), ("del", 2), ("mod", 3))).clone('del')).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtRowStatus.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtRowStatus.setDescription('This object contains the information on whether subinterface exists or being modified. add(1) : sub-interface exists del(2) : sub-interface deleted mod(2) : sub-interface being modified')
rpmIfRscPrtIngrPctBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtIngrPctBandwidth.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtIngrPctBandwidth.setDescription('The percentage of logical interface bandwidth.')
rpmIfRscPrtEgrPctBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtEgrPctBandwidth.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtEgrPctBandwidth.setDescription('The percentage of logical interface bandwidth.')
rpmIfRscPrtVpiLow = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVpiLow.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVpiLow.setDescription('The beginning of the VPI range reserved for this partition.')
rpmIfRscPrtVpiHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVpiHigh.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVpiHigh.setDescription('The end of the VPI range reserved for this partition.')
rpmIfRscPrtVciLow = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVciLow.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVciLow.setDescription('The beginning of the VCI range reserved for this partition. This field is only valid for logical interfaces configured with a single VPI.')
rpmIfRscPrtVciHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVciHigh.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVciHigh.setDescription('The end of the VCI range reserved for this partition. This field is only valid for logical interfaces configured with a single VPI.')
rpmIfRscPrtMaxChans = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4047))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtMaxChans.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtMaxChans.setDescription('This represents maximum number of channels(connections) that are available to the controller.')
# Conformance: compliance statements and the object group covering all
# columns defined above.
cmrRsrcPartMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 61, 3))
cmrRsrcPartMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 1))
cmrRsrcPartMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 2))
cmrRsrcPartMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 1, 1)).setObjects(("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "cmrRsrcPartMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cmrRsrcPartMIBCompliance = cmrRsrcPartMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: cmrRsrcPartMIBCompliance.setDescription('The Compliance statement for Resource partition management group.')
cmrRsrcPartMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 2, 1)).setObjects(("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscSlotNum"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartIfNum"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartCtrlrNum"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtRowStatus"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtIngrPctBandwidth"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtEgrPctBandwidth"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVpiLow"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVpiHigh"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVciLow"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVciHigh"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtMaxChans"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cmrRsrcPartMIBGroup = cmrRsrcPartMIBGroup.setStatus('current')
if mibBuilder.loadTexts: cmrRsrcPartMIBGroup.setDescription('The collection of objects related to configuration of Resource partition.')
# Register every symbol with the MIB builder so dependent MIB modules can
# resolve them via importSymbols.
mibBuilder.exportSymbols("CISCO-MGX82XX-RPM-RSRC-PART-MIB", ciscoMgx82xxRpmRsrcPartMIB=ciscoMgx82xxRpmRsrcPartMIB, rpmIfCnfRscPartEntry=rpmIfCnfRscPartEntry, cmrRsrcPartMIBGroups=cmrRsrcPartMIBGroups, rpmIfRscPrtVpiLow=rpmIfRscPrtVpiLow, rpmIfRscPrtVciLow=rpmIfRscPrtVciLow, PYSNMP_MODULE_ID=ciscoMgx82xxRpmRsrcPartMIB, cmrRsrcPartMIBCompliance=cmrRsrcPartMIBCompliance, rpmIfRscPartIfNum=rpmIfRscPartIfNum, rpmIfRscPrtIngrPctBandwidth=rpmIfRscPrtIngrPctBandwidth, cmrRsrcPartMIBGroup=cmrRsrcPartMIBGroup, cmrRsrcPartMIBConformance=cmrRsrcPartMIBConformance, rpmIfCnfResPart=rpmIfCnfResPart, rpmIfRscPrtEgrPctBandwidth=rpmIfRscPrtEgrPctBandwidth, rpmIfRscPartCtrlrNum=rpmIfRscPartCtrlrNum, rpmIfRscPrtVpiHigh=rpmIfRscPrtVpiHigh, rpmIfRscPrtRowStatus=rpmIfRscPrtRowStatus, rpmIfCnfRscPartTable=rpmIfCnfRscPartTable, cmrRsrcPartMIBCompliances=cmrRsrcPartMIBCompliances, rpmIfRscPrtVciHigh=rpmIfRscPrtVciHigh, rpmIfRscPrtMaxChans=rpmIfRscPrtMaxChans, rpmIfRscSlotNum=rpmIfRscSlotNum)
| 148.371795 | 980 | 0.786054 | #
# PySNMP MIB module CISCO-MGX82XX-RPM-RSRC-PART-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MGX82XX-RPM-RSRC-PART-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:07:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): this file is machine-generated by pysmi from the ASN.1 MIB
# source above; prefer regenerating over hand-editing. `mibBuilder` is
# injected into the execution namespace by pysnmp's MIB loader.
# The "imports" below are resolved through pysnmp's MIB builder, not the
# regular Python import system.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
rpmInterface, = mibBuilder.importSymbols("BASIS-MIB", "rpmInterface")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, Counter64, iso, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter32, Bits, IpAddress, TimeTicks, ObjectIdentity, NotificationType, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter64", "iso", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter32", "Bits", "IpAddress", "TimeTicks", "ObjectIdentity", "NotificationType", "Integer32", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity and revision metadata (the getattr guard skips APIs that
# older pysnmp builds lack).
ciscoMgx82xxRpmRsrcPartMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 61))
ciscoMgx82xxRpmRsrcPartMIB.setRevisions(('2002-09-17 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setRevisionsDescriptions(('Initial version of the MIB. The content of this MIB was originally available in CISCO-WAN-AXIPOP-MIB defined using SMIv1. The applicable objects from CISCO-WAN-AXIPOP-MIB are defined using SMIv2 in this MIB. Also the descriptions of some of the objects have been modified.',))
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setLastUpdated('200209170000Z')
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-wanatm@cisco.com')
if mibBuilder.loadTexts: ciscoMgx82xxRpmRsrcPartMIB.setDescription('The MIB module to manage resource partition objects. A resource partition is configured on a RPM subinterface. An administrator can partition connection related resources like VPI, VCI ranges, bandwidth and total amount of available connection entries in the switch through these MIB tables.')
# Managed objects: the RPM interface resource-partition configuration table,
# indexed by (slot, interface, controller).
rpmIfCnfResPart = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2))
rpmIfCnfRscPartTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1), )
if mibBuilder.loadTexts: rpmIfCnfRscPartTable.setStatus('current')
if mibBuilder.loadTexts: rpmIfCnfRscPartTable.setDescription('The table is for RPM interface resource partition.')
rpmIfCnfRscPartEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1), ).setIndexNames((0, "CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscSlotNum"), (0, "CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartIfNum"), (0, "CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartCtrlrNum"))
if mibBuilder.loadTexts: rpmIfCnfRscPartEntry.setStatus('current')
if mibBuilder.loadTexts: rpmIfCnfRscPartEntry.setDescription('An entry for resource partition configuration on a logical interface. A resource partition need to be configured before connections(or connection endpoints) can be added to the interface.')
rpmIfRscSlotNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscSlotNum.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscSlotNum.setDescription('This object identifies the slot number of the RPM card in MGX shelf.')
rpmIfRscPartIfNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPartIfNum.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPartIfNum.setDescription('This is backplane interface number. Currently there is only one interface and value 1 is the only value supported.')
rpmIfRscPartCtrlrNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("par", 1), ("pnni", 2), ("tag", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPartCtrlrNum.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPartCtrlrNum.setDescription('This object identifies the type of the controller which owns the resource partition. par(1) : Portable Auto Route Controller. This is a controller software implementing Cisco Proprietary protocol for network routing and topology. pnni(2): Private Network-to-Network Interface(PNNI) controller. This is a controller implementing ATM Forum PNNI protocol for routing. tag(3) : Tag Switching(MPLS) controller. This is a controller supporting MPLS protocol.')
rpmIfRscPrtRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("add", 1), ("del", 2), ("mod", 3))).clone('del')).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtRowStatus.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtRowStatus.setDescription('This object contains the information on whether subinterface exists or being modified. add(1) : sub-interface exists del(2) : sub-interface deleted mod(2) : sub-interface being modified')
rpmIfRscPrtIngrPctBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtIngrPctBandwidth.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtIngrPctBandwidth.setDescription('The percentage of logical interface bandwidth.')
rpmIfRscPrtEgrPctBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtEgrPctBandwidth.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtEgrPctBandwidth.setDescription('The percentage of logical interface bandwidth.')
rpmIfRscPrtVpiLow = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVpiLow.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVpiLow.setDescription('The beginning of the VPI range reserved for this partition.')
rpmIfRscPrtVpiHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVpiHigh.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVpiHigh.setDescription('The end of the VPI range reserved for this partition.')
rpmIfRscPrtVciLow = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVciLow.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVciLow.setDescription('The beginning of the VCI range reserved for this partition. This field is only valid for logical interfaces configured with a single VPI.')
rpmIfRscPrtVciHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtVciHigh.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtVciHigh.setDescription('The end of the VCI range reserved for this partition. This field is only valid for logical interfaces configured with a single VPI.')
rpmIfRscPrtMaxChans = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 2, 9, 2, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4047))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rpmIfRscPrtMaxChans.setStatus('current')
if mibBuilder.loadTexts: rpmIfRscPrtMaxChans.setDescription('This represents maximum number of channels(connections) that are available to the controller.')
# Conformance: compliance statements and the object group covering all
# columns defined above.
cmrRsrcPartMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 61, 3))
cmrRsrcPartMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 1))
cmrRsrcPartMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 2))
cmrRsrcPartMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 1, 1)).setObjects(("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "cmrRsrcPartMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cmrRsrcPartMIBCompliance = cmrRsrcPartMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: cmrRsrcPartMIBCompliance.setDescription('The Compliance statement for Resource partition management group.')
cmrRsrcPartMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 61, 3, 2, 1)).setObjects(("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscSlotNum"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartIfNum"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPartCtrlrNum"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtRowStatus"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtIngrPctBandwidth"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtEgrPctBandwidth"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVpiLow"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVpiHigh"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVciLow"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtVciHigh"), ("CISCO-MGX82XX-RPM-RSRC-PART-MIB", "rpmIfRscPrtMaxChans"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cmrRsrcPartMIBGroup = cmrRsrcPartMIBGroup.setStatus('current')
if mibBuilder.loadTexts: cmrRsrcPartMIBGroup.setDescription('The collection of objects related to configuration of Resource partition.')
# Register every symbol with the MIB builder so dependent MIB modules can
# resolve them via importSymbols.
mibBuilder.exportSymbols("CISCO-MGX82XX-RPM-RSRC-PART-MIB", ciscoMgx82xxRpmRsrcPartMIB=ciscoMgx82xxRpmRsrcPartMIB, rpmIfCnfRscPartEntry=rpmIfCnfRscPartEntry, cmrRsrcPartMIBGroups=cmrRsrcPartMIBGroups, rpmIfRscPrtVpiLow=rpmIfRscPrtVpiLow, rpmIfRscPrtVciLow=rpmIfRscPrtVciLow, PYSNMP_MODULE_ID=ciscoMgx82xxRpmRsrcPartMIB, cmrRsrcPartMIBCompliance=cmrRsrcPartMIBCompliance, rpmIfRscPartIfNum=rpmIfRscPartIfNum, rpmIfRscPrtIngrPctBandwidth=rpmIfRscPrtIngrPctBandwidth, cmrRsrcPartMIBGroup=cmrRsrcPartMIBGroup, cmrRsrcPartMIBConformance=cmrRsrcPartMIBConformance, rpmIfCnfResPart=rpmIfCnfResPart, rpmIfRscPrtEgrPctBandwidth=rpmIfRscPrtEgrPctBandwidth, rpmIfRscPartCtrlrNum=rpmIfRscPartCtrlrNum, rpmIfRscPrtVpiHigh=rpmIfRscPrtVpiHigh, rpmIfRscPrtRowStatus=rpmIfRscPrtRowStatus, rpmIfCnfRscPartTable=rpmIfCnfRscPartTable, cmrRsrcPartMIBCompliances=cmrRsrcPartMIBCompliances, rpmIfRscPrtVciHigh=rpmIfRscPrtVciHigh, rpmIfRscPrtMaxChans=rpmIfRscPrtMaxChans, rpmIfRscSlotNum=rpmIfRscSlotNum)
| 0 | 0 | 0 |
85cb17c76ba00077b48b22352c3160c6a838c796 | 5,108 | py | Python | bin/gen_config_files.py | dannima/pae_probe_experiments-1 | 28e9f2b3873f21b39c377899d31c7a04da2b844b | [
"BSD-2-Clause"
] | null | null | null | bin/gen_config_files.py | dannima/pae_probe_experiments-1 | 28e9f2b3873f21b39c377899d31c7a04da2b844b | [
"BSD-2-Clause"
] | null | null | null | bin/gen_config_files.py | dannima/pae_probe_experiments-1 | 28e9f2b3873f21b39c377899d31c7a04da2b844b | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import sys
import yaml
from pathlib import Path
if __name__ == '__main__':
main()
| 38.406015 | 75 | 0.42502 | #!/usr/bin/env python3
import argparse
import os
import sys
import yaml
from pathlib import Path
#: Corpus names in the order their root directories are given on the
#: command line (``args.df[0]`` .. ``args.df[5]``).
_CORPORA = ('timit', 'ntimit', 'ctimit', 'ffmtimit', 'stctimit', 'wtimit')


def _corpus_entries(df_dirs, feats_dir, step, ids_fname):
    """Build the per-corpus data mapping used in the config files.

    Args:
        df_dirs: list of corpus root directories, ordered as ``_CORPORA``.
        feats_dir: directory of extracted features (one subdir per corpus).
        step: frame step in seconds.
        ids_fname: name of the utterance-id file ('train.ids' or
            'test_full.ids').

    Returns:
        dict mapping corpus name -> dict with keys uris/step/feats/phones.
    """
    return {
        name: dict(
            uris=df_dir + '/' + ids_fname,
            step=step,
            feats=feats_dir + '/' + name,
            phones=df_dir + '/phones',
        )
        for name, df_dir in zip(_CORPORA, df_dirs)
    }


def main():
    """Generate one YAML config per (probing task, classifier) pair.

    Writes the files into ``args.config_dir``; each config carries the
    train/test data locations for all six TIMIT variants.
    """
    parser = argparse.ArgumentParser(
        'generate configuration files', add_help=True)
    parser.add_argument(
        'feats_dir', nargs=None,
        help='directory of extracted features')
    parser.add_argument(
        'config_dir', nargs=None,
        help='output directory for config files')
    parser.add_argument(
        'df', nargs='+',
        help='path to TIMIT variants')
    parser.add_argument(
        '--context_size', nargs=None, default=0, type=int,
        help='number of frames in each side as context to features \
            (default: %(default)s)')
    parser.add_argument(
        '--batch_size', nargs=None, default=1024, type=int,
        help='number of training examples used in one iteration \
            (default: %(default)s)')
    parser.add_argument(
        '--step', nargs=None, default=0.01, type=float, metavar='SECONDS',
        help='frame step in seconds (default: %(default)s)')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    os.makedirs(args.config_dir, exist_ok=True)

    # Determine parameters for configuration files.
    for probing_task in ['sad', 'vowel', 'sonorant', 'fricative', 'phone']:
        for clf in ['logistic', 'max_margin', 'nnet']:
            data = dict(
                task=probing_task,
                classifier=clf,
                context_size=args.context_size,
                batch_size=args.batch_size,
                train_data=_corpus_entries(
                    args.df, args.feats_dir, args.step, 'train.ids'),
                test_data=_corpus_entries(
                    args.df, args.feats_dir, args.step, 'test_full.ids'),
            )
            # BUG FIX: the output path was hard-coded to 'configs/tasks',
            # silently ignoring the config_dir argument created above (and
            # failing if that directory did not exist).
            fn = Path(args.config_dir) / f'{probing_task}_{clf}.yaml'
            with open(fn, 'w') as config_file:
                yaml.dump(data, config_file, default_flow_style=False)
if __name__ == '__main__':
main()
| 4,947 | 0 | 23 |
6f35b4635df9875af0d7c1384cce264be2293dda | 10,176 | py | Python | octagon/environment/__init__.py | Randmeer/Cobalt-Quest | 79b62dea6420cb9f4cd09fa048ddd2b033cad35f | [
"MIT"
] | null | null | null | octagon/environment/__init__.py | Randmeer/Cobalt-Quest | 79b62dea6420cb9f4cd09fa048ddd2b033cad35f | [
"MIT"
] | null | null | null | octagon/environment/__init__.py | Randmeer/Cobalt-Quest | 79b62dea6420cb9f4cd09fa048ddd2b033cad35f | [
"MIT"
] | null | null | null | import random
import time
import pygame
import QuickJSON
import copy
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
from octagon.utils import render_text, var, img, cout, mp_scene
from octagon.environment import hud, camera
from octagon.sprites import block
| 40.541833 | 172 | 0.548054 | import random
import time
import pygame
import QuickJSON
import copy
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
from octagon.utils import render_text, var, img, cout, mp_scene
from octagon.environment import hud, camera
from octagon.sprites import block
class Environment:
    # Top-level game-world owner: holds the player, blocks, entities,
    # particles, projectiles, HUD and camera, and drives the game loop.
    def __init__(self, window: pygame.Surface, envjsonpath: str, invjsonpath: str, player: type, entity: list[type],
                 items: dict, ):
        """
        handles logic for player, blocks, entities, particles and more
        automatically calculates delta-time

        main functions:
            load() --> loads floor json
            save() --> saves floor json
            start_loop() --> starts game loop
            end_loop() --> ends game loop

        local functions:
            single_loop() --> runs _update, update and render functions
            _update() --> updates objects, handles input
            render() --> renders objects
        """
        self.window = window
        self.items = items
        # off-screen render target at native resolution; scaled up in _render()
        self.surface = pygame.Surface(var.SIZE, pygame.SRCALPHA)
        self.envjson = QuickJSON.QJSON(envjsonpath)
        self.invjson = QuickJSON.QJSON(invjsonpath)
        self.hud = hud.HUD(self)
        self.clock = pygame.time.Clock()
        # index entities and projectiles
        # EntityObj maps lower-cased class name -> entity class; used by load()
        # to reconstruct entities from their serialized names.
        self.PlayerObj = player
        self.EntityObj = {}
        for i in entity:
            self.EntityObj[str(i.__name__).lower()] = i
        # declare variables for later use
        self.blocks, self.entities, self.particles, self.projectiles, self.melee, self.events, self.pathfinder_grid, self.pathfinder_blocks = [], [], [], [], [], [], [], []
        self.now, self.prev_time, self.delta_time, self.cooldown, self.sidelength = 0, 0, 0, 0, 0
        self.player, self.scene, self.pathfinder = None, None, None
        self.run, self.click = True, False
        self.load()
    def load(self):
        """
        loads the json of the respective floor and creates all the specified
        block & entity classes and the player class
        """
        # load environment/inventory json
        self.envjson.load()
        self.invjson.load()
        # world is a (size*2) x (size*2) tile grid, 16 px per tile
        self.sidelength = self.envjson["size"] * 16 * 2
        self.player = self.PlayerObj(env=self, pos=(self.envjson["player"][0], self.envjson["player"][1]), health=self.invjson["health"], mana=self.invjson["mana"])
        # read and convert blocks to Block()'s in list
        blocks = list(self.envjson["blocks"])
        # NOTE: disabled debug snippet kept as a bare string expression
        # (a no-op at runtime) — randomizes the block grid when re-enabled.
        """
        for i in range(self.envjson["size"] * 2):
            for j in range(self.envjson["size"] * 2):
                blocks.json[i][j] = random.choice([0, 2])
        """
        for i in range(self.envjson["size"] * 2):
            for j in range(self.envjson["size"] * 2):
                if blocks[i][j] != 0:
                    # shift grid indices (i, j) into signed world coordinates;
                    # positive coords get bumped by 1 so there is no 0 row or
                    # column — TODO confirm this skew is intended
                    x, y = j - self.envjson["size"], i - self.envjson["size"]
                    if x < 0 and y < 0:
                        pass
                    elif x > 0 and y < 0:
                        x += 1
                    elif x > 0 and y > 0:
                        x += 1
                        y += 1
                    elif x < 0 and y > 0:
                        y += 1
                    self.blocks.append(block.Block(blocks, (i, j), (x, y)))
        # pathfinder
        # invert the grid for the pathfinder: 0 (empty) -> 1 (walkable),
        # any non-zero block id -> 0 (obstacle)
        self.pathfinder_blocks = copy.deepcopy(self.envjson["blocks"])
        for i in range(len(self.pathfinder_blocks)):
            for j in range(len(self.pathfinder_blocks)):
                if self.pathfinder_blocks[i][j] == 0:
                    self.pathfinder_blocks[i][j] = 1
                elif self.pathfinder_blocks[i][j] != 0:
                    self.pathfinder_blocks[i][j] = 0
        self.pathfinder_grid = Grid(matrix=self.pathfinder_blocks)
        self.pathfinder = AStarFinder(diagonal_movement=DiagonalMovement.only_when_no_obstacle)
        # read and convert entitys/projectiles to instances of their classes
        # each serialized record is [class_name, *constructor_args]
        for i in self.envjson["entities"]:
            args = list(i)
            args.pop(0)
            self.entities.append(self.EntityObj[i[0]](env=self, args=args))
        for i in self.envjson["projectiles"]:
            args = list(i)
            args.pop(0)
            # items[name][3] appears to be the projectile factory for that
            # item — TODO confirm the item-tuple layout
            self.projectiles.append(self.items[i[0]][3](env=self, args=args))
        # create scene and set camera target
        self.scene = camera.Scene(self)
        self.scene.camera.follow(target=self.player)
    def save(self):
        # Serialize entities/projectiles back into plain lists; an object's
        # save() returning a falsy value means "do not persist this object".
        self.envjson["entities"] = []
        self.envjson["projectiles"] = []
        for i in self.entities:
            args = i.save()
            if args:
                self.envjson["entities"].append([str(type(i).__name__).lower()] + args)
        for i in self.projectiles:
            args = i.save()
            if args:
                self.envjson["projectiles"].append([str(type(i).__name__).lower()] + args)
        self.envjson["player"] = self.player.position
        self.envjson.save()
        self.hud.save_hotbar()
        self.invjson["health"] = self.player.health
        self.invjson["mana"] = self.player.mana
        self.invjson.save()
    def end_loop(self):
        # signals start_loop() to exit after the current iteration
        self.run = False
    def _update(self):
        # Per-frame bookkeeping and input handling.
        # calculate delta time
        self.now = time.time()
        self.delta_time = self.now - self.prev_time
        self.prev_time = self.now
        # count down cooldown
        if self.cooldown > 0:
            self.cooldown -= self.delta_time
        # update objects
        self.mousepos = mp_scene(scene=self.scene)
        self.scene.update()
        self.hud.update()
        # handle events
        self.key = pygame.key.get_pressed()
        self.events = list(pygame.event.get())
        for event in self.events:
            if event.type == pygame.KEYDOWN:
                # F3 + <letter> toggles the debug flags; plain 1-6 selects a
                # hotbar slot.
                if event.key == pygame.K_b:
                    if self.key[pygame.K_F3]:
                        var.soft_debug = not var.soft_debug
                        cout("soft_debug = " + str(var.soft_debug))
                elif event.key == pygame.K_h:
                    if self.key[pygame.K_F3]:
                        var.hard_debug = not var.hard_debug
                        cout("hard_debug = " + str(var.hard_debug))
                elif event.key == pygame.K_g:
                    if self.key[pygame.K_F3]:
                        var.render_all = not var.render_all
                        cout("render_all = " + str(var.render_all))
                elif event.key == pygame.K_f:
                    if self.key[pygame.K_F3]:
                        var.fps_meter = not var.fps_meter
                        cout("fps_meter = " + str(var.fps_meter))
                elif event.key == pygame.K_1:
                    self.hud.set_selectangle(0)
                elif event.key == pygame.K_2:
                    self.hud.set_selectangle(1)
                elif event.key == pygame.K_3:
                    self.hud.set_selectangle(2)
                elif event.key == pygame.K_4:
                    self.hud.set_selectangle(3)
                elif event.key == pygame.K_5:
                    self.hud.set_selectangle(4)
                elif event.key == pygame.K_6:
                    self.hud.set_selectangle(5)
            if event.type == pygame.MOUSEBUTTONDOWN:
                # item[3] / item[4] hold the left- / right-click actions of the
                # selected hotbar item; the action object manages its own
                # cooldown and registers itself with the environment.
                if self.cooldown <= 0:
                    if event.button == pygame.BUTTON_LEFT:
                        # left mousebutton clicked
                        item = self.items[self.hud.hotbar[self.hud.slot][0]]
                        if item[3] is not None:
                            # this item object will append itself to the correct array and handle cooldown etc.
                            item[3](self)
                    elif event.button == pygame.BUTTON_RIGHT:
                        # right mousebutton clicked
                        item = self.items[self.hud.hotbar[self.hud.slot][0]]
                        if item[4] is not None:
                            # this item object will append itself to the correct array and handle cooldown etc.
                            item[4](self)
                if event.button == pygame.BUTTON_WHEELUP:
                    self.hud.set_selectangle(self.hud.slot - 1)
                elif event.button == pygame.BUTTON_WHEELDOWN:
                    self.hud.set_selectangle(self.hud.slot + 1)
    def update(self):
        # hook for subclasses; called once per frame between _update() and
        # _render()
        pass
    def _render(self):
        """
        renders the gui and game surface
        """
        if var.hard_debug:
            # debug view: draw the scene unscaled with an FPS readout
            surface = pygame.Surface(self.window.get_size())
            surface.blit(pygame.transform.scale(img.misc["background"]["game"], self.window.get_size()), (0, 0))
            self.scene.draw(surface)
            render_text(window=surface, text=str(round(self.clock.get_fps())) + "", pos=(surface.get_width() - 60 , 20), color=var.WHITE, size=20)
        else:
            # background
            # 255x144 appears to be the background tile size; the modulo
            # scrolls it with the player — TODO confirm against the image
            x = self.player.hitbox.centerx % 255
            y = self.player.hitbox.centery % 144
            self.surface.blit(img.misc["background"]["game"], (255-x, 144-y))
            self.surface.blit(img.misc["background"]["game"], (255-x, 144-y-144))
            self.surface.blit(img.misc["background"]["game"], (255-x-255, 144-y))
            self.surface.blit(img.misc["background"]["game"], (255-x-255, 144-y-144))
            self.scene.draw(self.surface)
            self.hud.draw(self.surface)
            surface = pygame.transform.scale(self.surface, var.res_size)
        self.window.blit(surface, (0, 0))
        pygame.display.update()
    def _single_loop(self):
        """
        method performs a single iteration of the game loop. This can be overridden to add extra functionality before and
        after the game loop and octagon. call _update() to perform a raw iteration and _render() to render stuff out
        """
        self._update()
        self.update()
        self._render()
    def start_loop(self):
        # fixed-FPS blocking loop; world state is persisted once on exit
        self.prev_time = time.time()
        self.run = True
        while self.run:
            self.clock.tick(var.FPS)
            self._single_loop()
        self.save()
| 4,003 | 5,784 | 23 |
70eff2f01152c525f8b8dc057859f9aef2a73800 | 1,273 | py | Python | setup.py | AT0myks/imbox | 6cdc0f01350a6c4b7721e0b9666cb9ce272f2282 | [
"MIT"
] | null | null | null | setup.py | AT0myks/imbox | 6cdc0f01350a6c4b7721e0b9666cb9ce272f2282 | [
"MIT"
] | null | null | null | setup.py | AT0myks/imbox | 6cdc0f01350a6c4b7721e0b9666cb9ce272f2282 | [
"MIT"
] | null | null | null | from setuptools import setup
import os
# Get version without importing, which avoids dependency issues
setup(
name='imbox',
version=get_version(),
description="Python IMAP for Human beings",
long_description=read('README.md'),
keywords='email, IMAP, parsing emails',
author='Martin Rusev',
author_email='martin@amon.cx',
url='https://github.com/martinrusev/imbox',
license='MIT',
packages=['imbox', 'imbox.vendors'],
package_dir={'imbox': 'imbox'},
install_requires=[
'chardet',
],
python_requires='>=3.3',
zip_safe=False,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite='tests',
)
| 28.288889 | 75 | 0.61194 | from setuptools import setup
import os
# Get version without importing, which avoids dependency issues
def get_version():
    """Return the package version parsed out of ``imbox/version.py``.

    Reading the file directly (instead of importing ``imbox``) avoids
    triggering the package's imports during setup.
    """
    import re

    pattern = re.compile(r"""__version__\s+=\s+(['"])(?P<version>.+?)\1""")
    with open('imbox/version.py') as version_file:
        match = pattern.search(version_file.read())
    return match.group('version')
def read(filename):
    """Return the contents of *filename*, resolved relative to this file."""
    here = os.path.dirname(__file__)
    path = os.path.join(here, filename)
    with open(path) as handle:
        return handle.read()
setup(
name='imbox',
version=get_version(),
description="Python IMAP for Human beings",
long_description=read('README.md'),
keywords='email, IMAP, parsing emails',
author='Martin Rusev',
author_email='martin@amon.cx',
url='https://github.com/martinrusev/imbox',
license='MIT',
packages=['imbox', 'imbox.vendors'],
package_dir={'imbox': 'imbox'},
install_requires=[
'chardet',
],
python_requires='>=3.3',
zip_safe=False,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite='tests',
)
| 294 | 0 | 45 |
023af08d13a34ed97c0a6e47836ce856fe50e698 | 1,666 | py | Python | decode.py | cardi/proofpoint-url-decoder | eef834b35faabfe3c8fdb856089151b4f7471143 | [
"CC0-1.0"
] | 12 | 2018-02-24T20:00:51.000Z | 2021-08-05T16:52:55.000Z | decode.py | cardi/proofpoint-url-decoder | eef834b35faabfe3c8fdb856089151b4f7471143 | [
"CC0-1.0"
] | 4 | 2016-02-17T02:58:03.000Z | 2020-05-05T06:35:50.000Z | decode.py | cardi/proofpoint-url-decoder | eef834b35faabfe3c8fdb856089151b4f7471143 | [
"CC0-1.0"
] | 2 | 2018-09-24T17:31:23.000Z | 2018-11-14T15:53:28.000Z | #!/usr/bin/env python3
#
# Written in 2016 by Calvin Ardi <calvin@isi.edu>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software.
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
#
"""This snippet prints out an unmodified proofpoint "protected" URL.
Usage:
./decode.py url
Args:
url: a proofpoint url (usually starts with urldefense.proofpoint.com)
Returns:
A decoded URL string.
"""
import sys
import urllib.request, urllib.parse, urllib.error
if __name__ == '__main__':
  if len(sys.argv) != 2:
    sys.exit('Usage: %s encoded_url' % sys.argv[0])
  #
  # proofpoint "protected" URLs take the form of:
  #
  # https://urldefense.proofpoint.com/v2/url?[params]
  #
  # where [params] is described below
  #
  # TODO decode parameters
  #
  # c := constant (per organization)
  # d := constant (per organization)
  # e := always empty?
  # m := ?
  # r := unique identifier tied to email address
  # s := ?
  # u := safe-encoded URL
  #
  # 'm' might be a hash of the URL or some metadata
  # 's' might be a signature or checksum
  #
  query = urllib.parse.urlparse(sys.argv[1]).query
  param = urllib.parse.parse_qs(query)
  if 'u' not in param:
    sys.exit('ERROR: check if URL is a proofpoint URL')
  else:
    # Proofpoint v2 substitutes '%' -> '-' and '/' -> '_' when embedding the
    # percent-encoded original URL in 'u'; reverse that, then percent-decode.
    u = (param['u'][0].replace('-', '%')
         .replace('_', '/'))
    url = urllib.parse.unquote(u)
    print(url)
| 24.865672 | 78 | 0.655462 | #!/usr/bin/env python3
#
# Written in 2016 by Calvin Ardi <calvin@isi.edu>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software.
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
#
"""This snippet prints out an unmodified proofpoint "protected" URL.
Usage:
./decode.py url
Args:
url: a proofpoint url (usually starts with urldefense.proofpoint.com)
Returns:
A decoded URL string.
"""
import sys
import urllib.request, urllib.parse, urllib.error
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('Usage: %s encoded_url' % sys.argv[0])
#
# proofpoint "protected" URLs take the form of:
#
# https://urldefense.proofpoint.com/v2/url?[params]
#
# where [params] is described below
#
# TODO decode parameters
#
# c := constant (per organization)
# d := constant (per organization)
# e := always empty?
# m := ?
# r := unique identifier tied to email address
# s := ?
# u := safe-encoded URL
#
# 'm' might be a hash of the URL or some metadata
# 's' might be a signature or checksum
#
query = urllib.parse.urlparse(sys.argv[1]).query
param = urllib.parse.parse_qs(query)
if 'u' not in param:
sys.exit('ERROR: check if URL is a proofpoint URL')
else:
u = (param['u'][0].replace('-', '%')
.replace('_', '/'))
url = urllib.parse.unquote(u)
print(url)
| 0 | 0 | 0 |
b49fa7e2bb38a1cf88dd66da0115311861eadda9 | 6,341 | py | Python | pyibisami/tools/ami_generator.py | jdpatt/PyAMI | 82b4912496b7c5ca89b20974a5e93eddb2c519de | [
"BSD-2-Clause"
] | null | null | null | pyibisami/tools/ami_generator.py | jdpatt/PyAMI | 82b4912496b7c5ca89b20974a5e93eddb2c519de | [
"BSD-2-Clause"
] | 1 | 2019-03-20T01:33:20.000Z | 2019-04-12T00:22:02.000Z | pyibisami/tools/ami_generator.py | jdpatt/PyAMI | 82b4912496b7c5ca89b20974a5e93eddb2c519de | [
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/env python
"""
IBIS-AMI model source code, AMI file, and IBIS file configuration utility.
Original author: David Banas
Original date: February 26, 2016
This script gets called from a makefile, when any of the following need rebuilding:
* a C++ source code file
* a ``*.AMI`` file
* a ``*.IBS`` file
All three will be rebuilt.
(We rebuild all three, because it doesn't take very long, and we can
insure consistency this way.)
This gets triggered by one of two things:
#. The common model configuration information has changed, or
#. One of the EmPy template files was updated.
The idea, here, is that the ``*.IBS`` file, the ``*.AMI`` file, and the
C++ source file should be configured from a common model configuration
file, so as to ensure consistency between the three.
Copyright (c) 2016 David Banas; all rights reserved World wide.
"""
import importlib.util
from datetime import date
from pathlib import Path
import click
import em
param_types = {
"INT": {"c_type": "int", "ami_type": "Integer", "getter": "get_param_int"},
"FLOAT": {"c_type": "float", "ami_type": "Float", "getter": "get_param_float"},
"BOOL": {"c_type": "bool", "ami_type": "Boolean", "getter": "get_param_bool"},
"STRING": {"c_type": "char *", "ami_type": "String", "getter": "get_param_str"},
}
def print_param(indent, name, param):
    """
    Print AMI parameter specification. Handle nested parameters, via recursion.

    Args:
        indent (str): String containing some number of spaces.
        name (str): Parameter name.
        param (dict): Dictionary containing parameter definition fields.
    """
    print(indent, f"({name}")
    if "subs" in param:
        # Branch node: recurse into each sub-parameter, then emit the optional
        # Description at this level.
        for key in param["subs"]:
            print_param(indent + "    ", key, param["subs"][key])
        if "description" in param:
            print(indent + "    ", f"(Description {param['description']})")
    else:
        # Leaf node: emit the standard fields in AMI order.
        for (fld_name, fld_key) in [
            ("Usage", "usage"),
            ("Type", "type"),
            ("Format", "format"),
            ("Default", "default"),
            ("Description", "description"),
        ]:
            # Trap the special cases.
            if fld_name == "Type":
                # map the config's type token to its AMI spelling
                print(indent, "    (Type", param_types[param["type"]]["ami_type"], ")")
            elif fld_name == "Default":
                # Default is folded into the Format clause below, so nothing is
                # emitted for a Value-format parameter here.
                if param["format"] == "Value":
                    pass
            elif fld_name == "Format":
                if param["format"] == "Value":
                    print(indent, "    (Value", param["default"], ")")
                elif param["format"] == "List":
                    print(indent, "    (List", end=" ")
                    for item in param["values"]:
                        print(item, end=" ")
                    print(")")
                    print(indent, "    (List_Tip", end=" ")
                    for item in param["labels"]:
                        print(item, end=" ")
                    print(")")
                else:
                    # e.g. Range: format keyword followed by default/min/max
                    print(indent, f"    ({param['format']}", param["default"], param["min"], param["max"], ")")
            # Execute the default action.
            else:
                print(indent, f"    ({fld_name}", param[fld_key], ")")
    print(indent, ")")
def print_code(pname, param):
    """
    Print C++ code needed to query AMI parameter tree for a particular leaf.

    Recurses through nested parameters; for each leaf that is an input
    parameter, emits the declaration and getter call.

    Args:
        pname (str): Parameter name.
        param (dict): Dictionary containing parameter definition fields.
    """
    print(" ", f'node_names.push_back("{pname}");')
    if "subs" in param:
        # branch node: descend into each child parameter
        for child_name in param["subs"]:
            print_code(child_name, param["subs"][child_name])
    elif param["usage"] in ("In", "InOut"):
        # leaf input parameter: declare it and fetch its value
        ptype = param["type"]
        type_info = param_types[ptype]
        print(f"    {type_info['c_type']} {pname};")
        default = param["default"]
        if ptype == "BOOL":
            # C++ boolean literals are lower-case
            default = default.lower()
        print(f"    {pname} = {type_info['getter']}(node_names, {default});")
    print(" ", "node_names.pop_back();")
def ami_generator(py_file):
    """Generate the ``.cpp``, ``.ami`` and ``.ibs`` files for one model.

    Imports the model configuration module at *py_file*, then renders the
    three output files from their EmPy templates. The ``.ami``/``.ibs``
    templates ship with this package; the ``.cpp.em`` template is expected
    next to the configuration file.

    Args:
        py_file (str): path to the model configuration file (``*.py``).
    """
    file_base_name = Path(py_file).stem

    # Read model configuration information.
    print(f"Reading model configuration information from file: {py_file}.")
    spec = importlib.util.spec_from_file_location(file_base_name, py_file)
    cfg = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(cfg)

    # Configure the 3 files.
    for ext in ["cpp", "ami", "ibs"]:
        out_path = Path(py_file).with_suffix(f".{ext}")
        if ext == "ami":
            em_file = Path(__file__).parent.joinpath("generic.ami.em")
        elif ext == "ibs":
            em_file = Path(__file__).parent.joinpath("generic.ibs.em")
        else:
            em_file = out_path.with_suffix(".cpp.em")
        print(f"Building '{out_path}' from '{em_file}'...")
        # NOTE: the original shadowed the Path ('out_file') with the open file
        # object; distinct names keep the two roles readable.
        with open(out_path, "w", encoding="UTF-8") as out_stream:
            interpreter = em.Interpreter(
                output=out_stream,
                globals={
                    "ami_params": cfg.ami_params,
                    "ibis_params": cfg.ibis_params,
                    "param_types": param_types,
                    "model_name": cfg.kFileBaseName,
                    "description": cfg.kDescription,
                    "date": str(date.today()),
                },
            )
            try:
                with open(em_file, encoding="UTF-8") as em_file_object:
                    interpreter.file(em_file_object)
            finally:
                # always release the interpreter, even if rendering fails
                interpreter.shutdown()
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.argument("template", type=click.Path(exists=True, resolve_path=True))
@click.version_option()
def main(template):
    """Configure IBIS-AMI model C++ source code, IBIS model, and AMI file.

    This command generates three files based off the input config file.
    It expects a .cpp.em file to be located in the same directory so that it can
    generate a cpp file from the config file and template file.

    template: name of model configuration file (*.py)
    """
    # click has already verified TEMPLATE exists and resolved it to an
    # absolute path before this function runs.
    ami_generator(template)
| 36.234286 | 115 | 0.565211 | #! /usr/bin/env python
"""
IBIS-AMI model source code, AMI file, and IBIS file configuration utility.
Original author: David Banas
Original date: February 26, 2016
This script gets called from a makefile, when any of the following need rebuilding:
* a C++ source code file
* a ``*.AMI`` file
* a ``*.IBS`` file
All three will be rebuilt.
(We rebuild all three, because it doesn't take very long, and we can
insure consistency this way.)
This gets triggered by one of two things:
#. The common model configuration information has changed, or
#. One of the EmPy template files was updated.
The idea, here, is that the ``*.IBS`` file, the ``*.AMI`` file, and the
C++ source file should be configured from a common model configuration
file, so as to ensure consistency between the three.
Copyright (c) 2016 David Banas; all rights reserved World wide.
"""
import importlib.util
from datetime import date
from pathlib import Path
import click
import em
param_types = {
"INT": {"c_type": "int", "ami_type": "Integer", "getter": "get_param_int"},
"FLOAT": {"c_type": "float", "ami_type": "Float", "getter": "get_param_float"},
"BOOL": {"c_type": "bool", "ami_type": "Boolean", "getter": "get_param_bool"},
"STRING": {"c_type": "char *", "ami_type": "String", "getter": "get_param_str"},
}
def print_param(indent, name, param):
"""
Print AMI parameter specification. Handle nested parameters, via recursion.
Args:
indent (str): String containing some number of spaces.
name (str): Parameter name.
param (dict): Dictionary containing parameter definition fields.
"""
print(indent, f"({name}")
if "subs" in param:
for key in param["subs"]:
print_param(indent + " ", key, param["subs"][key])
if "description" in param:
print(indent + " ", f"(Description {param['description']})")
else:
for (fld_name, fld_key) in [
("Usage", "usage"),
("Type", "type"),
("Format", "format"),
("Default", "default"),
("Description", "description"),
]:
# Trap the special cases.
if fld_name == "Type":
print(indent, " (Type", param_types[param["type"]]["ami_type"], ")")
elif fld_name == "Default":
if param["format"] == "Value":
pass
elif fld_name == "Format":
if param["format"] == "Value":
print(indent, " (Value", param["default"], ")")
elif param["format"] == "List":
print(indent, " (List", end=" ")
for item in param["values"]:
print(item, end=" ")
print(")")
print(indent, " (List_Tip", end=" ")
for item in param["labels"]:
print(item, end=" ")
print(")")
else:
print(indent, f" ({param['format']}", param["default"], param["min"], param["max"], ")")
# Execute the default action.
else:
print(indent, f" ({fld_name}", param[fld_key], ")")
print(indent, ")")
def print_code(pname, param):
"""
Print C++ code needed to query AMI parameter tree for a particular leaf.
Args:
pname (str): Parameter name.
param (dict): Dictionary containing parameter definition fields.
"""
print(" ", f'node_names.push_back("{pname}");')
if "subs" in param:
for key in param["subs"]:
print_code(key, param["subs"][key])
else:
if param["usage"] == "In" or param["usage"] == "InOut":
ptype = param["type"]
print(f" {param_types[ptype]['c_type']} {pname};")
if ptype == "BOOL":
print(f" {pname} = {param_types[ptype]['getter']}(node_names, {param['default'].lower()});")
else:
print(f" {pname} = {param_types[ptype]['getter']}(node_names, {param['default']});")
print(" ", "node_names.pop_back();")
def ami_generator(py_file):
"""Read in the ``py_file`` and cpp.em file then generate a ibis, ami and cpp."""
file_base_name = Path(py_file).stem
# Read model configuration information.
print(f"Reading model configuration information from file: {py_file}.")
spec = importlib.util.spec_from_file_location(file_base_name, py_file)
cfg = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cfg)
# Configure the 3 files.
for ext in ["cpp", "ami", "ibs"]:
out_file = Path(py_file).with_suffix(f".{ext}")
if ext == "ami":
em_file = Path(__file__).parent.joinpath("generic.ami.em")
elif ext == "ibs":
em_file = Path(__file__).parent.joinpath("generic.ibs.em")
else:
em_file = out_file.with_suffix(".cpp.em")
print(f"Building '{out_file}' from '{em_file}'...")
with open(out_file, "w", encoding="UTF-8") as out_file:
interpreter = em.Interpreter(
output=out_file,
globals={
"ami_params": cfg.ami_params,
"ibis_params": cfg.ibis_params,
"param_types": param_types,
"model_name": cfg.kFileBaseName,
"description": cfg.kDescription,
"date": str(date.today()),
},
)
try:
with open(em_file, encoding="UTF-8") as em_file_object:
interpreter.file(em_file_object)
finally:
interpreter.shutdown()
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.argument("template", type=click.Path(exists=True, resolve_path=True))
@click.version_option()
def main(template):
"""Configure IBIS-AMI model C++ source code, IBIS model, and AMI file.
This command generates three files based off the input config file.
It expects a .cpp.em file to be located in the same directory so that it can
generate a cpp file from the config file and template file.
template: name of model configuration file (*.py)
"""
ami_generator(template)
| 0 | 0 | 0 |
7fca02acbdaee874a9f10b153ace0e59eaf7cad1 | 1,272 | py | Python | namematcher/name.py | sansbacon/namematcher | 0f6c3327784d3db5be0f59853d11604fd802a7a0 | [
"MIT"
] | 1 | 2019-05-01T04:47:29.000Z | 2019-05-01T04:47:29.000Z | namematcher/name.py | sansbacon/playermatcher | 0f6c3327784d3db5be0f59853d11604fd802a7a0 | [
"MIT"
] | null | null | null | namematcher/name.py | sansbacon/playermatcher | 0f6c3327784d3db5be0f59853d11604fd802a7a0 | [
"MIT"
] | null | null | null | '''
name.py
Common name functions
'''
import logging
from nameparser import HumanName
logging.getLogger(__name__).addHandler(logging.NullHandler())
def first_last(name):
    '''
    Returns name in First Last format

    Args:
        name(str)

    Returns:
        str

    '''
    parsed = HumanName(name)
    return f'{parsed.first} {parsed.last}'
def first_last_pair(name):
    '''
    Returns name in First Last pair

    Args:
        name(str)

    Returns:
        tuple: of str

    '''
    parsed = HumanName(name)
    return (parsed.first, parsed.last)
def last_first(name):
    '''
    Returns name in Last, First format

    Args:
        name(str)

    Returns:
        str

    '''
    parsed = HumanName(name)
    return f'{parsed.last}, {parsed.first}'
def namestrip(nm, tostrip=None):
    '''
    Strips various characters out of name. Used for better matching.

    Args:
        nm(str):
        tostrip(list): of str

    Returns:
        str

    '''
    # Order matters: 'III' must be removed before 'II', and ', ' before ','.
    defaults = ['Jr.', 'III', 'IV', 'II', "'", '.', ', ', ',']
    for token in (tostrip if tostrip else defaults):
        nm = nm.replace(token, '')
    # a trailing bare 'V' (the suffix, not an initial) is dropped separately
    parts = nm.split()
    if parts and parts[-1] == 'V':
        nm = ' '.join(parts[:-1])
    return nm.strip()
if __name__ == '__main__':
pass
| 15.512195 | 68 | 0.543239 | '''
name.py
Common name functions
'''
import logging
from nameparser import HumanName
logging.getLogger(__name__).addHandler(logging.NullHandler())
def first_last(name):
'''
Returns name in First Last format
Args:
name(str)
Returns:
str
'''
hn = HumanName(name)
return '{0} {1}'.format(hn.first, hn.last)
def first_last_pair(name):
'''
Returns name in First Last pair
Args:
name(str)
Returns:
tuple: of str
'''
hn = HumanName(name)
return (hn.first, hn.last)
def last_first(name):
'''
Returns name in Last, First format
Args:
name(str)
Returns:
str
'''
hn = HumanName(name)
return '{1}, {0}'.format(hn.first, hn.last)
def namestrip(nm, tostrip=None):
'''
Strips various characters out of name. Used for better matching.
Args:
nm(str):
tostrip(list): of str
Returns:
str
'''
if not tostrip:
tostrip = ['Jr.', 'III', 'IV', 'II', "'", '.', ', ', ',']
for char in tostrip:
nm = nm.replace(char, '')
if len(nm.split()) > 0 and nm.split()[-1] == 'V':
nm = ' '.join(nm.split()[:-1])
return nm.strip()
if __name__ == '__main__':
pass
| 0 | 0 | 0 |
29822bb68190a23a133bc28ddc7ced8fd2e1380e | 1,175 | py | Python | setup.py | niall-twomey/recommender_metrics | a195ffbfdfb261bb973479a58123c0cc9ae2ae4c | [
"MIT"
] | 1 | 2021-05-19T18:10:23.000Z | 2021-05-19T18:10:23.000Z | setup.py | niall-twomey/recommender_metrics | a195ffbfdfb261bb973479a58123c0cc9ae2ae4c | [
"MIT"
] | null | null | null | setup.py | niall-twomey/recommender_metrics | a195ffbfdfb261bb973479a58123c0cc9ae2ae4c | [
"MIT"
] | null | null | null | from distutils.util import convert_path
import setuptools
# Load the readme
with open("README.md", "r") as fh:
long_description = fh.read()
# Load the version info
version_namespace = {}
ver_path = convert_path("recommender_metrics/version.py")
with open(ver_path) as ver_file:
exec(ver_file.read(), version_namespace)
# Execute the setup
setuptools.setup(
name="recommender-metrics",
version=version_namespace["__version__"],
author="Niall Twomey",
author_email="twomeynj@gmail.com",
description="Recommender metric evaluation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/niall-twomey/recommender_metrics",
install_requires=["numpy", "scikit-learn", "tqdm"],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires=">=3.6", # Hasn't been tested below this
test_suite="nose.collector",
tests_require=["nose"],
)
| 31.756757 | 69 | 0.704681 | from distutils.util import convert_path
import setuptools
# Load the readme: reused verbatim as the long_description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Load the version info by exec-ing version.py into a throwaway namespace,
# so the version can be read without importing the package itself.
version_namespace = {}
ver_path = convert_path("recommender_metrics/version.py")
with open(ver_path) as ver_file:
    exec(ver_file.read(), version_namespace)
# Execute the setup
setuptools.setup(
    name="recommender-metrics",
    version=version_namespace["__version__"],
    author="Niall Twomey",
    author_email="twomeynj@gmail.com",
    description="Recommender metric evaluation",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/niall-twomey/recommender_metrics",
    install_requires=["numpy", "scikit-learn", "tqdm"],
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    python_requires=">=3.6", # Hasn't been tested below this
    test_suite="nose.collector",
    tests_require=["nose"],
)
| 0 | 0 | 0 |
bc2a45af624278e403bb3fb45fea1f5191ff6ddc | 5,417 | py | Python | renormalizer/spectra/finitet.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | renormalizer/spectra/finitet.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | renormalizer/spectra/finitet.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Author: Jiajun Ren <jiajunren0522@gmail.com>
import numpy as np
from renormalizer.mps import Mpo, MpDm, ThermalProp
from renormalizer.spectra.base import SpectraTdMpsJobBase
from renormalizer.mps.mps import BraKetPair
from renormalizer.utils import CompressConfig, EvolveConfig
import os
import logging
logger = logging.getLogger(__name__)
| 35.874172 | 115 | 0.634484 | # -*- coding: utf-8 -*-
# Author: Jiajun Ren <jiajunren0522@gmail.com>
import numpy as np
from renormalizer.mps import Mpo, MpDm, ThermalProp
from renormalizer.spectra.base import SpectraTdMpsJobBase
from renormalizer.mps.mps import BraKetPair
from renormalizer.utils import CompressConfig, EvolveConfig
import os
import logging
logger = logging.getLogger(__name__)
class BraKetPairEmiFiniteT(BraKetPair):
    """Bra-ket pair used for finite-temperature emission spectra."""
    def calc_ft(self):
        # Emission uses the complex conjugate of the base-class result;
        # presumably the emission correlation function is the conjugate of
        # absorption's -- confirm against BraKetPair.calc_ft.
        return np.conj(super(BraKetPairEmiFiniteT, self).calc_ft())
class BraKetPairAbsFiniteT(BraKetPair):
    """Bra-ket pair for finite-temperature absorption; base behaviour unchanged."""
    pass
class SpectraFiniteT(SpectraTdMpsJobBase):
    """Finite-temperature spectra (absorption or emission).

    The thermal state is prepared by imaginary-time propagation (note the
    ``/ 2j`` time steps below), the dipole operator is applied, and the
    resulting bra/ket pair is then evolved in real time to accumulate the
    autocorrelation function.
    """
    def __init__(
        self,
        mol_list,
        spectratype,
        temperature,
        insteps,
        offset,
        evolve_config=None,
        icompress_config=None,
        ievolve_config=None,
        gs_shift=0,
        dump_dir: str=None,
        job_name=None,
    ):
        # insteps: number of imaginary-time steps used to prepare the
        # thermal density matrix.  icompress_config / ievolve_config
        # configure that imaginary-time stage only; fresh defaults are
        # created when they are omitted.
        self.temperature = temperature
        self.insteps = insteps
        # gs_shift is stored but not read in this class; presumably a
        # ground-state energy shift consumed elsewhere -- confirm.
        self.gs_shift = gs_shift
        self.icompress_config = icompress_config
        self.ievolve_config = ievolve_config
        if self.icompress_config is None:
            self.icompress_config = CompressConfig()
        if self.ievolve_config is None:
            self.ievolve_config = EvolveConfig()
        self.dump_dir = dump_dir
        self.job_name = job_name
        super(SpectraFiniteT, self).__init__(
            mol_list,
            spectratype,
            temperature,
            evolve_config=evolve_config,
            offset=offset,
            dump_dir=dump_dir,
            job_name=job_name
        )
    def init_mps(self):
        """Dispatch to the emission or absorption initializer."""
        if self.spectratype == "emi":
            return self.init_mps_emi()
        else:
            return self.init_mps_abs()
    def init_mps_emi(self):
        """Build the initial bra/ket pair for an emission calculation.

        The maximally-entangled excited-state density matrix is propagated
        in imaginary time (reloaded from a cached dump when one exists),
        then the "a"-type (annihilation) dipole operator is applied.
        """
        dipole_mpo = Mpo.onsite(self.mol_list, "a", dipole=True)
        i_mpo = MpDm.max_entangled_ex(self.mol_list)
        i_mpo.compress_config = self.icompress_config
        if self.job_name is None:
            job_name = None
        else:
            job_name = self.job_name + "_thermal_prop"
        # only propagate half beta
        tp = ThermalProp(
            i_mpo, self.h_mpo, evolve_config=self.ievolve_config,
            dump_dir=self.dump_dir, job_name=job_name
        )
        if tp._defined_output_path:
            try:
                logger.info(
                    f"load density matrix from {self._thermal_dump_path}"
                )
                ket_mpo = MpDm.load(self.mol_list, self._thermal_dump_path)
                logger.info(f"density matrix loaded:{ket_mpo}")
            except FileNotFoundError:
                # No cached thermal state: run the imaginary-time
                # propagation now and cache the result for future jobs.
                logger.debug(f"no file found in {self._thermal_dump_path}")
                tp.evolve(None, self.insteps, self.temperature.to_beta() / 2j)
                ket_mpo = tp.latest_mps
                ket_mpo.dump(self._thermal_dump_path)
        else:
            tp.evolve(None, self.insteps, self.temperature.to_beta() / 2j)
            ket_mpo = tp.latest_mps
        ket_mpo.evolve_config = self.evolve_config
        # e^{\-beta H/2} \Psi
        dipole_mpo_dagger = dipole_mpo.conj_trans()
        dipole_mpo_dagger.build_empty_qn()
        a_ket_mpo = ket_mpo.apply(dipole_mpo_dagger, canonicalise=True)
        a_ket_mpo.canonical_normalize()
        a_bra_mpo = a_ket_mpo.copy()
        return BraKetPairEmiFiniteT(a_bra_mpo, a_ket_mpo)
    @property
    def _thermal_dump_path(self):
        """Path of the cached imaginary-time density-matrix dump."""
        assert self._defined_output_path
        return os.path.join(self.dump_dir, self.job_name + "_impo.npz")
    def get_dump_dict(self):
        """Collect the quantities serialized when the job is dumped."""
        dump_dict = dict()
        dump_dict['temperature'] = self.temperature.as_au()
        dump_dict['time series'] = self.evolve_times
        dump_dict['autocorr'] = self.autocorr
        return dump_dict
    def stop_evolve_criteria(self):
        """Stop when the last 10 autocorrelation points are both tiny
        relative to the initial value and essentially flat."""
        corr = self.autocorr
        if len(corr) < 10:
            return False
        last_corr = corr[-10:]
        first_corr = corr[0]
        return np.abs(last_corr.mean()) < 1e-5 * np.abs(first_corr) and last_corr.std() < 1e-5 * np.abs(first_corr)
    def init_mps_abs(self):
        """Build the initial bra/ket pair for an absorption calculation.

        The ground-state thermal propagation is done with ``exact=True``
        in a single step, then the creation-type dipole operator is
        applied.
        """
        dipole_mpo = Mpo.onsite(self.mol_list, r"a^\dagger", dipole=True)
        i_mpo = MpDm.max_entangled_gs(self.mol_list)
        i_mpo.compress_config = self.icompress_config
        beta = self.temperature.to_beta()
        tp = ThermalProp(i_mpo, self.h_mpo, exact=True, space="GS")
        tp.evolve(None, 1, beta / 2j)
        ket_mpo = tp.latest_mps
        ket_mpo.evolve_config = self.evolve_config
        a_ket_mpo = dipole_mpo.apply(ket_mpo, canonicalise=True)
        if self.evolve_config.is_tdvp:
            a_ket_mpo = a_ket_mpo.expand_bond_dimension(self.h_mpo)
        a_ket_mpo.canonical_normalize()
        a_bra_mpo = a_ket_mpo.copy()
        return BraKetPairAbsFiniteT(a_bra_mpo, a_ket_mpo)
    def evolve_single_step(self, evolve_dt):
        """Advance the correlation function by one real-time step.

        Ket and bra are evolved on alternating steps (ket on odd steps,
        bra on even ones), each with an exact "GS" counter-evolution in
        the opposite direction.
        """
        latest_bra_mpo, latest_ket_mpo = self.latest_mps
        if len(self.evolve_times) % 2 == 1:
            latest_ket_mpo = \
                latest_ket_mpo.evolve_exact(self.h_mpo, -evolve_dt, "GS")
            latest_ket_mpo = latest_ket_mpo.evolve(self.h_mpo, evolve_dt)
        else:
            latest_bra_mpo = \
                latest_bra_mpo.evolve_exact(self.h_mpo, evolve_dt, "GS")
            latest_bra_mpo = latest_bra_mpo.evolve(self.h_mpo, -evolve_dt)
        return self.latest_mps.__class__(latest_bra_mpo, latest_ket_mpo)
| 4,655 | 295 | 95 |
5674fd5c9728b9f954621893c6f14f5e32ddb63a | 1,969 | py | Python | quakenet/data_io.py | AfricaMachineIntelligence/ConvNetQuake | 6fea487ce0031d31cc64ad14f9ffc6a0b088a723 | [
"MIT"
] | 162 | 2017-02-10T20:13:57.000Z | 2022-03-06T12:50:50.000Z | quakenet/data_io.py | VioletaSeo/ConvNetQuake | 9d8bb6d41e5e3185edf3a3fc716539b910e17cfe | [
"MIT"
] | 15 | 2017-05-25T03:58:35.000Z | 2020-03-12T18:39:10.000Z | quakenet/data_io.py | VioletaSeo/ConvNetQuake | 9d8bb6d41e5e3185edf3a3fc716539b910e17cfe | [
"MIT"
] | 108 | 2017-05-25T03:19:51.000Z | 2022-03-18T02:07:09.000Z | """Handle the raw data input/output and interface with external formats."""
from obspy.core import read
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
import datetime as dt
def load_stream(path):
    """Loads a Stream object from the file at path.
    Args:
        path: path to the input file, (for supported formats see,
        http://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html)
    Returns:
        an obspy.core.Stream object
        (http://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html#obspy.core.stream.Stream)
    """
    stream = read(path)
    # merge() consolidates traces in place (obspy API).
    stream.merge()
    # NOTE(review): the 3-component check below is disabled; confirm
    # whether streams with other trace counts are really acceptable.
    # assert len(stream) == 3 # We need X,Y,Z traces
    return stream
def load_catalog(path):
    """Loads an event catalog from a .csv file.
    Each row in the catalog references a known seismic event.
    Args:
        path: path to the input .csv file.
    Returns:
        catalog: A Pandas dataframe.
    """
    catalog = pd.read_csv(path)
    # Check if utc_timestamp exists, otherwise create it from the
    # 'origintime' column (assumed parseable by obspy's UTCDateTime).
    if 'utc_timestamp' not in catalog.columns:
        utc_timestamp = []
        for e in catalog.origintime.values:
            utc_timestamp.append(UTCDateTime(e).timestamp)
        catalog['utc_timestamp'] = utc_timestamp
    return catalog
| 28.536232 | 103 | 0.670391 | """Handle the raw data input/output and interface with external formats."""
from obspy.core import read
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
import datetime as dt
def load_stream(path):
    """Load a seismogram file into an obspy Stream.
    Args:
        path: path to the input file, (for supported formats see,
        http://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html)
    Returns:
        an obspy.core.Stream object
        (http://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html#obspy.core.stream.Stream)
    """
    loaded = read(path)
    loaded.merge()
    # assert len(loaded) == 3 # We need X,Y,Z traces
    return loaded
def load_catalog(path):
    """Read a .csv event catalog into a pandas DataFrame.

    Each row of the catalog describes one known seismic event.  When the
    file has no precomputed 'utc_timestamp' column, one is derived from
    the 'origintime' column.

    Args:
        path: path to the input .csv file.
    Returns:
        catalog: A Pandas dataframe.
    """
    catalog = pd.read_csv(path)
    if 'utc_timestamp' not in catalog.columns:
        # Derive POSIX timestamps from the origin-time strings.
        catalog['utc_timestamp'] = [
            UTCDateTime(origin).timestamp for origin in catalog.origintime.values
        ]
    return catalog
def write_stream(stream, path):
    """Write *stream* to *path* in MiniSEED format."""
    stream.write(path, format='MSEED')
def write_catalog(events, path):
    """Save event times as a one-column 'utc_timestamp' catalog .csv.
    Args:
        events: iterable of objects exposing a ``.timestamp`` attribute
            (e.g. obspy UTCDateTime).
        path: output .csv path.
    """
    catalog = pd.DataFrame(
        {'utc_timestamp': pd.Series([t.timestamp for t in events])})
    catalog.to_csv(path)
def write_catalog_with_clusters(events, clusters, latitudes, longitudes, depths, path):
    """Save events with cluster assignments and locations as a .csv.

    Args:
        events: iterable of event timestamps (written as 'utc_timestamp').
        clusters: iterable of cluster ids, aligned with *events*.
        latitudes, longitudes, depths: iterables of event coordinates,
            aligned with *events*.
        path: output .csv path.
    """
    # list(...) instead of the identity comprehensions `[t for t in events]`
    # etc.; same result, and it works for any iterable, not just sequences.
    catalog = pd.DataFrame(
        {'utc_timestamp': pd.Series(list(events)),
         'cluster_id': pd.Series(list(clusters)),
         'latitude': pd.Series(list(latitudes)),
         'longitude': pd.Series(list(longitudes)),
         'depth': pd.Series(list(depths))})
    catalog.to_csv(path)
| 607 | 0 | 69 |
b9b359d27fc50e548e1458ba61b0b764c0deece5 | 1,350 | py | Python | datastructures/trees/binary_search_tree/bst_iterator.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 3 | 2017-05-02T10:28:13.000Z | 2019-02-06T09:10:11.000Z | datastructures/trees/binary_search_tree/bst_iterator.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 2 | 2017-06-21T20:39:14.000Z | 2020-02-25T10:28:57.000Z | datastructures/trees/binary_search_tree/bst_iterator.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 2 | 2016-07-29T04:35:22.000Z | 2017-01-18T17:05:36.000Z | from datastructures.stacks import Stack
from datastructures.trees.binary_tree_node import BinaryTreeNode
| 34.615385 | 108 | 0.663704 | from datastructures.stacks import Stack
from datastructures.trees.binary_tree_node import BinaryTreeNode
class BinarySearchTreeIterator:
    """In-order (ascending) iterator over a binary search tree.

    A stack holds the path of not-yet-visited ancestors, so ``next`` runs
    in amortised O(1) time and the iterator uses O(h) space, where h is
    the height of the tree.
    """
    def __init__(self, root: BinaryTreeNode):
        self.root = root
        self.stack = Stack()
        # Prime the stack with the path to the smallest (leftmost) node.
        self.__leftmost_inorder(root)
    def __leftmost_inorder(self, node: BinaryTreeNode) -> None:
        """Push *node* and all of its left descendants onto the stack."""
        while node:
            self.stack.push(node)
            node = node.left
    def next(self) -> int:
        """
        Returns the next smallest number in a BST
        """
        # The stack top is always the smallest unvisited element.
        topmost_node = self.stack.pop()
        # If the node has a right child, its in-order successors live in
        # that right subtree: push its leftmost path.  The left subtree
        # needs no handling -- it was already consumed by the time this
        # node reached the stack top.  The cost of this call depends on
        # the tree's shape, but amortises to O(1) per next() overall.
        if topmost_node.right:
            self.__leftmost_inorder(topmost_node.right)
        return topmost_node.data
    def has_next(self) -> bool:
        """Return True while there are still unvisited nodes.

        Bug fix: this previously returned ``self.stack.is_empty()``
        directly, i.e. True exactly when iteration was already finished.
        """
        return not self.stack.is_empty()
| 277 | 944 | 23 |
4d451e2d20fe2b297765f47c0f6c62e2ecc3467e | 4,987 | py | Python | tests/test_git.py | ChristopherMacGown/roundabout | b2222bb1aa98a1c39c26928dbcd9f1be114d02e4 | [
"Apache-2.0"
] | 3 | 2016-08-09T21:26:06.000Z | 2018-04-23T15:11:56.000Z | tests/test_git.py | ChristopherMacGown/roundabout | b2222bb1aa98a1c39c26928dbcd9f1be114d02e4 | [
"Apache-2.0"
] | 1 | 2020-10-23T20:49:22.000Z | 2020-10-23T20:49:22.000Z | tests/test_git.py | ChristopherMacGown/roundabout | b2222bb1aa98a1c39c26928dbcd9f1be114d02e4 | [
"Apache-2.0"
] | null | null | null | import os
import git
import time
import unittest
from roundabout.config import Config
from roundabout.git_client import Git, GitException
from tests import utils
| 35.119718 | 92 | 0.630038 | import os
import git
import time
import unittest
from roundabout.config import Config
from roundabout.git_client import Git, GitException
from tests import utils
def create_test_repo():
    """Ensure a throwaway git repo exists under testdata and return its path.

    The repo is created (with a single committed README) only on the
    first call; later calls reuse the existing directory.
    """
    repo_path = utils.testdata('test_repo')
    if os.path.exists(repo_path):
        return repo_path
    repo = git.Repo.init(repo_path, mkdir=True)
    readme_path = os.path.join(repo_path, "README")
    with open(readme_path, "w") as fp:
        fp.write("This is just test stuff")
    repo.git.execute(("git", "add", "README"))
    repo.git.execute(("git", "commit", "-m", "Test commit"))
    return repo_path
class GitTestCase(utils.TestHelper):
    """Tests for roundabout's Git wrapper, run against a local throwaway
    repository built by create_test_repo() -- no network access needed.
    (Python 2 source: note the print statements and ``except Exc, e``.)
    """
    def setUp(self):
        # Record the start time so tearDown can report each test's duration.
        self.t = time.time()
        repo_path = create_test_repo()
        config = Config(config_file=utils.testdata('good_git.cfg'))
        # Point the config at the local test repo.
        config["git"]["base_repo_url"] = repo_path
        remote_branch = config["test"]["remote_branch"]
        remote_name = config["test"]["remote_name"]
        remote_url = repo_path
        self.repo = Git(remote_name=remote_name,
                        remote_url=remote_url,
                        remote_branch=remote_branch,
                        config=config)
    def tearDown(self):
        # Print per-test wall-clock timing.
        print "%s: %f" % (self.id(), time.time() - self.t)
    def test_clone_repo_with_good_config(self):
        self.assertTrue(self.repo)
    def test_enter_repo_with_good_config(self):
        # The Git object is a context manager; enter and exit it manually.
        self.repo.repo.create_head(self.repo.remote_branch)
        self.assertTrue(self.repo.__enter__())
        self.assertTrue(self.repo.branch('master').checkout())
        self.assertFalse(self.repo.__exit__())
    def test_clean_merge_with_good_config(self):
        self.repo.repo.create_head(self.repo.remote_branch)
        with self.repo as repo:
            self.assertTrue(repo.merge('master'))
            self.assertTrue(repo.branch('master').checkout())
    def test_clean_squash_merge_with_good_Config(self):
        # Commit a file on the remote branch, then squash-merge into master.
        branch = self.repo.remote_branch
        self.repo.repo.create_head(branch)
        self.repo.branch(branch).checkout()
        curdir = os.getcwd()
        os.chdir(self.repo.clonepath)
        with open("testfile", "w") as test:
            test.write("this is just a test")
        self.repo.repo.git.execute(('git', 'add', 'testfile'))
        self.repo.repo.git.execute(("git", "commit", "-m", "test_commit"))
        self.repo.branch("master").checkout()
        self.assertTrue(self.repo.merge(branch, squash=True))
        os.chdir(curdir)
    def test_clean_squash_merge_with_good_config_but_no_squash_message(self):
        # Same as above, but clonepath is broken before the merge so the
        # squash-message file cannot be read; the merge must still succeed.
        branch = self.repo.remote_branch
        self.repo.repo.create_head(branch)
        self.repo.branch(branch).checkout()
        curdir = os.getcwd()
        os.chdir(self.repo.clonepath)
        with open("testfile", "w") as test:
            test.write("this is just a test")
        self.repo.repo.git.execute(('git', 'add', 'testfile'))
        self.repo.repo.git.execute(("git", "commit", "-m", "test_commit"))
        self.repo.branch("master").checkout()
        self.repo.clonepath="/i/am/a/fake/path/"
        self.assertTrue(self.repo.merge(branch, squash=True))
        os.chdir(curdir)
    def test_merge_fails_for_some_reason_should_raise(self):
        # FakeGit makes every git invocation fail, so merge() must raise
        # GitException and reset the working tree.
        class FakeGit(git.Repo):
            """ A fake git class """
            def execute(self, command):
                """ No matter what, we raise a git.exc.GitCommandError """
                raise git.exc.GitCommandError(command, -9999)
            def reset(self, *args, **kwargs):
                """ Pretend to reset a failed merge. """
                pass
        self.repo.repo.create_head(self.repo.remote_branch)
        self.repo.repo.git = FakeGit()
        self.assertRaises(GitException, self.repo.merge, "master")
        try:
            self.assertCalled(self.repo.repo.git.reset, self.repo.merge, "master")
        except GitException, e:
            pass
    def test_push_with_good_config(self):
        self.assertTrue(self.repo.push('master'))
        self.assertTrue(self.repo.push('master', remote_branch='foo'))
    def test_cleanup_master_raises(self):
        # Refusing to clean up the master branch is expected behaviour.
        self.repo.local_branch_name = 'master'
        self.assertRaises(GitException, self.repo.cleanup)
    def test_cleanup_with_os_error_raises(self):
        self.repo.clonepath = "/this/path/doesn't/exist"
        self.assertRaises(GitException, self.repo.cleanup)
    def test_cleanup_with_good_config_doesnt_raise(self):
        # try/except/else: result is True only when cleanup() did not raise.
        try:
            self.repo.cleanup()
        except GitException, e:
            result = False
        else:
            result = True
        self.assertTrue(result)
    def test_clone_repo_with_bad_config(self):
        config = Config(config_file=utils.testdata('bad_git.cfg'))
        remote_name = config["test"]["remote_name"]
        remote_url = config["test"]["remote_url"]
        remote_branch = config["test"]["remote_branch"]
        self.assertRaises(GitException, Git, remote_name, remote_url, remote_branch, config)
| 4,411 | 15 | 396 |
96b0b2385f546a67ce430aec7c5a1b3251ee87e9 | 277 | py | Python | c__84.py | fhansmann/coding-challenges | eebb37565c72e05b77383c24e8273a1e4019b58e | [
"MIT"
] | null | null | null | c__84.py | fhansmann/coding-challenges | eebb37565c72e05b77383c24e8273a1e4019b58e | [
"MIT"
] | null | null | null | c__84.py | fhansmann/coding-challenges | eebb37565c72e05b77383c24e8273a1e4019b58e | [
"MIT"
] | null | null | null | subjects=["I", "You"]
verbs=["Play", "Love"]
objects=["Hockey","Football"]
# Print every subject-verb-object combination as a simple sentence
# (Cartesian product: 2 x 2 x 2 = 8 lines).
for i in range(len(subjects)):
    for j in range(len(verbs)):
        for k in range(len(objects)):
            sentence = "%s %s %s." % (subjects[i], verbs[j], objects[k])
            print(sentence)
| 30.777778 | 72 | 0.559567 | subjects=["I", "You"]
verbs = ["Play", "Love"]
objects = ["Hockey", "Football"]
# Emit every subject-verb-object combination as a simple sentence,
# iterating the elements directly instead of via range(len(...)).
for subject in subjects:
    for verb in verbs:
        for obj in objects:
            sentence = "%s %s %s." % (subject, verb, obj)
            print(sentence)
| 0 | 0 | 0 |
444d27edb84e0134aecd2dd93a8734c3940cff4b | 26 | py | Python | compose/__init__.py | pareshmg/compose | cba758361499d74ef26bf281b73206e6dc12b5c9 | [
"Apache-2.0"
] | 2 | 2020-12-08T21:11:58.000Z | 2021-02-19T11:59:47.000Z | compose/__init__.py | pareshmg/compose | cba758361499d74ef26bf281b73206e6dc12b5c9 | [
"Apache-2.0"
] | 20 | 2020-09-07T16:12:31.000Z | 2022-03-29T22:05:14.000Z | compose/__init__.py | pareshmg/compose | cba758361499d74ef26bf281b73206e6dc12b5c9 | [
"Apache-2.0"
] | 1 | 2019-06-11T15:42:28.000Z | 2019-06-11T15:42:28.000Z | __version__ = '1.28.0dev'
| 13 | 25 | 0.692308 | __version__ = '1.28.0dev'
| 0 | 0 | 0 |
e59a74324e1f816e6c8f796417e7e26135c6bb8d | 6,746 | py | Python | examples/Python/XdmfExampleMap.py | scottwedge/xdmf | f41196c966997a20f60525a3d2083490a63626a3 | [
"BSD-3-Clause"
] | 4 | 2015-12-07T08:11:06.000Z | 2020-06-15T01:39:07.000Z | examples/Python/XdmfExampleMap.py | scottwedge/xdmf | f41196c966997a20f60525a3d2083490a63626a3 | [
"BSD-3-Clause"
] | 1 | 2020-04-26T16:50:37.000Z | 2020-04-26T16:50:37.000Z | examples/Python/XdmfExampleMap.py | scottwedge/xdmf | f41196c966997a20f60525a3d2083490a63626a3 | [
"BSD-3-Clause"
] | 4 | 2016-04-04T20:54:31.000Z | 2020-06-15T01:39:08.000Z | from Xdmf import *
if __name__ == "__main__":
#//initialization begin
exampleMap = XdmfMap.New()
#//initialization end
#//initializationnode begin
#create attributes for each task id
#the index of the node id in the attribute is the local node id
map1Attribute = XdmfAttribute.New()
map1Attribute.setName("Test Attribute")
map1Attribute.setType(XdmfAttributeType.Scalar())
map1Attribute.setCenter(XdmfAttributeCenter.Node())
map1Vals = [1,2,3,4,5,7,9]
map1Attribute.insertAsInt32(0, map1Vals)
map2Attribute = XdmfAttribute.New()
map2Attribute.setName("Test Attribute")
map2Attribute.setType(XdmfAttributeType.Scalar())
map2Attribute.setCenter(XdmfAttributeCenter.Node())
map2Vals = [9,8,7,4,3]
map2Attribute.insertAsInt32(0, map2Vals)
#insert the attributes into a vector
#the id of the attribute in the vector is equal to the task id
testVector = AttributeVector()
testVector.push_back(map1Attribute)
testVector.push_back(map2Attribute)
exampleMapVector = XdmfMap.New(testVector)
#returns a vector of maps that holds the equivalencies for the nodes provided
#for example if Attribute 1 had globalNodeID 3 at localNodeID 2
#and Attribute 3 had globalNodeID 3 at localNodeID 5
#then map 1 would have an entry of (3, 5, 2)
#and map 3 would have an entry of (1, 2, 5)
#The entries are formatted (remoteTaskID, remoteLocalNodeID, localNodeID)
#//initializationnode end
#//inserttuple begin
newRemoteTaskID = 4
newLocalNodeID = 7
newRemoteLocalNodeID = 3
exampleMap.insert(newRemoteTaskID, newLocalNodeID, newRemoteLocalNodeID)
#This inserts an entry of (4, 7, 3) into the map
#//inserttuple end
#//setMap begin
newTaskMap = XdmfMapMap()
newNodeIdMap = XdmfMapNodeIdMap()
newNodeIdMap[2] = (3, 6, 8)
newNodeIdMap[3] = (3,)
newNodeIdMap[4] = (7,9)
#newNodeIdMap now contains the following
#(2, 3)
#(2, 6)
#(2, 8)
#(3, 3)
#(4, 7)
#(4, 9)
secondNodeIdMap = XdmfMapNodeIdMap()
secondNodeIdMap[5] = (3, 6, 8)
secondNodeIdMap[7] = (3,)
secondNodeIdMap[9] = (7,9)
#secondNodeIdMap now contains the following
#(5, 3)
#(5, 6)
#(5, 8)
#(7, 3)
#(9, 7)
#(9, 9)
newTaskMap[1] = newNodeIdMap
newTaskMap[2] = secondNodeIdMap
exampleMap = XdmfMap.New()
exampleMap.setMap(newTaskMap)
#(1, 2, 3)
#(1, 2, 6)
#(1, 2, 8)
#(1, 3, 3)
#(1, 4, 7)
#(1, 4, 9)
#(2, 5, 3)
#(2, 5, 6)
#(2, 5, 8)
#(2, 7, 3)
#(2, 9, 7)
#(2, 9, 9)
#Is now stored in exampleMap
#//setMap end
#//setName begin
newName = "New Name"
exampleMap.setName(newName)
#//setName end
#//getName begin
exampleName = exampleMap.getName()
#//getName end
#//getMap begin
#Assuming that exampleMap is a shared pointer to an XdmfMap object filled with the following tuples
#(1, 1, 9)
#(1, 2, 8)
#(2, 3, 7)
#(2, 4, 6)
#(3, 5, 5)
#(3, 6, 4)
taskIDMap = exampleMap.getMap()
i = 0
for val in taskIDMap:
print val
i = i + 1
if i == taskIDMap.size():
break
#This prints out all the task IDs
#unless the break is called on the last iteration the program will fail because of an issue with SWIG
nodeIDMap = taskIDMap[1]
#nodeIDMap now contains the following tuples because it retrieved the tuples associated with taskID 1
#(1, 9)
#(2, 8)
i = 0
for val in nodeIDMap:
print val
i = i + 1
if i == nodeIDMap.size():
break
#This prints out all the local node IDs
#unless the break is called on the last iteration the program will fail because of an issue with SWIG
for val in nodeIDMap[1]:
print val
#prints out all the remote node values associated with taskID 1 and localNode 1
#//getMap end
#//getRemoteNodeIds begin
nodeIDMap = exampleMap.getRemoteNodeIds(1)
#nodeIDMap now contains the following tuples because it retrieved the tuples associated with taskID 1
#(1, 9)
#(2, 8)
i = 0
for val in nodeIDMap:
print val
i = i + 1
if i == nodeIDMap.size():
break
#This prints out all the local node IDs
#unless the break is called on the last iteration the program will fail because of an issue with SWIG
for val in nodeIDMap[1]:
print val
#prints out all the remote node values associated with taskID 1 and localNode 1
#//getRemoteNodeIds end
#//isInitialized begin
if not(exampleMap.isInitialized()):
exampleMap.read()
#//isInitialized end
#//release begin
exampleMap.release()
#//release end
#//setHeavyDataControllers begin
hdf5FilePath = "The HDF5 file path goes here"
hdf5SetPath = "The HDF5 set path goes here"
startIndex = 0#start at the beginning
readStride = 1#read all values
readNumber = 10#read 10 values
newRemoteTaskController = XdmfHDF5Controller.New(
hdf5FilePath, hdf5SetPath, XdmfArrayType.Int32(),
startIndex, readStride, readNumber)
hdf5FilePath = "The HDF5 file path for the local nodes goes here"
hdf5SetPath = "The HDF5 set path for the local nodes goes here"
newLocalNodeController = XdmfHDF5Controller.New(
hdf5FilePath, hdf5SetPath, XdmfArrayType.Int32(),
startIndex, readStride, readNumber)
hdf5FilePath = "The HDF5 file path for the remote local nodes goes here"
hdf5SetPath = "The HDF5 set path for the remote local nodes goes here"
newRemoteLocalNodeController = XdmfHDF5Controller.New(
hdf5FilePath, hdf5SetPath, XdmfArrayType.Int32(),
startIndex, readStride, readNumber)
exampleMap = XdmfMap.New()
exampleMap.setHeavyDataControllers(newRemoteTaskController, newLocalNodeController, newRemoteLocalNodeController)
#//setHeavyDataControllers end
| 32.747573 | 121 | 0.581085 | from Xdmf import *
if __name__ == "__main__":
#//initialization begin
exampleMap = XdmfMap.New()
#//initialization end
#//initializationnode begin
#create attributes for each task id
#the index of the node id in the attribute is the local node id
map1Attribute = XdmfAttribute.New()
map1Attribute.setName("Test Attribute")
map1Attribute.setType(XdmfAttributeType.Scalar())
map1Attribute.setCenter(XdmfAttributeCenter.Node())
map1Vals = [1,2,3,4,5,7,9]
map1Attribute.insertAsInt32(0, map1Vals)
map2Attribute = XdmfAttribute.New()
map2Attribute.setName("Test Attribute")
map2Attribute.setType(XdmfAttributeType.Scalar())
map2Attribute.setCenter(XdmfAttributeCenter.Node())
map2Vals = [9,8,7,4,3]
map2Attribute.insertAsInt32(0, map2Vals)
#insert the attributes into a vector
#the id of the attribute in the vector is equal to the task id
testVector = AttributeVector()
testVector.push_back(map1Attribute)
testVector.push_back(map2Attribute)
exampleMapVector = XdmfMap.New(testVector)
#returns a vector of maps that holds the equivalencies for the nodes provided
#for example if Attribute 1 had globalNodeID 3 at localNodeID 2
#and Attribute 3 had globalNodeID 3 at localNodeID 5
#then map 1 would have an entry of (3, 5, 2)
#and map 3 would have an entry of (1, 2, 5)
#The entries are formatted (remoteTaskID, remoteLocalNodeID, localNodeID)
#//initializationnode end
#//inserttuple begin
newRemoteTaskID = 4
newLocalNodeID = 7
newRemoteLocalNodeID = 3
exampleMap.insert(newRemoteTaskID, newLocalNodeID, newRemoteLocalNodeID)
#This inserts an entry of (4, 7, 3) into the map
#//inserttuple end
#//setMap begin
newTaskMap = XdmfMapMap()
newNodeIdMap = XdmfMapNodeIdMap()
newNodeIdMap[2] = (3, 6, 8)
newNodeIdMap[3] = (3,)
newNodeIdMap[4] = (7,9)
#newNodeIdMap now contains the following
#(2, 3)
#(2, 6)
#(2, 8)
#(3, 3)
#(4, 7)
#(4, 9)
secondNodeIdMap = XdmfMapNodeIdMap()
secondNodeIdMap[5] = (3, 6, 8)
secondNodeIdMap[7] = (3,)
secondNodeIdMap[9] = (7,9)
#secondNodeIdMap now contains the following
#(5, 3)
#(5, 6)
#(5, 8)
#(7, 3)
#(9, 7)
#(9, 9)
newTaskMap[1] = newNodeIdMap
newTaskMap[2] = secondNodeIdMap
exampleMap = XdmfMap.New()
exampleMap.setMap(newTaskMap)
#(1, 2, 3)
#(1, 2, 6)
#(1, 2, 8)
#(1, 3, 3)
#(1, 4, 7)
#(1, 4, 9)
#(2, 5, 3)
#(2, 5, 6)
#(2, 5, 8)
#(2, 7, 3)
#(2, 9, 7)
#(2, 9, 9)
#Is now stored in exampleMap
#//setMap end
#//setName begin
newName = "New Name"
exampleMap.setName(newName)
#//setName end
#//getName begin
exampleName = exampleMap.getName()
#//getName end
#//getMap begin
#Assuming that exampleMap is a shared pointer to an XdmfMap object filled with the following tuples
#(1, 1, 9)
#(1, 2, 8)
#(2, 3, 7)
#(2, 4, 6)
#(3, 5, 5)
#(3, 6, 4)
taskIDMap = exampleMap.getMap()
i = 0
for val in taskIDMap:
print val
i = i + 1
if i == taskIDMap.size():
break
#This prints out all the task IDs
#unless the break is called on the last iteration the program will fail because of an issue with SWIG
nodeIDMap = taskIDMap[1]
#nodeIDMap now contains the following tuples because it retrieved the tuples associated with taskID 1
#(1, 9)
#(2, 8)
i = 0
for val in nodeIDMap:
print val
i = i + 1
if i == nodeIDMap.size():
break
#This prints out all the local node IDs
#unless the break is called on the last iteration the program will fail because of an issue with SWIG
for val in nodeIDMap[1]:
print val
#prints out all the remote node values associated with taskID 1 and localNode 1
#//getMap end
#//getRemoteNodeIds begin
nodeIDMap = exampleMap.getRemoteNodeIds(1)
#nodeIDMap now contains the following tuples because it retrieved the tuples associated with taskID 1
#(1, 9)
#(2, 8)
i = 0
for val in nodeIDMap:
print val
i = i + 1
if i == nodeIDMap.size():
break
#This prints out all the local node IDs
#unless the break is called on the last iteration the program will fail because of an issue with SWIG
for val in nodeIDMap[1]:
print val
#prints out all the remote node values associated with taskID 1 and localNode 1
#//getRemoteNodeIds end
#//isInitialized begin
if not(exampleMap.isInitialized()):
exampleMap.read()
#//isInitialized end
#//release begin
exampleMap.release()
#//release end
#//setHeavyDataControllers begin
hdf5FilePath = "The HDF5 file path goes here"
hdf5SetPath = "The HDF5 set path goes here"
startIndex = 0#start at the beginning
readStride = 1#read all values
readNumber = 10#read 10 values
newRemoteTaskController = XdmfHDF5Controller.New(
hdf5FilePath, hdf5SetPath, XdmfArrayType.Int32(),
startIndex, readStride, readNumber)
hdf5FilePath = "The HDF5 file path for the local nodes goes here"
hdf5SetPath = "The HDF5 set path for the local nodes goes here"
newLocalNodeController = XdmfHDF5Controller.New(
hdf5FilePath, hdf5SetPath, XdmfArrayType.Int32(),
startIndex, readStride, readNumber)
hdf5FilePath = "The HDF5 file path for the remote local nodes goes here"
hdf5SetPath = "The HDF5 set path for the remote local nodes goes here"
newRemoteLocalNodeController = XdmfHDF5Controller.New(
hdf5FilePath, hdf5SetPath, XdmfArrayType.Int32(),
startIndex, readStride, readNumber)
exampleMap = XdmfMap.New()
exampleMap.setHeavyDataControllers(newRemoteTaskController, newLocalNodeController, newRemoteLocalNodeController)
#//setHeavyDataControllers end
| 0 | 0 | 0 |
6996451be4ca13631306c9b573c5f22692758516 | 68 | py | Python | examples/python/gpss/gpss_server.py | xueyubingsen/grpc | b156ebaa24a015402f588b235b79112dc230f6dd | [
"Apache-2.0"
] | null | null | null | examples/python/gpss/gpss_server.py | xueyubingsen/grpc | b156ebaa24a015402f588b235b79112dc230f6dd | [
"Apache-2.0"
] | null | null | null | examples/python/gpss/gpss_server.py | xueyubingsen/grpc | b156ebaa24a015402f588b235b79112dc230f6dd | [
"Apache-2.0"
] | null | null | null | import gpss_pb2_grpc
| 13.6 | 39 | 0.823529 | import gpss_pb2_grpc
class Gpss(gpss_pb2_grpc.GpssServicer):
def
| 0 | 24 | 23 |
2d6fb01b6e6bd5b2b3104188d493377d8ad5da52 | 1,203 | py | Python | functions/shared.py | Lynxtickler/rest-api-terraform | d8b8125de80228a4bee2d8ad4ff7593b77eff121 | [
"MIT"
] | null | null | null | functions/shared.py | Lynxtickler/rest-api-terraform | d8b8125de80228a4bee2d8ad4ff7593b77eff121 | [
"MIT"
] | 4 | 2021-10-16T06:37:28.000Z | 2022-01-05T19:49:44.000Z | functions/shared.py | Lynxtickler/rest-api-terraform | d8b8125de80228a4bee2d8ad4ff7593b77eff121 | [
"MIT"
] | 1 | 2021-11-22T14:24:17.000Z | 2021-11-22T14:24:17.000Z | import json
import traceback
import boto3
from boto3.dynamodb.conditions import Attr
TABLE_NAME = 'Quotes'
DAILY_RESOURCE_NAME = 'daily'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(TABLE_NAME)
| 27.340909 | 81 | 0.677473 | import json
import traceback
import boto3
from boto3.dynamodb.conditions import Attr
TABLE_NAME = 'Quotes'
DAILY_RESOURCE_NAME = 'daily'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(TABLE_NAME)
def response(code=200, headers=None, body='', encode=False):
    """Build an API-Gateway-style response dict.

    The body is always JSON-encoded; headers default to a JSON
    content type when none are supplied.
    """
    if not headers:
        headers = {'Content-Type': 'application/json'}
    return {
        'statusCode': code,
        'headers': headers,
        'body': json.dumps(body),
        'isBase64Encoded': encode,
    }
def create_error(code=400, message='Something went wrong.'):
    """Build an error response whose JSON body carries the code and message."""
    error_body = {'code': code, 'message': message}
    return response(code, body=error_body)
def check_item_exists(item_id):
    """Return True when the Quotes table already holds an item with *item_id*."""
    lookup_result = table.get_item(Key={'ID': item_id})
    return 'Item' in lookup_result.keys()
def update_item(item_id, item, called_by_daily=False):
    """Write *item* to the table under *item_id* and return a 201 response.

    The reserved 'daily' id may only be written by the daily job itself
    (``called_by_daily=True``); any other caller gets a 409 error.
    *item* may be a dict or a JSON string.
    """
    if item_id == DAILY_RESOURCE_NAME and not called_by_daily:
        return create_error(409, 'Resource is reserved.')
    item_json = item if isinstance(item, dict) else json.loads(item)
    item_json['ID'] = item_id
    table.put_item(Item=item_json)
    return response(code=201, body={'message': 'Resource updated successfully.'})
| 895 | 0 | 92 |
2647696ce205cc1642aeaa417b1a7a5955aaec8e | 4,474 | py | Python | Carteira.py | racoba/AppCarteiraPY | a95749ccbf453b40ecaccedba090b78a922cddc5 | [
"MIT"
] | null | null | null | Carteira.py | racoba/AppCarteiraPY | a95749ccbf453b40ecaccedba090b78a922cddc5 | [
"MIT"
] | null | null | null | Carteira.py | racoba/AppCarteiraPY | a95749ccbf453b40ecaccedba090b78a922cddc5 | [
"MIT"
] | null | null | null | import numpy as np;
# Interactive console menu for tracking a personal stock portfolio.
# Holdings, cash positions and monthly history come from the local
# `dadosCart` module (imported as `c`); prices are fetched via yfinance.
import pandas as pd
import matplotlib.pyplot as plt;
from pandas_datareader import data as web
import yfinance as yf
yf.pdr_override()  # route pandas_datareader downloads through yfinance
import os
import dadosCart as c
# Running totals used by the live-tracking option (2); NOTE(review): they
# accumulate across menu visits and are never reset.
somaPal=0   # sum of latest fetched closing prices
somaEu=0    # sum of average acquisition prices
rentTotal=0
decisao=1
# Main menu loop; option 5 exits.
while decisao!=5:
    os.system('cls')  # NOTE: 'cls' clears the screen on Windows only
    decisao = int(input('Digite o que você deseja fazer:\n1-Calcular carteira\n2-Acompanhamento ao vivo\n3-Evolução mensal da carteira\n4-Análise individual de ativos da carteira\n5-Sair\n'))
    os.system('cls')
    if decisao==1:
        # Option 1: allocation summary (stocks, FIIs, BDRs, cash) + pie chart.
        total = c.valorAcoes + c.valorFI + c.valorBDR + c.cartCaixa
        print(
            f"""O valor da sua carteira é de R${total} e ela atualmente está distribuida da seguinte forma:\n
            Ações = {c.valorAcoes*100/total:.2f}% = R${c.valorAcoes}\n
            Fundos Imobiliários = {c.valorFI*100/total:.2f}% = R${c.valorFI}\n
            Internacional = {(c.valorBDR)*100/total:.2f}% = R${c.valorBDR}\n
            Caixa = {c.cartCaixa*100/total:.2f}% = R${c.cartCaixa}\n\n
            Além disso também possui R${c.cartCrypto} em Cryptomoedas\n"""
            )
        y=np.array([int(c.valorAcoes*100/total),int(c.valorFI*100/total),int((c.valorBDR)*100/total),int(c.cartCaixa*100/total)]);
        mylabels = ['Ação','FII','Internacional','Caixa'];
        myexplode = [0,0,0,0];
        plt.pie(y,labels=mylabels,explode=myexplode,shadow=True);
        plt.show();
    elif decisao==2:
        # Option 2: live tracking — fetch today's adjusted close for each
        # holding and report per-asset and overall profitability.
        decisao2=1
        while (decisao2):
            decisao2 = int(input("Digite o que você deseja ver:\n1-Carteira Completa\n2-Ações\n3-FII\n4-BDR\n5-Sair\n"))
            # Each carteira entry is [ticker, quantity, average price]
            # (per the f-string labels printed below).
            if decisao2==1: ativo = c.cartAcao + c.cartFI + c.cartBDR
            elif decisao2==2: ativo = c.cartAcao
            elif decisao2==3: ativo = c.cartFI
            elif decisao2==4: ativo = c.cartBDR
            elif decisao2==5:
                os.system('cls')
                break
            os.system('cls')
            for x in range(len(ativo)):
                df = web.get_data_yahoo(ativo[x][0], start=c.today)["Adj Close"].to_frame()
                # HACK: the latest price is parsed out of the DataFrame's
                # string repr (the text after the last space) — fragile; a
                # positional lookup (e.g. iloc) would be more robust.
                mostra = str(df) + '@'
                for y in range(mostra.index('@'),0,-1):
                    if mostra[y]==' ':
                        inicioNum=y+1
                        break
                pal = float(mostra[inicioNum:mostra.index('@')])
                somaPal += pal
                somaEu += ativo[x][2]
                print(f'\n{ativo[x][0].replace(".SA","")} {c.today[8:10]}/{c.today[5:7]}\nValor de fechamento do dia: {pal:.2f}\nPreço Médio de Aquisição: {ativo[x][2]}\nValor Investido: {ativo[x][2]*ativo[x][1]:.2f}\nValor Atual em Carteira: {(pal*ativo[x][1]):.2f}')
                # Profit ratio vs. average acquisition price; ANSI escapes
                # colour the line green (gain), red (loss) or white (flat).
                rent = (pal)/ativo[x][2]
                if rent > 1:
                    print(f'Rentabilidade: \033[1;32m+{((rent-1)*100):.2f}%\033[0;0m\n')
                elif rent < 1:
                    print(f'Rentabilidade: \033[1;31m{((rent-1)*100):.2f}%\033[0;0m\n')
                elif rent ==1:
                    print(f'Rentabilidade: \033[1;37m{((rent-1)*100):.2f}%\033[0;0m\n')
                print('-'*80+'\n')
            print('Rentabilidade atual: ')
            # Overall ratio of summed current prices to summed average prices.
            rentTotal = somaPal/somaEu
            if rentTotal > 1:
                print(f'\033[1;32m+{((rentTotal-1)*100):.2f}%\033[0;0m\n')
            elif rentTotal < 1:
                print(f'\033[1;31m{((rentTotal-1)*100):.2f}%\033[0;0m\n')
            elif rentTotal ==1 :
                print(f'\033[1;37m{((rentTotal-1)*100):.2f}%\033[0;0m\n')
    elif decisao==3:
        # Option 3: line chart of the portfolio's month-by-month value.
        plt.plot(c.meses,c.valorCartMensal,'r-',)
        plt.title('Evolução Mensal da Carteira')
        plt.show()
    elif decisao==4:
        # Option 4: plot the full closing-price history of one chosen asset.
        decisao2=1
        while (decisao2):
            decisao2 = int(input("Digite o que você deseja ver:\n1-Ações\n2-FII\n3-BDR\n4-Sair\n"))
            if decisao2==1: ativo = c.cartAcao
            elif decisao2==2: ativo = c.cartFI
            elif decisao2==3: ativo = c.cartBDR
            elif decisao2==4:
                os.system('cls')
                break
            print("Qual ativo você deseja análisar?\n")
            for x in range(len(ativo)):
                print(f'{x+1}-{ativo[x][0].replace(".SA","")}\n')
            menu = int(input())
            df = web.get_data_yahoo(ativo[menu-1][0])["Close"]
            df.plot()
            plt.title("Histórico de fechamento (R$)")
            plt.show()
| 38.568966 | 268 | 0.524139 | import numpy as np;
# Interactive console menu for tracking a personal stock portfolio.
# Holdings, cash positions and monthly history come from the local
# `dadosCart` module (imported as `c`); prices are fetched via yfinance.
import pandas as pd
import matplotlib.pyplot as plt;
from pandas_datareader import data as web
import yfinance as yf
yf.pdr_override()  # route pandas_datareader downloads through yfinance
import os
import dadosCart as c
# Running totals used by the live-tracking option (2); NOTE(review): they
# accumulate across menu visits and are never reset.
somaPal=0   # sum of latest fetched closing prices
somaEu=0    # sum of average acquisition prices
rentTotal=0
decisao=1
# Main menu loop; option 5 exits.
while decisao!=5:
    os.system('cls')  # NOTE: 'cls' clears the screen on Windows only
    decisao = int(input('Digite o que você deseja fazer:\n1-Calcular carteira\n2-Acompanhamento ao vivo\n3-Evolução mensal da carteira\n4-Análise individual de ativos da carteira\n5-Sair\n'))
    os.system('cls')
    if decisao==1:
        # Option 1: allocation summary (stocks, FIIs, BDRs, cash) + pie chart.
        total = c.valorAcoes + c.valorFI + c.valorBDR + c.cartCaixa
        print(
            f"""O valor da sua carteira é de R${total} e ela atualmente está distribuida da seguinte forma:\n
            Ações = {c.valorAcoes*100/total:.2f}% = R${c.valorAcoes}\n
            Fundos Imobiliários = {c.valorFI*100/total:.2f}% = R${c.valorFI}\n
            Internacional = {(c.valorBDR)*100/total:.2f}% = R${c.valorBDR}\n
            Caixa = {c.cartCaixa*100/total:.2f}% = R${c.cartCaixa}\n\n
            Além disso também possui R${c.cartCrypto} em Cryptomoedas\n"""
            )
        y=np.array([int(c.valorAcoes*100/total),int(c.valorFI*100/total),int((c.valorBDR)*100/total),int(c.cartCaixa*100/total)]);
        mylabels = ['Ação','FII','Internacional','Caixa'];
        myexplode = [0,0,0,0];
        plt.pie(y,labels=mylabels,explode=myexplode,shadow=True);
        plt.show();
    elif decisao==2:
        # Option 2: live tracking — fetch today's adjusted close for each
        # holding and report per-asset and overall profitability.
        decisao2=1
        while (decisao2):
            decisao2 = int(input("Digite o que você deseja ver:\n1-Carteira Completa\n2-Ações\n3-FII\n4-BDR\n5-Sair\n"))
            # Each carteira entry is [ticker, quantity, average price]
            # (per the f-string labels printed below).
            if decisao2==1: ativo = c.cartAcao + c.cartFI + c.cartBDR
            elif decisao2==2: ativo = c.cartAcao
            elif decisao2==3: ativo = c.cartFI
            elif decisao2==4: ativo = c.cartBDR
            elif decisao2==5:
                os.system('cls')
                break
            os.system('cls')
            for x in range(len(ativo)):
                df = web.get_data_yahoo(ativo[x][0], start=c.today)["Adj Close"].to_frame()
                # HACK: the latest price is parsed out of the DataFrame's
                # string repr (the text after the last space) — fragile; a
                # positional lookup (e.g. iloc) would be more robust.
                mostra = str(df) + '@'
                for y in range(mostra.index('@'),0,-1):
                    if mostra[y]==' ':
                        inicioNum=y+1
                        break
                pal = float(mostra[inicioNum:mostra.index('@')])
                somaPal += pal
                somaEu += ativo[x][2]
                print(f'\n{ativo[x][0].replace(".SA","")} {c.today[8:10]}/{c.today[5:7]}\nValor de fechamento do dia: {pal:.2f}\nPreço Médio de Aquisição: {ativo[x][2]}\nValor Investido: {ativo[x][2]*ativo[x][1]:.2f}\nValor Atual em Carteira: {(pal*ativo[x][1]):.2f}')
                # Profit ratio vs. average acquisition price; ANSI escapes
                # colour the line green (gain), red (loss) or white (flat).
                rent = (pal)/ativo[x][2]
                if rent > 1:
                    print(f'Rentabilidade: \033[1;32m+{((rent-1)*100):.2f}%\033[0;0m\n')
                elif rent < 1:
                    print(f'Rentabilidade: \033[1;31m{((rent-1)*100):.2f}%\033[0;0m\n')
                elif rent ==1:
                    print(f'Rentabilidade: \033[1;37m{((rent-1)*100):.2f}%\033[0;0m\n')
                print('-'*80+'\n')
            print('Rentabilidade atual: ')
            # Overall ratio of summed current prices to summed average prices.
            rentTotal = somaPal/somaEu
            if rentTotal > 1:
                print(f'\033[1;32m+{((rentTotal-1)*100):.2f}%\033[0;0m\n')
            elif rentTotal < 1:
                print(f'\033[1;31m{((rentTotal-1)*100):.2f}%\033[0;0m\n')
            elif rentTotal ==1 :
                print(f'\033[1;37m{((rentTotal-1)*100):.2f}%\033[0;0m\n')
    elif decisao==3:
        # Option 3: line chart of the portfolio's month-by-month value.
        plt.plot(c.meses,c.valorCartMensal,'r-',)
        plt.title('Evolução Mensal da Carteira')
        plt.show()
    elif decisao==4:
        # Option 4: plot the full closing-price history of one chosen asset.
        decisao2=1
        while (decisao2):
            decisao2 = int(input("Digite o que você deseja ver:\n1-Ações\n2-FII\n3-BDR\n4-Sair\n"))
            if decisao2==1: ativo = c.cartAcao
            elif decisao2==2: ativo = c.cartFI
            elif decisao2==3: ativo = c.cartBDR
            elif decisao2==4:
                os.system('cls')
                break
            print("Qual ativo você deseja análisar?\n")
            for x in range(len(ativo)):
                print(f'{x+1}-{ativo[x][0].replace(".SA","")}\n')
            menu = int(input())
            df = web.get_data_yahoo(ativo[menu-1][0])["Close"]
            df.plot()
            plt.title("Histórico de fechamento (R$)")
            plt.show()
| 0 | 0 | 0 |
5cd05fd5f965eaf7a1a4a0b1f9bcc7674c613cb6 | 136 | py | Python | Ago-Dic-2019/JOSE ONOFRE/PRACTICAS/Practica1/Names.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2019/JOSE ONOFRE/PRACTICAS/Practica1/Names.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2019/JOSE ONOFRE/PRACTICAS/Practica1/Names.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | names = ['Juan','Pedro','Alejandro','Roberto','Enrique']
# Echo the first four names, one per line.
for pos in range(4):
    print(names[pos])
print(names[4]) | 22.666667 | 56 | 0.676471 | names = ['Juan','Pedro','Alejandro','Roberto','Enrique']
# Echo the first four names, one per line.
for pos in range(4):
    print(names[pos])
print(names[4]) | 0 | 0 | 0 |