import pandas as pd
from Models import User
from Models import Results
from files_lib import SHA256, file_to_dict
from main import db, Submission
def add_test_flag():
r = Results(subject_type='Forensic',
task='Zad1',
flag=SHA256('CTF'))
db.session.add(r)
r = Results(subject_type='Forensic',
task='Zad2',
flag=SHA256('CTF2'))
db.session.add(r)
db.session.commit()
def insert_tasks_from_json():
""""""
data = file_to_dict(r'D:\PythonProjects\config_local.json')
tasks = data['tasks']
for subject_type in data['tasks']:
for task in data['tasks'][subject_type]:
HASH = SHA256(data['tasks'][subject_type][task])
the_task = Results.query.filter_by(
subject_type=subject_type,
task=task,
flag=HASH
).first()
if the_task is None:
                print(f'Task {task} for subject type {subject_type} does not exist, inserting into database')
t = Results(
subject_type=subject_type,
task=task,
flag=HASH,
points=5
)
db.session.add(t)
db.session.commit()
else:
print(f'Task {task} for subject type {subject_type} already exists, skipping')
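
# Hedged sketch of the JSON layout that insert_tasks_from_json() assumes (the real
# config_local.json may differ): tasks are grouped by subject type, and each task name
# maps to its plaintext flag before hashing, e.g.
#
# {
#     "tasks": {
#         "Forensic": {"Zad1": "CTF", "Zad2": "CTF2"}
#     }
# }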
def add_users():
"""add users from csv file"""
df = pd.read_csv(r'D:\PythonProjects\Kwiatki.csv',sep=';')
#print(df.head())
#print(User.query.delete())
for i, row in df.iterrows():
user = User.query.filter_by(username=row.username).first()
if user is None:
print(f'User {row.username} does not exist, creating')
u = User(
username=row.username,
password=SHA256(row.password),
faction=row.faction,
apikey=row.apikey
)
db.session.add(u)
db.session.commit()
else:
print(f'User {row.username} already exists, skipping')
def add_test_users():
for i in range(1,6):
username = 'test_user_{}'.format(i)
        password = 'test_user_PASS'
user = User.query.filter_by(username=username).first()
if user is None:
print(f'User {username} does not exist, creating')
u = User(
username=username,
password=SHA256(password),
faction='STOKROTKA',
apikey='no apikey for test users'
)
db.session.add(u)
db.session.commit()
else:
print(f'User {username} already exists, skipping')
def remove_test_users(db):
for i in range(1, 6):
username = 'test_user_{}'.format(i)
user = User.query.filter_by(username=username).first()
if user is None:
print(f'Test user {username} does not exist, skipping')
else:
            print(f'Test user {username} exists, removing from database')
            User.query.filter_by(username=username).delete()
db.session.commit()
add_test_users()
|
from nltk.tokenize import word_tokenize
import numpy as np
class windowed_data():
def __init__(self, sentences, labels, wlen, batchsize, encoder):
self.sentences = sentences
self.labels = labels
self.wlen = wlen
self.batchsize = batchsize
self.encoder = encoder
authors = set(labels)
self.key = {k: v for k, v in zip(authors, range(len(authors)))}
self.senbatchqueue = []
self.labbatchqueue = []
self.generator = self.get_next_sentence_gen_()
    def get_next_sentence_gen_(self):
        for s, l in zip(self.sentences, self.labels):
            yield self.tokenize_(s), l
        # When the sentences are exhausted the generator simply ends; raising
        # StopIteration explicitly would become a RuntimeError under PEP 479.
@staticmethod
def tokenize_(sen):
return word_tokenize(sen.lower())
    def __call__(self):
        # Fill the queue until at least one full batch of windows is available.
        while len(self.senbatchqueue) < self.batchsize:
            sen, lab = next(self.generator)
            encs = [self.encoder[t] for t in sen]
            for i in range(len(encs) + 1 - self.wlen):
                self.senbatchqueue.append(encs[i:i + self.wlen])
                self.labbatchqueue.append(lab)
        # Pop one batch of windows and the matching labels.
        senbatch = self.senbatchqueue[:self.batchsize]
        self.senbatchqueue = self.senbatchqueue[self.batchsize:]
        labbatch = self.labbatchqueue[:self.batchsize]
        self.labbatchqueue = self.labbatchqueue[self.batchsize:]
        return {'windows': np.array(senbatch)}, [self.key[l] for l in labbatch]
a = None
if __name__ == '__main__':
sens = ['a b a a b b c c a', 'a a a b c c a']
labs = ['d', 'e']
enc = {'a': 1, 'b': 2, 'c': 3}
a = windowed_data(sens, labs, 3, 5, enc)
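    # Hedged usage sketch: each call to the instance yields one batch of windowed,
    # integer-encoded tokens plus the matching author-label indices.
    batch_x, batch_y = a()
    print(batch_x['windows'].shape)  # (5, 3): batchsize windows of length wlen
    print(batch_y)                   # five label indices taken from a.key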
|
# import libraries
from math import pi, sin
import matplotlib.pyplot as plt
from openmdao.core.component import Component
class TubeCost(Component):
def __init__(self):
super(TubeCost, self).__init__()
self.add_param('cmt', 1.2, desc='cost of materials', units='1/kg')
self.add_param('cship', 1.e11, desc='cost to ship and crew', units='')
self.add_param('cpod', 1.e6, desc='cost of a pod', units='')
self.add_param('bm', 20., desc='bond maturity')
        self.add_param('ib', 0.06, desc='bond interest rate', units='')
self.add_param('stress', 2.0e8, desc='stress of steel', units='Pa')
self.add_param('sf', 5.0, desc='safety factor', units='')
self.add_param('depth', 10., desc='depth of tube', units='m')
self.add_param('density',
8050.0,
desc='density of steel',
units='kg/m**3')
self.add_param('g', 9.81, desc='gravity', units='m/s**2')
self.add_param('radius', 4.0, desc='tube radius', units='m')
self.add_param('pod_freq',
30.,
desc='seconds between departures',
units='s')
self.add_param('range', 4.1e6, desc='length of tube', units='m')
self.add_param('npax', 25., desc='passengers per pod', units='')
        self.add_param('speed', 270., desc='pod speed', units='m/s')
self.add_output('tube_weight',
0.0,
desc='tube weight per meter',
units='kg/m')
        self.add_output('po_tube',
                        0.0,
                        desc='pressure on the tube',
                        units='Pa')
self.add_output('tube_thick', 0.0, desc='tube thickness', units='m')
self.add_output('ct', 0.0, desc='tube cost per meter', units='1/m')
self.add_output('cttot', 0.0, desc='total tube cost', units='')
self.add_output('npod',
0.0,
desc='number of pods in the tube',
units='')
self.add_output('ctick', 0.0, desc='ticket cost', units='1/m')
def solve_nonlinear(self, p, u, r):
u['po_tube'] = 101000 + p['depth'] * p['g'] * 1000.
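        # Thin-walled pressure-vessel sizing: wall thickness t = sf * p * r / (2 * stress).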
u['tube_thick'] = p['sf'] * u['po_tube'] * p['radius'] / (2 *
p['stress'])
u['tube_weight'] = p['density'] * pi * u['tube_thick'] * (
2 * p['radius'] - u['tube_thick'])
u['ct'] = p['cmt'] * u['tube_weight']
u['cttot'] = u['ct'] * p['range']
u['npod'] = (p['range'] / p['speed']) / p['pod_freq']
u['ctick'] = ((u['ct']*p['range'] + p['cpod']*u['npod'] + p['cship'])*(1+p['ib'])) \
/ (p['npax']/p['pod_freq'])/p['bm']/365./24./3600.
if __name__ == '__main__':
from openmdao.core.problem import Problem
from openmdao.core.group import Group
p = Problem(root=Group())
p.root.add('cost', TubeCost())
p.setup()
p.run()
# save variable sweeps in arrays
cost_array = []
cx = []
cost_array2 = []
cx2 = []
print(p['cost.po_tube'])
print(p['cost.tube_thick'])
print(p['cost.ct'])
print(p['cost.npod'])
    for i in range(1, 100):
p['cost.pod_freq'] = i
p.run()
cost_array.append(p['cost.ctick'])
cx.append(i)
    for i in range(10, 35):
p['cost.pod_freq'] = 30.
p['cost.stress'] = i * 1.e7
p.run()
cost_array2.append(p['cost.ctick'])
cx2.append(i * 1.e7)
# plot variable sweeps
fig = plt.figure()
a1 = fig.add_subplot(211)
a1.plot(cx, cost_array)
plt.xlabel('seconds between departures')
plt.ylabel('ticket cost')
a2 = fig.add_subplot(212)
a2.plot(cx2, cost_array2)
plt.xlabel('steel strength (Pa)')
plt.ylabel('ticket price')
plt.show()
# Tom Gregory
# Cost of steel per ton is about USD 777, and fabrication +
# erection cost is about USD 266. But this is for industrial applications up to
# 14 meters in height. This may be higher for high-rise buildings, but fabrication
# and erection costs generally do not exceed the supply cost.
# Supply AUD 2,500/tonne (~USD 1,821/ton)
# Shop Detailing AUD 500/tonne (~USD 364/ton)
# Fabrication AUD 3,000/tonne (~USD 2,185/ton)
# Transport AUD 150/tonne (~USD 109/ton)
# Erection Labour AUD 2,400/tonne (~USD 1,748/ton)
# Erection Plant AUD 1,200/tonne (~USD 874/ton)
# TOTAL AUD 9,750/tonne (~USD 5,339/ton)
# Steel:
# Another reference
# Tube cost = supply + transport + stir welding + erection
# cmtl = 700 + 150 + 140 +200 = 1200 $/(1000 kg)
# ship cost: w friction welding, handling,
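# Note on the 'cmt' default above: the quoted tube cost of roughly 1200 $/tonne
# works out to 1200 / 1000 = 1.2 $/kg, which matches the default 'cmt' parameter.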
|
from dataclasses import dataclass, field
from typing import Optional
from bindings.gmd.character_string_property_type import CharacterStringPropertyType
from bindings.gmd.md_classification_code_property_type import (
MdClassificationCodePropertyType,
)
from bindings.gmd.md_constraints_type import MdConstraintsType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class MdSecurityConstraintsType(MdConstraintsType):
"""
Handling restrictions imposed on the dataset because of national security,
privacy, or other concerns.
"""
class Meta:
name = "MD_SecurityConstraints_Type"
classification: Optional[MdClassificationCodePropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
"required": True,
},
)
user_note: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "userNote",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
classification_system: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "classificationSystem",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
handling_description: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "handlingDescription",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
|
import json
# Create your models here.
class TrafficReport(object):
"""A representation of a traffic report from http://www.internettrafficreport.com/details.htm as a python object. Generically,
speaking, it is a immutable wrapper around a list that only supports ReportEntry instances.
"""
def __init__(self):
self._entries = []
def __get_entries(self):
"""Calculates the 'entries' property."""
return self._entries
def ___get_entries(self):
"""Indirect accessor for 'entries' property."""
return self.__get_entries()
def __set_entries(self, entries):
"""Sets the 'entries' property."""
self._entries = entries
def ___set_entries(self, entries):
"""Indirect setter for 'entries' property."""
self.__set_entries(entries)
entries = property(___get_entries, ___set_entries,
doc="""Gets or sets the entries of the traffic report.""")
def __str__(self):
"""
Converts the report to a JSON string representation of the data. Here is an example:
[
{
"router": "misschaos.chaos-studio.com",
"location": "China (Shanghai)",
"index": 0,
"response_time": 0,
"packet_loss": 100,
"continent": "Asia"
},
{
"router": "cisco.syssrc.com",
"location": "Maryland",
"index": 88,
"response_time": 112,
"packet_loss": 0,
"continent": "North America"
},
etc ...
]
It does this by converting each ReportEntry instance to its entry.__dict__ and adding it to a list. This list of
primitives is then converted to json.
"""
primitives = []
for entry in self.entries:
primitives.append(entry.__dict__)
return json.dumps(primitives)
class ReportEntry(object):
"""An individual Entry used in Traffic Report. It consists of router, location, index, response time, packet loss, and
continent attributes which make up a single entry for a site in the report.
"""
def __init__(self):
self._router = None
self._location = None
self._index = None
self._response_time = None
self._packet_loss = None
self._continent = None
    @classmethod
    def create(cls, **kwargs):
        retval = cls()
        for prop in kwargs:
            attr = '_' + prop
            if hasattr(retval, attr):
                setattr(retval, attr, kwargs[prop])
            else:
                raise AttributeError(attr)
        return retval
@classmethod
def Builder(cls):
return ReportEntryBuilder()
def __get_router(self):
"""Calculates the 'router' property."""
return self._router
def ___get_router(self):
"""Indirect accessor for 'router' property."""
return self.__get_router()
def __set_router(self, router):
"""Sets the 'router' property."""
self._router = router
def ___set_router(self, router):
"""Indirect setter for 'router' property."""
self.__set_router(router)
router = property(___get_router, ___set_router,
doc="""Gets or sets the router of the report entry.""")
def __get_location(self):
"""Calculates the 'location' property."""
return self._location
def ___get_location(self):
"""Indirect accessor for 'location' property."""
return self.__get_location()
def __set_location(self, location):
"""Sets the 'location' property."""
self._location = location
def ___set_location(self, location):
"""Indirect setter for 'location' property."""
self.__set_location(location)
location = property(___get_location, ___set_location,
doc="""Gets or sets the location of the report entry.""")
def __get_index(self):
"""Calculates the 'index' property."""
return self._index
def ___get_index(self):
"""Indirect accessor for 'index' property."""
return self.__get_index()
def __set_index(self, index):
"""Sets the 'index' property."""
self._index = index
def ___set_index(self, index):
"""Indirect setter for 'index' property."""
self.__set_index(index)
index = property(___get_index, ___set_index,
doc="""Gets or sets the index of the report entry.""")
def __get_response_time(self):
"""Calculates the 'response_time' property."""
return self._response_time
def ___get_response_time(self):
"""Indirect accessor for 'response_time' property."""
return self.__get_response_time()
def __set_response_time(self, response_time):
"""Sets the 'response_time' property."""
self._response_time = response_time
def ___set_response_time(self, response_time):
"""Indirect setter for 'response_time' property."""
self.__set_response_time(response_time)
response_time = property(___get_response_time, ___set_response_time,
doc="""Gets or sets the response_time of the report entry.""")
def __get_packet_loss(self):
"""Calculates the 'packet_loss' property."""
return self._packet_loss
def ___get_packet_loss(self):
"""Indirect accessor for 'packet_loss' property."""
return self.__get_packet_loss()
def __set_packet_loss(self, packet_loss):
"""Sets the 'packet_loss' property."""
self._packet_loss = packet_loss
def ___set_packet_loss(self, packet_loss):
"""Indirect setter for 'packet_loss' property."""
self.__set_packet_loss(packet_loss)
packet_loss = property(___get_packet_loss, ___set_packet_loss,
doc="""Gets or sets the packet_loss of the report entry.""")
def __get_continent(self):
"""Calculates the 'continent' property."""
return self._continent
def ___get_continent(self):
"""Indirect accessor for 'continent' property."""
return self.__get_continent()
def __set_continent(self, continent):
"""Sets the 'continent' property."""
self._continent = continent
def ___set_continent(self, continent):
"""Indirect setter for 'continent' property."""
self.__set_continent(continent)
continent = property(___get_continent, ___set_continent,
doc="""Gets or sets the continent of the report entry.""")
class ReportEntryBuilder(object):
def __init__(self):
self.router = None
self.location = None
self.index = None
self.response_time = None
self.packet_loss = None
self.continent = None
def with_router(self, router):
self.router = router
return self
def with_location(self, location):
self.location = location
return self
def with_index(self, index):
if index is not None:
self.index = int(index)
return self
def with_response_time(self, response_time):
if response_time is not None:
self.response_time = int(response_time)
return self
def with_packet_loss(self, packet_loss):
if packet_loss is not None:
self.packet_loss = int(packet_loss)
return self
def with_continent(self, continent):
self.continent = continent
return self
def build(self):
retval = ReportEntry.create( router = self.router,
location = self.location,
index = self.index,
response_time = self.response_time,
packet_loss = self.packet_loss,
continent = self.continent )
return retval
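
# Hedged usage sketch (not part of the original module): build one entry with the
# fluent builder, attach it to a report, and serialise it to JSON via __str__.
if __name__ == '__main__':
    demo_report = TrafficReport()
    demo_entry = (ReportEntry.Builder()
                  .with_router('cisco.syssrc.com')
                  .with_location('Maryland')
                  .with_index('88')
                  .with_response_time('112')
                  .with_packet_loss('0')
                  .with_continent('North America')
                  .build())
    demo_report.entries = [demo_entry]
    print(demo_report)  # JSON array with one entry (keys carry the private '_' prefix)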
|
import mock
from django.contrib import admin
from django.urls import reverse
from cms.models import PageContent
from cms.models.fields import PlaceholderRelationField
from djangocms_versioning import versionables
from djangocms_versioning.admin import VersionAdmin
from djangocms_versioning.constants import DRAFT, PUBLISHED
from djangocms_versioning.test_utils.factories import (
PageVersionFactory,
PlaceholderFactory,
)
from djangocms_moderation.monkeypatch import _is_placeholder_review_unlocked
from .utils.base import BaseTestCase, MockRequest
class VersionAdminMonkeypatchTestCase(BaseTestCase):
def setUp(self):
versionable = versionables.for_content(PageContent)
self.version_admin = VersionAdmin(
versionable.version_model_proxy, admin.AdminSite()
)
self.mock_request = MockRequest()
self.mock_request.user = self.user
super().setUp()
@mock.patch("djangocms_moderation.monkeypatch.is_obj_review_locked")
def test_get_edit_link(self, mock_is_obj_review_locked):
"""
VersionAdmin should call moderation's version of _get_edit_link
"""
pg1_version = PageVersionFactory(created_by=self.mock_request.user)
mock_is_obj_review_locked.return_value = True
edit_link = self.version_admin._get_edit_link(
pg1_version, self.mock_request, disabled=False
)
# We test that moderation check is called when getting an edit link
self.assertTrue(mock_is_obj_review_locked.called)
# Edit link is inactive as `mock_is_obj_review_locked` is True
self.assertIn("inactive", edit_link)
@mock.patch("djangocms_moderation.monkeypatch.is_registered_for_moderation")
@mock.patch("djangocms_moderation.monkeypatch.is_obj_review_locked")
def test_get_edit_link_not_moderation_registered(
self, mock_is_obj_review_locked, mock_is_registered_for_moderation
):
"""
VersionAdmin should *not* call moderation's version of _get_edit_link
"""
pg1_version = PageVersionFactory(created_by=self.mock_request.user)
mock_is_registered_for_moderation.return_value = False
mock_is_obj_review_locked.return_value = True
edit_link = self.version_admin._get_edit_link(
pg1_version, self.mock_request, disabled=False
)
# Edit link is not blanked out because moderation is not registered
self.assertTrue(mock_is_registered_for_moderation.called)
self.assertFalse(mock_is_obj_review_locked.called)
self.assertNotEqual(edit_link, "")
@mock.patch("djangocms_moderation.monkeypatch.is_obj_review_locked")
def test_get_archive_link(self, _mock):
"""
VersionAdmin should call moderation's version of _get_archive_link
"""
version = PageVersionFactory(state=DRAFT, created_by=self.user)
archive_url = reverse(
"admin:{app}_{model}version_archive".format(
app=version._meta.app_label, model=version.content._meta.model_name
),
args=(version.pk,),
)
_mock.return_value = True
archive_link = self.version_admin._get_archive_link(version, self.mock_request)
        # We test that the moderation check is called when getting an archive link
        self.assertEqual(1, _mock.call_count)
        # Archive link is inactive as `is_obj_review_locked` is True
        self.assertIn("inactive", archive_link)
self.assertNotIn(archive_url, archive_link)
_mock.return_value = None
archive_link = self.version_admin._get_archive_link(version, self.mock_request)
# We test that moderation check is called when getting the link
self.assertEqual(2, _mock.call_count)
        # Archive link is active as `get_active_moderation_request` is None
self.assertNotIn("inactive", archive_link)
self.assertIn(archive_url, archive_link)
def test_get_state_actions(self):
"""
        Make sure the publish action is not present and the moderation actions
        were added
"""
actions = self.version_admin.get_state_actions()
action_names = [action.__name__ for action in actions]
self.assertIn("_get_moderation_link", action_names)
self.assertNotIn("_get_publish_link", action_names)
def test_get_moderation_link(self):
link = self.version_admin._get_moderation_link(
self.pg1_version, self.mock_request
)
self.assertIn(
"In collection "{} ({})"".format(
self.collection1.name, self.collection1.id
),
link,
)
version = PageVersionFactory(state=PUBLISHED)
link = self.version_admin._get_moderation_link(version, self.mock_request)
self.assertEqual("", link)
draft_version = PageVersionFactory(created_by=self.user3)
        # The request user is self.user, so the moderation link won't be displayed.
        # This is the version lock in place.
link = self.version_admin._get_moderation_link(draft_version, self.mock_request)
self.assertEqual("", link)
draft_version = PageVersionFactory(created_by=self.mock_request.user)
# Now the version lock is lifted, so we should be able to add to moderation
link = self.version_admin._get_moderation_link(draft_version, self.mock_request)
self.assertIn("Submit for moderation", link)
@mock.patch("djangocms_moderation.monkeypatch.is_registered_for_moderation")
def test_get_moderation_link_when_not_registered(
self, mock_is_registered_for_moderation
):
mock_is_registered_for_moderation.return_value = False
link = self.version_admin._get_moderation_link(
self.pg1_version, self.mock_request
)
self.assertEqual("", link)
@mock.patch("djangocms_moderation.monkeypatch.is_registered_for_moderation", return_value=True)
def test_get_publish_link_when_registered(self, mock_is_registered_for_moderation):
link = self.version_admin._get_publish_link(self.pg1_version, self.mock_request)
self.assertEqual("", link)
@mock.patch("djangocms_moderation.monkeypatch.is_registered_for_moderation", return_value=False)
def test_get_publish_link_when_not_registered(self, mock_is_registered_for_moderation):
link = self.version_admin._get_publish_link(self.pg1_version, self.mock_request)
publish_url = reverse(
"admin:{app}_{model}version_publish".format(
app=self.pg1_version._meta.app_label,
model=self.pg1_version.content._meta.model_name,
),
args=(self.pg1_version.pk,),
)
self.assertNotEqual("", link)
self.assertIn(publish_url, link)
class PlaceholderChecksTestCase(BaseTestCase):
@mock.patch("djangocms_moderation.monkeypatch.is_registered_for_moderation")
@mock.patch("djangocms_moderation.monkeypatch.is_obj_review_locked")
def test_is_placeholder_review_unlocked(
        self, mock_is_obj_review_locked, mock_is_registered_for_moderation
):
"""
Check that the monkeypatch returns expected value
"""
version = PageVersionFactory()
placeholder = PlaceholderFactory.create(source=version.content)
mock_is_registered_for_moderation.return_value = True
mock_is_obj_review_locked.return_value = True
self.assertFalse(_is_placeholder_review_unlocked(placeholder, self.user))
mock_is_registered_for_moderation.return_value = True
mock_is_obj_review_locked.return_value = False
self.assertTrue(_is_placeholder_review_unlocked(placeholder, self.user))
mock_is_registered_for_moderation.return_value = False
mock_is_obj_review_locked.return_value = True
self.assertTrue(_is_placeholder_review_unlocked(placeholder, self.user))
def test_function_added_to_checks_framework(self):
"""
Check that the method has been added to the checks framework
"""
self.assertIn(
_is_placeholder_review_unlocked, PlaceholderRelationField.default_checks
)
|
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'),\
'Please use TensorFlow version 1.0 or newer. You are using {}'.format(
tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn(
'No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder,
containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model
(image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
# 56 * 56 * 256 -> 28 * 28 * 256
vgg_layer3_out_tensor_name = 'layer3_out:0'
# 28 * 28 * 512 -> 14 * 14 * 512
vgg_layer4_out_tensor_name = 'layer4_out:0'
# 7 * 7 * 4096
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return image_input, keep_prob, layer3_out, layer4_out, layer7_out
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network.
Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
# freeze pre-trained model
vgg_layer7_out = tf.stop_gradient(vgg_layer7_out)
vgg_layer4_out = tf.stop_gradient(vgg_layer4_out)
vgg_layer3_out = tf.stop_gradient(vgg_layer3_out)
# already do 1x1 conv
# kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=1e-3)
score_layer7 = tf.layers.conv2d(
vgg_layer7_out, num_classes, 1, strides=(1, 1), padding='same')
layer7x2 = tf.layers.conv2d_transpose(
score_layer7, num_classes, 4, strides=(2, 2), padding='same')
# scale layer4
scaled_layer4 = tf.multiply(vgg_layer4_out, 0.01, name='layer4_out_scaled')
score_layer4 = tf.layers.conv2d(
scaled_layer4, num_classes, 1, strides=(1, 1), padding='same')
fuse_layer4 = tf.add(layer7x2, score_layer4)
layer4x2 = tf.layers.conv2d_transpose(
fuse_layer4, num_classes, 4, strides=(2, 2), padding='same')
# scale layer3
scaled_layer3 = tf.multiply(vgg_layer3_out, 0.0001,
name='layer3_out_scaled')
score_layer3 = tf.layers.conv2d(
scaled_layer3, num_classes, 1, strides=(1, 1), padding='same')
fuse_layer3 = tf.add(layer4x2, score_layer3)
layer3x8 = tf.layers.conv2d_transpose(
fuse_layer3, num_classes, 16, strides=(8, 8), padding='same')
return layer3x8
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function, no reshape
logits = nn_last_layer
cross_entropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=correct_label))
# regularization error
# reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
cross_entropy_loss = cross_entropy_loss # + 1e-3 * sum(reg_losses)
train_op =\
tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn,
train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data.
Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
lr = 0.005
for i in range(epochs):
decay_lr = lr * 0.8 ** (i / 1.0)
print("epoch {}: lr: {}".format(i, decay_lr))
for images, labels in get_batches_fn(batch_size):
# print("{}:{}".format(images.shape, labels.shape))
_, loss = sess.run(
[train_op, cross_entropy_loss],
feed_dict={input_image: images,
correct_label: labels,
keep_prob: 0.5,
                           learning_rate: decay_lr})
print("loss: {}".format(loss))
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576)
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset
# instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
# parameters
epochs = 16
batch_size = 32
# writer = tf.summary.FileWriter("data/log/")
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(
os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
# TODO: Build NN using load_vgg, layers, and optimize function
        # image_input and keep_prob are not used when building the decoder layers,
        # but they are fed later during training and inference.
image_input, keep_prob, layer3_out, layer4_out, layer7_out =\
load_vgg(sess, vgg_path)
fc8_output = layers(layer3_out, layer4_out,
layer7_out, num_classes)
# build placeholders
# input_image = tf.placeholder(tf.float32, name='input_image')
correct_label = tf.placeholder(tf.float32, name='correct_label')
# keep_prob = tf.placeholder(tf.float32, name='keep_prob')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
logits, train_op, cross_entropy_loss =\
optimize(fc8_output, correct_label, learning_rate, num_classes)
# graph = tf.get_default_graph()
# print("saving graph")
# writer.add_graph(graph)
# TODO: Train NN using the train_nn function
sess.run(tf.global_variables_initializer())
train_nn(sess, epochs, batch_size, get_batches_fn,
train_op, cross_entropy_loss, image_input,
correct_label, keep_prob, learning_rate)
# TODO: Save inference data using helper.save_inference_samples
# helper.save_inference_samples(runs_dir, data_dir, sess,
# image_shape, logits, keep_prob, input_image)
helper.save_inference_samples(
runs_dir, data_dir, sess,
image_shape, logits, keep_prob, image_input)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
|
# puzzle8a.py
def main():
instrs = read_file()
for i in range(len(instrs)):
instrs = read_file()
# Swap the "jmp" and "nop" operations for
# each of the possibilities until we find
# a case where an instruction is not re-executed.
if instrs[i]["oper"] in ["jmp", "nop"]:
instrs[i] = swap_oper(instrs[i])
else:
# Can skip these cases, since they will have
# no difference.
continue
soln_found = True
accum = 0
index = 0
while True:
try:
instr = instrs[index]
# If instruction already executed, exit loop.
if instr["executed"]:
soln_found = False
break
else:
# Set flag that this instruction has been executed.
instrs[index]["executed"] = True
if instr["oper"] == "nop":
index += 1
elif instr["oper"] == "jmp":
index += instr["incr"]
elif instr["oper"] == "acc":
accum += instr["incr"]
index += 1
else:
raise ValueError("Invalid instruction: " + str(instr["oper"]))
            except IndexError:
                # The index ran past the last instruction, so the program terminated normally.
                break
if soln_found:
break
print("Accumulator value: " + str(accum))
def swap_oper(instr):
if instr["oper"] == "jmp":
instr["oper"] = "nop"
else:
instr["oper"] = "jmp"
return instr
def read_file():
with open("input.txt", "r") as input_file:
lines = input_file.readlines()
items = []
    index = 0
    for line in lines:
        elems = line.split(" ")
        items.append({"idx": index, "oper": elems[0], "incr": int(elems[1]), "executed": False})
        index += 1
return items
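
# Hedged example of the expected input.txt format (one "operation argument" pair per line):
# nop +0
# acc +1
# jmp +4
# acc +3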
if __name__ == "__main__":
main()
|
import unittest
import src.OptimalControl.DynamicProgramming.VectorCubicSpline as cubic
import numpy as np
from numpy.linalg import norm
class MyTestCase(unittest.TestCase):
def spline_test(self, point, a0, a1, a2, a3):
spline = cubic.VectorCubicSpline(a0, a1, a2, a3)
points_on_spline = []
for s in np.linspace(0, 1, 30):
points_on_spline.append(spline.get_point(s))
# Calculate minimum distance and point on the spline.
s, min_dist = spline.get_s_distance(point)
min_point = spline.get_point(s)
min_dist_recalculated = norm(point - min_point)
self.assertAlmostEqual(min_dist, min_dist_recalculated)
for point_on_spline in points_on_spline:
dist_to_spline = norm(point - point_on_spline)
self.assertTrue(dist_to_spline >= min_dist)
def test_spline_0(self):
self.spline_test(np.array([10, 10]), [3.5, 2.7], [2.3, 5], [-4.3, -2], [2.5, 3.8])
def test_spline_1(self):
self.spline_test(np.array([0, 0]), [3.5, 2.7], [2.3, 5], [-4.3, -2], [2.5, 3.8])
def test_spline_2(self):
self.spline_test(np.array([5, 5]), [3.5, 2.7], [2.3, 5], [-4.3, -2], [2.5, 3.8])
if __name__ == '__main__':
unittest.main()
|
import glob
import os
sources = glob.glob("src/*.asm")
out_dir = os.getcwd() + "/bin/"
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for src in sources:
name = os.path.splitext(os.path.basename(src))[0]
out = out_dir + name
o = out + ".o"
asm = "as -o " + o + " " + src
lnk = "gcc -o " + out + " " + o + " -no-pie"
print(asm + "\n")
os.system(asm)
print(lnk + "\n")
os.system(lnk)
os.remove(o)
binaries = glob.glob("bin/*")
for bin in binaries:
os.system("./" + bin)
|
from setuptools import setup
from ptulsconv import __author__, __license__, __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='ptulsconv',
version=__version__,
author=__author__,
description='Parse and convert Pro Tools text exports',
long_description_content_type="text/markdown",
long_description=long_description,
license=__license__,
url='https://github.com/iluvcapra/ptulsconv',
project_urls={
'Source':
'https://github.com/iluvcapra/ptulsconv',
'Issues':
'https://github.com/iluvcapra/ptulsconv/issues',
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio',
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Development Status :: 4 - Beta",
"Topic :: Text Processing :: Filters"],
packages=['ptulsconv'],
keywords='text-processing parsers film tv editing editorial',
install_requires=['parsimonious', 'tqdm', 'reportlab'],
package_data={
"ptulsconv": ["xslt/*.xsl"]
},
entry_points={
'console_scripts': [
'ptulsconv = ptulsconv.__main__:main'
]
}
)
|
class Stack(list):
"""docstring for Stack"""
def __init__(self, n):
super(Stack, self).__init__()
self.n = n
init_list = [0]*n
self.extend(init_list)
self.top = 0
    def stack_empty(self):
        return self.top == 0

    def push(self, x):
        # 0-based storage: write at the current top index, then advance it.
        self[self.top] = x
        self.top += 1

    def pop(self):
        if self.stack_empty():
            print("Stack underflow")
            return None
        self.top -= 1
        return self[self.top]
# s = Stack(n=10)
# s.push(x=10)
# s.push(x=20)
# s.push(x=30)
# s.push(x=40)
# s.push(x=50)
# print(s.pop())
# print(s.pop())
|
from typing import Any, List, Literal, TypedDict
from .FHIR_code import FHIR_code
from .FHIR_dateTime import FHIR_dateTime
from .FHIR_decimal import FHIR_decimal
from .FHIR_Element import FHIR_Element
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_Meta import FHIR_Meta
from .FHIR_Narrative import FHIR_Narrative
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
from .FHIR_TestReport_Participant import FHIR_TestReport_Participant
from .FHIR_TestReport_Setup import FHIR_TestReport_Setup
from .FHIR_TestReport_Teardown import FHIR_TestReport_Teardown
from .FHIR_TestReport_Test import FHIR_TestReport_Test
from .FHIR_uri import FHIR_uri
# A summary of information based on the results of executing a TestScript.
FHIR_TestReport = TypedDict(
"FHIR_TestReport",
{
# This is a TestReport resource
"resourceType": Literal["TestReport"],
# The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes.
"id": FHIR_id,
# The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource.
"meta": FHIR_Meta,
# A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc.
"implicitRules": FHIR_uri,
# Extensions for implicitRules
"_implicitRules": FHIR_Element,
# The base language in which the resource is written.
"language": FHIR_code,
# Extensions for language
"_language": FHIR_Element,
# A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety.
"text": FHIR_Narrative,
# These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope.
"contained": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Identifier for the TestScript assigned for external purposes outside the context of FHIR.
"identifier": FHIR_Identifier,
# A free text natural language name identifying the executed TestScript.
"name": FHIR_string,
# Extensions for name
"_name": FHIR_Element,
# The current state of this test report.
"status": Literal[
"completed", "in-progress", "waiting", "stopped", "entered-in-error"
],
# Extensions for status
"_status": FHIR_Element,
# Ideally this is an absolute URL that is used to identify the version-specific TestScript that was executed, matching the `TestScript.url`.
"testScript": FHIR_Reference,
# The overall result from the execution of the TestScript.
"result": Literal["pass", "fail", "pending"],
# Extensions for result
"_result": FHIR_Element,
# The final score (percentage of tests passed) resulting from the execution of the TestScript.
"score": FHIR_decimal,
# Extensions for score
"_score": FHIR_Element,
# Name of the tester producing this report (Organization or individual).
"tester": FHIR_string,
# Extensions for tester
"_tester": FHIR_Element,
# When the TestScript was executed and this TestReport was generated.
"issued": FHIR_dateTime,
# Extensions for issued
"_issued": FHIR_Element,
# A participant in the test execution, either the execution engine, a client, or a server.
"participant": List[FHIR_TestReport_Participant],
# The results of the series of required setup operations before the tests were executed.
"setup": FHIR_TestReport_Setup,
# A test executed from the test script.
"test": List[FHIR_TestReport_Test],
# The results of the series of operations required to clean up after all the tests were executed (successfully or otherwise).
"teardown": FHIR_TestReport_Teardown,
},
total=False,
)
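
# Hedged illustration (not part of the generated binding): because the TypedDict is
# declared with total=False, every key is optional, so a minimal report can use just
# the Literal-typed fields.
_example_report: FHIR_TestReport = {
    "resourceType": "TestReport",
    "status": "completed",
    "result": "pass",
}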
|
from django.db import models
from tests.example import managers as app_managers
from tests.example import querysets as app_queryset
class AfterSaveExampleModel(models.Model):
objects = app_managers.AfterSaveExampleModelManager.from_queryset(
app_queryset.AfterSaveExampleModelQuerySet
)()
test_text = models.TextField("Test text")
class Meta:
verbose_name = "After save example model"
verbose_name_plural = "After save example models"
def __str__(self):
return str(self.id)
|
from __future__ import absolute_import
import json
import os.path
from ocr4all.colors import ColorMap
from pkg_resources import resource_string
import numpy as np
from ocr4all_pixel_classifier.lib.pc_segmentation import RectSegment
from ocrd import Processor
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import (
MetadataItemType,
LabelsType, LabelType,
TextRegionType,
ImageRegionType,
NoiseRegionType,
CoordsType,
to_xml,
)
from ocrd_utils import (
assert_file_grp_cardinality,
getLogger,
make_file_id,
MIMETYPE_PAGE,
)
OCRD_TOOL = json.loads(resource_string(__name__, 'ocrd-tool.json').decode('utf8'))
TOOL = 'ocrd-pixelclassifier-segmentation'
FALLBACK_IMAGE_GRP = 'OCR-D-SEG-BLOCK'
def polygon_from_segment(segment: RectSegment):
from ocrd_utils import polygon_from_bbox
return polygon_from_bbox(segment.y_start, segment.x_start, segment.y_end, segment.x_end)
class PixelClassifierSegmentation(Processor):
def __init__(self, *args, **kwargs):
kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
kwargs['version'] = OCRD_TOOL['version']
super(PixelClassifierSegmentation, self).__init__(*args, **kwargs)
def process(self):
"""Performs segmentation on the input binary image
Produces a PageXML file as output.
"""
LOG = getLogger('processor.PixelClassifierSegmentation')
assert_file_grp_cardinality(self.input_file_grp, 1)
assert_file_grp_cardinality(self.output_file_grp, 1)
overwrite_regions = self.parameter['overwrite_regions']
xheight = self.parameter['xheight']
gpu_allow_growth = self.parameter['gpu_allow_growth']
resize_height = self.parameter['resize_height']
model = self.parameter['model']
if model == '__DEFAULT__':
from ocrd_pc_segmentation import DEFAULT_SEGMENTATION_MODEL_PATH
model = DEFAULT_SEGMENTATION_MODEL_PATH
elif model == '__LEGACY__':
from ocrd_pc_segmentation import LEGACY_SEGMENTATION_MODEL_PATH
model = LEGACY_SEGMENTATION_MODEL_PATH
page_grp = self.output_file_grp
for n, input_file in enumerate(self.input_files):
page_id = input_file.pageId or input_file.ID
LOG.info("INPUT FILE %i / %s", n, page_id)
pcgts = page_from_file(self.workspace.download_file(input_file))
metadata = pcgts.get_Metadata() # ensured by from_file()
metadata.add_MetadataItem(
MetadataItemType(type_="processingStep",
name=self.ocrd_tool['steps'][0],
value=TOOL,
Labels=[LabelsType(
externalModel="ocrd-tool",
externalId="parameters",
Label=[LabelType(type_=name,
value=self.parameter[name])
for name in self.parameter.keys()])]))
page = pcgts.get_Page()
if page.get_TextRegion():
if overwrite_regions:
LOG.info('removing existing TextRegions')
page.set_TextRegion([])
else:
LOG.warning('keeping existing TextRegions')
page.set_AdvertRegion([])
page.set_ChartRegion([])
page.set_ChemRegion([])
page.set_GraphicRegion([])
page.set_ImageRegion([])
page.set_LineDrawingRegion([])
page.set_MathsRegion([])
page.set_MusicRegion([])
page.set_NoiseRegion([])
page.set_SeparatorRegion([])
page.set_TableRegion([])
page.set_UnknownRegion([])
page_image, page_coords, _ = self.workspace.image_from_page(page, page_id)
# ensure the image doesn't have an alpha channel
if page_image.mode[-1] == "A":
page_image = page_image.convert(mode=page_image.mode[0:-1])
page_binary = page_image.convert(mode='1')
self._process_page(page, np.asarray(page_image), np.asarray(page_binary), page_coords, xheight, model,
gpu_allow_growth, resize_height)
file_id = make_file_id(input_file, self.output_file_grp)
self.workspace.add_file(
ID=file_id,
file_grp=page_grp,
pageId=input_file.pageId,
mimetype=MIMETYPE_PAGE,
local_filename=os.path.join(page_grp,
file_id + '.xml'),
content=to_xml(pcgts))
@staticmethod
def _process_page(page, page_image, page_binary, page_coords, xheight, model, gpu_allow_growth,
resize_height):
from ocr4all_pixel_classifier.lib.pc_segmentation import find_segments
from ocr4all_pixel_classifier.lib.predictor import PredictSettings, Predictor
from ocr4all_pixel_classifier.lib.dataset import SingleData
from ocr4all.colors import \
DEFAULT_COLOR_MAPPING, DEFAULT_LABELS_BY_NAME
from ocr4all_pixel_classifier.lib.dataset import prepare_images
image, binary = prepare_images(page_image, page_binary, target_line_height=8, line_height_px=xheight)
color_map = ColorMap(DEFAULT_COLOR_MAPPING)
labels_by_name = DEFAULT_LABELS_BY_NAME
data = SingleData(binary=binary, image=image, original_shape=binary.shape, line_height_px=xheight)
settings = PredictSettings(
network=os.path.abspath(model),
high_res_output=True,
color_map=color_map,
n_classes=len(DEFAULT_COLOR_MAPPING),
gpu_allow_growth=gpu_allow_growth,
)
predictor = Predictor(settings)
masks = predictor.predict_masks(data)
orig_height, orig_width = page_image.shape[0:2]
mask_image = masks.inverted_overlay
segments_text, segments_image = find_segments(orig_height, mask_image, xheight,
resize_height, labels_by_name)
def add_region(region: RectSegment, index: int, region_type: str):
from ocrd_utils import coordinates_for_segment, points_from_polygon
polygon = polygon_from_segment(region)
polygon = coordinates_for_segment(polygon, page_image, page_coords)
points = points_from_polygon(polygon)
indexed_id = "region%04d" % index
coords = CoordsType(points=points)
if region_type == "text":
page.add_TextRegion(TextRegionType(id=indexed_id, Coords=coords))
elif region_type == "image":
page.add_ImageRegion(ImageRegionType(id=indexed_id, Coords=coords))
else:
page.add_NoiseRegion(NoiseRegionType(id=indexed_id, Coords=coords))
count = 0
for r in segments_text:
add_region(r, count, "text")
count += 1
for r in segments_image:
add_region(r, count, "image")
count += 1
|
# declaration
Data = []
data1 = ["x"]
data2 = ["y"]
x_val = []
y_val = []
# implementation
import numpy
import tabulate
import matplotlib.pyplot as plt

def f(x):
    # Evaluate the user-supplied equation string at x.
    return eval(eq)

eq = input('Equation: ')
n = int(input("Number of points: "))
x = numpy.zeros(n)
y = numpy.zeros(n)
ul = float(input("Upper limit: "))
ll = float(input("lower limit: "))
x[0]=ll
x[n-1]=ul
h = (ul - ll) / (n-1)
for i in range(0, n):
y[i] = f(x[i])
data1.append(round(x[i],3))
data2.append(round(y[i],3))
x_val.append(round(x[i],3))
y_val.append(round(y[i],3))
    try:
        x[i + 1] = x[i] + h
    except IndexError:
        # Last point reached; there is no next x to fill in.
        pass
def simpson(y, n, h):
    """Composite Simpson's three-eighths rule over n sample points (n - 1 subintervals)."""
    sum1 = sum2 = 0
    for i in range(1, n - 1):
        if i % 3 != 0:
            sum1 += y[i]
        else:
            sum2 += y[i]
    return (3 * h / 8) * (y[0] + y[n - 1] + 3 * sum1 + 2 * sum2)
Data.append(data1)
Data.append(data2)
print(tabulate.tabulate(Data,tablefmt="fancy_grid"))
print("the result is : ",round(simson(y,n,h),4))
plt.style.use('seaborn')
plt.title("Simson's three eight rule")
plt.plot(x_val,y_val,color='deepskyblue',linewidth=1,marker='o',markersize=5,label='f(x)')
plt.legend()
plt.xlabel('x axis',color='red')
plt.ylabel('y axis',color='red')
plt.show()
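
# Hand-worked check (hedged): for f(x) = x**2 on [0, 3] with n = 4 points, h = 1 and
# y = [0, 1, 4, 9], so (3*h/8) * (y[0] + y[3] + 3*(y[1] + y[2])) = (3/8) * 24 = 9,
# which is the exact value of the integral.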
|
# Import required modules
import os
import time
# Import custom modules
import file
import solidity
import geth
import crypto
import input
# Definition for a ballot
class Ballot:
def __init__(self, title, address):
# The local title of the ballot
self.title = title
# Address where the ballot is located
self.address = address
# Save a ballot to file
def saveBallot(title, address):
# Save the ballot to the file system
file.save('ballots/' + title + '.ballot', address)
# Get a saved ballot by an index
def getBallotByIndex(index):
# Get the ballots
allBallots = savedBallots()
# Return false if there is no ballot at the index
if len(allBallots) <= index or index < 0:
return False
# Return the ballot at the index
return allBallots[index]
# Get a saved ballot by its name
def getBallotByName(name):
# Get the ballots
allBallots = savedBallots()
# Loop through each saved ballot
for index in xrange(len(allBallots)):
# Return this ballot if the name matches
        if allBallots[index].title == name:
return allBallots[index]
# Loop completed, no ballot found with given name
return False
# Get the index of a candidate by their name
def getCandidateByName(ballot, name):
# Get the candidates
candidates = getCandidates(ballot)
# Loop through each candidate
for index in xrange(len(candidates)):
# Return this candidate if the name matches
if candidates[index] == name:
return index
# Loop completed, no candidate found with given name
return False
# Extract the details of a ballot from a file
def extractBallotFileData(ballotFile):
# Create an array of values from the ballot file
ballotFileArray = ballotFile.split(',')
# Extract the ballot title
ballotTitle = ballotFileArray[0]
# Extract the ballot address
ballotAddress = ballotFileArray[1]
# Return the extracted data
return ballotTitle, ballotAddress
# Import a ballot
def importBallot(filePath):
# Read the ballot file at the given path
ballotFile = file.read(filePath)
# Get the data from the csv
title, address = extractBallotFileData(ballotFile)
# Save the ballot
saveBallot(title, address)
# Export a ballot
def export(ballotToExport):
# Add the ballot name to the export
content = ballotToExport.title
# Add the ballot address to the export
content += "," + ballotToExport.address
# Save the export
file.save(ballotToExport.title + ' Export.csv', content)
# Get the saved ballots
def savedBallots():
    # Initialise the saved ballots array
savedBallots = []
# Loop through the directory for the saved ballots
for fileName in os.listdir('ballots/'):
# Check that it is a csv file
if os.path.isfile('ballots/' + fileName) and fileName.endswith('.ballot'):
# Deduce the ballot title from the filename
title = fileName.replace('.ballot', '')
# Get the ballot address from the file
address = file.read('ballots/' + fileName)
# Create a new saved ballot object and add it to the return array
savedBallots.append(Ballot(title, address))
# Return the array of saved ballots
return savedBallots
# Delete a saved ballot
def delete(ballot):
# Delete the ballot file
file.delete('ballots/' + ballot.title + '.ballot')
# Initialise a new ballot
def initalise(account, title, description, candidates, voters, key):
    # Initialise the public key 'n' chunks array
publicKeyNChunks = []
# Calculate the number of candidates
candidatesCount = len(candidates)
# Rebuild the candidates string
candidatesString = ','.join(candidates)
# Get the number of public key 'n' 256 bit chunks required
nChunksLength = (key.publicKey.n.bit_length() / 256) + 1
    # Convert the public key 'n' into hexadecimal
publicKeyNHex = geth.numberToHex(key.publicKey.n)
# Loop through each 'n' chunk
for index in xrange(nChunksLength):
# Add this chunk to the array
publicKeyNChunks.append(publicKeyNHex[index * 64 : (index + 1) * 64])
    # Initialise the public key 'g' chunks array
publicKeyGChunks = []
# Get the number of public key 'g' 256 bit chunks required
gChunksLength = (key.publicKey.g.bit_length() / 256) + 1
    # Convert the public key 'g' into hexadecimal
publicKeyGHex = geth.numberToHex(key.publicKey.g)
# Loop through each 'g' chunk
for index in xrange(gChunksLength):
# Add this chunk to the array
publicKeyGChunks.append(publicKeyGHex[index * 64 : (index + 1) * 64])
    # Convert the ballot arguments to hexadecimal format
titleHex = geth.stringToHex(title)
descriptionHex = geth.stringToHex(description)
candidatesHex = geth.stringToHex(candidatesString)
candidatesCountHex = geth.numberToHex(candidatesCount)
# Get the length of the ballot arguments and convert to hex
# Characters for strings, array size for arrays
titleLengthHex = geth.numberToHex(len(title))
descriptionLengthHex = geth.numberToHex(len(description))
candidatesLengthHex = geth.numberToHex(len(candidatesString))
votersLengthHex = geth.numberToHex(len(voters))
publicKeyNChunksLengthHex = geth.numberToHex(len(publicKeyNChunks))
publicKeyGChunksLengthHex = geth.numberToHex(len(publicKeyGChunks))
# Compile the ballot contract
compiledBallot = solidity.compile('contracts/ballot.sol').itervalues().next()
# Extract the ballot contract ABI (Application Binary Interface)
contractAbi = compiledBallot['abi']
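    # Descriptive note (hedged): the constructor arguments are ABI-encoded by hand below.
    # Each dynamic argument contributes a 32-byte head slot holding the byte offset of its
    # data, static arguments (the candidate count) are placed inline, and the
    # length-prefixed data of the dynamic arguments is appended after all the head slots.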
    # Initialise the ballot creation bytecode
ballotCreationCode = ""
# Add the contract bytecode
ballotCreationCode += compiledBallot['code']
# Declare the number of arguments (for use in calculating offsets)
ballotArguments = 7
# Add the offset for the title argument
ballotCreationCode += geth.pad(geth.numberToHex(ballotArguments * 32))
# Add the offset for the description argument
ballotCreationCode += geth.pad(geth.numberToHex((2 + (len(titleHex) / 64) + ballotArguments) * 32))
# Add the offset for the candidates argument
ballotCreationCode += geth.pad(geth.numberToHex((2 + (len(titleHex) / 64) + 2 + (len(descriptionHex) / 64) + ballotArguments) * 32))
# Add the candidates count argument
ballotCreationCode += geth.pad(candidatesCountHex)
# Add the offset for the voters argument
ballotCreationCode += geth.pad(geth.numberToHex((2 + (len(titleHex) / 64) + 2 + (len(descriptionHex) / 64) + 2 + (len(candidatesHex) / 64) + (len(candidatesCountHex) / 64) + ballotArguments) * 32))
# Add the offset for the public key 'n' argument
ballotCreationCode += geth.pad(geth.numberToHex((2 + (len(titleHex) / 64) + 2 + (len(descriptionHex) / 64) + 2 + (len(candidatesHex) / 64) + 1 + (len(candidatesCountHex) / 64) + (len(voters)) + ballotArguments) * 32))
# Add the offset for the public key 'g' argument
ballotCreationCode += geth.pad(geth.numberToHex((2 + (len(titleHex) / 64) + 2 + (len(descriptionHex) / 64) + 2 + (len(candidatesHex) / 64) + 2 + (len(candidatesCountHex) / 64) + (len(voters)) + len(publicKeyNChunks) + ballotArguments) * 32))
# Add the ballot title
ballotCreationCode += geth.pad(titleLengthHex)
ballotCreationCode += geth.pad(titleHex, 'left')
# Add the ballot description
ballotCreationCode += geth.pad(descriptionLengthHex)
ballotCreationCode += geth.pad(descriptionHex, 'left')
# Add the ballot candidates
ballotCreationCode += geth.pad(candidatesLengthHex)
ballotCreationCode += geth.pad(candidatesHex, 'left')
# Add the ballot voters
ballotCreationCode += geth.pad(votersLengthHex)
for index in xrange(len(voters)):
ballotCreationCode += geth.pad(voters[index])
# Add the ballot public key
ballotCreationCode += geth.pad(publicKeyNChunksLengthHex)
for index in xrange(len(publicKeyNChunks)):
ballotCreationCode += geth.pad(publicKeyNChunks[index])
ballotCreationCode += geth.pad(publicKeyGChunksLengthHex)
for index in xrange(len(publicKeyGChunks)):
ballotCreationCode += geth.pad(publicKeyGChunks[index])
# Deploy the ballot contract
contractTransactionHash = geth.deployContract(account, ballotCreationCode)
# Check if there was an error sending the transaction
if('error' in contractTransactionHash):
# Output the error and abort the ballot
print 'Error deploying contract: "' + contractTransactionHash['error']['message'] + '"'
else:
# Loop to wait until a transaction is confirmed
print 'Waiting for contract to deploy...'
while True:
            # Wait 1 second before rechecking for a transaction receipt
time.sleep(1)
# Attempt to get the transaction receipt for the deployed contract
transactionReceipt = geth.getTransactionReceipt(contractTransactionHash)['result']
# Check if a receipt is available
if transactionReceipt is not None:
# The transaction has been mined, break the loop
break
# Get the contract address from the receipt
contractAddress = transactionReceipt['contractAddress']
# Save the ballot
saveBallot(title, contractAddress)
# Return control to the program
print 'Ballot contract deployed.'
# Attempt to vote in a ballot
def executeVote(ballotAddress, account, vote):
    # Initialise the sendable votes
sendableVotes = []
# Loop through each 2048 bit vote value
for voteIndex in xrange(len(vote)):
# Convert the value to hex
voteHex = geth.numberToHex(vote[voteIndex])
# Get the length of the hex
hexLength = len(str(voteHex))
# Check if the hex is less than 512
if hexLength < 512:
# Get how much to pad the first value by
paddingRequired = 512 - hexLength
# Split the vote value into sendable 256 bit chunks
for index in xrange(8):
# Check if this is the first chunk
if index == 0:
# Add this 256 bit chunk
sendableVotes.append(voteHex[64*index:64*(index + 1)-paddingRequired])
else:
# Add this 256 bit chunk
sendableVotes.append(voteHex[64*index-paddingRequired:64*(index + 1)-paddingRequired])
else:
# Split the vote value into sendable 256 bit chunks
for index in xrange(8):
# Add this 256 bit chunk
sendableVotes.append(voteHex[64*index:64*(index + 1)])
    # Initialise the vote bytecode with the offset
voteBytecode = geth.pad(geth.numberToHex(32))
# Add the vote array length
voteBytecode += geth.pad(geth.numberToHex(len(sendableVotes)))
# Loop through each 256 bit chunk of the sendable vote
for index in xrange(len(sendableVotes)):
# Add this chunk to the sendable bytecode
voteBytecode += geth.pad(sendableVotes[index])
# Attempt the vote transaction
voteTransactionHash = geth.castVote(ballotAddress, account, voteBytecode)
# Check if there was an error sending the transaction
if('error' in voteTransactionHash):
# Output the error and abort the vote
print 'Error sending vote: "' + voteTransactionHash['error']['message'] + '"'
else:
# Loop to wait until a transaction is confirmed
print 'Waiting for vote to send...'
while True:
# Wait 1 second before rechecking for a transaction receipt
time.sleep(1)
# Attempt to get the transaction receipt for the vote transaction
transactionReceipt = geth.getTransactionReceipt(voteTransactionHash)['result']
# Check if a receipt is available
if transactionReceipt is not None:
# The transaction has been mined, break the loop
break
# Build a vote for a ballot
def buildVote(ballot, candidateIndex):
# Get the ballots public key
publicKey = getPublicKey(ballot)
# Initialise the vote
vote = []
# Loop through each candidate in the ballot
for index in xrange(len(getCandidates(ballot))):
# Check if this is the candidate to vote for
if (index == candidateIndex):
# Add an encrypted one to the vote (a vote for this candidate)
value = crypto.encrypt(publicKey, 1)
vote.append(value)
else:
# Add an encrypted zero to the vote (no vote for this candidate)
value = crypto.encrypt(publicKey, 0)
vote.append(value)
# Return the encrypted vote
return vote
# Get a ballot's title
def getTitle(ballot):
titleResponse = geth.callFunction(ballot.address, 'getTitle')['result']
return geth.responseToString(titleResponse)
# Get a ballot's description
def getDescription(ballot):
descriptionResponse = geth.callFunction(ballot.address, 'getDescription')['result']
return geth.responseToString(descriptionResponse)
# Get a ballot's candidates
def getCandidates(ballot):
candidatesResponse = geth.callFunction(ballot.address, 'getCandidates')['result']
return geth.responseToCandidates(candidatesResponse)
# Get a ballot's candidate count
def getCandidateCount(ballot):
candidateCountResponse = geth.callFunction(ballot.address, 'getCandidateCount')['result']
return geth.responseToInteger(candidateCountResponse)
# Get a ballot's voters
def getVoters(ballot):
votersResponse = geth.callFunction(ballot.address, 'getVoters')['result']
return geth.responseToVoters(votersResponse)
# Get a ballot's public key
def getPublicKey(ballot):
# Get the public key 'n'
publicKeyResponse = geth.callFunction(ballot.address, 'getPublicKeyN')['result']
publicKeyN = geth.responseToPublicKey(publicKeyResponse)
# Get the public key 'g'
publicKeyResponse = geth.callFunction(ballot.address, 'getPublicKeyG')['result']
publicKeyG = geth.responseToPublicKey(publicKeyResponse)
# Return a public key object
return crypto.PublicKey(publicKeyN, publicKeyG)
# Get a ballot's votes
def getVotes(ballot):
# Get the vote values for the selected ballot
votesResponse = geth.callFunction(ballot.address, 'getVotes')['result']
voteValues = geth.responseToVoteValues(votesResponse)
# Get the candidate count
candidateCount = getCandidateCount(ballot)
# Initialise the votes array
votes = []
# Loop through each set of votes
for index in xrange((len(voteValues) / 8) / candidateCount):
# Add a row for this set of votes
votes.append([])
# Loop through each candidate
for candidateIndex in xrange(candidateCount):
# Initialise this vote value
voteValue = ''
# Loop through each value for this candidate
for valueIndex in xrange(8):
# Compile this vote
voteValue += str(voteValues[((index * (candidateCount * 8)) + (candidateIndex) * 8) + valueIndex])
# Add the vote value to the vote array
votes[index].append(geth.hexToNumber(voteValue))
# Return the votes array
return votes
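# --- Illustrative sketch, not part of the original ballot module ---
# A hedged, standalone version of the 256 bit chunking done in executeVote()
# above: the hex form of a 2048 bit encrypted value is split into eight
# 64-character words. Instead of shortening the first word the way the
# in-line code does, this variant left-pads with zeros; for values within 64
# hex digits of the full 512-character width the words carry the same numeric
# values. The helper name is hypothetical.
def exampleSplitIntoChunks(valueHex):
    # Left-pad to the full 512 hex characters, then slice into 8 words
    padded = valueHex.rjust(512, '0')
    return [padded[64 * index:64 * (index + 1)] for index in xrange(8)]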
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/MSVC/multiple-pdb.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that setting $PDB to '${TARGET}.pdb' allows us to build multiple
programs with separate .pdb files from the same environment.
Under the covers, this verifies that emitters support expansion of the
$TARGET variable (and implicitly $SOURCE), using the original specified
list(s).
"""
import TestSCons
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.skip_if_not_msvc()
test.write('SConstruct', """\
env = Environment(PDB = '${TARGET.base}.pdb')
env.Program('test1.cpp')
env.Program('test2.cpp')
""")
test.write('test1.cpp', """\
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("test1.cpp\\n");
exit (0);
}
""")
test.write('test2.cpp', """\
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("test2.cpp\\n");
exit (0);
}
""")
test.run(arguments = '.')
test.must_exist('test1%s' % _exe)
test.must_exist('test1.pdb')
test.must_exist('test2%s' % _exe)
test.must_exist('test2.pdb')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# Generated by Django 3.0.8 on 2021-07-20 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20210720_1103'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='food_order_day',
field=models.CharField(choices=[('0', 'Monday'), ('1', 'Tuesday'), ('2', 'Wednesday'), ('3', 'Thursday'), ('4', 'Friday'), ('5', 'Saturday'), ('6', 'Sunday')], default='0', max_length=1),
),
]
|
"""
Created on Oct 25, 2017
@author: Jafar Taghiyar (jtaghiyar@bccrc.ca)
"""
from django.conf.urls import url
from . import views
app_name = 'bulk'
urlpatterns = [
url(r'^$', views.home_view, name='home'),
url(r'^sample/(?P<pk>\d+)$', views.sample_detail, name='sample_detail'),
url(r'^sample/list$', views.sample_list, name='sample_list'),
url(r'^sample/create/$', views.sample_create, name='sample_create'),
url(r'^sample/update/(?P<pk>\d+)$', views.sample_update, name='sample_update'),
url(r'^sample/delete/(?P<pk>\d+)$', views.sample_delete, name='sample_delete'),
url(r'^library/(?P<pk>\d+)$', views.library_detail, name='library_detail'),
url(r'^library/list$', views.library_list, name='library_list'),
url(r'^library/create/$', views.LibraryCreate.as_view(), name='library_create'),
url(r'^library/create/(?P<from_sample>\d+)$', views.LibraryCreate.as_view(), name='library_create_from_sample'),
url(r'^library/update/(?P<pk>\d+)$', views.LibraryUpdate.as_view(), name='library_update'),
url(r'^library/delete/(?P<pk>\d+)$', views.library_delete, name='library_delete'),
]
|
from pyleap import *
rect = Rectangle(100, 100, 200, 50, "green")
rect2 = Rectangle(100, 100, 200, 50, "green")
circle = Circle(200, 200, 50, "green")
p = Point(point_size=10, color="red")
circle.transform.scale_x = 2
# rect.transform.set_anchor_rate(1, 1)
def draw(dt):
window.clear()
window.show_axis()
rect.rotation += 1
circle.rotation -= 1
rect.stroke()
rect2.stroke()
circle.stroke()
c = rect.collide(circle)
if(c):
p.x = c.x
p.y = c.y
p.draw()
c = rect2.collide(rect)
if(c):
p.x = c.x
p.y = c.y
p.draw()
c = rect2.collide(circle)
if(c):
p.x = c.x
p.y = c.y
p.draw()
window.show_fps()
def move():
circle.x = mouse.x
circle.y = mouse.y
mouse.on_move(move)
repeat(draw)
run()
|
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install tclib
#
# And where it will live on PyPI: https://pypi.org/project/tclib/
#
name="tclib",
version="1.0.0",
description="A python library for managing test cases, test plans",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/rhinstaller/tclib",
author="Pavel Holica",
author_email="pholica@redhat.com",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
keywords="testcases, testplans, yaml",
package_dir={"": "."},
packages=find_packages(where="."),
python_requires=">=3, <4",
install_requires=[
'jinja2',
'pyyaml'
],
package_data={
"": ["doc/*", "examples/*"],
},
entry_points={
"console_scripts": [
"tcdiff=tclib.diff_main:main",
"tc_generate_documents=tclib.generate_documents_main:main",
"tcquery=tclib.query_main:main",
"tcvalidate=tclib.validate_main:main",
],
},
)
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import glob
import os
import shlex
import subprocess
import sys
from multiprocessing.dummy import Pool
ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11',
'-Wno-unused-local-typedef', '-Wno-sign-compare']
INCLUDES = [
'..',
'../kenlm',
'third_party/openfst-1.6.7/src/include',
'third_party/ThreadPool'
]
COMMON_FILES = (glob.glob('../kenlm/util/*.cc')
+ glob.glob('../kenlm/lm/*.cc')
+ glob.glob('../kenlm/util/double-conversion/*.cc'))
COMMON_FILES += glob.glob('third_party/openfst-1.6.7/src/lib/*.cc')
COMMON_FILES = [
fn for fn in COMMON_FILES
if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(
'unittest.cc'))
]
COMMON_FILES += glob.glob('*.cpp')
def build_common(out_name='common.a', build_dir='temp_build/temp_build', num_parallel=1):
compiler = os.environ.get('CXX', 'g++')
ar = os.environ.get('AR', 'ar')
libtool = os.environ.get('LIBTOOL', 'libtool')
cflags = os.environ.get('CFLAGS', '') + ' ' + os.environ.get('CXXFLAGS', '')
for file in COMMON_FILES:
outfile = os.path.join(build_dir, os.path.splitext(file)[0] + '.o')
outdir = os.path.dirname(outfile)
if not os.path.exists(outdir):
print('mkdir', outdir)
os.makedirs(outdir)
def build_one(file):
outfile = os.path.join(build_dir, os.path.splitext(file)[0] + '.o')
if os.path.exists(outfile):
return
cmd = '{cc} -fPIC -c {cflags} {args} {includes} {infile} -o {outfile}'.format(
cc=compiler,
cflags=cflags,
args=' '.join(ARGS),
includes=' '.join('-I' + i for i in INCLUDES),
infile=file,
outfile=outfile,
)
print(cmd)
subprocess.check_call(shlex.split(cmd))
return outfile
pool = Pool(num_parallel)
obj_files = list(pool.imap_unordered(build_one, COMMON_FILES))
if sys.platform.startswith('darwin'):
cmd = '{libtool} -static -o {outfile} {infiles}'.format(
libtool=libtool,
outfile=out_name,
infiles=' '.join(obj_files),
)
print(cmd)
subprocess.check_call(shlex.split(cmd))
else:
cmd = '{ar} rcs {outfile} {infiles}'.format(
ar=ar,
outfile=out_name,
infiles=' '.join(obj_files)
)
print(cmd)
subprocess.check_call(shlex.split(cmd))
if __name__ == '__main__':
build_common()
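# Hedged usage note: the compiler, archiver and flags are taken from the
# environment (CXX, AR, LIBTOOL, CFLAGS/CXXFLAGS read above), so a typical
# invocation might look like
#     CXX=clang++ CXXFLAGS="-march=native" python <this script>
# and a parallel build can be requested from Python with, for example,
# build_common(out_name='common.a', num_parallel=4).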
|
vault_operation = [
"update",
"read",
"delete"
]
vault_attackers = [
"200.174.194.100",
"200.174.194.101",
"200.174.194.102",
"200.174.194.103",
"200.174.194.104"
]
vault_benign = [
"",
"",
"",
""
]
# the user that tried to login
vault_benign_user = [
"auth/userpass/login/Daniel_P",
"auth/userpass/login/Sandy_P",
"auth/userpass/login/Samuel_G",
"auth/userpass/login/Dana_W",
"auth/userpass/login/George_C",
]
vault_attacker_user = [
"auth/userpass/login/Diana_L",
"auth/userpass/login/Barry_S",
"auth/userpass/login/Guy_B",
"auth/userpass/login/Eric_J",
"auth/userpass/login/Doris_ZC",
]
|
SELIA_VISUALIZERS_APPS = [
'selia_visualizers',
]
|
'''
Created on 23.08.2017
@author: falensaa
'''
import pickle
import numpy as np
import dynet
import imsnpars.nparser.features
from imsnpars.tools import utils, neural
from imsnpars.nparser.graph import features as gfeatures
class LblTagDict(object):
def __init__(self):
self.__lbl2Id = { }
self.__id2lbl = { }
def getNrOfLbls(self):
return len(self.__lbl2Id)
def getLbl(self, lId):
return self.__id2lbl[lId]
def getLblId(self, lbl):
return self.__lbl2Id[lbl]
def readData(self, sentences):
for sent in sentences:
for tok in sent:
if tok.dep not in self.__lbl2Id:
self.__id2lbl[len(self.__lbl2Id)] = tok.dep
self.__lbl2Id[tok.dep] = len(self.__lbl2Id)
def save(self, pickleOut):
pickle.dump((self.__lbl2Id, self.__id2lbl), pickleOut)
def load(self, pickleIn):
(self.__lbl2Id, self.__id2lbl) = pickle.load(pickleIn)
class LabelerGraphTask(neural.NNTreeTask):
def __init__(self, featReprBuilder, network, reprLayer):
self.__network = network
self.__featReprBuilder = featReprBuilder
self.__lbls = LblTagDict()
self.__reprLayer = reprLayer
def save(self, pickleOut):
self.__lbls.save(pickleOut)
def load(self, pickleIn):
self.__lbls.load(pickleIn)
def readData(self, sentences):
self.__lbls.readData(sentences)
def initializeParameters(self, model, reprDim):
self.__featReprBuilder.initializeParameters(model, reprDim)
self.__network.initializeParameters(model, reprDim, self.__lbls.getNrOfLbls())
def renewNetwork(self):
self.__network.renewNetwork()
def buildLosses(self, vectors, instance, currentEpoch, predictTrain = True):
outputsLbls = self.__buildLblOutputs(instance, instance.correctTree, vectors, isTraining=True)
correctLbls = [ self.__lbls.getLblId(instance.correctTree.getLabel(tokPos)) for tokPos in range(instance.correctTree.nrOfTokens()) ]
losses = self.__buildBestLosses(outputsLbls, correctLbls)
if predictTrain:
lblPred = self.__predictLbls(outputsLbls)
else:
lblPred = None
return losses, lblPred
def predict(self, instance, tree, vectors):
transLblOut = self.__buildLblOutputs(instance, tree, vectors, isTraining=False)
transLbls = [ ]
for output in transLblOut:
netVal = output.value()
bestTagId = np.argmax(netVal)
transLbls.append(self.__lbls.getLbl(bestTagId))
return transLbls
def reportLabels(self):
return True
def __buildLblOutputs(self, instance, tree, vectors, isTraining):
outputs = [ ]
for dId in range(tree.nrOfTokens()):
hId = tree.getHead(dId)
depRepr = self.__featReprBuilder.extractAndBuildFeatRepr(gfeatures.FeatId.DEP, dId, instance.sentence, vectors, isTraining)
headRepr = self.__featReprBuilder.extractAndBuildFeatRepr(gfeatures.FeatId.HEAD, hId, instance.sentence, vectors, isTraining)
distRepr = self.__featReprBuilder.onlyBuildFeatRepr(gfeatures.FeatId.DIST, (hId, dId), isTraining)
featRepr = headRepr + depRepr
if distRepr is not None:
featRepr.append(distRepr)
assert len(featRepr) == self.__featReprBuilder.getNrOfFeatures()
featRepr = dynet.esum(featRepr)
netOut = self.__network.buildOutput(featRepr, isTraining=isTraining)
outputs.append(netOut)
return outputs
def __predictLbls(self, lblOuts):
predLbls = [ ]
for output in lblOuts:
netVal = output.value()
bestTagId = np.argmax(netVal)
bestLbl = self.__lbls.getLbl(bestTagId)
predLbls.append(bestLbl)
return predLbls
def __buildBestLosses(self, outputsLbls, correctLbls):
losses = [ ]
for (output, correct) in zip(outputsLbls, correctLbls):
sortedIds = np.argsort(output.value())[::-1]
predicted = utils.first(lambda x : x != correct, sortedIds)
losses.append(self.__network.buildLoss(output, correct, predicted))
return losses
class DummyLabeler(neural.NNTreeTask):
def __init__(self, lblData = None):
self.__lblData = lblData
def save(self, pickleOut):
if self.__lblData is not None:
self.__lblData.save(pickleOut)
def load(self, pickleIn):
if self.__lblData is not None:
self.__lblData.load(pickleIn)
def readData(self, sentences):
if self.__lblData is not None:
self.__lblData.readData(sentences)
def initializeParameters(self, model, reprDim):
pass
def renewNetwork(self):
pass
def buildLosses(self, vectors, instance, currentEpoch, predictTrain = False):
return [ ], None
def predict(self, instance, tree, vectors):
return None
def reportLabels(self):
return False
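# --- Illustrative sketch, not part of the original parser code ---
# A hedged example of how LblTagDict (defined above) builds its label <-> id
# maps from parsed sentences. _ExampleToken is a stand-in that only mimics
# the `.dep` attribute the dictionary actually reads.
class _ExampleToken(object):
    def __init__(self, dep):
        self.dep = dep

def _exampleLblDictUsage():
    lbls = LblTagDict()
    lbls.readData([[_ExampleToken("nsubj"), _ExampleToken("obj"), _ExampleToken("nsubj")]])
    # Two distinct labels were seen, so two ids were assigned
    assert lbls.getNrOfLbls() == 2
    assert lbls.getLbl(lbls.getLblId("obj")) == "obj"
    return lbls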
|
# (c) @AbirHasan2005
import aiohttp
import asyncio
import datetime
from pyrogram import Client
from pyrogram.types import Message
from core.send_msg import SendMessage, EditMessage
MessagesDB = {}
async def GetData(date: str):
async with aiohttp.ClientSession() as session:
async with session.get(f"https://www.fotmob.com/matches?date={date}") as resp:
data = await resp.json()
return data
async def GetLiveStatus(bot: Client, updates_channel_id: int):
running = False
status = ""
reason = ""
while True:
print("Getting Data ...")
data = await GetData(str(datetime.datetime.now().date()).replace('-', ''))
for i in range(len(data["leagues"])):
print("Loading Data ...")
for x in range(len(data["leagues"][i]["matches"])):
leagueName = data["leagues"][i]["name"]
firstMatchTime = data["leagues"][i]["matches"][x]["time"]
finished = data["leagues"][i]["matches"][x]["status"].get("finished", True)
started = data["leagues"][i]["matches"][x]["status"].get("started", False)
cancelled = data["leagues"][i]["matches"][x]["status"].get("cancelled", False)
ongoing = data["leagues"][i]["matches"][x]["status"].get("ongoing", False)
score = data["leagues"][i]["matches"][x]["status"].get("scoreStr", "")
if score == "":
score = f"{data['leagues'][i]['matches'][x]['home']['score']} - {data['leagues'][i]['matches'][x]['away']['score']}"
if (finished is False) and (started is True) and (cancelled is False) and (ongoing is True):
running, status = True, "Started"
elif finished is True:
running, status = False, "Finished"
elif cancelled is True:
running, status, reason = False, "Cancelled", data["leagues"][i]["matches"][x]["status"].get("reason", {}).get("long", "")
if (running is True) and (finished is False) and (ongoing is True):
liveTime = data["leagues"][i]["matches"][x]["status"].get("liveTime", {}).get("long", "")
text = f"**League Name:** `{leagueName}`\n\n" \
f"**Match Date:** `{firstMatchTime}`\n\n" \
f"**Match Status:** `{status}`\n\n" \
f"**Time Passed:** `{liveTime}`\n\n" \
f"**Teams:** `{data['leagues'][i]['matches'][x]['home']['name']}` __VS__ `{data['leagues'][i]['matches'][x]['away']['name']}`\n\n" \
f"**Score:** `{score}`"
if MessagesDB.get(data["leagues"][i]["matches"][x]["id"], None) is None:
message = await SendMessage(bot, text, updates_channel_id)
MessagesDB[data["leagues"][i]["matches"][x]["id"]] = message
print("Sleeping 5s ...")
await asyncio.sleep(5)
else:
editable: Message = MessagesDB[data["leagues"][i]["matches"][x]["id"]]
await EditMessage(editable, text)
elif running is False:
if MessagesDB.get(data["leagues"][i]["matches"][x]["id"], None) is not None:
status_reason = f"{status}\n\n" \
f"**Reason:** `{reason}`\n\n"
text = f"**League Name:** `{leagueName}`\n\n" \
f"**Match Date:** `{firstMatchTime}`\n\n" \
f"**Match Status:** `{status if (status == 'Finished') else status_reason}`\n\n" \
f"**Teams:** `{data['leagues'][i]['matches'][x]['home']['name']}` __VS__ `{data['leagues'][i]['matches'][x]['away']['name']}`\n\n" \
f"**Score:** `{score}`"
editable: Message = MessagesDB[data["leagues"][i]["matches"][x]["id"]]
await EditMessage(editable, text)
print("Sleeping 60s ...")
await asyncio.sleep(60)
|
"""Maintain db function match_individual_name here."""
from alembic_utils.pg_function import PGFunction
match_individual_name = PGFunction(
schema="public",
signature="match_individual_name(lastname IN VARCHAR, firstname IN VARCHAR)",
definition="""
RETURNS int[]
LANGUAGE plpgsql
AS
$$
DECLARE
v_ids integer ARRAY;
BEGIN
SET pg_trgm.word_similarity_threshold = 0.4;
SELECT array_agg(p.id)
INTO v_ids
FROM parties p
WHERE p.registration_id_end IS NULL
AND p.party_type = 'DI'
AND lastname <% p.last_name_key
AND ((firstname <% p.first_name_key AND word_similarity(firstname, p.first_name_key) >= .50) OR
(firstname <% p.middle_initial AND word_similarity(firstname, p.middle_initial) >= .50) OR
p.first_name_key IN (SELECT n.name
FROM nicknames n
WHERE n.name_id IN (SELECT n2.name_id
FROM nicknames n2
WHERE n2.name = firstname)));
RETURN v_ids;
END
;
$$;
"""
)
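# Hedged usage note: with alembic_utils, entities like this are normally
# handed to register_entities() in the project's Alembic env.py so that
# `alembic revision --autogenerate` can track changes to the function, e.g.:
#
#   from alembic_utils.replaceable_entity import register_entities
#   register_entities([match_individual_name])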
|
"""Module to deal with storing data files on disk."""
from __future__ import absolute_import
import os
def _root():
"""Returns the directory at which all other persisted files are rooted.
"""
default_root = os.path.expanduser('~/gutenberg_data')
return os.environ.get('GUTENBERG_DATA', default_root)
def local_path(path):
"""Returns a path that the caller may use to store local files.
"""
return os.path.join(_root(), path)
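# Hedged usage example (paths are illustrative only): with GUTENBERG_DATA
# unset, local_path('texts/pg1342.txt') resolves under the expanded
# ~/gutenberg_data directory; exporting GUTENBERG_DATA re-roots every path
# this module hands out.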
|
# -*- coding: utf-8 -*-
from split_settings.tools import include
# Includes all python files without scope:
include('*.py')
|
import os
import torch
import torchvision
from google_drive_downloader import GoogleDriveDownloader as gd
DIR = os.path.abspath(os.path.dirname(__file__))
def get_pretrained_model(model_type):
model = eval("{}".format(model_type))
return model().get_model()
class VGG16():
def __init__(self, pretrained=True,
root=os.path.join(DIR, '../data/PreTrainedModels/vgg16_from_caffe.pth')):
self.pretrained = pretrained
self.root = root
def get_model(self):
model = torchvision.models.vgg16(pretrained=False)
if not self.pretrained:
return model
self._fetch_vgg16_pretrained_model()
state_dict = torch.load(self.root)
model.load_state_dict(state_dict)
return model
def _fetch_vgg16_pretrained_model(self):
gd.download_file_from_google_drive(
file_id='0B9P1L--7Wd2vLTJZMXpIRkVVRFk',
dest_path=self.root,
unzip=False,
showsize=True)
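# Hedged usage sketch: VGG16 is the only model class defined in this module,
# so "VGG16" is the one model_type value known to resolve through the eval()
# call in get_pretrained_model(). The weights are fetched from Google Drive
# on first use, so the call is shown commented out:
#
#   model = get_pretrained_model("VGG16")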
|
from math import log2, floor,sqrt
from torch import nn, cat, add, Tensor
from torch.nn import init, Upsample, Conv2d, ReLU, AvgPool2d, BatchNorm2d
from torch.nn.functional import interpolate
class Residual_Block(nn.Module):
def __init__(self):
super(Residual_Block, self).__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, x):
identity_data = x
output = self.relu(self.in1(self.conv1(x)))
output = self.in2(self.conv2(output))
output = add(output, identity_data)
return output
class Net(nn.Module):
def __init__(self, scale_factor):
super(Net, self).__init__()
assert log2(scale_factor) % 1 == 0, "Scale factor must be power of 2!"
self.conv_input = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=9, padding=4, bias=False)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.residual = self.make_layer(Residual_Block, 16)
self.conv_mid = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1, bias=False)
self.bn_mid = nn.InstanceNorm2d(64, affine=True)
upscale_block = [
nn.Conv2d(in_channels=64, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.PixelShuffle(2),
nn.LeakyReLU(0.2, inplace=True)
]
upscale = [layer for _ in range(int(log2(scale_factor))) for layer in upscale_block]
self.upscale4x = nn.Sequential(*upscale)
self.conv_output = nn.Conv2d(in_channels=64, out_channels=3, kernel_size=9, padding=4, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
def make_layer(self, block, num_of_layer):
layers = []
for _ in range(num_of_layer):
layers.append(block())
return nn.Sequential(*layers)
def forward(self, x):
out = self.relu(self.conv_input(x))
residual = out
out = self.residual(out)
out = self.bn_mid(self.conv_mid(out))
out = add(out, residual)
out = self.upscale4x(out)
out = self.conv_output(out)
return out
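# --- Illustrative sketch, not part of the original model file ---
# Hedged sanity check of the upscaling path: every Conv2d -> PixelShuffle(2)
# stage doubles the spatial resolution and Net stacks log2(scale_factor) such
# stages, so a 16x16 input should come out 64x64 for scale_factor=4. Guarded
# so it never runs on import.
if __name__ == "__main__":
    from torch import randn
    net = Net(scale_factor=4)
    out = net(randn(1, 3, 16, 16))
    assert out.shape == (1, 3, 64, 64)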
|
# (c) Copyright [2016] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import uuid
from oslo_serialization import base64
from oslo_utils import importutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from hpedockerplugin import exception
from hpedockerplugin.i18n import _, _LE, _LI, _LW
hpe3parclient = importutils.try_import("hpe3parclient")
if hpe3parclient:
from hpe3parclient import client
from hpe3parclient import exceptions as hpeexceptions
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '4.0.0'
DEDUP_API_VERSION = 30201120
FLASH_CACHE_API_VERSION = 30201200
hpe3par_opts = [
cfg.StrOpt('hpe3par_api_url',
default='',
help="3PAR WSAPI Server Url like "
"https://<3par ip>:8080/api/v1",
deprecated_name='hp3par_api_url'),
cfg.StrOpt('hpe3par_username',
default='',
help="3PAR username with the 'edit' role",
deprecated_name='hp3par_username'),
cfg.StrOpt('hpe3par_password',
default='',
help="3PAR password for the user specified in hpe3par_username",
secret=True,
deprecated_name='hp3par_password'),
cfg.ListOpt('hpe3par_cpg',
default=["OpenStack"],
help="List of the CPG(s) to use for volume creation",
deprecated_name='hp3par_cpg'),
cfg.BoolOpt('hpe3par_debug',
default=False,
help="Enable HTTP debugging to 3PAR",
deprecated_name='hp3par_debug'),
cfg.ListOpt('hpe3par_iscsi_ips',
default=[],
help="List of target iSCSI addresses to use.",
deprecated_name='hp3par_iscsi_ips'),
cfg.BoolOpt('hpe3par_iscsi_chap_enabled',
default=False,
help="Enable CHAP authentication for iSCSI connections.",
deprecated_name='hp3par_iscsi_chap_enabled'),
cfg.BoolOpt('strict_ssh_host_key_policy',
default=False,
help='Option to enable strict host key checking. When '
'set to "True" the plugin will only connect to systems '
'with a host key present in the configured '
'"ssh_hosts_key_file". When set to "False" the host key '
'will be saved upon first connection and used for '
'subsequent connections. Default=False'),
cfg.StrOpt('ssh_hosts_key_file',
default='$state_path/ssh_known_hosts',
help='File containing SSH host keys for the systems with which '
'the plugin needs to communicate. OPTIONAL: '
'Default=$state_path/ssh_known_hosts'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(hpe3par_opts)
class HPE3PARCommon(object):
"""Class that contains common code for the 3PAR drivers.
Version history:
.. code-block:: none
0.0.1 - Initial version of 3PAR common created.
0.0.2 - Added the ability to choose volume provisionings.
0.0.3 - Added support for flash cache.
"""
VERSION = "0.0.3"
# TODO(Ramy): move these to the 3PAR Client
VLUN_TYPE_EMPTY = 1
VLUN_TYPE_PORT = 2
VLUN_TYPE_HOST = 3
VLUN_TYPE_MATCHED_SET = 4
VLUN_TYPE_HOST_SET = 5
THIN = 2
DEDUP = 6
CONVERT_TO_THIN = 1
CONVERT_TO_FULL = 2
CONVERT_TO_DEDUP = 3
# Valid values for volume type extra specs
# The first value in the list is the default value
valid_prov_values = ['thin', 'full', 'dedup']
valid_persona_values = ['2 - Generic-ALUA',
'1 - Generic',
'3 - Generic-legacy',
'4 - HPUX-legacy',
'5 - AIX-legacy',
'6 - EGENERA',
'7 - ONTAP-legacy',
'8 - VMware',
'9 - OpenVMS',
'10 - HPUX',
'11 - WindowsServer']
hpe_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency',
'priority']
qos_priority_level = {'low': 1, 'normal': 2, 'high': 3}
hpe3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs',
'flash_cache']
def __init__(self, config):
self.config = config
self.client = None
self.uuid = uuid.uuid4()
def get_version(self):
return self.VERSION
def check_flags(self, options, required_flags):
for flag in required_flags:
if not getattr(options, flag, None):
msg = _('%s is not set') % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _create_client(self):
cl = client.HPE3ParClient(
self.config.hpe3par_api_url,
suppress_ssl_warnings=CONF.suppress_requests_ssl_warnings)
client_version = hpe3parclient.version
if client_version < MIN_CLIENT_VERSION:
ex_msg = (_('Invalid hpe3parclient version found (%(found)s). '
'Version %(minimum)s or greater required. Run "pip'
' install --upgrade python-3parclient" to upgrade'
' the hpe3parclient.')
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
return cl
def client_login(self):
try:
LOG.debug("Connecting to 3PAR")
self.client.login(self.config.hpe3par_username,
self.config.hpe3par_password)
except hpeexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
{'url': self.config.hpe3par_api_url, 'err': ex})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
known_hosts_file = CONF.ssh_hosts_key_file
policy = "AutoAddPolicy"
if CONF.strict_ssh_host_key_policy:
policy = "RejectPolicy"
self.client.setSSHOptions(
self.config.san_ip,
self.config.san_login,
self.config.san_password,
port=self.config.san_ssh_port,
conn_timeout=self.config.ssh_conn_timeout,
privatekey=self.config.san_private_key,
missing_key_policy=policy,
known_hosts_file=known_hosts_file)
def client_logout(self):
LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid)
self.client.logout()
def do_setup(self):
if hpe3parclient is None:
msg = _('You must install hpe3parclient before using 3PAR'
' drivers. Run "pip install python-3parclient" to'
' install the hpe3parclient.')
raise exception.VolumeBackendAPIException(data=msg)
try:
self.client = self._create_client()
wsapi_version = self.client.getWsApiVersion()
self.API_VERSION = wsapi_version['build']
except hpeexceptions.UnsupportedVersion as ex:
raise exception.InvalidInput(ex)
if self.config.hpe3par_debug:
self.client.debug_rest(True)
def check_for_setup_error(self):
LOG.info(_LI("HPE3PARCommon %(common_ver)s,"
"hpe3parclient %(rest_ver)s"),
{"common_ver": self.VERSION,
"rest_ver": hpe3parclient.get_version_string()})
self.client_login()
try:
cpg_names = self.config.hpe3par_cpg
for cpg_name in cpg_names:
self.validate_cpg(cpg_name)
finally:
self.client_logout()
def validate_cpg(self, cpg_name):
try:
self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
def get_domain(self, cpg_name):
try:
cpg = self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("Failed to get domain because CPG (%s) doesn't "
"exist on array.") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
if 'domain' in cpg:
return cpg['domain']
return None
def _get_3par_vol_name(self, volume_id):
"""Get converted 3PAR volume name.
Converts the openstack volume id from
ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
to
dcv-7P.DD5jLTPWF7tcwnMF80g
We convert the 128 bits of the uuid into a 24 character long
base64 encoded string to ensure we don't exceed the maximum
allowed 31 character name limit on 3Par
We strip the padding '=' and replace + with .
and / with -
"""
volume_name = self._encode_name(volume_id)
return "dcv-%s" % volume_name
def _get_3par_vvs_name(self, volume_id):
vvs_name = self._encode_name(volume_id)
return "vvs-%s" % vvs_name
def _encode_name(self, name):
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
vol_encoded = base64.encode_as_text(vol_uuid.bytes)
# 3par doesn't allow +, nor /
vol_encoded = vol_encoded.replace('+', '.')
vol_encoded = vol_encoded.replace('/', '-')
# strip off the == as 3par doesn't like those.
vol_encoded = vol_encoded.replace('=', '')
return vol_encoded
def _capacity_from_size(self, vol_size):
# because 3PAR volume sizes are in Mebibytes.
if int(vol_size) == 0:
capacity = units.Gi # default: 1GiB
else:
capacity = vol_size * units.Gi
capacity = int(math.ceil(capacity / units.Mi))
return capacity
def _delete_3par_host(self, hostname):
self.client.deleteHost(hostname)
def _create_3par_vlun(self, volume, hostname, nsp):
try:
location = None
if nsp is None:
location = self.client.createVLUN(volume, hostname=hostname,
auto=True)
else:
port = self.build_portPos(nsp)
location = self.client.createVLUN(volume, hostname=hostname,
auto=True, portPos=port)
vlun_info = None
if location:
# The LUN id is returned as part of the location URI
vlun = location.split(',')
vlun_info = {'volume_name': vlun[0],
'lun_id': int(vlun[1]),
'host_name': vlun[2],
}
if len(vlun) > 3:
vlun_info['nsp'] = vlun[3]
return vlun_info
except hpeexceptions.HTTPBadRequest as e:
if 'must be in the same domain' in e.get_description():
LOG.error(e.get_description())
raise exception.Invalid3PARDomain(err=e.get_description())
def _safe_hostname(self, hostname):
"""We have to use a safe hostname length for 3PAR host names."""
try:
index = hostname.index('.')
except ValueError:
# couldn't find it
index = len(hostname)
# we'll just chop this off for now.
if index > 31:
index = 31
return hostname[:index]
def _get_3par_host(self, hostname):
return self.client.getHost(hostname)
def get_ports(self):
return self.client.getPorts()
def get_active_target_ports(self):
ports = self.get_ports()
target_ports = []
for port in ports['members']:
if (
port['mode'] == self.client.PORT_MODE_TARGET and
port['linkState'] == self.client.PORT_STATE_READY
):
port['nsp'] = self.build_nsp(port['portPos'])
target_ports.append(port)
return target_ports
def get_active_iscsi_target_ports(self):
ports = self.get_active_target_ports()
iscsi_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_ISCSI:
iscsi_ports.append(port)
return iscsi_ports
def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None):
"""find a VLUN on a 3PAR host."""
vluns = self.client.getHostVLUNs(hostname)
found_vlun = None
for vlun in vluns:
if volume_name in vlun['volumeName']:
if lun_id is not None:
if vlun['lun'] == lun_id:
if nsp:
port = self.build_portPos(nsp)
if vlun['portPos'] == port:
found_vlun = vlun
break
else:
found_vlun = vlun
break
else:
found_vlun = vlun
break
if found_vlun is None:
LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"),
{'name': volume_name, 'host': hostname})
return found_vlun
def create_vlun(self, volume, host, nsp=None):
"""Create a VLUN.
In order to export a volume on a 3PAR box, we have to create a VLUN.
"""
volume_name = self._get_3par_vol_name(volume['id'])
vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp)
return self._get_vlun(volume_name,
host['name'],
vlun_info['lun_id'],
nsp)
def delete_vlun(self, volume, hostname):
volume_name = self._get_3par_vol_name(volume['id'])
vluns = self.client.getHostVLUNs(hostname)
# Find all the VLUNs associated with the volume. The VLUNs will then
# be split into groups based on the active status of the VLUN. If there
# are active VLUNs detected a delete will be attempted on them. If
# there are no active VLUNs but there are inactive VLUNs, then the
# inactive VLUNs will be deleted. The inactive VLUNs are the templates
# on the 3PAR backend.
active_volume_vluns = []
inactive_volume_vluns = []
volume_vluns = []
for vlun in vluns:
if volume_name in vlun['volumeName']:
if vlun['active']:
active_volume_vluns.append(vlun)
else:
inactive_volume_vluns.append(vlun)
if active_volume_vluns:
volume_vluns = active_volume_vluns
elif inactive_volume_vluns:
volume_vluns = inactive_volume_vluns
if not volume_vluns:
msg = (
_LW("3PAR vlun for volume %(name)s not found on "
"host %(host)s"), {'name': volume_name, 'host': hostname})
LOG.warning(msg)
return
# VLUN Type of MATCHED_SET 4 requires the port to be provided
removed_luns = []
for vlun in volume_vluns:
if self.VLUN_TYPE_MATCHED_SET == vlun['type']:
self.client.deleteVLUN(volume_name, vlun['lun'], hostname,
vlun['portPos'])
else:
# This is HOST_SEES or a type that is not MATCHED_SET.
# By deleting one VLUN, all the others should be deleted, too.
if vlun['lun'] not in removed_luns:
self.client.deleteVLUN(volume_name, vlun['lun'], hostname)
removed_luns.append(vlun['lun'])
# Determine if there are other volumes attached to the host.
# This will determine whether we should try removing host from host set
# and deleting the host.
vluns = []
try:
vluns = self.client.getHostVLUNs(hostname)
except hpeexceptions.HTTPNotFound:
LOG.debug("All VLUNs removed from host %s", hostname)
pass
for vlun in vluns:
if volume_name not in vlun['volumeName']:
# Found another volume
break
else:
# We deleted the last vlun, so try to delete the host too.
# This check avoids the old unnecessary try/fail when vluns exist
# but adds a minor race condition if a vlun is manually deleted
# externally at precisely the wrong time. Worst case is leftover
# host, so it is worth the unlikely risk.
try:
self._delete_3par_host(hostname)
except Exception as ex:
# Any exception down here is only logged. The vlun is deleted.
# If the host is in a host set, the delete host will fail and
# the host will remain in the host set. This is desired
# because docker was not responsible for the host set
# assignment. The host set could be used outside of docker
# for future needs (e.g. export volume to host set).
# The log info explains why the host was left alone.
LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, "
"but the host '%(host)s' was not deleted "
"because: %(reason)s"),
{'name': volume_name, 'host': hostname,
'reason': ex.get_description()})
def _get_key_value(self, hpe3par_keys, key, default=None):
if hpe3par_keys is not None and key in hpe3par_keys:
return hpe3par_keys[key]
else:
return default
def _get_keys_by_volume_type(self, volume_type):
hpe3par_keys = {}
specs = volume_type.get('extra_specs')
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.hpe3par_valid_keys:
hpe3par_keys[key] = value
return hpe3par_keys
def get_cpg(self, volume, allowSnap=False):
volume_name = self._get_3par_vol_name(volume['id'])
vol = self.client.getVolume(volume_name)
if 'userCPG' in vol:
return vol['userCPG']
elif allowSnap:
return vol['snapCPG']
return None
def _get_3par_vol_comment(self, volume_name):
vol = self.client.getVolume(volume_name)
if 'comment' in vol:
return vol['comment']
return None
def create_volume(self, volume):
LOG.debug('CREATE VOLUME (%(disp_name)s: %(vol_name)s %(id)s on '
'%(host)s)',
{'disp_name': volume['display_name'],
'vol_name': volume['name'],
'id': self._get_3par_vol_name(volume['id']),
'host': volume['host']})
try:
comments = {'volume_id': volume['id'],
'name': volume['name'],
'type': 'Docker'}
name = volume.get('display_name', None)
if name:
comments['display_name'] = name
# TODO(leeantho): Choose the first CPG for now. In the future
# support selecting different CPGs if multiple are provided.
cpg = self.config.hpe3par_cpg[0]
# check for valid provisioning type
prov_value = volume['provisioning']
if prov_value not in self.valid_prov_values:
err = (_("Must specify a valid provisioning type %(valid)s, "
"value '%(prov)s' is invalid.") %
{'valid': self.valid_prov_values,
'prov': prov_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
tpvv = True
tdvv = False
if prov_value == "full":
tpvv = False
elif prov_value == "dedup":
tpvv = False
tdvv = True
if tdvv and (self.API_VERSION < DEDUP_API_VERSION):
err = (_("Dedup is a valid provisioning type, "
"but requires WSAPI version '%(dedup_version)s' "
"version '%(version)s' is installed.") %
{'dedup_version': DEDUP_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
extras = {'comment': json.dumps(comments),
'tpvv': tpvv, }
# Only set the dedup option if the backend supports it.
if self.API_VERSION >= DEDUP_API_VERSION:
extras['tdvv'] = tdvv
capacity = self._capacity_from_size(volume['size'])
volume_name = self._get_3par_vol_name(volume['id'])
self.client.createVolume(volume_name, cpg, capacity, extras)
# Check if flash cache needs to be enabled
flash_cache = self.get_flash_cache_policy(volume['flash_cache'])
if flash_cache is not None:
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg, flash_cache)
except exception.InvalidInput as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(_LE("Exception: %s"), ex)
raise exception.PluginException(ex)
except hpeexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpeexceptions.HTTPBadRequest as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except exception.PluginException as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.PluginException(ex)
def delete_volume(self, volume):
try:
volume_name = self._get_3par_vol_name(volume['id'])
# Try and delete the volume, it might fail here because
# the volume is part of a volume set which will have the
# volume set name in the error.
try:
self.client.deleteVolume(volume_name)
except hpeexceptions.HTTPBadRequest as ex:
if ex.get_code() == 29:
if self.client.isOnlinePhysicalCopy(volume_name):
LOG.debug("Found an online copy for %(volume)s",
{'volume': volume_name})
# the volume is in process of being cloned.
# stopOnlinePhysicalCopy will also delete
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
LOG.error(_LE("Exception: %s"), ex)
raise
else:
LOG.error(_LE("Exception: %s"), ex)
raise
except hpeexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
# volume is part of a volume set.
vvset_name = self.client.findVolumeSet(volume_name)
LOG.debug("Returned vvset_name = %s", vvset_name)
if vvset_name is not None and \
vvset_name.startswith('vvs-'):
# We have a single volume per volume set, so
# remove the volume set.
self.client.deleteVolumeSet(
self._get_3par_vvs_name(volume['id']))
elif vvset_name is not None:
# We have a pre-defined volume set just remove the
# volume and leave the volume set.
self.client.removeVolumeFromVolumeSet(vvset_name,
volume_name)
self.client.deleteVolume(volume_name)
elif (ex.get_code() == 151 or ex.get_code() == 32):
# the volume is being operated on in a background
# task on the 3PAR.
# TODO(walter-boring) do a retry a few times.
# for now lets log a better message
msg = _("The volume is currently busy on the 3PAR"
" and cannot be deleted at this time. "
"You can try again later.")
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
else:
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except hpeexceptions.HTTPNotFound as ex:
LOG.warning(_LW("Delete volume id not found. Ex: %(msg)s"),
{'msg': ex})
except hpeexceptions.HTTPForbidden as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized(ex.get_description())
except hpeexceptions.HTTPConflict as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.PluginException(ex)
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
if wwns is not None and not isinstance(wwns, list):
wwns = [wwns]
if iqns is not None and not isinstance(iqns, list):
iqns = [iqns]
out = self.client.getHosts()
hosts = out['members']
for host in hosts:
if 'iSCSIPaths' in host and iqns is not None:
iscsi_paths = host['iSCSIPaths']
for iscsi in iscsi_paths:
for iqn in iqns:
if iqn == iscsi['name']:
return host['name']
if 'FCPaths' in host and wwns is not None:
fc_paths = host['FCPaths']
for fc in fc_paths:
for wwn in wwns:
if wwn == fc['wwn']:
return host['name']
def terminate_connection(self, volume, hostname, wwn=None, iqn=None):
"""Driver entry point to unattach a volume from an instance."""
# does 3par know this host by a different name?
hosts = None
if wwn:
hosts = self.client.queryHost(wwns=wwn)
elif iqn:
hosts = self.client.queryHost(iqns=[iqn])
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
hostname = hosts['members'][0]['name']
try:
self.delete_vlun(volume, hostname)
return
except hpeexceptions.HTTPNotFound as e:
if 'host does not exist' in e.get_description():
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
# no 3par host, re-throw
if hostname is None:
LOG.error(_LE("Exception: %s"), e)
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
LOG.error(_LE("Exception: %s"), e)
raise
# try again with name retrieved from 3par
self.delete_vlun(volume, hostname)
def build_nsp(self, portPos):
return '%s:%s:%s' % (portPos['node'],
portPos['slot'],
portPos['cardPort'])
def build_portPos(self, nsp):
split = nsp.split(":")
portPos = {}
portPos['node'] = int(split[0])
portPos['slot'] = int(split[1])
portPos['cardPort'] = int(split[2])
return portPos
def find_existing_vlun(self, volume, host):
"""Finds an existing VLUN for a volume on a host.
Returns an existing VLUN's information. If no existing VLUN is found,
None is returned.
:param volume: A dictionary describing a volume.
:param host: A dictionary describing a host.
"""
existing_vlun = None
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
# The first existing VLUN found will be returned.
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vlun = vlun
break
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
pass
return existing_vlun
def find_existing_vluns(self, volume, host):
existing_vluns = []
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vluns.append(vlun)
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
pass
return existing_vluns
def get_flash_cache_policy(self, flash_cache):
if flash_cache is not None:
# If requested, see if supported on back end
if self.API_VERSION < FLASH_CACHE_API_VERSION:
err = (_("Flash Cache Policy requires "
"WSAPI version '%(fcache_version)s' "
"version '%(version)s' is installed.") %
{'fcache_version': FLASH_CACHE_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
else:
if flash_cache.lower() == 'true':
return self.client.FLASH_CACHE_ENABLED
else:
return self.client.FLASH_CACHE_DISABLED
return None
def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name):
# Update virtual volume set
if flash_cache:
try:
self.client.modifyVolumeSet(vvs_name,
flashCachePolicy=flash_cache)
LOG.info(_LI("Flash Cache policy set to %s"), flash_cache)
except Exception as ex:
LOG.error(_LE("Error setting Flash Cache policy "
"to %s - exception"), flash_cache)
raise exception.PluginException(ex)
def _add_volume_to_volume_set(self, volume, volume_name,
cpg, flash_cache):
vvs_name = self._get_3par_vvs_name(volume['id'])
domain = self.get_domain(cpg)
self.client.createVolumeSet(vvs_name, domain)
try:
self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name)
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
except Exception as ex:
# Cleanup the volume set if unable to create the qos rule
# or flash cache policy or add the volume to the volume set
self.client.deleteVolumeSet(vvs_name)
raise exception.PluginException(ex)
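# --- Illustrative sketch, not part of the original driver ---
# A hedged, standalone version of the volume-name encoding documented in
# HPE3PARCommon._get_3par_vol_name()/_encode_name() above: the 128-bit UUID
# is base64 encoded, '=' padding is stripped, '+' becomes '.' and '/' becomes
# '-', and the result is prefixed with 'dcv-' so the name stays inside the
# 31 character limit on the 3PAR. The function name is hypothetical.
def _example_encode_3par_volume_name(volume_id):
    vol_uuid = uuid.UUID('urn:uuid:%s' % volume_id.replace("-", ""))
    encoded = base64.encode_as_text(vol_uuid.bytes)
    encoded = encoded.replace('+', '.').replace('/', '-').replace('=', '')
    return "dcv-%s" % encoded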
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
"""Generic resolver functions."""
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import six
from module_build_service.common.config import conf, SUPPORTED_RESOLVERS
class GenericResolver(six.with_metaclass(ABCMeta)):
"""
External Api for resolvers
"""
_resolvers = SUPPORTED_RESOLVERS
# Resolver name. Each subclass of GenericResolver must set its own name.
backend = "generic"
# Supported resolver backends registry. Generally, resolver backend is
# registered by calling :meth:`GenericResolver.register_backend_class`.
# This is a mapping from resolver name to backend class object
# For example, {'mbs': MBSResolver}
backends = {}
@classmethod
def register_backend_class(cls, backend_class):
GenericResolver.backends[backend_class.backend] = backend_class
@classmethod
def create(cls, db_session, config, backend=None, **extra):
"""Factory method to create a resolver object
:param config: MBS config object.
:type config: :class:`Config`
:kwarg str backend: resolver backend name, e.g. 'db'. If omitted,
system configuration ``resolver`` is used.
:kwarg extra: any additional arguments are optional extras which can
be passed along and are implementation-dependent.
:return: resolver backend object.
:rtype: corresponding registered resolver class.
:raises ValueError: if specified resolver backend name is not
registered.
"""
# get the appropriate resolver backend via configuration
if not backend:
backend = conf.resolver
if backend in GenericResolver.backends:
return GenericResolver.backends[backend](db_session, config, **extra)
else:
raise ValueError("Resolver backend='%s' not recognized" % backend)
@classmethod
def supported_builders(cls):
if cls is GenericResolver:
return {k: v["builders"] for k, v in cls._resolvers.items()}
else:
try:
return cls._resolvers[cls.backend]["builders"]
except KeyError:
raise RuntimeError(
"No configuration of builder backends found for resolver {}".format(cls))
@classmethod
def is_builder_compatible(cls, builder):
"""
Check whether the given builder backend is supported by this resolver.
:param builder: a string representing a builder backend, e.g. 'koji'
"""
if cls is not GenericResolver:
return builder in cls.supported_builders()
return False
@abstractmethod
def get_module(self, name, stream, version, context, state="ready", strict=False):
raise NotImplementedError()
@abstractmethod
def get_module_count(self, **kwargs):
raise NotImplementedError()
@abstractmethod
def get_latest_with_virtual_stream(self, name, virtual_stream):
raise NotImplementedError()
@abstractmethod
def get_module_modulemds(self, name, stream, version=None, context=None, strict=False):
raise NotImplementedError()
@abstractmethod
def get_compatible_base_module_modulemds(
self, base_module_mmd, stream_version_lte, virtual_streams, states
):
raise NotImplementedError()
@abstractmethod
def get_buildrequired_modulemds(self, name, stream, base_module_mmd, strict=False):
raise NotImplementedError()
@abstractmethod
def resolve_profiles(self, mmd, keys):
raise NotImplementedError()
@abstractmethod
def get_module_build_dependencies(
self, name=None, stream=None, version=None, mmd=None, context=None, strict=False
):
raise NotImplementedError()
@abstractmethod
def resolve_requires(self, requires):
raise NotImplementedError()
@abstractmethod
def get_modulemd_by_koji_tag(self, tag):
"""Get module metadata by module's koji_tag
:param str tag: name of module's koji_tag.
:return: module metadata
:rtype: Modulemd.Module
"""
raise NotImplementedError()
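# Hedged usage sketch (the backend name below is made up): a concrete
# resolver subclasses GenericResolver, sets `backend`, implements the
# abstract methods above, and is then registered and obtained through the
# factory:
#
#   class ExampleResolver(GenericResolver):
#       backend = "example"
#       ...  # implement the abstract methods
#
#   GenericResolver.register_backend_class(ExampleResolver)
#   resolver = GenericResolver.create(db_session, conf, backend="example")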
|
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
from .. import LabelType
class TestLabelType:
def test_init(self):
assert LabelType.CLASSIFICATION == LabelType("classification")
assert LabelType.BOX2D == LabelType("box2d")
assert LabelType.BOX3D == LabelType("box3d")
assert LabelType.POLYGON2D == LabelType("polygon2d")
assert LabelType.POLYLINE2D == LabelType("polyline2d")
assert LabelType.KEYPOINTS2D == LabelType("keypoints2d")
assert LabelType.SENTENCE == LabelType("sentence")
|
from pymsbuild import *
from pymsbuild.dllpack import *
PACKAGE = DllPackage(
"testdllpack",
PyFile("__init__.py"),
PyFile("mod1.py"),
File("data.txt"),
Package("sub",
PyFile("__init__.py"),
PyFile("mod2.py"),
File("data.txt"),
),
CSourceFile("extra.c"),
CFunction("myfunc"),
# Included as content, not code
File("test-dllpack.py"),
)
DIST_INFO = {
"Name": "testdllpack",
"Version": "1.0.0",
}
|
"""
Tradfri wrapper for Onaeri API
https://github.com/Lakitna/Onaeri-tradfri
"""
__version__ = '0.8.0'
import sys
import os
import traceback
import atexit
from time import sleep, strftime
from Onaeri.logger import log
from Onaeri import Onaeri, settings, __version__ as onaeriVersion
import control
import lampdata
onaeri = Onaeri(lampdata.poll())
restartTime = onaeri.time.code((3, 0), dry=True)
updateCounter = 0
log()
log.row()
log("RUNTIME STARTED")
log("Onaeri v%s" % (onaeriVersion))
log("Onaeri Tradfri v%s" % __version__)
log.row()
log()
def summaryBuild():
def colorSuccessRate(val):
if val < 80:
return "%s #superLow" % val
if val < 90:
return "%s #low" % val
if val < 95:
return "%s #ok" % val
if val > 98:
return "%s #awesome" % val
if val >= 95:
return "%s #good" % val
return val
version = {}
import Onaeri
version['Onaeri API'] = Onaeri.__version__
version['Onaeri Tradfri'] = __version__
time = {}
time['timecodes'] = onaeri.time.runtime
time['minutes'] = round(onaeri.time.runtime
* settings.Global.minPerTimeCode, 2)
time['hours'] = round((onaeri.time.runtime
* settings.Global.minPerTimeCode) / 60, 2)
observer = lampdata.metrics
observer["success rate"] = round((observer['success']
/ observer['total']) * 100, 2)
observer['success rate'] = colorSuccessRate(observer['success rate'])
ctrl = control.metrics
try:
ctrl['success rate'] = round(((ctrl['total'] - ctrl['timeout'])
/ ctrl['total']) * 100, 2)
ctrl['success rate'] = colorSuccessRate(ctrl['success rate'])
except ZeroDivisionError:
ctrl['success rate'] = None
log.summary({
'Versions': version,
'Program runtime': time,
'Observer calls': observer,
'Lamp changes made': ctrl,
'Updates handled': updateCounter,
'Cycles handled': [cycle.name for cycle in onaeri.cycles],
})
atexit.register(summaryBuild)
def restart():
"""
Restart entire program if the time is right
"""
if onaeri.time.latestCode == restartTime and onaeri.time.runtime > 0:
summaryBuild()
os.execl(sys.executable, sys.executable, *sys.argv)
def heartbeat(state=True):
"""
Display network heartbeat
"""
if state:
print("\033[1;31m♥\033[0;0m\b", end="", flush=True)
return
else:
print(" \b", end="", flush=True)
return
while True:
try:
heartbeat(True)
lampData = lampdata.poll()
heartbeat(False)
# Progress all cycles and pass the current state of all lamps
onaeri.tick(lampData)
if onaeri.update:
updateCounter += 1
print("[%s]:" % (strftime("%H:%M:%S")))
for cycle in onaeri.cycles:
for id in cycle.lamp:
if not cycle.lamp[id].isEmpty(['brightness',
'color',
'power']):
print("\t%s: %s" % (cycle.name, cycle.lamp[id]))
heartbeat(True)
control.color(onaeri)
control.brightness(onaeri)
control.power(onaeri)
heartbeat(False)
restart()
# Slow down a bit, no stress brah
sleep(settings.Global.mainLoopDelay)
except KeyboardInterrupt:
log()
log("Keyboard interrupt. Exiting.")
exit()
except Exception:
log()
log("An error occurred:")
log(traceback.format_exc())
exit()
|
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.parsers import FormParser, MultiPartParser, JSONParser, FileUploadParser
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from blog.mixins import SerializerRequestSwitchMixin
from blog.models import BlogPost
from blog.serializers.blog_serializers import CreateBlogSerializer, DetailedBlogSerializer, ShowBlogSerializer
class BlogViewSet(SerializerRequestSwitchMixin, ModelViewSet):
"""
ViewSet supporting all CRUD operations for blog posts.
"""
queryset = BlogPost.objects.all()
serializers = {
'show': ShowBlogSerializer,
'create': CreateBlogSerializer,
'update': CreateBlogSerializer,
'detailed': DetailedBlogSerializer,
}
permission_classes = (IsAuthenticatedOrReadOnly,)
parser_classes = [JSONParser, FormParser, MultiPartParser]
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
search_fields = ('topic', 'title')
filterset_fields = ['author__username', ]
ordering_fields = ''
ordering = '-id'
def create(self, request, *args, **kwargs):
user = request.user
request.data['author'] = user.pk
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
user = request.user
request.data['author'] = user.pk
return super().update(request, *args, **kwargs)
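# A minimal routing sketch for this viewset. The urls module, route prefix,
# and basename below are assumptions for illustration, not part of this file:
#
#   from rest_framework.routers import DefaultRouter
#
#   router = DefaultRouter()
#   router.register(r'posts', BlogViewSet, basename='blogpost')
#   urlpatterns = router.urls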
|
from django.conf.urls import url
from valentina.app.views import (welcome, chat, profile, facebook, report,
create_affiliation, list_affiliations,
chat_preferences, logout)
urlpatterns = [
url(r'^$', welcome, name='welcome'),
url(r'^chat/preferences/$', chat_preferences, name='preferences'),
url(r'^chat/(?P<hash_id>[\d\w]+)/$', chat, name='chat'),
url(r'^profile/$', profile, name='profile'),
url(r'^facebook/$', facebook, name='facebook'),
url(r'^join/$', create_affiliation, name='affiliation'),
url(r'^chats/$', list_affiliations, name='affiliations'),
url(r'^report/$', report, name='report'),
url(r'^logout/$', logout, name='logout'),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from pyisaf.validators import (
decimal,
max_int_digits,
max_length,
min_length,
non_negative,
)
class TestValidators(unittest.TestCase):
def test_decimal_total_digits_3_with_four_digits_returns_False(self):
v = decimal(3, 2)
self.assertFalse(v('12.34'))
def test_decimal_total_digits_3_with_3_digits_returns_True(self):
v = decimal(3, 2)
self.assertTrue(v('1.23'))
def test_decimal_total_digits_3_with_3_digits_and_negative_returns_True(
self):
v = decimal(3, 2)
self.assertTrue(v('-1.23'))
def test_max_int_digits_3_with_four_digits_returns_False(self):
v = max_int_digits(3)
self.assertFalse(v(1234))
def test_max_int_digits_3_with_3_digits_returns_True(self):
v = max_int_digits(3)
self.assertTrue(v(123))
def test_max_int_digits_3_with_3_digits_and_negative_returns_True(self):
v = max_int_digits(3)
self.assertTrue(v(-123))
def test_total_digits_with_float_raises_TypeError(self):
v = max_int_digits(3)
with self.assertRaises(TypeError):
v(1.23)
def test_max_length_3_with_length_of_2_returns_True(self):
v = max_length(3)
self.assertTrue(v('ab'))
def test_max_length_3_with_length_of_3_returns_True(self):
v = max_length(3)
self.assertTrue(v('abc'))
def test_max_length_3_with_length_of_4_returns_False(self):
v = max_length(3)
self.assertFalse(v('abcd'))
def test_min_length_2_with_length_of_2_returns_True(self):
v = min_length(2)
self.assertTrue(v('ab'))
def test_min_length_2_with_length_of_3_returns_True(self):
v = min_length(2)
self.assertTrue(v('abc'))
def test_min_length_2_with_length_of_1_returns_False(self):
v = min_length(2)
self.assertFalse(v('a'))
def test_non_negative_with_1_returns_True(self):
self.assertTrue(non_negative(1))
def test_non_negative_with_0_returns_True(self):
self.assertTrue(non_negative(0))
    def test_non_negative_with_minus_1_returns_False(self):
self.assertFalse(non_negative(-1))
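# The tests above pin down the expected behaviour of the imported validators.
# As a rough sketch only (not pyisaf's actual implementation), callables of
# this shape would satisfy the length and sign checks:
#
#   def max_length(n):
#       return lambda value: len(value) <= n
#
#   def min_length(n):
#       return lambda value: len(value) >= n
#
#   def non_negative(value):
#       return value >= 0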
|
import os
import cv2
import numpy as np
import tensorflow as tf
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
gpus = tf.config.list_physical_devices('GPU')
print("Devices: ", gpus)
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, False)
img = cv2.imread("imgs/test-img.jpeg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (480, 480)).transpose(2, 0, 1)
imgs = np.expand_dims(img, axis=0)
with tf.device("/gpu:0"):
imported = tf.saved_model.load("weights/test2.pb")
inference_func = imported.signatures["serving_default"]
imgs = tf.convert_to_tensor(imgs, dtype=tf.float32)
for i in range(100):
start_time = time.time()
inference_func(input=imgs)
print(time.time() - start_time)
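# A rough latency summary (added sketch): the first calls are treated as
# warm-up, since they can include graph tracing and allocator setup, and are
# excluded from the reported mean. Reuses `inference_func` and `imgs` from above.
warmup_runs, timed_runs = 5, 50
for _ in range(warmup_runs):
    inference_func(input=imgs)
latencies = []
for _ in range(timed_runs):
    t0 = time.time()
    inference_func(input=imgs)
    latencies.append(time.time() - t0)
print("mean latency over %d runs: %.4f s"
      % (timed_runs, sum(latencies) / len(latencies)))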
|
import platform
from pathlib import Path
import numpy as np
import torch
from spconv.pytorch import ops
from spconv.pytorch.conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d,
SparseConvTranspose3d, SparseInverseConv2d,
SparseInverseConv3d, SubMConv2d, SubMConv3d)
from spconv.pytorch.core import SparseConvTensor
from spconv.pytorch.identity import Identity
from spconv.pytorch.modules import SparseModule, SparseSequential
from spconv.pytorch.ops import ConvAlgo
from spconv.pytorch.pool import SparseMaxPool2d, SparseMaxPool3d
from spconv.pytorch.tables import AddTable, ConcatTable, JoinTable
class ToDense(SparseModule):
"""convert SparseConvTensor to NCHW dense tensor.
"""
def forward(self, x: SparseConvTensor):
return x.dense()
class RemoveGrid(SparseModule):
"""remove pre-allocated grid buffer.
"""
def forward(self, x: SparseConvTensor):
x.grid = None
return x
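# Usage sketch (illustrative shapes and values, assuming spconv 2.x's
# SparseConvTensor(features, indices, spatial_shape, batch_size) signature):
#
#   feats = torch.randn(4, 16)                     # [N, C] per-point features
#   coords = torch.tensor([[0, 0, 0, 0],
#                          [0, 1, 2, 3],
#                          [0, 4, 5, 6],
#                          [0, 7, 7, 7]], dtype=torch.int32)  # [N, (batch, z, y, x)]
#   x = SparseConvTensor(feats, coords, spatial_shape=[8, 8, 8], batch_size=1)
#   dense = ToDense()(x)                           # channels-first dense tensor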
|
def quick_sort(arr):
if len(arr) < 2:
return arr
pivot = arr.pop()
left = []
right = []
for num in arr:
if num > pivot:
right.append(num)
else:
left.append(num)
return quick_sort(left) + [pivot] + quick_sort(right)
arr = [1,2,2,1,2,1,5,23,1,91,2,4,1,]
print(quick_sort(arr))
|
from .build import Build
from .comps import Comps
from .variants import Variants
__all__ = ['Build', 'Comps', 'Variants']
__version__ = '1.4.1'
|
from .canvas import Canvas3D
from .mcanvas import MCanvas3D
from .widget import Canvas3DFrame, Canvas3DNoteBook, Canvas3DNoteFrame
from .surface import Surface, MarkText, MeshSet
from .geoutil import *
|
#!/usr/bin/env python3
import sys
import ipaddress
import subprocess
from settings import default_tab_color
from utils import get_ip_by_host, color_text, parse_ssh_command
try:
import iterm2
from server_config import get_server_list
except (ModuleNotFoundError, ImportError) as e:
color_text.error(e)
exit()
def ip_match_rule(ip_addr, server_host):
    """
    Check whether the target ip matches the server_host rule.
:param ip_addr: target ip address
:param server_host: server_config host rule
:return:
"""
if '/' in server_host:
ip_set = ipaddress.IPv4Network(server_host, strict=False)
for ip in ip_set.hosts():
if ip_addr == ip.compressed:
return True
return False
else:
return ip_addr == server_host
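# Examples of the matching rule (illustrative addresses):
#   ip_match_rule('10.0.0.7', '10.0.0.0/24')  -> True   (CIDR match)
#   ip_match_rule('10.0.1.7', '10.0.0.0/24')  -> False
#   ip_match_rule('10.0.0.7', '10.0.0.7')     -> True   (exact match)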
def get_host_config(host: str) -> tuple:
"""
:param host: domain or ip or ssh_config
:return: tuple(host_name, iterm2_color)
"""
ip_addr = get_ip_by_host(host)
for server in get_server_list():
        if ip_match_rule(ip_addr, server.host):
return server.name, server.color
return host, default_tab_color
async def main(connection):
app = await iterm2.async_get_app(connection)
session = app.current_terminal_window.current_tab.current_session
change = iterm2.LocalWriteOnlyProfile()
command = 'ssh ' + ' '.join(sys.argv[1:])
host = parse_ssh_command(full_command=command)
alias, color = get_host_config(host)
# set config
change.set_badge_text(alias)
change.set_tab_color(color)
change.set_use_tab_color(True)
change.set_badge_color(color)
# apply new config for iterm2 and ssh to server
await session.async_set_profile_properties(change)
subprocess.call(command.split())
# revert config
change.set_badge_text('')
change.set_use_tab_color(False)
await session.async_set_profile_properties(change)
if __name__ == '__main__':
iterm2.run_until_complete(main)
|
"""A Compile object (see compile_rule.py): foo.css -> foo.min.css.
We pass the CSS through cssmin to minify it. However, we also inline
images at compress-time (it doesn't technically 'compress' this file,
but it does compress the overall number of network round-trips the
user needs, so we'll count it). We inline 'url(...)' CSS rules, but
only if the files are small enough, and only occur once (otherwise,
it's better to just let the user cache them); or alternately, if the
user manually indicates a desire for inlining via a text annotation:
/*! data-uri... */
"""
from __future__ import absolute_import
import base64
import os
import re
from shared import ka_root
import intl.data
import js_css_packages.packages
import js_css_packages.util
from kake.lib import compile_rule
from kake.lib import compile_util
from kake.lib import computed_inputs
from kake.lib import log
_IMAGE_EXTENSION = r'(?:png|gif|jpg|jpeg)'
# We capture only host-absolute urls (start with /, but no hostname),
# since we know what files those resolve to, and we know they're
# referring to KA content.
# This matches url(/image/foo.png), url('/image/foo.png?k'), etc.
# It also matches /*! data-uri */ right after this url().
# group(1): the url-path of the image
# group(2): the data-uri comment, if it exists (None otherwise)
_CSS_IMAGE_RE = re.compile(
r"\burl\(['\"]?(/[^)]*\.%s(?:\?[^'\")]*)?)"
r"(?:['\");} ]*?(\s*/\*! data-uri.*?\*/))?"
% _IMAGE_EXTENSION, re.I)
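# Worked example of what this pattern captures (illustration only):
#   >>> m = _CSS_IMAGE_RE.search(
#   ...     ".hero { background: url(/images/foo.png); /*! data-uri */ }")
#   >>> m.group(1)
#   '/images/foo.png'
#   >>> m.group(2).strip()
#   '/*! data-uri */'
# For a plain url() with no annotation, group(2) is None.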
# This isn't used right now, so this is mostly for documentation purposes.
# This matches src="/img/foo.png", src='{{"/img/foo.png?k"|static_url}}', etc.
_HTML_IMAGE_RE = re.compile(
r"""\bsrc\s*=\s*['""]?(/[^'"" >?]*\.%s(\?[^'"" >]*)?)"""
r"""|\bsrc\s*=\s*['""]?{{"(/[^""?]*\.%s(\?[^""]*)?)"|static_url}}"""
% (_IMAGE_EXTENSION, _IMAGE_EXTENSION), re.I)
# Always inline files <= this size, but not bigger files. This should
# be well smaller than 32Kb, since IE8 only supports data-URIs shorter
# than 32K (after base64-encoding):
# http://caniuse.com/datauri
_MAX_INLINE_SIZE = 4 * 1024
# For files that occur twice, we'll *still* inline them if they're
# small enough. For instance, about the same size as just the url
# reference would be.
_MAX_INLINE_SIZE_IF_TWICE = 128
# This cache stores the information for images referenced in css files.
# The key is an image url, and the value is
# (list of files .css files with this image url, image relpath, img size)
# A .css file can be in the list multiple times if it includes the image
# multiple times.
# Whenever we notice a .css file has changed, we should update this cache.
_IMAGE_URL_INFO = compile_util.CachedFile(
os.path.join('genfiles', 'css_image_url_info.pickle'))
def _image_urls_and_file_info(content):
"""Given an image-url string, return an iterator with image info."""
matches = _CSS_IMAGE_RE.finditer(content)
for m in matches:
relative_filename = m.group(1)[1:] # relative to ka-root
# Sometimes urls have ?'s (url queries) in them to bust
# caches. Those are not part of the filename. :-)
relative_filename = relative_filename.split('?')[0]
pathname = ka_root.join(relative_filename)
try:
filesize = os.stat(pathname).st_size
yield (m.group(1), relative_filename, filesize)
except OSError: # file not found
log.warning('reference to non-existent image %s', pathname)
def _update_image_url_info(css_filename, image_url_info):
"""Given css_filenames relative to ka-root, update _IMAGE_URL_INFO.
Returns:
A list of image filenames, relative to ka-root, mentioned in
this css-filename.
"""
# First, we need to delete all old references to css_filenames.
for file_info in image_url_info.itervalues():
new_files = [f for f in file_info[0] if f != css_filename]
if len(new_files) < len(file_info[0]):
# We go through this contortion so we can edit the list in place.
del file_info[0][:]
file_info[0].extend(new_files)
# If the file no longer exists (has been deleted), we're done!
if not os.path.exists(ka_root.join(css_filename)):
log.v3("removing image-url info for %s: it's been deleted",
css_filename)
return
# Then, we need to add updated references, based on the current
# file contents.
log.v2('Parsing image-urls from %s', css_filename)
with open(ka_root.join(css_filename)) as f:
content = f.read()
retval = []
for (img_url, img_relpath, img_size) in (
_image_urls_and_file_info(content)):
image_url_info.setdefault(img_url, ([], img_relpath, img_size))
image_url_info[img_url][0].append(css_filename)
retval.append(img_relpath)
log.v4('Image-url info: %s', retval)
return retval
def _data_uri_for_file(filename, file_contents):
ext = os.path.splitext(filename)[1][1:].lower()
if ext == 'jpg':
ext = 'jpeg'
return 'data:image/%s;base64,%s' % (ext, base64.b64encode(file_contents))
def _maybe_inline_images(compressed_content):
"""For small images, it's more efficient to inline them in the html.
Most modern browsers support inlining image contents in html:
css: background-image: url(data:image/png;base64,...)
html: <img src='data:image/png;base64,...'>
The advantage of doing this is to avoid an http request. The
disadvantages are that the image can't be cached separately from
the webpage (bad if the web page changes often and the image
changes never), and the total size is bigger due to the need to
base64-encode.
In general, it makes sense to use data uris for small images, for
some value of 'small', or for (possibly large) images that a) are
only used on one web page, b) are on html pages that do not change
very much, and c) are on pages where rendering speed matters (just
because it's not worth the effort otherwise).
We also support a manual decision to inline via a text annotation:
/*! data-uri... */.
Arguments:
compressed_content: The content to inline the image-urls in.
Returns:
Returns the input content, but with zero, some, or all images
inlined.
"""
output = []
lastpos = 0
for m in _CSS_IMAGE_RE.finditer(compressed_content):
image_url = m.group(1)
always_inline = m.group(2)
# Find how often the image appears in our packages. If it
# only appears once, inlining it is a no-brainer (if it's
# 'small', anyway). If it appears twice, we probably don't
# want to inline -- it's better to use the browser cache.
# If it appears more than twice, we definitely don't inline.
try:
(callers, img_relpath, img_size) = _IMAGE_URL_INFO.get()[image_url]
except KeyError:
log.v4('Not inlining image-content of %s: file not found on disk',
image_url)
continue
url_count = len(callers)
if (always_inline or
(url_count == 1 and img_size <= _MAX_INLINE_SIZE) or
(url_count == 2 and img_size <= _MAX_INLINE_SIZE_IF_TWICE)):
log.v1('Inlining image-content of %s', img_relpath)
with open(ka_root.join(img_relpath)) as f:
image_content = f.read()
output.append(compressed_content[lastpos:m.start(1)])
output.append(_data_uri_for_file(img_relpath, image_content))
lastpos = m.end(1)
if always_inline: # let's nix the !data-uri comment in the output
output.append(compressed_content[lastpos:m.start(2)])
lastpos = m.end(2)
else:
log.v4('Not inlining image-content of %s '
'(url-count %s, img size %s)',
img_relpath, url_count, img_size)
# Get the last chunk, and then we're done!
output.append(compressed_content[lastpos:])
return ''.join(output)
class CalculateCssImageInfo(compile_rule.CompileBase):
def version(self):
"""Update every time build() changes in a way that affects output."""
return 2
def build(self, outfile_name, infile_names, changed_infiles, context):
image_url_info = {}
# The rule: if outfile-name has changed, we need to rebuild everything.
if outfile_name in changed_infiles:
changed_infiles = infile_names
# Start by modifying the existing data, except when we know we
# need to recompute *everything* (why bother then)?
if changed_infiles != infile_names:
try:
image_url_info = _IMAGE_URL_INFO.get() # start with old info
except Exception: # we are just best-effort to read old info
changed_infiles = infile_names
for infile_name in changed_infiles:
_update_image_url_info(infile_name, image_url_info)
# Store the image_url_info both in cache and on disk.
_IMAGE_URL_INFO.put(image_url_info)
class ComputedCssImageInfoInputs(computed_inputs.ComputedInputsBase):
def version(self):
"""Update if input_patterns() changes in a way that affects output."""
return 2
def input_patterns(self, outfile_name, context, triggers, changed):
# We depend on every .css file listed in the stylesheet.
retval = set()
# If the manifest file itself has changed, make sure we read
# the latest version in the get_by_name() calls below.
assert self.triggers[0].endswith('.json') # the manifest
packages = js_css_packages.packages.read_package_manifest(
self.triggers[0])
for (_, f) in js_css_packages.util.all_files(
packages, precompiled=True, dev=False):
if f.endswith('.css'):
retval.add(f)
return list(retval)
class CompressCss(compile_rule.CompileBase):
def version(self):
"""Update every time build() changes in a way that affects output."""
return 1
def build(self, outfile_name, infile_names, _, context):
assert len(infile_names) >= 2, infile_names # infile, cssmin, images
with open(self.abspath(infile_names[0])) as f:
minified_css = self.call_with_output(
[self.abspath(infile_names[1])], stdin=f)
minified_and_inlined_css = _maybe_inline_images(minified_css)
with open(self.abspath(outfile_name), 'w') as f:
f.write(minified_and_inlined_css)
class ComputedCssInputs(computed_inputs.ComputedInputsBase):
def __init__(self, triggers, infile_pattern):
super(ComputedCssInputs, self).__init__(triggers)
# The pattern (including `{{path}}` and `{lang}`) that
# indicates what the css input file should be for this rule.
self.infile_pattern = infile_pattern
def version(self):
"""Update if input_patterns() changes in a way that affects output."""
return 3
def input_patterns(self, outfile_name, context, triggers, changed):
(infile_name,) = compile_util.resolve_patterns([self.infile_pattern],
context)
# And we also need the module that minifies .css, and the
# rule that makes sure _IMAGE_URL_INFO is up to date.
retval = [infile_name,
'genfiles/node_modules/.bin/cssmin',
_IMAGE_URL_INFO.filename()]
# Finally, we also depend on each image in our .css file,
# since we (possibly) inline those images, so if they change,
# we need to know so we can re-inline them.
image_deps = []
for (image_url, (css_files, image_relpath, _)) in (
_IMAGE_URL_INFO.get().iteritems()):
if infile_name in css_files:
image_deps.append(image_relpath)
# We only sort to make diffs easier.
image_deps.sort()
retval.extend(image_deps)
return retval
# This holds the data that's read into _IMAGE_URL_INFO.
compile_rule.register_compile(
'CSS IMAGE INFO',
_IMAGE_URL_INFO.filename(),
ComputedCssImageInfoInputs(['stylesheets-packages.json']),
CalculateCssImageInfo())
# This also captures css that has been compiled, and lives in
# genfiles/compiled_less or wherever:
#
# genfiles/compiled_less/en/a/b.less.css ->
# genfiles/compressed_stylesheets/en/genfiles/compiled_less/en/a/b.less.min.css
#
compile_rule.register_compile(
'COMPRESSED CSS',
'genfiles/compressed_stylesheets/en/{{path}}.min.css',
# We depend on our input .css file, but we also depend on images
# that our input .css file has inlined, since if those images
# change we'll need to re-inline them. The information about what
# images our input .css currently file has inlined is stored in
# _IMAGE_URL_INFO, so whenever that changes we need to recalculate
# our inputs, in case what-we've-inlined has changed.
ComputedCssInputs(
[_IMAGE_URL_INFO.filename()],
infile_pattern='genfiles/compiled_autoprefixed_css/en/{{path}}.css'),
CompressCss())
# This gets translations.
compile_rule.register_compile(
'TRANSLATED COMPRESSED CSS',
'genfiles/compressed_stylesheets/{lang}/{{path}}.min.css',
ComputedCssInputs(
[_IMAGE_URL_INFO.filename()],
infile_pattern=(
'genfiles/compiled_autoprefixed_css/{lang}/{{path}}.css')),
CompressCss(),
maybe_symlink_to='genfiles/compressed_stylesheets/en/{{path}}.min.css')
# Special-case rule for translated CSS for RTL languages (he, ar, ur)
# so they can all symlink to one RTL file
symlink_lang = intl.data.right_to_left_languages()[0]
for lang in intl.data.right_to_left_languages():
compile_rule.register_compile(
'TRANSLATED COMPRESSED CSS (%s)' % lang,
'genfiles/compressed_stylesheets/%s/{{path}}.min.css' % lang,
ComputedCssInputs(
[_IMAGE_URL_INFO.filename()],
infile_pattern=(
'genfiles/compiled_autoprefixed_css/%s/{{path}}.css' % lang)),
CompressCss(),
maybe_symlink_to='genfiles/compressed_stylesheets/%s/{{path}}.min.css'
% symlink_lang)
|
#!/usr/bin/env python
import tarfile
import os
import sys
import subprocess
import fnmatch
tarsuffix = 'out.tar.gz'
##_______________________________________________________________||
def _UnTar(tars,dry_run):
try:
for i in tars:
cwd = os.getcwd()
directory = os.path.dirname(i)
os.chdir(directory)
tar = tarfile.open(tarsuffix)
if dry_run:
for tarinfo in tar:
print "Extracting" , tarinfo.name, "from", tarsuffix
sys.exit(1)
tar.extractall()
tar.close()
os.chdir(cwd)
print 'Extracted in %s directory' %directory
except TypeError:
print 'Unable to extract tar'
##_______________________________________________________________||
def _ListTars(direc,dry_run):
try:
tarfiles = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(direc)
for f in fnmatch.filter(files,tarsuffix)]
if len(tarfiles) == 0:
print 'No list formed'
sys.exit(1)
    except ValueError:
        print 'Could not form list'
        sys.exit(1)
    _UnTar(tarfiles,dry_run)
##_______________________________________________________________||
if __name__ == '__main__':
import os
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--dry_run",action="store_true", default=False, help="Do not run any commands; only print them")
(options,args) = parser.parse_args()
if len(args)>1:
print 'Please only provide 1 argument'
sys.exit(1)
elif len(args) == 0:
print 'Please provide the path'
sys.exit(1)
else:
iDir = args[0]
if iDir == '.':
iDir = os.getcwd()
_ListTars(iDir,options.dry_run)
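# Example invocation (script name and path are placeholders):
#   python untar_outputs.py /path/to/job/output
#   python untar_outputs.py --dry_run /path/to/job/output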
|
# %%
#######################################
import sqlite3

def sqlite3get_table_names(sql_conn: sqlite3.Connection):
"""For a given sqlite3 database connection object, returns the table names within that database.
Examples:
>>> db_conn = sqlite3.connect('places.sqlite')\n
>>> sqlite3get_table_names(db_conn)\n
['moz_origins', 'moz_places', 'moz_historyvisits', 'moz_inputhistory', 'moz_bookmarks', 'moz_bookmarks_deleted', 'moz_keywords', 'moz_anno_attributes', 'moz_annos', 'moz_items_annos', 'moz_meta', 'sqlite_stat1']
References:
# How to list tables for a given database:\n
https://techoverflow.net/2019/10/14/how-to-list-tables-in-sqlite3-database-in-python/\n
# Firefox forensics - what sqlite databases store which artifacts\n
https://www.foxtonforensics.com/browser-history-examiner/firefox-history-location\n
Args:
sql_conn (sqlite3.Connection): Reference an existing slite3 Connection object.
Returns:
list: Returns a list of the table names in the database.
"""
import sqlite3
cursor = sql_conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
table_names = [
name[0] for name in cursor.fetchall() if name[0] != 'sqlite_sequence'
]
cursor.close()
return table_names
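# %%
# Quick self-contained check of sqlite3get_table_names() against an
# in-memory database (the table name here is arbitrary):
def demo_sqlite3get_table_names():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE moz_places (id INTEGER PRIMARY KEY, url TEXT)")
    print(sqlite3get_table_names(conn))  # ['moz_places']
    conn.close()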
|
"""Support for Meteo-France weather data."""
import datetime
import logging
from meteofrance.client import meteofranceClient, meteofranceError
from vigilancemeteo import VigilanceMeteoError, VigilanceMeteoFranceProxy
import voluptuous as vol
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import Throttle
from .const import CONF_CITY, DATA_METEO_FRANCE, DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = datetime.timedelta(minutes=5)
def has_all_unique_cities(value):
"""Validate that all cities are unique."""
cities = [location[CONF_CITY] for location in value]
vol.Schema(vol.Unique())(cities)
return value
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_CITY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
],
has_all_unique_cities,
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the Meteo-France component."""
hass.data[DATA_METEO_FRANCE] = {}
# Check if at least weather alert have to be monitored for one location.
need_weather_alert_watcher = False
for location in config[DOMAIN]:
if (
CONF_MONITORED_CONDITIONS in location
and "weather_alert" in location[CONF_MONITORED_CONDITIONS]
):
need_weather_alert_watcher = True
# If weather alert monitoring is expected initiate a client to be used by
# all weather_alert entities.
if need_weather_alert_watcher:
_LOGGER.debug("Weather Alert monitoring expected. Loading vigilancemeteo")
weather_alert_client = VigilanceMeteoFranceProxy()
try:
weather_alert_client.update_data()
except VigilanceMeteoError as exp:
_LOGGER.error(
"Unexpected error when creating the vigilance_meteoFrance proxy: %s ",
exp,
)
else:
weather_alert_client = None
hass.data[DATA_METEO_FRANCE]["weather_alert_client"] = weather_alert_client
for location in config[DOMAIN]:
city = location[CONF_CITY]
try:
client = meteofranceClient(city)
except meteofranceError as exp:
_LOGGER.error(
"Unexpected error when creating the meteofrance proxy: %s", exp
)
return
client.need_rain_forecast = bool(
CONF_MONITORED_CONDITIONS in location
and "next_rain" in location[CONF_MONITORED_CONDITIONS]
)
hass.data[DATA_METEO_FRANCE][city] = MeteoFranceUpdater(client)
hass.data[DATA_METEO_FRANCE][city].update()
if CONF_MONITORED_CONDITIONS in location:
monitored_conditions = location[CONF_MONITORED_CONDITIONS]
_LOGGER.debug("meteo_france sensor platform loaded for %s", city)
load_platform(
hass,
"sensor",
DOMAIN,
{CONF_CITY: city, CONF_MONITORED_CONDITIONS: monitored_conditions},
config,
)
load_platform(hass, "weather", DOMAIN, {CONF_CITY: city}, config)
return True
class MeteoFranceUpdater:
"""Update data from Meteo-France."""
def __init__(self, client):
"""Initialize the data object."""
self._client = client
def get_data(self):
"""Get the latest data from Meteo-France."""
return self._client.get_data()
@Throttle(SCAN_INTERVAL)
def update(self):
"""Get the latest data from Meteo-France."""
try:
self._client.update()
except meteofranceError as exp:
_LOGGER.error(
"Unexpected error when updating the meteofrance proxy: %s", exp
)
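# Example configuration.yaml entry accepted by CONFIG_SCHEMA above (assuming
# DOMAIN from .const is "meteo_france"; city names are illustrative):
#
#   meteo_france:
#     - city: Paris
#       monitored_conditions:
#         - next_rain
#         - weather_alert
#     - city: Lyon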
|
# *_*coding:utf-8 *_*
import getpass
import sys
import typing
import click
import pandas as pd
from rich.console import Console
import warnings
import pymysql
from sqlstar.core import DatabaseURL
from sqlstar.interfaces import ConnectionBackend, DatabaseBackend
from sqlstar.utils import check_dtype_mysql
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
class MySQLBackend(DatabaseBackend):
def __init__(self, database_url: typing.Union[DatabaseURL, str],
**options: typing.Any) -> None:
self._database_url = DatabaseURL(database_url)
self._host = self._database_url.hostname
self._port = self._database_url.port or 3306
self._user = self._database_url.username or getpass.getuser()
self._password = self._database_url.password
self._db = self._database_url.database
self._autocommit = True
self._options = options
self._connection = None
def _get_connection_kwargs(self) -> dict:
url_options = self._database_url.options
kwargs = {}
ssl = url_options.get("ssl")
if ssl is not None:
kwargs["ssl"] = {"true": True, "false": False}[ssl.lower()]
return kwargs
def connect(self) -> None:
assert self._connection is None, "DatabaseBackend is already running"
kwargs = self._get_connection_kwargs()
self._connection = pymysql.connect(
host=self._host,
port=self._port,
user=self._user,
password=self._password,
db=self._db,
autocommit=self._autocommit,
cursorclass=pymysql.cursors.DictCursor,
**kwargs,
)
def disconnect(self) -> None:
assert self._connection is not None, "DatabaseBackend is not running"
        self._connection.close()
self._connection = None
def connection(self) -> "MySQLConnection":
return MySQLConnection(self, self._connection)
class MySQLConnection(ConnectionBackend):
def __init__(self, database: MySQLBackend, connection: pymysql.Connection):
self._database = database
self._connection = connection
@property
def connection(self) -> pymysql.Connection:
assert self._connection is not None, "Connection is not acquired"
return self._connection
def fetch_all(self, query):
"""Fetch all the rows"""
assert self._connection is not None, "Connection is not acquired"
cursor = self._connection.cursor()
try:
cursor.execute(query)
result = cursor.fetchall()
return result
finally:
cursor.close()
def fetch_df(self, query: typing.Union[str]):
"""Fetch data, and format result into Dataframe
:param query:
:return: Dataframe
"""
data = self.fetch_all(query)
return pd.DataFrame(data)
def export_csv(self,
query: typing.Union[str],
fname: typing.Union[str],
sep: typing.Any = ','):
"""Export result to csv"""
df = self.fetch_df(query)
return df.to_csv(fname, sep=sep, encoding='utf-8', index=False)
def export_excel(self, query: typing.Union[str], fname: typing.Union[str]):
"""Export result to excel"""
df = self.fetch_df(query)
return df.to_excel(fname, encoding='utf-8', index=False)
def fetch_many(self, query, size: int = None):
"""Fetch several rows"""
assert self._connection is not None, "Connection is not acquired"
cursor = self._connection.cursor()
try:
cursor.execute(query)
result = cursor.fetchmany(size)
return result
finally:
cursor.close()
def execute(self, query):
"""Execute a query
:param str query: Query to execute.
:return: Number of affected rows
:rtype: int
"""
assert self._connection is not None, "Connection is not acquired"
cursor = self._connection.cursor()
try:
result = cursor.execute(query)
return result
finally:
cursor.close()
    def execute_many(self, query, args):
        """Run one query against several rows of data
        :param query: query to execute on server
        :param args: sequence of parameter tuples, one per row
        :return: Number of rows affected, if any.
        :rtype: int
        This method improves performance on multiple-row INSERT and
        REPLACE. Otherwise it is equivalent to looping over args with
        execute().
        """
        assert self._connection is not None, "Connection is not acquired"
        cursor = self._connection.cursor()
        try:
            result = cursor.executemany(query, args)
            return result
        finally:
            cursor.close()
def insert_many(self, table, data: typing.Union[list, tuple],
cols: typing.Union[list, tuple]):
"""Insert many records
:param table: table name
:param data: data
:param cols: columns
:return:
"""
assert self._connection is not None, "Connection is not acquired"
cursor = self._connection.cursor()
INSERT_MANY = "INSERT IGNORE INTO {table} ({cols}) VALUES ({values});".format(
table=table,
cols=", ".join(["`%s`" % col for col in cols]),
values=", ".join(["%s" for col in cols]))
cursor.executemany(INSERT_MANY, data)
Console().print(f"[bold cyan]{table}[/bold cyan] inserts [bold cyan]"
f"{len(data)}[/bold cyan] records ✨ 🍰 ✨")
cursor.close()
def insert_df(self, table, df: pd.DataFrame, dropna=True, **kwargs):
"""Insert Dataframe type of data
# transform dtype
>>> df.loc[:, col] = df.loc[:, col].astype(str)
:param table:
:param df: Dataframe
:param dropna: bool
:return:
"""
if df.empty:
Console().print('There seems to be no data 😅', style='red')
else:
cols = df.columns.tolist()
if dropna:
df.dropna(axis=kwargs.get('axis', 0),
how=kwargs.get('how', 'any'),
thresh=kwargs.get('thresh'),
subset=kwargs.get('subset'),
inplace=True)
data = [tuple(row) for row in df[cols].values]
self.insert_many(table, data, cols)
def truncate_table(self, table):
"""Truncate table's data, but keep the table structure
:param table:
:return:
"""
TRUNCATE_TABLE = """TRUNCATE TABLE {};""".format(table)
self.execute(TRUNCATE_TABLE)
Console().print(
f"Table [bold cyan]{table}[/bold cyan] was truncated ✨ 🍰 ✨")
def drop_column(self, table, column: typing.Union[str, list, tuple]):
"""Drop column"""
if isinstance(column, str):
DROP_COLUMN = """ALTER TABLE {} DROP COLUMN {} ;""".format(
table, column)
if isinstance(column, (list, tuple)):
DROP_COLUMN = """ALTER TABLE {} DROP COLUMN {} ;""".format(
table, ',DROP COLUMN '.join([col for col in column]))
self.execute(DROP_COLUMN)
Console().print("Column was dropped ✨ 🍰 ✨")
def drop_table(self, table):
"""Drop table"""
DROP_TABLE = f"""DROP TABLE IF EXISTS `{table}`;"""
data = self.fetch_all(f'''SELECT * FROM {table} LIMIT 10;''')
# if the table is not empty, warning user
if data:
confirm = click.confirm(f"Are you sure to drop table {table} ?",
default=False)
if confirm:
self.execute(DROP_TABLE)
else:
self.execute(DROP_TABLE)
Console().print(
f"Table [bold cyan]{table}[/bold cyan] was dropped ✨ 🍰 ✨")
def create_table(self,
table,
df: pd.DataFrame = None,
comments: dict = None,
primary_key: typing.Union[str, list, tuple] = None,
dtypes: dict = None):
"""Create table"""
from toolz import merge
PREFIX = f'''CREATE TABLE IF NOT EXISTS `{table}` ('''
SUFFIX = ''') DEFAULT CHARSET=utf8mb4;'''
types = {}
if dtypes:
for dtype, type_cols in dtypes.items():
types = merge(types, {col: dtype for col in type_cols})
cols = df.columns.tolist() if df is not None else types.keys()
# if there is no id, add an auto_increment id
        if ('id' not in cols) or (not primary_key) or ('id' not in primary_key):
PREFIX += '''`id` INT AUTO_INCREMENT COMMENT 'id','''
COLUMNS = []
for col in cols:
comment = comments.get(col, "...") if comments else "..."
dtype = types.get(col, None)
if dtype:
COLUMNS.append(f'''`{col}` {dtype} COMMENT "{comment}"''')
else:
infer_dtype = check_dtype_mysql(df[col].dtypes)
COLUMNS.append(
f'''`{col}` {infer_dtype} COMMENT "{comment}"''')
PRIMARY_SEG = f' ,PRIMARY KEY (`id`)'
if isinstance(primary_key, str) and (not primary_key == 'id'):
PRIMARY_SEG = f' ,PRIMARY KEY (`id`, `{primary_key}`)'
elif isinstance(primary_key, (list, tuple, set)):
PRIMARY_SEG = f' ,PRIMARY KEY (`id`, `{"`,`".join(primary_key)}`)'
else:
pass
CREATE_TABLE = PREFIX + ','.join(COLUMNS) + PRIMARY_SEG + SUFFIX
self.execute(CREATE_TABLE)
Console().print(
f"Table [blod cyan]{table}[/blod cyan] was created ✨ 🍰 ✨")
def rename_table(self, table: str, name: str):
"""Rename table
:param table:
:param name:
:return:
"""
RENAME_TABLE = """ALTER TABLE {} RENAME TO {} ;""".format(table, name)
self.execute(RENAME_TABLE)
Console().print(
"Renamed table [bold red]{}[/bold red] to [bold cyan]{}[/bold "
"cyan] ✨ 🍰 ✨".format(table, name))
def rename_column(self, table: str, column: str, name: str, dtype: str):
"""Rename column
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ALTER TABLE `table` CHANGE [column] [new column] [new type];
:param table:
:param column:
:param name:
:param dtype:
:return:
"""
RENAME_COLUMN = """ALTER TABLE {} CHANGE COLUMN {} {} {};""".format(
table, column, name, dtype)
self.execute(RENAME_COLUMN)
Console().print("Renamed column {} to {} ✨ 🍰 ✨".format(column, name))
def add_column(
self,
table: str,
column: str,
dtype: str,
comment: str = "...",
after: str = None,
):
"""Add new column
:param table:
:param column:
:param dtype:
:param comment:
:param after: insert column after which column, the default is insert
into the end
:return:
"""
MYSQL_KEYWORDS = ["CHANGE", "SCHEMA", "DEFAULT"]
if column.upper() in MYSQL_KEYWORDS:
Console().print("%(column)s was SQL keyword or reserved word 😯\n" %
{"column": column},
style='red')
sys.exit(1)
if after:
ADD_COLUMN = (
"""ALTER TABLE {} ADD {} {} COMMENT '{}' AFTER {} ;""".format(
table, column, dtype, comment, after))
else:
ADD_COLUMN = """ALTER TABLE {} ADD {} {} COMMENT '{}' ;""".format(
table, column, dtype, comment)
self.execute(ADD_COLUMN)
Console().print(f"Added column {column} to {table} ✨ 🍰 ✨")
def add_table_comment(self, table: str, comment: str):
"""Add comment for table"""
ADD_TABLE_COMMENT = """ALTER TABLE {} COMMENT '{}' ;""".format(
table, comment)
self.execute(ADD_TABLE_COMMENT)
Console().print("Table comment added ✨ 🍰 ✨")
def change_column_attribute(
self,
table: str,
column: str,
dtype: str,
notnull: bool = False,
comment: str = None,
):
"""Change column's attribute
:param table:
:param column:
:param dtype:
:param notnull:
:param comment:
:return:
"""
comment = 'COMMENT "{}"'.format(comment) if comment else ""
CHANG_COLUMN_ATTRIBUTE = """ALTER TABLE {} MODIFY {} {} {} {};""".format(
table, column, dtype, "NOT NULL" if notnull else "DEFAULT NULL",
comment)
self.execute(CHANG_COLUMN_ATTRIBUTE)
Console().print(
"Column [bold cyan]{}[/bold cyan]'s attribute was modified "
"✨ 🍰 ✨".format(column))
def add_primary_key(self, table: str, primary_key: typing.Union[str, list,
tuple]):
"""Set primary key
:param table:
:param primary_key:
:return:
"""
        # check whether a primary key already exists
        result = self.fetch_all(f'''SELECT COUNT(*) PrimaryNum
                        FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE t
                        WHERE t.TABLE_NAME ="{table}"''')
        # if a primary key already exists, drop it first
        if result and result[0]['PrimaryNum'] >= 1:
DROP_PRIMARIY_KEY = f'ALTER TABLE {table} DROP PRIMARY KEY;'
self.execute(DROP_PRIMARIY_KEY)
PRIMARY_KEY = ''
if isinstance(primary_key, str):
PRIMARY_KEY = f'`{primary_key}`'
elif isinstance(primary_key, (list, tuple)):
PRIMARY_KEY = f'`{"`,`".join(primary_key)}`'
ADD_PRIMARY_KEY = f"""ALTER TABLE {table} ADD PRIMARY KEY ({PRIMARY_KEY});"""
self.execute(ADD_PRIMARY_KEY)
Console().print("Well done ✨ 🍰 ✨")
|
"""
The purpose of this code is to create the pytorch-geometric graphs, create the Data files, and to load the
train/val/test data
It can be run on sherlock using
$ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python pdbbind_dataloader.py all /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/train_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/val_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/test_index_balance_clash.txt /home/users/sidhikab/lig_clash_score/src/models/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/processed_without_protein/processed /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/combined.csv --no_protein
$ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python pdbbind_dataloader.py pdbbind_dataloader /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/train_index_balance_clash_large.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/val_index_balance_clash_large.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/test_index_balance_clash_large.txt /home/users/sidhikab/lig_clash_score/src/models/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/processed_conformer_no_score_feat /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/combined_conformer_poses.csv --decoy_type conformer_poses
$ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python pdbbind_dataloader.py group /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/train_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/val_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/test_index_balance_clash.txt /home/users/sidhikab/lig_clash_score/src/models/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/processed_without_protein/processed /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/combined.csv --index 0 --no_protein
$ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python pdbbind_dataloader.py check /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/train_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/val_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/test_index_balance_clash.txt /home/users/sidhikab/lig_clash_score/src/models/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/processed_without_protein/processed /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/combined.csv --no_protein
$ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python pdbbind_dataloader.py pdbbind_dataloader /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/train_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/val_index_balance_clash.txt /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits/test_index_balance_clash.txt /home/users/sidhikab/lig_clash_score/src/models/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/processed_clustered/processed /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/combined.csv --score_feature
"""
import sys
sys.path[-2] = '/home/users/sidhikab/lig_clash_score/src'
from util import splits as sp
import pandas as pd
import os
import torch
from torch_geometric.data import Dataset, Data, DataLoader
from tqdm import tqdm
import argparse
import pickle
import random
# loader for pytorch-geometric
class GraphPDBBind(Dataset):
"""
PDBBind dataset in pytorch-geometric format.
Ref: https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/data/dataset.html#Dataset
"""
def __init__(self, root, transform=None, pre_transform=None):
super(GraphPDBBind, self).__init__(root, transform, pre_transform)
@property
def raw_file_names(self):
f = open("/home/users/sidhikab/lig_clash_score/src/models/out/test.out", "a")
f.write('getting raw file names\n')
f.close()
return sorted(os.listdir(self.raw_dir))
@property
def processed_file_names(self):
f = open("/home/users/sidhikab/lig_clash_score/src/models/out/test.out", "a")
f.write('getting processed file names\n')
f.close()
return sorted(os.listdir(self.processed_dir))
def process(self):
f = open("/home/users/sidhikab/lig_clash_score/src/models/out/test.out", "a")
f.write('processing\n')
f.close()
pass
def len(self):
return len(self.processed_file_names)
def get(self, idx):
data = torch.load(os.path.join(self.processed_dir, 'data_{}.pt'.format(idx)))
return data
def get_prots(docked_prot_file):
"""
gets list of all protein, target ligands, and starting ligands in the index file
:param docked_prot_file: (string) file listing proteins to process
:return: process (list) list of all protein, target ligands, and starting ligands to process
"""
process = []
with open(docked_prot_file) as fp:
for line in fp:
if line[0] == '#': continue
protein, target, start = line.strip().split()
process.append((protein, target, start))
return process
def get_index_groups(process, raw_root, decoy_type, cluster, include_score, include_protein):
"""
gets list of all protein, target ligands, starting ligands, and starting indices information in the index file (up to
CUTOFF)
:param process: (list) shuffled list of all protein, target ligands, and starting ligands to process
    :param raw_root: (string) path to directory with data
    :param decoy_type: (string) either cartesian_poses, ligand_poses, or conformer_poses
    :param cluster: (bool) whether to count only the clustered poses
    :param include_score: (bool) whether the pose graphs include the physics score feature
    :param include_protein: (bool) whether the pose graphs include protein atoms
:return: grouped_files (list) list of all protein, target ligands, starting ligands, and starting indices to process
"""
index_groups = []
num_codes = 0
for protein, target, start in tqdm(process, desc='going through protein, target, start groups'):
index_groups.append((protein, target, start, num_codes))
#update num_codes
protein_path = os.path.join(raw_root, protein)
pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start))
cluster_dir = os.path.join(pair_path, '{}-to-{}_clustered.pkl'.format(target, start))
if cluster:
infile = open(cluster_dir, 'rb')
cluster_data = pickle.load(infile)
infile.close()
num_codes += len(cluster_data)
else:
if include_score:
graph_dir = '{}/{}-to-{}_{}_graph_with_score.pkl'.format(pair_path, target, start,
decoy_type)
elif not include_protein:
graph_dir = '{}/{}-to-{}_{}_graph_without_protein.pkl'.format(pair_path, target, start,
decoy_type)
else:
graph_dir = '{}/{}-to-{}_{}_graph.pkl'.format(pair_path, target, start, decoy_type)
infile = open(graph_dir, 'rb')
graph_data = pickle.load(infile)
infile.close()
num_codes += len(graph_data.keys())
return index_groups
def group_files(n, process):
"""
groups pairs into sublists of size n
:param n: (int) sublist size
:param process: (list) list of pairs to process
:return: grouped_files (list) list of sublists of pairs
"""
grouped_files = []
for i in range(0, len(process), n):
grouped_files += [process[i: i + n]]
return grouped_files
def get_label(pdb, label_df, use_modified_rmsd):
"""
searches for pdb's rmsd in combined rmsd df
:param pdb: (string) {target}_lig{id}
:param label_df: (df) combined rmsd df
:return: (float) rmsd value
"""
if use_modified_rmsd:
return label_df[label_df['target'] == pdb]['modified_rmsd'].iloc[0]
else:
return label_df[label_df['target'] == pdb]['rmsd'].iloc[0]
def get_score_no_vdw(pdb, label_df):
"""
searches for pdb's rmsd in combined rmsd df
:param pdb: (string) {target}_lig{id}
:param label_df: (df) combined rmsd df
:return: (float) rmsd value
"""
return label_df[label_df['target'] == pdb]['target_start_score_no_vdw'].iloc[0]
def create_graph(graph_data, label_df, processed_root, pdb_code, protein, target, start, start_index, lower_score_bound,
upper_score_bound, use_modified_rmsd):
node_feats, edge_index, edge_feats, pos = graph_data[pdb_code]
y = torch.FloatTensor([get_label(pdb_code, label_df, use_modified_rmsd)])
data = Data(node_feats, edge_index, edge_feats, y=y, pos=pos)
data.pdb = '{}_{}-to-{}_{}'.format(protein, target, start, pdb_code)
score = get_score_no_vdw(pdb_code, label_df)
if score < lower_score_bound:
score = lower_score_bound
if score > upper_score_bound:
score = upper_score_bound
data.physics_score = score
torch.save(data, os.path.join(processed_root, 'data_{}.pt'.format(start_index)))
def split_process(protein, target, start, label_file, pair_path, processed_root, decoy_type, start_index, include_score,
lower_score_bound, upper_score_bound, include_protein, cluster, use_modified_rmsd):
"""
creates Data file for target/start pair
:param target: (string) name of target ligand
:param start: (string) name of start ligand
:param label_file: (string) file containing rmsd label information
:param pair_path: (string) path to directory with target/start info
:param processed_root: (string) directory where data files will be written to
:param start_index: (int) starting index for labeling data files for target/start pair
:return: grouped_files (list) list of sublists of pairs
"""
label_df = pd.read_csv(label_file)
if include_score:
graph_dir = '{}/{}-to-{}_{}_graph_with_score.pkl'.format(pair_path, target, start, decoy_type)
elif not include_protein:
graph_dir = '{}/{}-to-{}_{}_graph_without_protein.pkl'.format(pair_path, target, start, decoy_type)
else:
graph_dir = '{}/{}-to-{}_{}_graph.pkl'.format(pair_path, target, start, decoy_type)
infile = open(graph_dir, 'rb')
graph_data = pickle.load(infile)
infile.close()
if cluster:
cluster_dir = os.path.join(pair_path, '{}-to-{}_clustered.pkl'.format(target, start))
infile = open(cluster_dir, 'rb')
cluster_data = pickle.load(infile)
infile.close()
for pdb_code in tqdm(cluster_data, desc='pdb_codes'):
create_graph(graph_data, label_df, processed_root, pdb_code, protein, target, start, start_index,
lower_score_bound, upper_score_bound, use_modified_rmsd)
start_index += 1
else:
for pdb_code in graph_data:
create_graph(graph_data, label_df, processed_root, pdb_code, protein, target, start, start_index,
lower_score_bound, upper_score_bound, use_modified_rmsd)
start_index += 1
def pdbbind_dataloader(batch_size, data_dir='../../data/pdbbind', split_file=None):
"""
Creates dataloader for PDBBind dataset with specified split.
Assumes pre-computed split in 'split_file', which is used to index Dataset object
:param batch_size: (int) size of each batch of data
:param data_dir: (string) root directory of GraphPDBBind class
:param split_file: (string) file with pre-computed split information
:return: (dataloader) dataloader for PDBBind dataset with specified split
"""
dataset = GraphPDBBind(root=data_dir)
if split_file is None:
return DataLoader(dataset, batch_size, shuffle=True)
indices = sp.read_split_file(split_file)
dl = DataLoader(dataset.index_select(indices), batch_size, shuffle=True)
return dl
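# Example (batch size and paths are placeholders): build a training loader
# from a pre-computed split file and inspect one batch.
#
#   train_loader = pdbbind_dataloader(16, data_dir='../../data/pdbbind',
#                                     split_file='../../data/splits/train.txt')
#   for batch in train_loader:
#       print(batch.num_graphs, batch.y.shape)
#       break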
def run_all(train_prot_file, val_prot_file, test_prot_file, run_path, root, processed_root, label_file, decoy_type,
grouped_files, n, include_score, include_protein):
for i, group in enumerate(grouped_files):
cmd = 'sbatch -p owners -t 1:00:00 -o {} --wrap="' \
'/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python pdbbind_dataloader.py group ' \
'{} {} {} {} {} {} {} --n {} --index {} --decoy_type {}'
if include_score:
cmd += ' --score_feature'
if not include_protein:
cmd += ' --no_protein'
cmd += '"'
os.system(cmd.format(os.path.join(run_path, 'combined{}.out'.format(i)), train_prot_file, val_prot_file,
test_prot_file, run_path, root, processed_root, label_file, n, i, decoy_type))
def run_group(grouped_files, raw_root, processed_root, label_file, decoy_type, index, include_score, lower_score_bound,
upper_score_bound, include_protein, cluster, use_modified_rmsd):
for protein, target, start, start_index in grouped_files[index]:
print(protein, target, start)
protein_path = os.path.join(raw_root, protein)
pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start))
split_process(protein, target, start, label_file, pair_path, processed_root, decoy_type, start_index,
include_score, lower_score_bound, upper_score_bound, include_protein, cluster, use_modified_rmsd)
def run_check(process, raw_root, processed_root, decoy_type, cluster, include_score, include_protein):
num_codes = 0
index_groups = []
for protein, target, start in tqdm(process, desc='going through protein, target, start groups'):
# update num_codes
protein_path = os.path.join(raw_root, protein)
pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start))
if cluster:
cluster_dir = os.path.join(pair_path, '{}-to-{}_clustered.pkl'.format(target, start))
infile = open(cluster_dir, 'rb')
cluster_data = pickle.load(infile)
infile.close()
for _ in cluster_data:
if not os.path.exists(os.path.join(processed_root, 'data_{}.pt'.format(num_codes))):
index_groups.append((protein, target, start, num_codes))
break
num_codes += 1
else:
start_num_code = num_codes
added = False
if include_score:
graph_dir = '{}/{}-to-{}_{}_graph_with_score.pkl'.format(pair_path, target, start,
decoy_type)
elif not include_protein:
graph_dir = '{}/{}-to-{}_{}_graph_without_protein.pkl'.format(pair_path, target, start,
decoy_type)
else:
graph_dir = '{}/{}-to-{}_{}_graph.pkl'.format(pair_path, target, start, decoy_type)
infile = open(graph_dir, 'rb')
graph_data = pickle.load(infile)
infile.close()
for _ in graph_data:
if not os.path.exists(os.path.join(processed_root, 'data_{}.pt'.format(num_codes))) and not added:
print('data_{}.pt'.format(num_codes))
print((protein, target, start, start_num_code))
added = True
num_codes += 1
print('Missing', len(index_groups), '/', len(process))
print(index_groups)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('task', type=str, help='either all, group, check, MAPK14, combine_all, combine_group, '
'combine_check, or pdbbind_dataloader')
parser.add_argument('train_prot_file', type=str, help='file listing proteins to process for training dataset')
parser.add_argument('val_prot_file', type=str, help='file listing proteins to process for validation dataset')
parser.add_argument('test_prot_file', type=str, help='file listing proteins to process for testing dataset')
parser.add_argument('run_path', type=str, help='directory where script and output files will be written')
parser.add_argument('root', type=str, help='directory where data can be found')
parser.add_argument('save_root', type=str, help='directory where raw and processed directories can be found')
parser.add_argument('label_file', type=str, help='file with rmsd labels')
parser.add_argument('--index', type=int, default=-1, help='for group task, group number')
parser.add_argument('--n', type=int, default=3, help='number of protein, target, start groups processed in '
'group task')
parser.add_argument('--lower_score_bound', type=float, default=-20, help='any physics score below this value, will '
'be set to this value')
parser.add_argument('--upper_score_bound', type=float, default=20, help='any physics score above this value, will '
'be set to this value')
parser.add_argument('--decoy_type', type=str, default='ligand_poses', help='either cartesian_poses, ligand_poses, '
'or conformer_poses')
parser.add_argument('--score_feature', dest='include_score', action='store_true')
parser.add_argument('--no_score_feature', dest='include_score', action='store_false')
parser.set_defaults(include_score=False)
parser.add_argument('--protein', dest='include_protein', action='store_true')
parser.add_argument('--no_protein', dest='include_protein', action='store_false')
parser.set_defaults(include_protein=True)
parser.add_argument('--clustered_only', dest='cluster', action='store_true')
parser.add_argument('--no_cluster', dest='cluster', action='store_false')
parser.set_defaults(cluster=False)
parser.add_argument('--modified_rmsd', dest='use_modified_rmsd', action='store_true')
    parser.add_argument('--regular_rmsd', dest='use_modified_rmsd', action='store_false')
parser.set_defaults(use_modified_rmsd=False)
args = parser.parse_args()
raw_root = os.path.join(args.root, 'raw')
random.seed(0)
if not os.path.exists(args.save_root):
os.mkdir(args.save_root)
processed_root = os.path.join(args.save_root, 'processed')
if not os.path.exists(processed_root):
os.mkdir(processed_root)
if not os.path.exists(args.run_path):
os.mkdir(args.run_path)
if args.task == 'all':
process = get_prots(args.train_prot_file)
process.extend(get_prots(args.val_prot_file))
process.extend(get_prots(args.test_prot_file))
index_groups = get_index_groups(process, raw_root, args.decoy_type, args.cluster, args.include_score,
args.include_protein)
grouped_files = group_files(args.n, index_groups)
run_all(args.train_prot_file, args.val_prot_file, args.test_prot_file, args.run_path, args.root, args.save_root,
args.label_file, args.decoy_type, grouped_files, args.n, args.include_score, args.include_protein)
if args.task == 'group':
process = get_prots(args.train_prot_file)
process.extend(get_prots(args.val_prot_file))
process.extend(get_prots(args.test_prot_file))
index_groups = get_index_groups(process, raw_root, args.decoy_type, args.cluster, args.include_score,
args.include_protein)
grouped_files = group_files(args.n, index_groups)
run_group(grouped_files, raw_root, processed_root, args.label_file, args.decoy_type, args.index,
args.include_score, args.lower_score_bound, args.upper_score_bound, args.include_protein,
args.cluster, args.use_modified_rmsd)
if args.task == 'check':
process = get_prots(args.train_prot_file)
process.extend(get_prots(args.val_prot_file))
process.extend(get_prots(args.test_prot_file))
run_check(process, raw_root, processed_root, args.decoy_type, args.cluster, args.include_score,
args.include_protein)
if args.task == 'pdbbind_dataloader':
split_path = '/oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/splits'
split = 'balance_clash_large'
train_split = os.path.join(split_path, f'train_{split}.txt')
train_loader = pdbbind_dataloader(1, data_dir=args.save_root, split_file=train_split)
print(len(train_loader))
if __name__=="__main__":
main()
|
from mcu.utils import elastic2D
# Ref: https://doi.org/10.1021/jacs.8b13075
# Elastic tensors
AlB6_1 = [379.9,438.4,23.1,159.6]
AlB6_2 = [383,375.3,33.6,132.1]
AlB6_3 = [395.1,401,44,173.6]
AlB6_4 = [229.9,194.2,7.1,80.3]
AlB6_5 = [242.2,171.1,15.6,57.1]
AlB6_6 = [149.3,92.2,24.9,63.2]
# define a list of elastic tensors
elastic_tensors = [AlB6_1,AlB6_2,AlB6_3,AlB6_4,AlB6_5,AlB6_6]
# Analyse
legend = ["AlB6_1", "AlB6_2", "AlB6_3", "AlB6_4", "AlB6_5", "AlB6_6",]
elastic2D.analyze(elastic_tensors)
elastic2D.plot_polar(elastic_tensors, young=True, legend=legend, figname='young', save=True)
elastic2D.plot_polar(elastic_tensors, young=False, legend=legend, figname='poisson', save=True)
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module contains tests for the functions in aspects.py
Currently the tests are only for the slicing aspect - to
check it's time complexity.
"""
import sys
sys.path.append(".")
import pandas
import time
import randstr, random
from util import aspects, enums
def generate_1():
"""
This function hardcodes a pandas dataframe containing 1 Million rows &
    3 columns, and stores it as a csv file in data/data_for_test_aspects/.
Columns = [Name, Age, Gender]
Around half of the rows would be Gender = Male, and such rows will be placed
at odd row indices.
Args:
Returns:
Raises:
"""
number_of_rows = 1000000
map_gender = {0 : 'Female', 1: 'Male'}
# Generating a list of random strings as Names
list_names = [randstr.randstr(16) for row in range(number_of_rows)]
# Generating a list of random integers between 1 - 100 as Ages
list_age = [random.randint(1, 100) for row in range(number_of_rows)]
    # Generating a list of alternating 'Female' / 'Male' values
    list_gender = [map_gender[row % 2] for row in range(number_of_rows)]
table = pandas.DataFrame({'Name' : list_names,
'Age' : list_age,
'Gender' : list_gender})
    table.to_csv('data/data_for_test_aspects/test_1.csv', index=False)
def test_1():
"""
Situation : This test will check the time complexity of the drop
aspect.
Alternate rows are dropped in this test case.
The drop aspect should work in O(number_of_rows *
average_bytes_per_column). And not in O(number_of_rows *
number_of_rows * average_bytes_per_column).
This test checks if the slice_table aspect actually works in
    the desired time complexity.
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
# noting the calling time of the slice function
start_time = time.time()
table = aspects.slice_table(table, [('Gender', enums.Filters.EQUAL_TO,
'Female')])
# noting the end return time of the slice function
end_time = time.time()
time_taken = end_time - start_time
print('Execution Time ', time_taken)
assert(time_taken <= 20)
def test_2():
"""
Situation : This test will check the time complexity of the drop
aspect.
Rows with age > 50 will be dropped, so around half of the
rows will be dropped.
The drop aspect should work in O(number_of_rows *
average_bytes_per_column). And not in O(number_of_rows *
number_of_rows * average_bytes_per_column).
This test checks if the slice_table aspect actually works in
    the desired time complexity.
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
# noting the calling time of the slice function
start_time = time.time()
table = aspects.slice_table(table, [('Age', enums.Filters.LESS_THAN, 51)])
# noting the end return time of the slice function
end_time = time.time()
time_taken = end_time - start_time
print('Execution Time ', time_taken)
assert(time_taken <= 20)
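# For reference, a minimal sketch of the complexity difference the two tests above guard
# against (illustrative only, not the actual aspects.slice_table implementation):
#
#   # O(rows * bytes_per_row): one vectorized boolean mask over the whole frame
#   kept = table[table['Gender'] == 'Female']
#
#   # O(rows^2 * bytes_per_row): dropping rows one at a time re-copies the frame each time
#   kept = table
#   for idx in table.index:
#       if table.at[idx, 'Gender'] != 'Female':
#           kept = kept.drop(idx)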
def test_3():
"""
Situation : This tests the median aspect.
In the same randomly generated dataset calculate the median
age group by gender
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
result = aspects.group_by(table, ['Gender'], enums.SummaryOperators.MEDIAN)
result_table = result['table']
result_suggestions = result['suggestions']
print(result_table)
expected_result = """ Gender Age
0 Female 50
1 Male 51"""
expected_suggestions = "[]"
assert(result_table.to_string() == expected_result)
assert(str(result_suggestions) == expected_suggestions)
def test_4():
""" Test for summary operator = PROPORTION_OF_COUNT
Proportion of count of gender for each race/ethnicity
Dataset used : https://www.kaggle.com/spscientist/students-performance-in-exams
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')
result = aspects.group_by(table, ['race/ethnicity'],
enums.SummaryOperators.PROPORTION_OF_COUNT)
result_table = result['table']
result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'gender'])
result_suggestions = result['suggestions']
    # Sum of proportion column should be (close to) 1.0
assert(result_table['gender'].sum() == 1.0)
print(result_table)
expected_result_table = """ race/ethnicity gender
0 group A 0.089
1 group B 0.190
2 group C 0.319
3 group D 0.262
4 group E 0.140"""
expected_suggestions = "[]"
assert(expected_result_table == result_table.to_string())
assert(str(result_suggestions) == expected_suggestions)
def test_5():
""" Test for summary operator = PROPORTION_OF_SUM
Proportion of sum of reading score for each race/ethnicity
Dataset used : https://www.kaggle.com/spscientist/students-performance-in-exams
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')
result = aspects.group_by(table, ['race/ethnicity'],
enums.SummaryOperators.PROPORTION_OF_SUM)
result_table = result['table']
result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'reading score'])
result_suggestions = result['suggestions']
    # Sum of proportion column should be (close to) 1.0
assert(float(format(result_table['reading score'].sum(), '.5f')) == 1)
print(result_table)
expected_result_table = """ race/ethnicity reading score
0 group A 0.083216
1 group B 0.185011
2 group C 0.318698
3 group D 0.265263
4 group E 0.147812"""
expected_suggestions = "[]"
assert(expected_result_table == result_table.to_string())
assert(str(result_suggestions) == expected_suggestions)
def test_6():
""" Test for oversight : Attribution With Hidden Negative
Proportion of sum of reading score for each race/ethnicity
Dataset used : https://www.kaggle.com/spscientist/students-performance-in-exams
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/student_performance_updated_to_create_attribution_with_hidden_negative_oversight.csv')
result = aspects.group_by(table, ['race/ethnicity'],
enums.SummaryOperators.PROPORTION_OF_SUM)
result_table = result['table']
result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'reading score'])
result_suggestions = result['suggestions']
print(result_table)
expected_result_table = """ race/ethnicity reading score
0 group A 0.083434
1 group B 0.185493
2 group C 0.316920
3 group D 0.265955
4 group E 0.148198"""
expected_suggestions = "[{'suggestion': 'There exists negative values among the values on which proportion is being applied', 'oversight': <Oversights.ATTRIBUTION_WITH_HIDDEN_NEGATIVES: 11>, 'is_row_level_suggestion': True, 'confidence_score': 1, 'row_list': [{'row': 14, 'confidence_score': 1}]}]"
assert(expected_result_table == result_table.to_string())
assert(str(result_suggestions) == expected_suggestions)
# print(generate_1.__doc__)
# generate_1()
print(test_1.__doc__)
test_1()
print(test_2.__doc__)
test_2()
print(test_3.__doc__)
test_3()
print(test_4.__doc__)
test_4()
print(test_5.__doc__)
test_5()
print(test_6.__doc__)
test_6()
print('Test cases completed')
|
# Implementation based on tf.keras.callbacks.py
# https://github.com/tensorflow/tensorflow/blob/v2.2.0/tensorflow/python/keras/callbacks.py
from typing import Union
import wandb
from .callback import Callback
class WandbLogger(Callback):
"""
Callback that streams epoch results to tensorboard events folder.
Supports all values that can be represented as a string,
including 1D iterables such as `np.ndarray`.
```python
tensorboard_logger = TensorBoard('runs')
model.fit(X_train, Y_train, callbacks=[tensorboard_logger])
```
"""
def __init__(
self,
update_freq: Union[str, int] = "epoch",
) -> None:
"""
Arguments:
            update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
              writes the losses and metrics to wandb after each batch. The same
              applies for `'epoch'`. If using an integer, let's say `1000`, the
              callback will write the metrics and losses to wandb every 1000
              batches. Note that logging too frequently can slow down
              your training.
"""
self.keys = None
self.write_per_batch = True
try:
self.update_freq = int(update_freq)
except ValueError as e:
self.update_freq = 1
if update_freq == "batch":
self.write_per_batch = True
elif update_freq == "epoch":
self.write_per_batch = False
else:
raise e
super().__init__()
def on_train_begin(self, logs=None):
self.steps = self.params["steps"]
self.global_step = 0
def on_train_batch_end(self, batch: int, logs=None):
logs = logs or {}
self.global_step = batch + self.current_epoch * (self.steps)
if self.global_step % self.update_freq == 0:
if self.keys is None:
self.keys = logs.keys()
wandb.log(logs, step=self.global_step)
def on_epoch_begin(self, epoch: int, logs=None):
self.current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.keys is None:
self.keys = logs.keys()
wandb.log(logs, step=self.global_step)
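# Usage sketch (hedged: assumes wandb.init(...) has been called before training and that
# the surrounding framework drives the Callback hooks; `model`, `X_train` and `Y_train`
# are placeholders, not defined in this module):
#
#   wandb.init(project="my-project")
#   model.fit(X_train, Y_train, callbacks=[WandbLogger(update_freq=1000)])
#   # update_freq=1000 logs every 1000 batches; update_freq="epoch" logs once per epoch.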
|
import argparse
import os
import shutil
import subprocess
import tempfile
import threading
import time
from .converters import PandocToPDFConverter
from .conf import conf
from .subproc import run_cmd
def get_src_context(src: str = None, src_ext: str = ""):
_src_ext = src_ext if src_ext != "" else "markdown"
if src is None:
return tempfile.NamedTemporaryFile(suffix=f".{_src_ext}", mode="w+")
else:
return open(os.path.expanduser(src), mode="a+")
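# Usage sketch for get_src_context (illustrative values): with src=None a temp file is
# created whose suffix, taken from src_ext, lets the converter detect the input format.
#
#   with get_src_context(src=None, src_ext="rst") as fd:
#       fd.write("Some reStructuredText")
#       fd.flush()  # make the content visible to the converter before it runs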
def get_mtime(fn: str):
return os.stat(fn).st_mtime
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source_file", metavar="SOURCE_FILE", nargs="?")
parser.add_argument(
"-p",
"--print-command",
action="store_true",
default=False,
help="""Print converter (e.g. pandoc) command that would be executed and
exit.""",
)
parser.add_argument(
"-c",
"--convert",
nargs="?",
metavar="TARGET",
default=None,
const="",
help="""Convert mode. Only run converter (see --print-command) and
produce TARGET (optional, temp file used if omitted, use
'%(prog)s -c -- SOURCE_FILE' or '%(prog)s SOURCE_FILE -c' in
that case).""",
)
parser.add_argument(
"-o",
"--extra-opts",
default="",
help="""Additional options to pass to the converter, e.g. for pandoc:
%(prog)s -o '--bibliography=/path/to/lit.bib' SOURCE_FILE. Mind
the quoting. Some shells mess up quoting in the short form -f,
using the long form as in --extra-opts='-f rst' then helps.""",
)
parser.add_argument(
"-f",
"--source-format",
default="",
help="""Format of SOURCE_FILE (file type, typically file extension).
Same as %(prog)s --extra-opts='-f SOURCE_FORMAT'. Passed to
pandoc (-f/--from) if used. Else (default) we use pandoc's
automatic detection. Use in combination with omitted
SOURCE_FILE, e.g. "%(prog)s -f rst" to edit a temp rst
file.""",
)
args = parser.parse_args()
# That is (should be :)) the only pandoc-specific hard coded line.
converter = PandocToPDFConverter
conf_dct = conf[converter.conf_section]
if os.path.exists(conf_dct["logfile"]):
os.unlink(conf_dct["logfile"])
# src and extra_opts are the same in every place where we call
# converter(...). Better use smth like
# converter=functools.partial(PandocToPDFConverter,
# extra_opts=args.extra_opts,
# src=src)
# ...
# cv = converter(tgt=...)
# but that breaks access to converter.some_attrs (e.g.
# converter.conf_section)
if args.print_command:
cv = converter(
src=args.source_file
if args.source_file is not None
else "SOURCE_FILE",
tgt=f"TARGET.{converter.tgt_ext}",
extra_opts=args.extra_opts,
src_ext=args.source_format,
)
print(cv.cmd)
return
with tempfile.NamedTemporaryFile(
suffix=f".{converter.tgt_ext}"
) as fd_tgt, get_src_context(
src=args.source_file, src_ext=args.source_format
) as fd_src:
cv = converter(
src=fd_src.name,
tgt=fd_tgt.name,
extra_opts=args.extra_opts,
src_ext=args.source_format,
)
if os.stat(fd_src.name).st_size == 0:
fd_src.write(
f"Hi, I'm your new file '{os.path.basename(fd_src.name)}'. "
f"Delete this line and start hacking."
)
# Actually write to file now before we hand fd_src down.
fd_src.flush()
def target_viewer():
run_cmd(f"{conf_dct['pdf_viewer']} {cv.tgt}")
def target_watch_convert():
try:
mtime = get_mtime(cv.src)
while thread_viewer.is_alive():
this_mtime = get_mtime(cv.src)
if this_mtime > mtime:
mtime = this_mtime
cv.convert()
time.sleep(0.5)
except FileNotFoundError:
# Only when: src is NamedTemporaryFile().name and editor is
# closed before viewer. Then get_mtime() will raise
# FileNotFoundError since the temp file was removed. Maybe we
# should log this case (logfile).
return
if args.convert is not None:
cv.convert(onerror="fail")
if args.convert != "":
shutil.copy(cv.tgt, args.convert)
else:
cv.convert(onerror="fail")
thread_viewer = threading.Thread(target=target_viewer)
thread_viewer.start()
thread_watch_convert = threading.Thread(
target=target_watch_convert
)
thread_watch_convert.start()
subprocess.run(
f"{conf_dct['editor']} {cv.src}", shell=True, check=True
)
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.HomeView.as_view(), name='home'),
url(r'^stream/$', views.ActivityResourceView.as_view(), name='stream'),
url(r'^drumkit/$', views.DrumKitView.as_view(), name='drumkit'),
]
|
"""Deactivate all pins."""
from . import config as cg
cg.quiet_logging(False)
def deactivate():
"""Deactivate the pins."""
cg.send('\nStart: Deactivating all PWM pins')
cg.set_pwm(cg.get_pin('Haptics', 'pin_buzzer'), 0)
cg.set_pwm(cg.get_pin('Haptics', 'pin_shaker'), 0)
for pin_color in ['red', 'blue', 'green']:
cg.set_pwm(cg.get_pin('RGB_Strip', 'pin_{}'.format(pin_color)), 0)
cg.send('\nEnd: Set all pins to off state [all_off.deactivate()]\n')
if __name__ == '__main__':
deactivate()
|
from serial.threaded import ReaderThread as ReaderThreadBase
class ReaderThread(ReaderThreadBase):
def __init__(self, port, serial_instance, protocol_factory):
super().__init__(serial_instance, protocol_factory)
self.__port = port
@property
def port(self):
return self.__port
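# Usage sketch (hedged: 'COM3' and LineReader are placeholders; LineReader would be a
# serial.threaded.Protocol subclass provided by the caller):
#
#   ser = serial.Serial('COM3', 115200)
#   reader = ReaderThread('COM3', ser, LineReader)
#   reader.start()
#   # reader.port records which device this reader thread is attached to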
|
# Generated by Django 3.2.7 on 2021-09-12 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kutub', '0015_manuscript_seal_description'),
]
operations = [
migrations.AddField(
model_name='manuscript',
name='support_description',
field=models.CharField(blank=True, default='', help_text='A description of the physical support for the written part of a manuscript.', max_length=1023),
),
]
|
from pathlib import Path
import os
import sys
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
"SECRET_KEY", "q+@9+9br!&gal37kganb367-9!+tra4(4g68^gdwm99pc&ja-^"
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("IS_DEBUG", "TRUE") == "TRUE"
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"corsheaders",
"api.apps.ApiConfig",
"storages",
]
MIDDLEWARE = [
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "stagifyapi.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
CORS_ALLOWED_ORIGINS = [
"http://localhost:4200",
"http://127.0.0.1:4200",
"http://stagifyapp.azurewebsites.net",
"https://stagifyapp.azurewebsites.net",
]
CORS_ALLOW_CREDENTIALS = True
WSGI_APPLICATION = "stagifyapi.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": os.environ.get("DATABASE_NAME", "stagify"),
"USER": os.environ.get("DATABASE_USER", "stagify-user"),
"PASSWORD": os.environ.get("DATABASE_PASSWORD", "stagify-password"),
"HOST": os.environ.get("DATABASE_HOST", "localhost"),
"PORT": os.environ.get("DATABASE_PORT", "5433"),
"OPTIONS": {"sslmode": os.environ.get("DATABASE_SSL_MODE", "allow")},
}
}
if "test" in sys.argv:
DATABASES["default"] = {"ENGINE": "django.db.backends.sqlite3"}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = "/uploads/"
MEDIA_ROOT = os.path.join(BASE_DIR, "uploads")
AZURE_CONTAINER = os.environ.get(
"AZURE_MEDIA_STORAGE_CONTAINER", "container for media storage on azure"
)
AZURE_CONNECTION_STRING = os.environ.get(
"AZURE_MEDIA_STORAGE_CONNECTION_STRING",
"connection string for media storage on azure",
)
SESSION_COOKIE_SECURE = os.environ.get("IS_DEBUG", "TRUE") != "TRUE"
SESSION_COOKIE_HTTPONLY = False
if os.environ.get("IS_DEBUG", "TRUE") == "TRUE":
SESSION_COOKIE_SAMESITE = "Lax"
else:
SESSION_COOKIE_SAMESITE = "None"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = "apikey"
EMAIL_HOST_PASSWORD = os.environ.get("SENDGRID_API_KEY", "api key for sendgrid")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
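# Illustrative environment for a non-debug run (variable names mirror the os.environ
# lookups above; the values and the gunicorn entry point are placeholders/assumptions):
#
#   IS_DEBUG=FALSE DATABASE_HOST=db.example.com DATABASE_SSL_MODE=require \
#   SECRET_KEY=... SENDGRID_API_KEY=... gunicorn stagifyapi.wsgi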
|
import sys, os, json, ast, requests
from time import time as tm, sleep as slp
from datetime import datetime as dt
from itertools import cycle
from pyquery import PyQuery as pq_
from browse import browse_url_profile_details
from user_agents import user_agents
data_dir = 'profile_data/'
class indeed_resumes_details(object):
def __init__(self, unique_id):
self.user_agents_cycle = cycle(user_agents)
self.unique_id = unique_id
self.profile_identities = {
'workExperience': {'list_key': 'work_experiences',
'content': '.workExperience-content .items-container',
'item_w_id': '.workExperience-content .items-container #%s',
'items': [('.work_title', None), ('.work_dates', None), ('.work_description', None), ('.work_company', {'.work_company': 0, '.work_location': -1})]
},
'education': {'list_key': 'education_bgs',
'content': '.education-content .items-container',
'item_w_id': '.education-content .items-container #%s',
'items': [('.edu_title', None), ('.edu_school', None), ('.edu_dates', None)]
},
}
def resource_collection(self):
url_ = browse_url_profile_details % self.unique_id
data = self.get_resource(url_)
details = self.extract_details(data)
return details
def extract_details(self, data):
t1 = tm()
details = {}
if not data:
return details
details['name'] = data('#basic_info_row #basic_info_cell #resume-contact').text()
details['title'] = data('#basic_info_row #basic_info_cell #headline').text()
details['address'] = data('#basic_info_row #basic_info_cell #contact_info_container .adr #headline_location').text()
details['skills'] = data('.skills-content #skills-items .data_display .skill-container').text().split(',')
details['additional_info'] = data('.additionalInfo-content #additionalinfo-items .data_display').text().encode('ascii','ignore')
identities = {}
for k, v in self.profile_identities.iteritems():
identities[k] = {'data': []}
for item in data(v['content']).children():
data_= {}
it = pq_(item)
if it.attr('id').startswith(k):
it_id = it.attr('id')
item = data(v['item_w_id'] % it_id)
children = pq_(item.children())
for each, splits in v['items']:
if splits:
item_construct = children(each).text().split('-')
for sub, index in splits.iteritems():
data_[sub] = item_construct[index]
else:
data_[each] = children(each).text().encode('ascii','ignore')
identities[k]['data'].append(data_)
details[k] = identities[k]
t2 = tm()
details['time_taken'] = t2-t1
details['timestamp'] = tm()
return details
def get_resource(self, url_):
user_agent = self.user_agents_cycle.next()
try:
            resp = requests.get(url_, headers = {'User-Agent': user_agent})
except:
slp(100)
print 'sleeping for 100 secs due to a block..'
user_agent = self.user_agents_cycle.next()
            resp = requests.get(url_, headers = {'User-Agent': user_agent})
if resp.status_code == 200:
data = pq_(resp.text)
data = data('#resume_body').children()
if not data:
user_agent = self.user_agents_cycle.next()
                resp = requests.get(url_, headers = {'User-Agent': user_agent})
if resp.status_code == 200:
data = pq_(resp.text)
data = data('#resume_body').children()
return data
else:
return []
else:
return data
else:
return []
def save_distincts():
"""
This method parses the unique ids from the given
    data directory of ids scraped from indeed
"""
t1 = tm()
object_ = {}
data_dir = 'data/'
#export_folder = '/Volume/SKILLZEQ/resumes_v1/%s/%s/'
export_folder = '/Volume/SKILLZEQ/resumes_v1/%s/%s/'
target = 'profile_data/distincts_v2.json'
target_file = open(target, 'wb')
for root, directories, files in os.walk(data_dir):
for filename in files:
file_ = filename.split('.') #--complete filename
file_format = file_[1] #--.json
keyword = file_[0] #--file name
domain = root.split('/')[1] #--parent folder
if file_format == 'json':
filepath = os.path.join(root, filename)
f = open(filepath, 'rb')
for record in f:
try:
record = filter(lambda p: p['type'] == 'resource_id', ast.literal_eval(record))
for i in record:
unique_id = i['data']
if unique_id in object_:
object_[unique_id].append(keyword)
else:
object_[unique_id] = [keyword]
#object_[unique_id] = 1
except:
print filepath
continue
f.close()
target_file.write(json.dumps(object_))
target_file.close()
t2 = tm()
print '%d seconds taken..' % int(t2-t1)
return
def get_distincts():
"""
This method returns the parsed dict of the unique file generated from save_distincts
"""
target = 'profile_data/distincts_v2.json'
f = open(target, 'rb')
for a in f:
data = json.loads(a)
f.close()
print 'data fetched for resume links..'
return data
def scrap_profiles(load_done=False):
done_ = {}
done_target = 'profile_data/done_v1.json'
t1 = tm()
data = get_distincts()
#folder = '/Volumes/SKILLZEQ/%s.json'
folder = '/Users/saif/skillz_eq_samples/%s.json'
for i, key in enumerate(data):
if key not in done_:
try:
obj = indeed_resumes_details(key)
profile = obj.resource_collection()
profile['semantics'] = data[key]
except:
                print 'put to sleep for 300 secs due to a block..'
slp(300)
try:
obj = indeed_resumes_details(key)
profile = obj.resource_collection()
profile['semantics'] = data[key]
except:
for k_ in data:
if k_ not in done_:
done_[k_] = 0
df = open(done_target, 'wb')
df.write(json.dumps(done_))
df.close()
print 'script terminated at %d records...data for dones in %s' % (i, done_target)
f = open(folder % key, 'wb')
f.write(json.dumps(profile))
f.close()
done_[key] = 1
if i % 1000 == 0:
t2 = tm()
print '%d records saved in %d seconds..' % (i, int(t2-t1))
if i == 2000:
break
t2 = tm()
    print 'success... %d records scraped.. in %d mins..' % (i, int(float(t2-t1)/60))
return
if __name__ == '__main__':
scrap_profiles()
# get_distincts()
# save_distincts()
# get_ids()
|
#!/usr/bin/python3
# lpoDB.py by Muhammad Hafidz
from datetime import date, datetime, timedelta
from tkinter import messagebox
import sqlite3
import lpoWeb
class lpoDB():
'''
    # A database module class to keep track of Wind Speed, Air Temperature, and Barometric Pressure readings for specific dates.
# The module's functionalities:
- Open/create database and configure table with the appropriate columns.
- Determine which dates have complete/incomplete data available in the database.
- Use lpoWeb module to download incomplete data from online API (navy.mil).
- Cache downloaded data into the database for future use.
- Return data for all requested range of dates to the lpoApp module.
'''
def __init__(self, **kwargs):
# kwargs - keyword arguments (the caller can use to specify the name of the database file and the name of the table).
self.filename = kwargs.get('filename', 'lpo.db') # class variable with default values.
self.table = kwargs.get('table', 'Weather') # class variable with default values.
self.db = sqlite3.connect(self.filename) # sqlite3.connect() method is called to open the database.
self.db.row_factory = sqlite3.Row # the "Row" method to configure the row_factory for retrieving data.
self.db.execute('''CREATE TABLE IF NOT EXISTS {} (Date TEXT, Time TEXT, Status TEXT, Air_Temp FLOAT, Barometric_Press FLOAT, Wind_Speed FLOAT)'''.format(self.table))
def __iter__(self):
'''
# This method is to return generator object with dictionaries (dicts) of entire DB contents.
'''
cursor = self.db.execute('SELECT * FROM {} ORDER BY Date, Time'.format(self.table))
for row in cursor:
yield dict(row)
def get_data_for_range(self, start, end):
'''
# Given a start and end date, this method is to return a generator of dicts (dictionaries),
containing all available Air_Temp, Barometric_Press, and Wind_Speed.
NOTE - It updates the database as necessary first.
'''
dates_to_update = [] # create a list for dates.
### 1 - determine pre-2007 dates to update and append to list.
for year in range(start.year, 2007):
if list(self._get_status_for_range(date(year, 1, 12), date(year, 1, 12))) == []:
dates_to_update.append(date(year, 1, 12)) # format: date(Year, Month, Day).
### 2 - determine post-2006 dates to update and append to list.
if (end.year > 2006) and (start >= date(2007, 1, 1)):
temp_start = start
elif (end.year > 2006) and (start < date(2007, 1, 1)):
temp_start = date(2007, 1, 1)
else:
# otherwise, start and end dates are both pre-2007.
temp_start = end
### 3 - generate a list of dates between temp_start and end.
delta = end - temp_start # create a variable, delta.
for d in range(delta.days + 1):
# note: the +1 makes it inclusive.
            dates_to_update.append(temp_start + timedelta(days = d))
statuses = list(self._get_status_for_range(temp_start, end))
### 4 - remove all dates from dates_to_update that have a 'COMPLETE' or 'PARTIAL' status.
for entry in statuses:
if entry['Status'] == 'COMPLETE':
dates_to_update.remove(datetime.strptime(str(entry['Date']), '%Y%m%d').date())
elif entry['Status'] == 'PARTIAL':
                partial_date = datetime.strptime(str(entry['Date']), '%Y%m%d').date()
                try:
                    # update for any new data first, then remove from dates_to_update list.
                    self._update_data_for_date(partial_date, True)
                except ValueError:
                    # if the refresh fails, skip this date rather than aborting the whole range.
                    pass
                dates_to_update.remove(partial_date)
### 5 - iterate through dates that were non-existent in DB and insert data.
error_dates = []
for day in dates_to_update:
try:
self._update_data_for_date(day, False)
except ValueError as e:
error_dates.append(e)
if error_dates != []:
error_message = 'There were problems accessing data for the following dates. They were not included in the result.\n'
for day in error_dates:
error_message += '\n{}'.format(day)
messagebox.showwarning(title = 'Warning', message = error_message)
### 6 - get Air_Temp, Barometric_Press, and Wind_Speed data from start/end dates range in database.
cursor = self.db.execute('''SELECT Air_Temp, Barometric_Press, Wind_Speed FROM {} WHERE Date BETWEEN {} AND {}'''.format(self.table, start.strftime('%Y%m%d'), end.strftime('%Y%m%d')))
for row in cursor:
yield dict(row)
def _get_status_for_range(self, start, end):
'''
        # Given a start and end date, this method returns a generator of dicts (dictionaries)
        containing all available Date and Status values.
'''
### 1 - Dates/Statuses that already exist in DB (module).
cursor = self.db.execute('''SELECT DISTINCT Date, Status FROM {} WHERE Date BETWEEN {} AND {}'''.format(self.table, start.strftime('%Y%m%d'), end.strftime('%Y%m%d')))
for row in cursor:
yield dict(row)
def _update_data_for_date(self, date, partial):
'''
# This method uses lpoWeb module to retrieve data for specified date and
insert them into new DB entry.
NOTE - use partial parameter to specify if entry already exists.
'''
### 1 - this clears out any partial data for this entry.
if partial:
            self.db.execute('DELETE FROM {} WHERE Date = {}'.format(self.table, date.strftime('%Y%m%d')))
self.db.commit()
        # any error raised by lpoWeb (e.g. for an unavailable date) propagates to the caller.
        data = lpoWeb.get_data_for_date(date)
for entry in data:
self.db.execute('''INSERT INTO {} (Date, Time, Status, Air_Temp, Barometric_Press, Wind_Speed)
VALUES (?, ?, ?, ?, ?, ?)'''.format(self.table), (entry['Date'].replace("_", ""),
entry['Time'],
entry['Status'],
entry['Air_Temp'],
entry['Barometric_Press'],
entry['Wind_Speed']))
self.db.commit()
def clear(self):
'''
# Clears out the database by dropping the current table.
'''
self.db.execute('DROP TABLE IF EXISTS {}'.format(self.table))
def close(self):
'''
# Safely close down the database connection.
'''
self.db.close()
del self.filename
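# Usage sketch (hedged: the filename, table name and date range below are illustrative):
#
#   db = lpoDB(filename='lpo.db', table='Weather')
#   for reading in db.get_data_for_range(date(2017, 6, 1), date(2017, 6, 7)):
#       print(reading['Air_Temp'], reading['Barometric_Press'], reading['Wind_Speed'])
#   db.close()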
def test():
'''
# Method: a simple test routine.
'''
### 1 - create/clear/close to empty db before testing.
db = lpoDB(filename = 'test.db', table = 'Test')
db.clear()
db.close()
### 2 - This is to create db for testing.
db = lpoDB(filename = 'test.db', table = 'Test')
### 3 - This is to verify that the db is empty.
    if list(db) != []:
print('Error in lpoDB test(): Database is not empty.')
### 4 - add data for current date.
try:
db._update_data_for_date(date.today(), False)
except:
print('ERROR in lpoDB.test(): Could not retrieve data for today\n')
for entry in db:
print(entry)
db.close() # close the connection of db.
if __name__ == '__main__':
### 1 - if this module (the source file) is run as main, it will execute the test routine.
test()
|
# Keypirinha: a fast launcher for Windows (keypirinha.com)
import keypirinha as kp
import keypirinha_util as kpu
from .lib import everything_ipc as evipc
import os.path
import threading
import time
import traceback
class Everything(kp.Plugin):
"""Search for files and folders via Everything"""
CONFIG_SECTION_MAIN = "main"
CONFIG_SECTION_DEFAULTS = "defaults"
CONFIG_SECTION_DEFAULT_SEARCH = "default_search"
CONFIG_SECTION_SEARCH = "search"
DEFAULT_ITEM_LABEL_FORMAT = "{plugin_name}: {search_name}"
DEFAULT_ENABLE_DEFAULT_SEARCHES = True
DEFAULT_ALLOW_EMPTY_SEARCH = False
DEFAULT_EXPECT_REGEX = False
mutex = None
searches = {}
def __init__(self):
super().__init__()
self.mutex = threading.Lock()
self.searches = {}
def on_start(self):
self._read_config()
def on_catalog(self):
self._read_config()
catalog = []
for search_name, search in self.searches.items():
catalog.append(self.create_item(
category=kp.ItemCategory.REFERENCE,
label=search['item_label'],
short_desc=search['description'],
target=search_name,
args_hint=kp.ItemArgsHint.REQUIRED,
hit_hint=kp.ItemHitHint.NOARGS))
catalog.append(self.create_item(
category=kp.ItemCategory.KEYWORD,
label="{}: {}".format(self.friendly_name(), "Rebuild DB"),
short_desc="Ask Everything to rebuild its database (v1.4+ only)",
target="rebuild_db",
args_hint=kp.ItemArgsHint.FORBIDDEN,
hit_hint=kp.ItemHitHint.NOARGS))
self.set_catalog(catalog)
def on_suggest(self, user_input, items_chain):
if not items_chain:
return
initial_item = items_chain[0]
current_item = items_chain[-1]
# support for pre-2.9 items
if (current_item.category() == kp.ItemCategory.KEYWORD and
current_item.target() in ("search", "search_regex")):
if not len(user_input):
return
try:
with self.mutex:
self.set_suggestions(
self._search(
user_input,
current_item.target() == "search_regex"),
kp.Match.ANY, kp.Sort.NONE)
except evipc.EverythingNotFound:
self.warn("Everything instance not found")
except:
self.err("Something bad happened while requesting Everything to perform your search.")
traceback.print_exc()
# handle "search" and "default_search" items defined in config
elif current_item.category() == kp.ItemCategory.REFERENCE:
if not initial_item.target() in self.searches.keys():
return
current_search_name = initial_item.target()
current_search = self.searches[current_search_name]
if not current_search:
return
if not current_search['allow_empty_search'] and not len(user_input):
return
            # avoid flooding Everything with too many unnecessary queries in
# case user is still typing
if len(user_input) > 0 and self.should_terminate(0.250):
return
# query
search_string = current_search['pattern'].replace("%s", user_input)
try:
with self.mutex:
self.set_suggestions(
self._search(search_string, current_search['is_regex']),
kp.Match.ANY, kp.Sort.NONE)
except evipc.EverythingNotFound:
self.warn("Everything instance not found")
except:
self.err("Something bad happened while requesting Everything to perform your search.")
traceback.print_exc()
# handle file system browsing
elif current_item.category() == kp.ItemCategory.FILE:
if os.path.isdir(current_item.target()):
suggestions, match_method, sort_method = self._browse_dir(
current_item.target(), check_base_dir=False,
search_terms=user_input)
self.set_suggestions(suggestions, match_method, sort_method)
elif os.path.exists(current_item.target()):
clone = current_item.clone()
clone.set_args(user_input)
clone.set_loop_on_suggest(False)
self.set_suggestions([clone], kp.Match.ANY, kp.Sort.NONE)
else:
self.set_suggestions([self.create_error_item(
label=user_input,
short_desc="File/Dir not found: " + current_item.target())])
def on_execute(self, item, action):
if item.category() == kp.ItemCategory.FILE:
kpu.execute_default_action(self, item, action)
elif item.category() == kp.ItemCategory.KEYWORD and item.target() == "rebuild_db":
with self.mutex:
try:
client = evipc.Client()
client.rebuild_db()
except evipc.EverythingNotFound:
self.warn("Everything instance not found")
except:
self.err("Something bad happened while requesting Everything to perform your search.")
traceback.print_exc()
def on_events(self, flags):
if flags & kp.Events.PACKCONFIG:
self.on_catalog()
def _search(self, terms, is_regex=False):
max_results = kp.settings().get_int("max_results", "gui", 1000, 100, 1000)
client = evipc.Client()
res_list = client.query(terms, is_regex=is_regex,
max_results=max_results,
should_terminate_cb=self.should_terminate)
catitems = []
idx = 0
for item_full_path, item_is_file in res_list:
item_label = os.path.basename(item_full_path)
if not item_label:
item_label = item_full_path
catitems.append(self.create_item(
category=kp.ItemCategory.FILE,
label=item_label,
short_desc="",
target=item_full_path,
args_hint=kp.ItemArgsHint.ACCEPTED,
hit_hint=kp.ItemHitHint.KEEPALL,
loop_on_suggest=True))
idx += 1
if idx % 10 == 0 and self.should_terminate():
return []
return catitems
def _browse_dir(self, base_dir, check_base_dir=True, search_terms=""):
return kpu.browse_directory(self,
base_dir,
check_base_dir=check_base_dir,
search_terms=search_terms,
show_dirs_first=True,
show_hidden_files=True,
show_system_files=True)
def _read_config(self):
self.searches = {}
settings = self.load_settings()
# [main]
item_label_format = settings.get_stripped(
"item_label_format",
section=self.CONFIG_SECTION_MAIN,
fallback=self.DEFAULT_ITEM_LABEL_FORMAT)
enable_default_searches = settings.get_bool(
"enable_default_searches",
section=self.CONFIG_SECTION_MAIN,
fallback=self.DEFAULT_ENABLE_DEFAULT_SEARCHES)
# [default]
default_allow_empty_search = settings.get_bool(
"allow_empty_search",
section=self.CONFIG_SECTION_DEFAULTS,
fallback=self.DEFAULT_ALLOW_EMPTY_SEARCH)
default_is_regex = settings.get_bool(
"is_regex",
section=self.CONFIG_SECTION_DEFAULTS,
fallback=self.DEFAULT_EXPECT_REGEX)
# read [search/*] and [default_search/*] sections
for section in settings.sections():
if section.lower().startswith(self.CONFIG_SECTION_DEFAULT_SEARCH + "/"):
if not enable_default_searches:
continue
search_label = section[len(self.CONFIG_SECTION_DEFAULT_SEARCH) + 1:].strip()
elif section.lower().startswith(self.CONFIG_SECTION_SEARCH + "/"):
search_label = section[len(self.CONFIG_SECTION_SEARCH) + 1:].strip()
else:
continue
if not len(search_label):
self.warn('Ignoring empty search name (section "{}").'.format(section))
continue
forbidden_chars = ":;,/|\\"
if any(c in forbidden_chars for c in search_label):
self.warn('Forbidden character(s) found in search name "{}". Forbidden characters list "{}"'.format(search_label, forbidden_chars))
continue
if search_label.lower() in self.searches.keys():
self.warn('Ignoring duplicated search "{}" defined in section "{}".'.format(search_label, section))
continue
if not settings.get_bool("enable", section=section, fallback=True):
continue
search_item_label_format = settings.get_stripped(
"item_label_format", section=section, fallback=item_label_format)
search_item_label = search_item_label_format.format(
search_name=search_label, plugin_name=self.friendly_name())
search_pattern = settings.get_stripped("pattern", section=section)
if not len(search_pattern):
self.warn('Search "{}" does not have "pattern" value. Search ignored.'.format(search_label))
continue
if '%s' not in search_pattern:
self.warn('Search-terms placeholder "%s" not found in pattern of search "{}". Search ignored.'.format(search_label))
continue
search_description = settings.get_stripped(
"description", section=section, fallback="Search {}".format(search_label))
search_allow_empty_search = settings.get_bool(
"allow_empty_search", section=section, fallback=default_allow_empty_search)
search_is_regex = settings.get_bool(
"is_regex", section=section, fallback=default_is_regex)
self.searches[search_label.lower()] = {
'pattern': search_pattern,
'item_label': search_item_label,
'allow_empty_search': search_allow_empty_search,
'is_regex': search_is_regex,
'description': search_description}
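# Illustrative package configuration parsed by _read_config above (the section name and
# values are examples only; the pattern must contain the "%s" search-terms placeholder):
#
#   [search/pdf]
#   pattern = ext:pdf %s
#   description = Search PDF files
#   allow_empty_search = no
#   is_regex = no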
|
"""
Test the position module.
"""
import dataclasses
import math
from pytest import fixture
from highlevel.robot.controller.motion.position import PositionController, tick_to_mm
from highlevel.robot.entity.configuration import Configuration
from highlevel.util.geometry.vector import Vector2
@fixture(name='configuration')
def configuration_stub(configuration_test: Configuration) -> Configuration:
"""
Configuration for tests.
"""
return dataclasses.replace(configuration_test,
initial_angle=0,
initial_position=Vector2(0, 0),
wheel_radius=1 / (2 * math.pi))
@fixture(name='position_controller')
def position_controller_setup(odometry_mock, configuration, probe_mock):
"""
Localization controller.
"""
return PositionController(
odometry_function=odometry_mock,
configuration=configuration,
probe=probe_mock,
)
class TestPositionController:
"""
Tests for position controller.
"""
@staticmethod
def test_update_first_call_noop(position_controller, odometry_mock):
"""
Test the update function's first call (no change to position/angle).
"""
position_controller.update_odometry(1, 1)
odometry_mock.assert_not_called()
@staticmethod
def test_update_two_calls(position_controller, odometry_mock,
configuration):
"""
Test the update function with two calls (one for init, one for call to odometry).
"""
perimeter = 2 * math.pi * configuration.wheel_radius
ticks = 100
distance = perimeter * ticks / configuration.encoder_ticks_per_revolution
position_controller.update_odometry(1, 2)
position_controller.update_odometry(1 + ticks, 2 - 2 * ticks)
odometry_mock.assert_called_once_with(distance, -2 * distance,
configuration.initial_position,
configuration.initial_angle,
configuration)
@staticmethod
def test_update_two_calls_position(position_controller, odometry_mock):
"""
Test the update function's result.
"""
position_controller.update_odometry(0, 0)
position_controller.update_odometry(777, 666)
assert (position_controller.position,
position_controller.angle) == odometry_mock.return_value
@staticmethod
def test_initial_values(position_controller, configuration):
"""
Test that the initial values for position/angle are set from configuration.
"""
assert position_controller.position == configuration.initial_position
assert position_controller.angle == configuration.initial_angle
@staticmethod
def test_distance_travelled_translation(position_controller, configuration,
odometry_mock):
"""
Test the travelled distance computation.
"""
odometry_mock.return_value = (
configuration.initial_position + Vector2(10, 0),
0,
)
step_ticks = 100
step_mm = tick_to_mm(100, configuration.encoder_ticks_per_revolution,
configuration.wheel_radius)
position_controller.update_odometry(0, 0)
position_controller.update_odometry(step_ticks, step_ticks)
assert position_controller.distance_travelled == step_mm
position_controller.update_odometry(2 * step_ticks, 2 * step_ticks)
assert position_controller.distance_travelled == 2 * step_mm
position_controller.update_odometry(step_ticks, step_ticks)
assert position_controller.distance_travelled == step_mm
|
from situations.complex.yoga_class import YogaClassScheduleMixin
from venues.relaxation_center_zone_director import VisitorSituationOnArrivalZoneDirectorMixin
from venues.scheduling_zone_director import SchedulingZoneDirector
class ParkZoneDirector(YogaClassScheduleMixin, VisitorSituationOnArrivalZoneDirectorMixin, SchedulingZoneDirector):
pass
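# Python's C3 method resolution order runs left to right, so the scheduling hooks from
# YogaClassScheduleMixin and VisitorSituationOnArrivalZoneDirectorMixin take precedence
# over, and can cooperatively extend, the base SchedulingZoneDirector behaviour.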
|
import unittest
from py3status_lbl.lenovo_battery_level import BatteryInfoModel, BatteryLevelElement
class MyTestCase(unittest.TestCase):
def test_convert_linux_battery_status_to_data_model(self):
expected_battery_info = {'History (charge)': '',
'History (rate)': '',
'capacity': '87,4844%',
'energy': '21 Wh',
'energy-empty': '0 Wh',
'energy-full': '21,17 Wh',
'energy-full-design': '24,05 Wh',
'energy-rate': '9,796 W',
'has history': 'yes',
'has statistics': 'yes',
'icon-name': "'battery-full-symbolic'",
'model': '01AV424',
'native-path': 'BAT1',
'percentage': '99%',
'power supply': 'yes',
'present': 'yes',
'rechargeable': 'yes',
'serial': '4458',
'state': 'discharging',
'technology': 'lithium-polymer',
'time to empty': '2,1 hours',
'vendor': 'Celxpert',
'voltage': '12,23 V',
'warning-level': 'none'}
txt = b" native-path: BAT1\n vendor: Celxpert\n model: 01AV424\n serial: 4458\n power supply: yes\n updated: wto, 12 maj 2020, 10:39:01 (11 seconds ago)\n has history: yes\n has statistics: yes\n battery\n present: yes\n rechargeable: yes\n state: discharging\n warning-level: none\n energy: 21 Wh\n energy-empty: 0 Wh\n energy-full: 21,17 Wh\n energy-full-design: 24,05 Wh\n energy-rate: 9,796 W\n voltage: 12,23 V\n time to empty: 2,1 hours\n percentage: 99%\n capacity: 87,4844%\n technology: lithium-polymer\n icon-name: 'battery-full-symbolic'\n History (charge):\n 1589272741\t99,000\tdischarging\n History (rate):\n 1589272741\t9,796\tdischarging\n\n"
self.assertEqual(expected_battery_info, BatteryInfoModel.create(txt))
def test_should_return_formatted_message_with_battery_status_displayed_as_percentage(self):
battery_info_0 = {
'percentage': '99%',
'time to empty': '2,1 hours'
}
battery_info_1 = {
'percentage': '50%',
'time to empty': '1 hours'
}
self.assertEqual("B0(99%) B1(50%)",
BatteryLevelElement.format_message(battery_info_0, battery_info_1, 'percentage'))
def test_should_return_formatted_message_with_information_time_to_empty_no_charging_case(self):
battery_info_0 = {
'percentage': '99%',
}
battery_info_1 = {
'percentage': '50%',
'time to empty': '1 hours'
}
self.assertEqual("Time left: B0(-:--h) B1(1:00h)",
BatteryLevelElement.format_message(battery_info_0, battery_info_1, 'time to empty'))
def test_should_return_formatted_message_with_information__time_to_full_charging_case(self):
battery_info_0 = {
'percentage': '99%',
}
battery_info_1 = {
'percentage': '50%',
'time to full': '1 hours'
}
self.assertEqual("Charging: B0(-:--h) B1(1:00h)",
BatteryLevelElement.format_message(battery_info_0, battery_info_1, 'time to full'))
def test_should_return_time_in_hours(self):
self.assertEqual("1:12", BatteryLevelElement._time_in_hour_format("1,2 hours"))
self.assertEqual("0:33", BatteryLevelElement._time_in_hour_format("33,3 minutes"))
if __name__ == '__main__':
unittest.main()
|
"""
Divide a given video into multiple shots using the kernel temporal segmentation
library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import os
from scipy.misc import imresize
from PIL import Image
from skimage import color
# from skimage.feature import hog
import numpy as np
import _init_paths # noqa
import utils
from kts.cpd_auto import cpd_auto
def color_hist(im, colBins):
"""
Get color histogram descriptors for RGB and LAB space.
Input: im: (h,w,c): 0-255: np.uint8
Output: descriptor: (colBins*6,)
"""
assert im.ndim == 3 and im.shape[2] == 3, "image should be rgb"
arr = np.concatenate((im, color.rgb2lab(im)), axis=2).reshape((-1, 6))
desc = np.zeros((colBins * 6,), dtype=np.float)
for i in range(3):
desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
arr[:, i], bins=colBins, range=(0, 255))
desc[i * colBins:(i + 1) * colBins] /= np.sum(
desc[i * colBins:(i + 1) * colBins]) + (
np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
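    # After the RGB loop i == 2; the increment below reuses the loop variable so that
    # column 3 (the L channel of LAB, range 0-100) gets its own histogram, while the
    # a/b channels (range -128..127) are handled by the final loop.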
i += 1
desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
arr[:, i], bins=colBins, range=(0, 100))
desc[i * colBins:(i + 1) * colBins] /= np.sum(
desc[i * colBins:(i + 1) * colBins]) + (
np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
for i in range(4, 6):
desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
arr[:, i], bins=colBins, range=(-128, 127))
desc[i * colBins:(i + 1) * colBins] /= np.sum(
desc[i * colBins:(i + 1) * colBins]) + (
np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
return desc
def compute_features(im, colBins):
"""
    Compute features of images: RGB + LAB color histogram (HOG code currently disabled)
im: (h,w,c): 0-255: np.uint8
feat: (d,)
"""
colHist = color_hist(im, colBins=colBins)
# hogF = hog(
# color.rgb2gray(im), orientations=hogBins,
# pixels_per_cell=(hogCellSize, hogCellSize),
# cells_per_block=(int(np.sqrt(hogCells)),
# int(np.sqrt(hogCells))),
# visualise=False)
# return np.hstack((hogF, colHist))
return colHist
def vid2shots(imSeq, maxShots=5, vmax=0.6, colBins=40):
"""
Convert a given video into number of shots
imSeq: (n,h,w,c): 0-255: np.uint8: RGB
shotIdx: (k,): start Index of shot: 0-indexed
shotScore: (k,): First change ../lib/kts/cpd_auto.py return value to
scores2 instead of costs (a bug)
"""
X = np.zeros((imSeq.shape[0], compute_features(imSeq[0], colBins).size))
print('Feature Matrix shape:', X.shape)
for i in range(imSeq.shape[0]):
X[i] = compute_features(imSeq[i], colBins)
K = np.dot(X, X.T)
shotIdx, _ = cpd_auto(K, maxShots - 1, vmax)
shotIdx = np.concatenate(([0], shotIdx))
return shotIdx
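# Usage sketch (hedged: `frames` is a placeholder (n, h, w, 3) uint8 RGB array loaded
# elsewhere, e.g. by demo_images below):
#
#   shotIdx = vid2shots(frames, maxShots=5, vmax=0.6, colBins=40)
#   # shotIdx[k] is the 0-indexed first frame of shot k; shotIdx[0] is always 0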
def parse_args():
"""
Parse input arguments
"""
import argparse
parser = argparse.ArgumentParser(
        description='Divide a given video into multiple shots using KTS')
parser.add_argument(
'-out', dest='outdir',
help='Directory to save output.',
default=os.getenv("HOME") + '/local/data/trash/', type=str)
parser.add_argument(
'-imdir', dest='imdir',
help='Directory containing video images. Will be read ' +
'alphabetically. Default is random Imagenet train video.',
default='', type=str)
parser.add_argument(
'-fgap', dest='frameGap',
help='Gap between frames while running tracker. Default 0.',
default=0, type=int)
parser.add_argument(
'-n', dest='maxShots',
help='Max number of shots to break into. Default 5.',
default=5, type=int)
parser.add_argument(
'-d', dest='colBins',
help='Number of bins in RGBLAB histogram. Default 40. ',
default=40, type=int)
parser.add_argument(
'-v', dest='vmax',
help='Parameter for KTS, lower value means more clips. Default 0.6.',
default=0.6, type=float)
parser.add_argument(
'-seed', dest='seed',
help='Random seed for numpy and python.', default=2905, type=int)
args = parser.parse_args()
return args
def demo_images():
"""
Input is the path of directory (imdir) containing images of a video
"""
# Hard coded parameters
maxSide = 400 # max length of longer side of Im
    lenSeq = 1e8  # longer seq will be shrunk to between [lenSeq/2, lenSeq]
# parse commandline parameters
args = parse_args()
np.random.seed(args.seed)
if args.imdir == '':
imagenetVideoList = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \
'imagenet_videos/ILSVRC2015/ImageSets/VID/' + \
'train_10.txt'
imagenetRoot = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \
'imagenet_videos/ILSVRC2015/Data/VID/train/'
with open(imagenetVideoList, 'r') as f:
lines = f.readlines()
imdirs = [x.strip().split(' ')[0] for x in lines]
imdirs = imdirs[np.random.randint(len(imdirs))]
args.imdir = os.path.join(imagenetRoot, imdirs)
args.outdir = os.path.join(args.outdir, imdirs)
# setup input directory
print('InputDir: ', args.imdir)
imPathList = utils.read_r(args.imdir, '*.*')
if len(imPathList) < 2:
print('Not enough images in image directory: \n%s' % args.imdir)
return
# setup output directory
suffix = args.imdir.split('/')[-1]
suffix = args.imdir.split('/')[-2] if suffix == '' else suffix
args.outdir = args.outdir + '/' + suffix
utils.mkdir_p(args.outdir)
print('OutputDir: ', args.outdir)
# load image sequence after adjusting frame gap and imsize
frameGap = args.frameGap
if frameGap <= 0 and len(imPathList) > lenSeq:
frameGap = int(len(imPathList) / lenSeq)
imPathList = imPathList[0:len(imPathList):frameGap + 1]
h, w, c = np.array(Image.open(imPathList[0])).shape
frac = min(min(1. * maxSide / h, 1. * maxSide / w), 1.0)
if frac < 1.0:
h, w, c = imresize(np.array(Image.open(imPathList[0])), frac).shape
imSeq = np.zeros((len(imPathList), h, w, c), dtype=np.uint8)
for i in range(len(imPathList)):
if frac < 1.0:
imSeq[i] = imresize(np.array(Image.open(imPathList[i])), frac)
else:
imSeq[i] = np.array(Image.open(imPathList[i]))
print('Total Video Shape: ', imSeq.shape)
# run the algorithm
shotIdx = vid2shots(imSeq, maxShots=args.maxShots, vmax=args.vmax,
colBins=args.colBins)
print('Total Shots: ', shotIdx.shape, shotIdx)
np.save(args.outdir + '/shotIdx_%s.npy' % suffix, shotIdx)
# save visual results
from PIL import ImageDraw
utils.rmdir_f(args.outdir + '/shots_%s/' % suffix)
utils.mkdir_p(args.outdir + '/shots_%s/' % suffix)
frameNo = 1
shotNo = 0
for i in range(imSeq.shape[0]):
img = Image.fromarray(imSeq[i])
draw = ImageDraw.Draw(img)
if i in shotIdx:
draw.text((100, 100), "New Shot Begins !!", (255, 255, 255))
shotNo += 1
frameNo = 1
draw.text((10, 10), "Shot: %02d, Frame: %03d" % (shotNo, frameNo),
(255, 255, 255))
img.save(
args.outdir + '/shots_%s/' % suffix + imPathList[i].split('/')[-1])
frameNo += 1
import subprocess
subprocess.call(
['tar', '-zcf', args.outdir + '/../shots_%s.tar.gz' % suffix,
'-C', args.outdir + '/shots_%s/' % suffix, '.'])
return
if __name__ == "__main__":
demo_images()
|
import logging
import os
import shutil
import numpy as np
from itertools import permutations
import tensorflow as tf
import model_fn.model_fn_2d.util_2d.graphs_2d as graphs
from model_fn.model_fn_base import ModelBase
from model_fn.util_model_fn.losses import batch_point3_loss, ordered_point3_loss
class ModelPolygon(ModelBase):
def __init__(self, params):
super(ModelPolygon, self).__init__(params)
self._flags = self._params['flags']
self._targets = None
self._point_dist = None
self._summary_object = {"tgt_points": [], "pre_points": [], "ordered_best": [], "unordered_best": []}
def get_graph(self):
return getattr(graphs, self._params['flags'].graph)(self._params)
def get_placeholder(self):
return {"fc": tf.compat.v1.placeholder(tf.float32, [None, 3, None], name="infc")}
def get_output_nodes(self, has_graph=True):
if has_graph:
tf.identity(self._graph_out['e_pred'], name="e_pred") # name to grab from java
return "e_pred" # return names as comma separated string without spaces
def get_target_keys(self):
return 'edges'
def get_predictions(self):
return self._graph_out['e_pred']
def info(self):
self.get_graph().print_params()
def get_loss(self):
# self._targets['points'] = tf.Print(self._targets['points'], [self._targets['points']])
# loss0 = tf.losses.absolute_difference(self._targets['points'], self._graph_out['p_pred'])
# print("params train batch size", self._params["flags"].train_batch_size)
# print("points", self._targets['points'])
loss = 0.0
loss_edge = 0
# loss_p = tf.print("tgt:", tf.squeeze(self._targets['edges']-3, axis=-1), "\npred", tf.argmax(self._graph_out['e_pred'], axis=1), tf.shape(self._graph_out['e_pred']), summarize=1000)
# with tf.control_dependencies([loss_p]):
if 'softmax_crossentropy' == self._flags.loss_mode:
loss_edge = tf.reduce_mean(tf.sqrt(tf.compat.v1.losses.softmax_cross_entropy(
tf.one_hot(tf.squeeze(self._targets['edges'], axis=-1) - 3, depth=4), self._graph_out['e_pred'])))
elif "abs_diff" == self._flags.loss_mode:
loss_edge = tf.reduce_mean(
tf.compat.v1.losses.absolute_difference(self._targets['edges'], self._graph_out['e_pred']))
else:
logging.error("no valid loss-mode in loss_params")
raise AttributeError
def num_step(x, bias):
return tf.math.divide(1, tf.add(tf.constant(1.0), tf.math.exp(-10.0 * (x - bias))))
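        # num_step is a smooth (sigmoid) step: ~0 for x well below `bias`, ~1 well above.
        # Below, 1 - num_step(...) turns the target edge count into a soft mask over the
        # max_edges point slots, zeroing predicted points past the step position.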
ff_final_reshaped = tf.reshape(tf.cast(self._graph_out['p_pred'], dtype=tf.float32),
shape=(-1, self._flags.max_edges, 2))
num_step_tensor = 1.0 - num_step(
tf.cast(tf.range(self._flags.max_edges), dtype=tf.float32) + 1.0,
(tf.minimum(tf.maximum(tf.cast(self._targets['edges'], dtype=tf.float32), 0.0),
tf.cast(self._flags.max_edges, dtype=tf.float32) - 2.0) + 3.5))
num_step_tensor = tf.expand_dims(num_step_tensor, axis=-1)
        num_step_tensor_broadcast = tf.broadcast_to(num_step_tensor, [tf.shape(ff_final_reshaped)[0], self._flags.max_edges, 2])
print(num_step_tensor_broadcast)
print(self._targets["points"])
corrected_p_pred = tf.math.multiply(ff_final_reshaped, num_step_tensor_broadcast)
# paddings = tf.constant([[0, 0], [0,0], [0,tf.shape()]])
# target_extpanded = tf.pad(self._targets["points"], )
loss_points = tf.reduce_mean(tf.losses.absolute_difference(self._targets["points"], corrected_p_pred))
loss_points = tf.Print(loss_points, [self._targets['edges'], loss_edge, loss_points], summarize=1000,
message="loss_edge, loss_points")
loss_points = tf.Print(loss_points, [self._targets["points"]], summarize=1000, message="tgt_points")
loss_points = tf.Print(loss_points, [corrected_p_pred], summarize=1000, message="pre_points")
# loss_points = tf.Print(loss_points, [tf.losses.absolute_difference(self._targets["points"], corrected_p_pred)], summarize=1000, message="point_loss")
# loss0 = tf.cast(batch_point3_loss(self._targets['points'], self._graph_out['p_pred'],
# self._params["flags"].train_batch_size), dtype=tf.float32)
# loss0 = tf.cast(ordered_point3_loss(self._targets['points'], self._graph_out['p_pred'],
# self._params["flags"].train_batch_size), dtype=tf.float32)
# target_mom = tf.reduce_mean(
# tf.nn.moments(tf.reshape(self._targets['points'], (self._params["flags"].train_batch_size, 6)), axes=1),
# axis=1)
# pred_mom = tf.reduce_mean(
# tf.nn.moments(tf.reshape(self._graph_out['p_pred'], (self._params["flags"].train_batch_size, 6)), axes=1),
# axis=1)
# loss1 = tf.losses.absolute_difference(tf.reduce_sum(tf.abs(self._targets['points'])), tf.reduce_sum(tf.abs(self._graph_out['p_pred'])))
# loss = tf.Print(loss, [loss], message="loss:"
# print(loss0)
loss = loss_edge + loss_points
# loss = tf.Print(loss, [loss, tf.shape(target_mom), target_mom, pred_mom], message="loss0, loss1:")
# loss = tf.Print(loss, [loss], message="loss:")
return loss
def export_helper(self):
for train_list in self._params['flags'].train_lists:
data_id = os.path.basename(train_list)[:-8]
shutil.copy(os.path.join("data/synthetic_data", data_id, "log_{}_train.txt".format(data_id)),
os.path.join(self._params['flags'].checkpoint_dir, "export"))
data_id = os.path.basename(self._params['flags'].val_list)[:-8]
shutil.copy(os.path.join("data/synthetic_data", data_id, "log_{}_val.txt".format(data_id)),
os.path.join(self._params['flags'].checkpoint_dir, "export"))
def print_evaluate(self, output_dict, target_dict):
with tf.compat.v1.Session().as_default():
tgt_area_sum = 0
area_diff_sum = 0
loss_ordered = ordered_point3_loss(output_dict["p_pred"], target_dict["points"],
self._params['flags'].val_batch_size)
loss_best = batch_point3_loss(output_dict["p_pred"], target_dict["points"],
self._params['flags'].val_batch_size)
self._summary_object["tgt_points"].extend(
[target_dict["points"][x] for x in range(self._params['flags'].val_batch_size)])
self._summary_object["pre_points"].extend(
[output_dict["p_pred"][x] for x in range(self._params['flags'].val_batch_size)])
ob_buffer_list = []
ub_buffer_list = []
for i in range(output_dict["p_pred"].shape[0]):
# print("## {:4d} Sample ##".format(i))
# # print(loss_ordered.eval())
# print("loss: {:3.2f}(ordered)| {:3.2f} (best)".format(loss_ordered.eval()[i], loss_best.eval()[i]))
if np.abs(loss_ordered.eval()[i] - loss_best.eval()[i]) > 0.01:
# print("WARNING: losses are not equal")
ob_buffer_list.append(np.nan)
ub_buffer_list.append(loss_best.eval()[i])
else:
ob_buffer_list.append(loss_best.eval()[i])
ub_buffer_list.append(np.nan)
self._summary_object["ordered_best"].extend(ob_buffer_list)
self._summary_object["unordered_best"].extend(ub_buffer_list)
# print("predicted points")
# print(output_dict["p_pred"][i])
# print("target points")
# print(target_dict["points"][i])
# pred_area = np.abs(np.dot((output_dict["p_pred"][i][0] - output_dict["p_pred"][i][1]), (output_dict["p_pred"][i][1] - output_dict["p_pred"][i][2])) / 2.0)
# tgt_area = np.abs(np.dot((target_dict["points"][i][0] - target_dict["points"][i][1]), (target_dict["points"][i][1] - target_dict["points"][i][2])) / 2.0)
# area_diff_sum += np.max(pred_area - tgt_area)
# tgt_area_sum += tgt_area
# print("area diff: {:0.3f}".format(np.abs(pred_area - tgt_area) / tgt_area))
# print("target area: {:0.3f}".format(np.abs(tgt_area)))
return area_diff_sum, tgt_area_sum
# def print_evaluate_summary(self):
# sample_counter = 0
# import matplotlib.pyplot as plt
#
# from matplotlib.patches import Polygon
# from shapely import geometry
# from matplotlib.collections import PatchCollection
# summary_lenght= len(self._summary_object["tgt_points"])
# print("summary length: {}".format(summary_lenght))
#
# tgt_area_arr = np.zeros(summary_lenght)
# pre_area_arr = np.zeros(summary_lenght)
# iou_arr = np.zeros(summary_lenght)
# co_loss_arr = np.ones(summary_lenght) * np.nan
# wo_loss_arr = np.ones(summary_lenght) * np.nan
#
# for i in range(summary_lenght):
# pre_points = np.reshape(self._summary_object["pre_points"][i], (3,2))
# tgt_points = np.reshape(self._summary_object["tgt_points"][i], (3,2))
# # print(pre_points)
# # print(tgt_points)
# pre_polygon = geometry.Polygon([pre_points[0], pre_points[1], pre_points[2]])
# tgt_polygon = geometry.Polygon([tgt_points[0], tgt_points[1], tgt_points[2]])
# # print(pre_points, tgt_points)
# # print(i)
# intersetion_area = pre_polygon.intersection(tgt_polygon).area
# union_area = pre_polygon.union(tgt_polygon).area
# iou_arr[i] = intersetion_area / union_area
# tgt_area_arr[i] = tgt_polygon.area
# pre_area_arr[i] = pre_polygon.area
# # co_loss_arr[i] = self._summary_object["ordered_best"][i]
# # wo_loss_arr[i] = self._summary_object["unordered_best"][i]
# # if True:
# # fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9.5, 14))
# #
# # ax1.fill(tgt_points.transpose()[0],tgt_points.transpose()[1], "b", pre_points.transpose()[0], pre_points.transpose()[1], "r", alpha=0.5)
# # ax1.set_aspect(1.0)
# # ax1.set_xlim(-20, 20)
# # ax1.set_ylim(-20, 20)
# #
# # ax2.set_title("F(phi)")
# # ## target
# # fc_arr_tgt = t2d.make_scatter_data(tgt_points, epsilon=0.002, dphi=0.001)
# # ax2.plot(fc_arr_tgt[0], fc_arr_tgt[1], label="real_tgt")
# # ax2.plot(fc_arr_tgt[0], fc_arr_tgt[2], label="imag_tgt")
# # ## prediction
# # fc_arr_pre = t2d.make_scatter_data(pre_points, epsilon=0.002, dphi=0.001)
# # ax2.plot(fc_arr_pre[0], fc_arr_pre[1], label="real_pre")
# # ax2.plot(fc_arr_pre[0], fc_arr_pre[2], label="imag_pre")
# # ax2.legend(loc=4)
# #
# #
# # ax1.set_title("(red) pre_points: p1={:2.2f},{:2.2f};p2={:2.2f},{:2.2f};p3={:2.2f},{:2.2f}\n"
# # "(blue)tgt_points: p1={:2.2f},{:2.2f};p2={:2.2f},{:2.2f};p3={:2.2f},{:2.2f}\n"
# # "iou: {:1.2f}; doa (real) {:1.2f}; doa (imag) {:1.2f}".format(
# # pre_points[0][0], pre_points[0][1], pre_points[1][0], pre_points[1][1], pre_points[2][0], pre_points[2][1],
# # tgt_points[0][0], tgt_points[0][1], tgt_points[1][0], tgt_points[1][1], tgt_points[2][0], tgt_points[2][1],
# # intersetion_area / union_area, np.sum(np.abs(fc_arr_tgt[1] - fc_arr_pre[1])) / np.sum(np.abs(fc_arr_tgt[1]) + np.abs(fc_arr_pre[1])),
# # np.sum(np.abs(fc_arr_tgt[2] - fc_arr_pre[2])) / np.sum(np.abs(fc_arr_tgt[2]) + np.abs(fc_arr_pre[2]))
# # ))
# # plt.grid()
# # pdf = os.path.join(self._params['flags'].graph_dir, "single_plot_{}.pdf".format(sample_counter))
# # sample_counter += 1
# # fig.savefig(pdf)
# # plt.clf()
# # plt.close()
# # plt.show()
#
# print("mean iou: {}".format(np.mean(iou_arr)))
# print("sum tgt area: {}; sum pre area: {}; p/t-area: {}".format(np.mean(tgt_area_arr), np.mean(pre_area_arr), np.sum(pre_area_arr) / np.sum(tgt_area_arr) ))
# # print("wrong order loss: {}; correct order loss: {}; order missed: {}".format(np.nanmean(wo_loss_arr), np.nanmean(co_loss_arr), np.count_nonzero(~np.isnan(wo_loss_arr)) ))
#
# from PyPDF2 import PdfFileMerger
#
# plt.close("all")
# pdfs = [os.path.join(self._params['flags'].graph_dir, "single_plot_{}.pdf".format(x)) for x in range(sample_counter)]
# merger = PdfFileMerger()
# for pdf in pdfs:
# merger.append(pdf)
# merger.write(os.path.join(self._params['flags'].graph_dir, "plot_summary.pdf"))
# merger.close()
# for pdf in pdfs:
# if os.path.isfile(pdf):
# os.remove(pdf)
# else:
# logging.warning("Can not delete temporary file, result is probably incomplete!")
#
|
import openpyxl
import MySQLdb
import datetime
import numpy as np
import sys
import cv2
import os
A2Z = [chr(i) for i in range(65, 65+26)]
defaultRowHeight = 15
baseColWidth = 10
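# Note: fetchData() below reads its connection settings from the MYSQL_USER,
# MYSQL_PASSWORD, MYSQL_HOST and MYSQL_DATABASE environment variables.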
def fetchData(id):
sql = MySQLdb.connect(
user=os.environ['MYSQL_USER'], passwd=os.environ['MYSQL_PASSWORD'],
host=os.environ['MYSQL_HOST'], db=os.environ['MYSQL_DATABASE'])
cur = sql.cursor()
cur.execute("SET NAMES utf8")
query = "SELECT * FROM shape"
cur.execute(query)
shapes = cur.fetchall()
query = "SELECT * FROM artwork WHERE `id`=" + str(id)
cur.execute(query)
artwork = cur.fetchone()
query = "SELECT * FROM damage WHERE `artwork_id`=" + str(id)
cur.execute(query)
damage = cur.fetchall()
query = "SELECT damage_img.id, damage_img.damage_id, damage_img.img FROM damage_img " \
+ "JOIN damage ON damage_img.damage_id = damage.id WHERE `artwork_id` =" + str(id)
cur.execute(query)
damage_img = cur.fetchall()
cur.close()
sql.close()
return shapes, artwork, damage, damage_img
def loadShapes(shapes):
shape_imgs = {}
for shape in shapes:
shape_imgs[shape[0]] = cv2.imread('../img/shape/' + shape[2], cv2.IMREAD_UNCHANGED)
return shape_imgs
def drawImgOnOverlay(img, overlay, x, y, add=False):
w, h = img.shape[1], img.shape[0]
x1 = int(x - w/2)
y1 = int(y - h/2)
x2 = int(x + w/2)
y2 = int(y + h/2)
o_x1 = 0
o_y1 = 0
o_x2 = w
    o_y2 = h
if x1 < 0:
o_x1 = -x1 + 1
x1 = 0
if y1 < 0:
o_y1 = -y1 + 1
y1 = 0
if x2 >= overlay.shape[1]:
o_x2 -= x2 - overlay.shape[1] + 1
x2 = overlay.shape[1] - 1
if y2 >= overlay.shape[0]:
o_y2 -= y2 - overlay.shape[0] + 1
y2 = overlay.shape[0] - 1
if add:
overlay[y1:y2, x1:x2] += img[o_y1:o_y2, o_x1:o_x2]
else:
overlay[y1:y2, x1:x2] = img[o_y1:o_y2, o_x1:o_x2]
return overlay
def saveExcel(id):
shapes, artwork, damages, damage_imgs = fetchData(id=id)
shape_imgs = loadShapes(shapes)
wb = openpyxl.Workbook()
ws = wb.worksheets[0]
    ws['A2'] = '美術品名'  # artwork name
    ws['B2'] = artwork[1]
    ws['A3'] = 'タグ'  # tags
    ws['B3'] = artwork[2]
    ws['A4'] = 'コメント'  # comment
    ws['B4'] = artwork[3]
    ws['A5'] = '最終更新'  # last updated
    ws['B5'] = artwork[5]
ws.column_dimensions['B'].width = 20
length = 100
img = cv2.imread('../img/artwork/' + artwork[4])
overlay = np.zeros((img.shape[0], img.shape[1], 4), dtype=np.uint8)
marker_size = int(20 * max(img.shape[1], img.shape[0]) / 1000)
text_size = int(max(img.shape[1], img.shape[0]) / 1000)
    # Draw the damage markers
for damage in damages:
x, y = damage[8], damage[9]
radius = damage[10]
if radius > 0:
color = damage[6][1:]
color = np.array(tuple(int(color[i:i+2], 16) for i in (4, 2, 0))) / 255
shape_img = shape_imgs[damage[7]]
draw_img = np.zeros((shape_img.shape[0], shape_img.shape[1], 3), np.uint8)
for i in range(draw_img.shape[2]):
draw_img[:,:,i] = (shape_img[:,:,3] * color[i]).astype(np.uint8)
draw_img = np.dstack((draw_img, shape_img[:,:,3:] // 2))
draw_img = cv2.resize(draw_img, (int(radius * 2), int(radius * 2)))
drawImgOnOverlay(draw_img, overlay, x, y, True)
for damage in damages:
x, y = damage[8], damage[9]
color = damage[6][1:]
color = np.array(tuple(int(color[i:i+2], 16) for i in (4, 2, 0))) / 255
shape_img = shape_imgs[damage[7]]
draw_img = np.zeros((shape_img.shape[0], shape_img.shape[1], 3), np.uint8)
margin = int(shape_img.shape[0] * 0.1)
for i in range(draw_img.shape[2]):
resize_img = cv2.resize(shape_img[:,:,3], (shape_img.shape[1]-margin*2, shape_img.shape[0]-margin*2))
draw_img[margin:-margin,margin:-margin,i] = (resize_img * color[i]).astype(np.uint8)
draw_img = np.dstack((draw_img, shape_img[:,:,3:]))
draw_img = cv2.resize(draw_img, (marker_size, marker_size))
drawImgOnOverlay(draw_img, overlay, x, y)
for damage in damages:
x, y = damage[8], damage[9]
w, h = text_size * 10, text_size * 10
cv2.putText(overlay, str(damage[0]), (int(x+w/2+text_size), int(y-h/2+text_size)), cv2.FONT_HERSHEY_PLAIN, int(text_size), (0, 0, 0, 255), text_size, cv2.LINE_AA)
cv2.putText(overlay, str(damage[0]), (int(x+w/2), int(y-h/2)), cv2.FONT_HERSHEY_PLAIN, int(text_size), (255, 255, 255, 255), text_size, cv2.LINE_AA)
scale = baseColWidth * length / img.shape[1]
img = cv2.resize(img, None, fx=scale, fy=scale)
overlay = cv2.resize(overlay, None, fx=scale, fy=scale)
cv2.imwrite('tmp/artwork_img.png', img)
cv2.imwrite('tmp/overlay.png', overlay)
img = openpyxl.drawing.image.Image('tmp/artwork_img.png')
ws.add_image(img, 'D2')
img = openpyxl.drawing.image.Image('tmp/overlay.png')
ws.add_image(img, 'D2')
    # Add reference images
wb.create_sheet()
ws = wb.worksheets[1]
ws.column_dimensions['B'].width = 20
row_count = 1
row_step = 10
height = 10
for damage in damages:
col_count = 0
ws['A' + str(row_count+1)] = 'ID'
ws['B' + str(row_count+1)] = damage[0]
ws['A' + str(row_count+2)] = damage[2]
ws['A' + str(row_count+3)] = damage[3]
ws['A' + str(row_count+4)] = '登録日'
ws['B' + str(row_count+4)] = damage[4]
ws['A' + str(row_count+5)] = '削除日'
ws['B' + str(row_count+5)] = damage[5]
for damage_img in damage_imgs:
if damage[0] == damage_img[1]:
img = cv2.imread('../img/damage/' + damage_img[2])
if img is None:
continue
scale = defaultRowHeight * height / img.shape[0]
img = cv2.resize(img, None, fx=scale, fy=scale)
cv2.imwrite('tmp/' + damage_img[2], img)
img2 = openpyxl.drawing.image.Image('tmp/' + damage_img[2])
ws.add_image(img2, A2Z[col_count+3] + str(row_count+1))
col_count += int(img.shape[1] / baseColWidth / defaultRowHeight) * 3
row_count += row_step
dt_now = datetime.datetime.now()
artwork_name = artwork[1].replace(' ', '_').replace('/', '').replace('\\', '')
fname = 'tmp/export_%d_%s_%s.xlsx' % (artwork[0], artwork_name, dt_now.strftime('%Y-%m-%d %H-%M-%S'))
wb.save(fname)
return fname
if __name__ == '__main__':
for i in range(1, len(sys.argv)):
fname = saveExcel(int(sys.argv[i]))
print(fname)
|
def _get_runfile_path(ctx, f):
"""Return the runfiles relative path of f."""
if ctx.workspace_name:
return "${RUNFILES}/" + ctx.workspace_name + "/" + f.short_path
else:
return "${RUNFILES}/" + f.short_path
def _impl(ctx):
runfiles = ctx.attr._sh_tpl.default_runfiles.files.to_list()
for target in ctx.attr.targets:
runfiles.append(target.files_to_run.executable)
runfiles.extend(target.default_runfiles.files.to_list())
ctx.actions.expand_template(
template = ctx.file._sh_tpl,
substitutions = {
"%{commands}": "\n".join([
"async \"%s\" \"$@\"" % _get_runfile_path(ctx, command.files_to_run.executable)
for command in ctx.attr.targets
]),
},
output = ctx.outputs.executable,
is_executable = True,
)
return [DefaultInfo(runfiles = ctx.runfiles(files = runfiles))]
run_parallel = rule(
attrs = {
"targets": attr.label_list(
allow_empty = False,
mandatory = True,
),
"_sh_tpl": attr.label(
default = Label("@cloud_robotics//bazel/build_rules/app_chart:run_parallel.sh.tpl"),
allow_single_file = True,
),
},
executable = True,
implementation = _impl,
)
"""Run multiple targets in parallel.
This rule builds a "bazel run" target that runs a series of subtargets in
parallel. If a subtarget has errors, execution results in an error when all
subtargets have completed.
Args:
targets: A list of targets that can be run with "bazel run".
"""
|
import logging
from typing import Any, List
import enlighten
from ib_insync.wrapper import RequestError
from .signal_handler import SignalHandler, ExitSignalDetectedError
from .contracts_db_connector import ContractsDbConnector
from .ib_details_db_connector import IbDetailsDbConnector
from .tws_connector import TwsConnector, IbDetailsInvalidError
logger = logging.getLogger(__name__)
class IbDetailsProcessor():
"""Downloading of contract details from IB TWS and storing to db"""
def __init__(self) -> None:
self.__contracts_db_connector = ContractsDbConnector()
self.__ib_details_db_connector = IbDetailsDbConnector()
self.__tws_connector = TwsConnector()
self.__contracts: List[Any] = []
self.__details: Any = None
self.__pbar: Any = None
self.__signal_handler = SignalHandler()
manager = enlighten.get_manager() # Setup progress bar
self.__pbar = manager.counter(
total=0,
desc="Contracts", unit="contracts")
def update_ib_contract_details(self) -> None:
"""Download and store all missing contract details entries from IB TWS.
"""
self.__get_contracts()
self.__pbar.total = len(self.__contracts)
self.__tws_connector.connect()
try:
for contract in self.__contracts:
self.__signal_handler.is_exit_requested()
logger.info(
f"{contract['broker_symbol']}_{contract['exchange']}_"
f"{contract['currency']}")
try:
self.__get_contract_details_from_tws(contract)
except RequestError as e:
logger.info(e)
continue
except IbDetailsInvalidError as e:
logger.info(e)
continue
else:
self.__insert_ib_details_into_db(contract)
finally:
self.__pbar.update(inc=1)
except ExitSignalDetectedError:
pass
else:
            logger.info("Updated IB details for master listings.")
finally:
self.__tws_connector.disconnect()
def __get_contracts(self) -> None:
"""Get contracts from db, where IB details are missing"""
return_columns = [
'contract_id', 'broker_symbol', 'exchange', 'currency']
filters = {'primary_exchange': "NULL"}
self.__contracts = self.__contracts_db_connector.get_contracts(
filters=filters,
return_columns=return_columns)
logger.info(f"Found {len(self.__contracts)} contracts with missing "
f"IB details in master listing.")
def __get_contract_details_from_tws(self, contract: Any) -> None:
self.__details = self.__tws_connector.download_contract_details(
broker_symbol=contract['broker_symbol'],
exchange=contract['exchange'],
currency=contract['currency'])
def __insert_ib_details_into_db(self, contract: Any) -> None:
self.__ib_details_db_connector.insert_ib_details(
contract_id=contract['contract_id'],
contract_type_from_details=self.__details.stockType,
primary_exchange=self.__details.contract.primaryExchange,
industry=self.__details.industry,
category=self.__details.category,
subcategory=self.__details.subcategory)
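# Minimal usage sketch (assumes TWS connectivity and the database connectors are
# configured elsewhere in the project):
#
#   processor = IbDetailsProcessor()
#   processor.update_ib_contract_details()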
|
from flask import Flask, request, redirect, url_for, render_template
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
from werkzeug.utils import secure_filename
import librosa
import torch
import os
app = Flask(__name__)
where_to_save_files = "/home/aymenha2021/PycharmProjects/flaskProject1/uploads"
tokenizer = Wav2Vec2Tokenizer.from_pretrained(
"/home/aymenha2021/PycharmProjects/flaskProject/wav2vec2-large-xlsr-arabic-demo-v5")
model = Wav2Vec2ForCTC.from_pretrained("/home/aymenha2021/PycharmProjects/flaskProject/wav2vec2-large-xlsr-arabic-demo-v5")
def prepare_example(example):
    # Wav2Vec2 models expect 16 kHz audio; resample on load rather than using
    # librosa's default 22050 Hz sampling rate.
    speech, sampling_rate = librosa.load(example, sr=16000)
return speech
@app.route('/', methods=['GET', 'POST'])
def main_page():
if request.method == 'POST':
file = request.files['file']
filename = secure_filename(file.filename)
if not os.path.exists(where_to_save_files):
os.makedirs(where_to_save_files)
        file.save(os.path.join(where_to_save_files, filename))
return redirect(url_for('prediction', filename=filename))
return render_template('index.html')
@app.route('/prediction/<filename>')
def prediction(filename):
    file_to_predict = os.path.join(where_to_save_files, filename)
file_to_predict = prepare_example(file_to_predict)
inputs = tokenizer(file_to_predict, return_tensors="pt").input_values
logits = model(inputs).logits
predicted_ids = torch.argmax(logits, dim=-1)
Prediction = tokenizer.batch_decode(predicted_ids)
return render_template('predict.html', Prediction=Prediction)
if __name__ == '__main__':
    app.run()
|
"""
Main script for grammar AssignmentStatement1 (version 1)
## author
Morteza Zakeri, (http://webpages.iust.ac.ir/morteza_zakeri/)
## date
20201029
## Required
- Compiler generator: ANTLR 4.x
- Target language(s): Python 3.8.x
## Changelog
### v2.0.0
- A lexer and parser for simple grammar without any attribute or listener
## Refs
- Reference: Compiler book by Dr. Saeed Parsa (http://parsa.iust.ac.ir/)
- Course website: http://parsa.iust.ac.ir/courses/compilers/
- Laboratory website: http://reverse.iust.ac.ir/
"""
__version__ = '0.1.0'
__author__ = 'Morteza'
from antlr4 import *
from language_apps.assignment_statement_v1.gen.AssignmentStatement1Lexer import AssignmentStatement1Lexer
from language_apps.assignment_statement_v1.gen.AssignmentStatement1Parser import AssignmentStatement1Parser
from language_apps.assignment_statement_v1.gen.AssignmentStatement1Listener import AssignmentStatement1Listener
import argparse
class MyListener(AssignmentStatement1Listener):
def exitFactor(self, ctx: AssignmentStatement1Parser.FactorContext):
print('Dummy listener!')
def exitNumber(self, ctx: AssignmentStatement1Parser.NumberContext):
pass
def main(args):
"""
Create lexer and parser
Args:
args (str):
return (None):
"""
# Step 1: Load input source into stream
stream = FileStream(args.file, encoding='utf8')
# input_stream = StdinStream()
# Step 2: Create an instance of AssignmentStLexer
lexer = AssignmentStatement1Lexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = AssignmentStatement1Parser(token_stream)
# Step 5: Create parse tree
parse_tree = parser.start()
# Step 6: Create an instance of AssignmentStListener
my_listener = MyListener()
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
quit()
# return
lexer.reset()
token = lexer.nextToken()
while token.type != Token.EOF:
print('Token text: ', token.text, 'Token line: ', token.line)
token = lexer.nextToken()
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument(
'-n', '--file',
help='Input source', default=r'A.java')
args = argparser.parse_args()
main(args)
|
import operator
from functools import reduce, update_wrapper
from django.conf.urls import url
from django.core.paginator import Paginator
from django.db import models
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.contrib import admin
from django.contrib.admin.utils import lookup_needs_distinct, unquote
from django.contrib.admin.views.main import ChangeList
from django import VERSION
class OrderedModelAdmin(admin.ModelAdmin):
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
return [
url(r'^(.+)/move-(up)/$', wrap(self.move_view),
name='{app}_{model}_order_up'.format(**self._get_model_info())),
url(r'^(.+)/move-(down)/$', wrap(self.move_view),
name='{app}_{model}_order_down'.format(**self._get_model_info())),
] + super(OrderedModelAdmin, self).get_urls()
def _get_changelist(self, request):
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
args = (request, self.model, list_display,
list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_max_show_all, self.list_editable, self)
if VERSION >= (2, 1):
args = args + (self.sortable_by, )
return ChangeList(*args)
request_query_string = ''
def changelist_view(self, request, extra_context=None):
cl = self._get_changelist(request)
self.request_query_string = cl.get_query_string()
return super(OrderedModelAdmin, self).changelist_view(request, extra_context)
def move_view(self, request, object_id, direction):
qs = self._get_changelist(request).get_queryset(request)
obj = get_object_or_404(self.model, pk=unquote(object_id))
obj.move(direction, qs)
# guts from request.get_full_path(), calculating ../../ and restoring GET arguments
mangled = '/'.join(escape_uri_path(request.path).split('/')[0:-3])
redir_path = '%s%s%s' % (mangled, '/' if not mangled.endswith('/') else '',
('?' + iri_to_uri(request.META.get('QUERY_STRING', ''))) if request.META.get('QUERY_STRING', '') else '')
return HttpResponseRedirect(redir_path)
def move_up_down_links(self, obj):
model_info = self._get_model_info()
return render_to_string("ordered_model/admin/order_controls.html", {
'app_label': model_info['app'],
'model_name': model_info['model'],
'module_name': model_info['model'], # for backwards compatibility
'object_id': obj.pk,
'urls': {
'up': reverse("{admin_name}:{app}_{model}_order_up".format(
admin_name=self.admin_site.name, **model_info), args=[obj.pk, 'up']),
'down': reverse("{admin_name}:{app}_{model}_order_down".format(
admin_name=self.admin_site.name, **model_info), args=[obj.pk, 'down']),
},
'query_string': self.request_query_string
})
move_up_down_links.allow_tags = True
move_up_down_links.short_description = _(u'Move')
def _get_model_info(self):
return {
'app': self.model._meta.app_label,
'model': self.model._meta.model_name,
}
class OrderedInlineMixin(object):
ordering = None
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
paginator = Paginator
preserve_filters = True
@classmethod
def get_model_info(cls):
return dict(app=cls.model._meta.app_label,
model=cls.model._meta.model_name)
@classmethod
def get_urls(cls, model_admin):
def wrap(view):
def wrapper(*args, **kwargs):
return model_admin.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
return [
url(r'^(.+)/{model}/(.+)/move-(up)/$'.format(**cls.get_model_info()), wrap(cls.move_view),
name='{app}_{model}_order_up_inline'.format(**cls.get_model_info())),
url(r'^(.+)/{model}/(.+)/move-(down)/$'.format(**cls.get_model_info()), wrap(cls.move_view),
name='{app}_{model}_order_down_inline'.format(**cls.get_model_info())),
]
@classmethod
def get_list_display(cls, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return cls.list_display
@classmethod
def get_list_display_links(cls, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if cls.list_display_links or not list_display:
return cls.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
@classmethod
def _get_changelist(cls, request):
list_display = cls.get_list_display(request)
list_display_links = cls.get_list_display_links(request, list_display)
cl = ChangeList(request, cls.model, list_display,
list_display_links, cls.list_filter, cls.date_hierarchy,
cls.search_fields, cls.list_select_related,
cls.list_per_page, cls.list_max_show_all, cls.list_editable,
cls)
return cl
request_query_string = ''
@classmethod
def changelist_view(cls, request, extra_context=None):
cl = cls._get_changelist(request)
cls.request_query_string = cl.get_query_string()
        return super(OrderedInlineMixin, cls).changelist_view(request, extra_context)
@classmethod
def get_queryset(cls, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = cls.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = cls.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
@classmethod
def get_ordering(cls, request):
"""
Hook for specifying field ordering.
"""
return cls.ordering or () # otherwise we might try to *None, which is bad ;)
@classmethod
def get_paginator(cls, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return cls.paginator(queryset, per_page, orphans, allow_empty_first_page)
@classmethod
def get_search_fields(cls, request):
"""
Returns a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return cls.search_fields
@classmethod
def get_search_results(cls, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "{0!s}__istartswith".format(field_name[1:])
elif field_name.startswith('='):
return "{0!s}__iexact".format(field_name[1:])
elif field_name.startswith('@'):
return "{0!s}__search".format(field_name[1:])
else:
return "{0!s}__icontains".format(field_name)
use_distinct = False
search_fields = cls.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
                    if lookup_needs_distinct(cls.model._meta, search_spec):
use_distinct = True
break
return queryset, use_distinct
@classmethod
def move_view(cls, request, admin_id, object_id, direction):
qs = cls._get_changelist(request).get_queryset(request)
obj = get_object_or_404(cls.model, pk=unquote(object_id))
obj.move(direction, qs)
# guts from request.get_full_path(), calculating ../../ and restoring GET arguments
mangled = '/'.join(escape_uri_path(request.path).split('/')[0:-4] + ['change'])
redir_path = '%s%s%s' % (mangled, '/' if not mangled.endswith('/') else '',
('?' + iri_to_uri(request.META.get('QUERY_STRING', ''))) if request.META.get('QUERY_STRING', '') else '')
return HttpResponseRedirect(redir_path)
@classmethod
def get_preserved_filters(cls, request):
"""
Returns the preserved filters querystring.
"""
match = request.resolver_match
if cls.preserve_filters and match:
opts = cls.model._meta
current_url = '{0!s}:{1!s}'.format(match.app_name, match.url_name)
changelist_url = 'admin:{0!s}_{1!s}_changelist'.format(opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
def move_up_down_links(self, obj):
if not obj.id:
return ''
# Find the fields which refer to the parent model of this inline, and
# use one of them if they aren't None.
order_with_respect_to = obj._get_order_with_respect_to() or []
parent_model = self.parent_model._meta
fields = [
str(value.pk) for field_name, value in order_with_respect_to
if value.__class__ is self.parent_model and value is not None and value.pk is not None]
order_obj_name = fields[0] if len(fields) > 0 else None
if order_obj_name:
return render_to_string("ordered_model/admin/order_controls.html", {
'app_label': self.model._meta.app_label,
'model_name': self.model._meta.model_name,
'module_name': self.model._meta.model_name, # backwards compat
'object_id': obj.id,
'urls': {
'up': reverse("admin:{app}_{model}_order_up_inline".format(
admin_name=self.admin_site.name, **self.get_model_info()),
args=[order_obj_name, obj.id, 'up']),
'down': reverse("admin:{app}_{model}_order_down_inline".format(
admin_name=self.admin_site.name, **self.get_model_info()),
args=[order_obj_name, obj.id, 'down']),
},
'query_string': self.request_query_string
})
return ''
move_up_down_links.allow_tags = True
move_up_down_links.short_description = _(u'Move')
class OrderedTabularInline(OrderedInlineMixin, admin.TabularInline):
pass
class OrderedStackedInline(OrderedInlineMixin, admin.StackedInline):
pass
|
import os
import urllib
import torch
from torch.utils import model_zoo
import shutil
import datetime
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
'''
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def backup_model_best(self, filename, **kwargs):
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
# Backup model
backup_dir = os.path.join(self.checkpoint_dir, 'backup_model_best')
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
ts = datetime.datetime.now().timestamp()
filename_backup = os.path.join(backup_dir, '%s.pt' % ts)
shutil.copy(filename, filename_backup)
def load(self, filename='model.pt'):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
            raise FileNotFoundError(filename)
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def is_url(url):
''' Checks if input string is a URL.
Args:
url (string): URL
'''
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https')
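# Minimal usage sketch (the `model` and `optimizer` objects are placeholders, not
# defined in this module):
#
#   checkpoint_io = CheckpointIO('./chkpts', model=model, optimizer=optimizer)
#   checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it)
#   scalars = checkpoint_io.load('model.pt')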
|
#170401011 Berfin Okuducu
import socket
import time
import subprocess
import shlex
import pickle
from datetime import datetime
host = "127.0.0.1"
port = 142
baslangic=datetime.now()
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host,port))
zaman,address=client.recvfrom(1024)
zaman=pickle.loads(zaman)
zaman=zaman[0]
bitis=datetime.now()
# Use full datetime arithmetic so the offset cannot go negative or overflow the
# microsecond argument of datetime().
sonzaman=bitis - baslangic
zamanekle = (zaman + sonzaman).isoformat()
subprocess.call(shlex.split("timedatectl set-ntp false"))
subprocess.call(shlex.split("sudo date -s '%s'" % zamanekle))
subprocess.call(shlex.split("sudo hwclock -w"))
|
from common.tools.endianness import LittleEndian
from common.hash.md import MDHashFunction
class MD4(MDHashFunction):
# Custom implementation of MD4.
A = 0x67452301
B = 0xefcdab89
C = 0x98badcfe
D = 0x10325476
@classmethod
def get_OID(cls):
return '\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x04'
@classmethod
def endianness(cls):
return LittleEndian
@classmethod
def initial_state(cls):
return [cls.A, cls.B, cls.C, cls.D]
def F(self, x, y, z):
return (x & y) | (self._not(x) & z)
def G(self, x, y, z):
return (x & y) | (x & z) | (y & z)
def H(self, x, y, z):
return x ^ y ^ z
def _round_operation(self, a, b, c, d, x, s, func, z=0):
return self._rotate_left((a + func(b, c, d) + x + z) & self.mask, s)
def _round_1(self, words, a, b, c, d):
a = self._round_operation(a, b, c, d, words[0], 3, self.F)
d = self._round_operation(d, a, b, c, words[1], 7, self.F)
c = self._round_operation(c, d, a, b, words[2], 11, self.F)
b = self._round_operation(b, c, d, a, words[3], 19, self.F)
a = self._round_operation(a, b, c, d, words[4], 3, self.F)
d = self._round_operation(d, a, b, c, words[5], 7, self.F)
c = self._round_operation(c, d, a, b, words[6], 11, self.F)
b = self._round_operation(b, c, d, a, words[7], 19, self.F)
a = self._round_operation(a, b, c, d, words[8], 3, self.F)
d = self._round_operation(d, a, b, c, words[9], 7, self.F)
c = self._round_operation(c, d, a, b, words[10], 11, self.F)
b = self._round_operation(b, c, d, a, words[11], 19, self.F)
a = self._round_operation(a, b, c, d, words[12], 3, self.F)
d = self._round_operation(d, a, b, c, words[13], 7, self.F)
c = self._round_operation(c, d, a, b, words[14], 11, self.F)
b = self._round_operation(b, c, d, a, words[15], 19, self.F)
return a, b, c, d
def _round_2(self, words, a, b, c, d):
z = 0x5a827999
a = self._round_operation(a, b, c, d, words[0], 3, self.G, z)
d = self._round_operation(d, a, b, c, words[4], 5, self.G, z)
c = self._round_operation(c, d, a, b, words[8], 9, self.G, z)
b = self._round_operation(b, c, d, a, words[12], 13, self.G, z)
a = self._round_operation(a, b, c, d, words[1], 3, self.G, z)
d = self._round_operation(d, a, b, c, words[5], 5, self.G, z)
c = self._round_operation(c, d, a, b, words[9], 9, self.G, z)
b = self._round_operation(b, c, d, a, words[13], 13, self.G, z)
a = self._round_operation(a, b, c, d, words[2], 3, self.G, z)
d = self._round_operation(d, a, b, c, words[6], 5, self.G, z)
c = self._round_operation(c, d, a, b, words[10], 9, self.G, z)
b = self._round_operation(b, c, d, a, words[14], 13, self.G, z)
a = self._round_operation(a, b, c, d, words[3], 3, self.G, z)
d = self._round_operation(d, a, b, c, words[7], 5, self.G, z)
c = self._round_operation(c, d, a, b, words[11], 9, self.G, z)
b = self._round_operation(b, c, d, a, words[15], 13, self.G, z)
return a, b, c, d
def _round_3(self, words, a, b, c, d):
z = 0x6ed9eba1
a = self._round_operation(a, b, c, d, words[0], 3, self.H, z)
d = self._round_operation(d, a, b, c, words[8], 9, self.H, z)
c = self._round_operation(c, d, a, b, words[4], 11, self.H, z)
b = self._round_operation(b, c, d, a, words[12], 15, self.H, z)
a = self._round_operation(a, b, c, d, words[2], 3, self.H, z)
d = self._round_operation(d, a, b, c, words[10], 9, self.H, z)
c = self._round_operation(c, d, a, b, words[6], 11, self.H, z)
b = self._round_operation(b, c, d, a, words[14], 15, self.H, z)
a = self._round_operation(a, b, c, d, words[1], 3, self.H, z)
d = self._round_operation(d, a, b, c, words[9], 9, self.H, z)
c = self._round_operation(c, d, a, b, words[5], 11, self.H, z)
b = self._round_operation(b, c, d, a, words[13], 15, self.H, z)
a = self._round_operation(a, b, c, d, words[3], 3, self.H, z)
d = self._round_operation(d, a, b, c, words[11], 9, self.H, z)
c = self._round_operation(c, d, a, b, words[7], 11, self.H, z)
b = self._round_operation(b, c, d, a, words[15], 15, self.H, z)
return a, b, c, d
def _process_chunk(self, chunk):
a, b, c, d = self.registers
words = self._get_words_from(chunk)
a, b, c, d = self._round_1(words, a, b, c, d)
a, b, c, d = self._round_2(words, a, b, c, d)
a, b, c, d = self._round_3(words, a, b, c, d)
return a, b, c, d
|
#!/usr/bin/python2.7
import argparse
def get_sequence_lengths(labelfile):
lengths = [0]
with open(labelfile, 'r') as f:
content = f.read().split('\n')[2:-2] # skip header and '#' at the end of the file
for line in content:
if line == '#':
lengths.append(0)
else:
lengths[-1] += 1
return lengths
def get_transcripts(transcriptfile):
transcripts = [ [] ]
with open(transcriptfile, 'r') as f:
content = f.read().split('\n')[2:-2] # skip header and '#' at the end of the file
for line in content:
if line == '#':
transcripts.append([])
else:
transcripts[-1] += [int(line)]
return transcripts
def get_hmm(hmm_file):
start_states = [0]
with open(hmm_file, 'r') as f:
content = f.read().split('\n')[1:-1]
for line in content:
start_states.append( int(line) )
for i in range(1, len(start_states)):
start_states[i] += start_states[i-1]
hmm = []
for cls in range(len(start_states)-1):
hmm.append( range(start_states[cls], start_states[cls+1]) )
return hmm
def linear_alignment(transcripts, lengths, hmm, outputfile):
n_hmm_states = hmm[-1][-1] + 1
n_frames_total = sum(lengths)
n_sequences = len(lengths)
with open(outputfile, 'w') as f:
f.write('#sequencelabels\n' + str(n_frames_total) + ' ' + str(n_hmm_states) + ' ' + str(n_sequences) + '\n')
for i in range(len(lengths)):
# generate hmm state transcript for i-th video
hmm_transcript = []
for label in transcripts[i]:
hmm_transcript += hmm[label]
# generate linear alignment
t = 0
segment_length = float(lengths[i]) / len(hmm_transcript)
while t < lengths[i]:
state = hmm_transcript[ int(t / segment_length) ]
f.write( str(state) + '\n' )
t = t+1
f.write('#\n')
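# Worked example of the linear alignment above: with lengths[i] == 10 and a
# four-state hmm_transcript [s0, s1, s2, s3], segment_length is 2.5 and the
# states written for t = 0..9 are [s0, s0, s0, s1, s1, s2, s2, s2, s3, s3].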
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ground_truth_labels', help='framewise action labels, only used to determine the length of each video')
parser.add_argument('transcripts', help='action label transcripts')
parser.add_argument('hmm_file', help='hmm model file')
parser.add_argument('output_label_file', help='write the output alignment to this file')
args = parser.parse_args()
lengths = get_sequence_lengths(args.ground_truth_labels)
transcripts = get_transcripts(args.transcripts)
hmm = get_hmm(args.hmm_file)
linear_alignment(transcripts, lengths, hmm, args.output_label_file)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import cv2
import numpy as np
from augly.video.augmenters.cv2 import BaseCV2Augmenter, VideoDistractorByShapes
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class VideoDistractorByDots(BaseCV2Augmenter):
def __init__(
self, num_dots: int, dot_type: str, random_movement: bool = True, **kwargs
) -> None:
assert num_dots > 0, "Number of dots must be greater than zero"
assert dot_type in [
"colored",
"blur",
], "Dot type must be set to None or to 'colored' or 'blur'"
super().__init__(num_dots, random_movement, **kwargs)
self.num_dots = num_dots
self.dot_type = dot_type
self.shapes_distractor = None
if self.dot_type == "colored":
self.shapes_distractor = VideoDistractorByShapes(
num_dots,
shape_type="circle",
colors=[(0, 0, 0), (91, 123, 166)],
thickness=2,
radius=0.001,
random_movement=random_movement,
)
def add_blurred_dots(self, raw_frame: np.ndarray) -> np.ndarray:
height, width = raw_frame.shape[:2]
distract_frame = raw_frame.copy()
for i in range(self.num_dots):
fraction_x, fraction_y = self.get_origins(i)
x = int(fraction_x * width)
y = int(fraction_y * height)
# I think that sometimes the random positioning of the dot goes
# past the frame resulting in an error, but I can't repro this, so
# try/catching for now
try:
dot_bbox = distract_frame[y : y + 10, x : x + 10]
dot_bbox = cv2.GaussianBlur(dot_bbox, (111, 111), cv2.BORDER_DEFAULT)
distract_frame[y : y + 10, x : x + 10] = dot_bbox
except Exception as e:
logger.warning(f"Exception while adding Gaussian dot distractor: {e}")
return distract_frame
# overrides abstract method of base class
def apply_augmentation(self, raw_frame: np.ndarray, **kwargs) -> np.ndarray:
"""
Adds random dot distracts (in various colors and positions) to each frame
@param raw_frame: raw, single RGB/Gray frame
@returns: the augumented frame
"""
assert (raw_frame.ndim == 3) and (
raw_frame.shape[2] == 3
), "VideoDistractorByDots only accepts RGB images"
if self.dot_type == "colored":
return self.shapes_distractor.apply_augmentation(raw_frame, **kwargs)
return self.add_blurred_dots(raw_frame)
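# Usage sketch (`frame` is a placeholder for an HxWx3 uint8 RGB array, not
# something defined in this module):
#
#   augmenter = VideoDistractorByDots(num_dots=10, dot_type="blur")
#   out_frame = augmenter.apply_augmentation(frame)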
|
from functools import wraps
from flask import request
from app.models.models import User
from app.custom_http_respones.responses import Success, Error
success = Success()
error = Error()
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
access_token = None
if 'Authorization' in request.headers:
auth_header = request.headers.get('Authorization')
access_token = auth_header.split(" ")[1]
if not access_token:
return error.unauthorized("Please login to perform this action")
user_id = User.decode_token(access_token)
if isinstance(user_id, str):
return error.forbidden_action("Token has been rejected")
return f(*args, user_id=user_id, **kwargs)
return decorated
def admin_only(f):
@wraps(f)
def decorated(*args, **kwargs):
access_token = None
if 'Authorization' in request.headers:
auth_header = request.headers.get('Authorization')
access_token = auth_header.split(" ")[1]
if not access_token:
return error.unauthorized("Please login to perform this action")
user_id = User.decode_token(access_token)
if isinstance(user_id, str):
return error.forbidden_action("Token has been rejected")
user = User.query.filter_by(id=user_id).first()
        if user is None or not user.admin:
return error.unauthorized("This action can only be performed by admin")
return f(*args, user_id=user_id, **kwargs)
return decorated
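# Usage sketch (the Flask `app` object and the route below are illustrative, not
# part of this module):
#
#   @app.route('/profile', methods=['GET'])
#   @token_required
#   def get_profile(user_id):
#       ...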
|
# Generated by Django 3.2.12 on 2022-03-18 13:20
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tunes', '0002_initial'),
]
operations = [
migrations.AddField(
model_name='stations',
name='slug',
field=autoslug.fields.AutoSlugField(blank='True', editable=False, populate_from='name', unique='True', verbose_name='slug'),
),
]
|
# coding: utf-8
# Copyright Luna Technology 2016
# Matthieu Riviere <mriviere@luna-technology.com>
PORTAL_FORMS = [
{
'pk': 1,
'portal': {
'name': 'HADDOCK',
'pk': 1
},
'name': 'HADDOCK server: the Easy interface',
'original_url': 'http://haddock.science.uu.nl/enmr/services/HADDOCK2.2/haddockserver-easy.html',
'submit_url': 'http://haddock.science.uu.nl/cgi/enmr/services/HADDOCK2.2/haddockserver-easy.cgi',
'template_name': 'ejpf_haddock_easy.html',
},
{
'pk': 2,
'portal': {
'name': 'HADDOCK',
'pk': 1
},
'name': 'HADDOCK server: the Prediction interface',
'original_url': 'http://haddock.science.uu.nl/enmr/services/HADDOCK2.2/haddockserver-prediction.html',
'submit_url': 'http://haddock.science.uu.nl/cgi/enmr/services/HADDOCK2.2/haddockserver-prediction.cgi',
'template_name': 'ejpf_haddock_prediction.html',
}
]
|
import time
import numpy as np
from petastorm.pytorch import DataLoader
from collections import namedtuple
import torch
from functools import partial
import sys
class DummyReader(object):
@property
def is_batched_reader(self):
return True
def stop(self):
pass
def join(self):
pass
def __iter__(self):
nt = namedtuple("row", ["test"])
batch = nt(np.random.rand(1000, 64).astype(np.float32))
while True:
yield batch
def main(device):
print("Testing DataLoader on", device)
reader = DummyReader()
for batch_size in [10, 100, 1000, 100000]:
iterations = 100
loader = DataLoader(reader, shuffling_queue_capacity=batch_size * 10, batch_size=batch_size, collate_fn=partial(torch.as_tensor, device=device))
it = iter(loader)
# Warmup
for _ in range(iterations):
next(it)
print("Done warming up")
tstart = time.time()
for _ in range(iterations):
next(it)
print("Samples per second for batch {}: {:.4g}".format(batch_size, (iterations * batch_size) / (time.time() - tstart)))
if __name__ == "__main__":
main(sys.argv[-1] if len(sys.argv) > 1 else "cpu")
|
import sys
from typing import List, Dict, Any, Tuple, Set
import logging
import numpy as np
import pandas as pd
from d3m import container, utils
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata.base import ArgumentType
from d3m.metadata import hyperparams
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from common_primitives.denormalize import DenormalizePrimitive
from common_primitives.dataframe_image_reader import DataFrameImageReaderPrimitive
from common_primitives.extract_columns_semantic_types import ExtractColumnsBySemanticTypesPrimitive
from common_primitives.column_parser import ColumnParserPrimitive
from distil.primitives.ensemble_forest import EnsembleForestPrimitive
from distil.primitives.image_transfer import ImageTransferPrimitive
PipelineContext = utils.Enum(value='PipelineContext', names=['TESTING'], start=1)
# CDB: Totally unoptimized. Pipeline creation code could be simplified but has been left
# in a naively implemented state for readability for now.
#
# Overall implementation relies on passing the entire dataset through the pipeline, with the primitives
# identifying columns to operate on based on type. Alternative implementation (that better lines up with
# D3M approach, but generates more complex pipelines) would be to extract sub-sets by semantic type using
# a common primitive, apply the type-specific primitive to the sub-set, and then merge the changes
# (replace or join) back into the original data.
def create_pipeline(metric: str,
cat_mode: str = 'one_hot',
max_one_hot: int = 16,
scale: bool = False) -> Pipeline:
# create the basic pipeline
image_pipeline = Pipeline(context=PipelineContext.TESTING)
image_pipeline.add_input(name='inputs')
# step 0 - denormalize dataframe (N.B.: injects semantic type information)
step = PrimitiveStep(primitive_description=DenormalizePrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
image_pipeline.add_step(step)
# step 1 - extract dataframe from dataset
step = PrimitiveStep(primitive_description=DatasetToDataFramePrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
image_pipeline.add_step(step)
# step 2 - read images
step = PrimitiveStep(primitive_description=DataFrameImageReaderPrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
    step.add_hyperparameter('use_columns', ArgumentType.VALUE, [0, 1])
step.add_hyperparameter('return_result', ArgumentType.VALUE, 'replace')
image_pipeline.add_step(step)
# step 3 - parse columns
step = PrimitiveStep(primitive_description=ColumnParserPrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step.add_output('produce')
semantic_types = ('http://schema.org/Boolean', 'http://schema.org/Integer', 'http://schema.org/Float',
'https://metadata.datadrivendiscovery.org/types/FloatVector')
step.add_hyperparameter('parse_semantic_types', ArgumentType.VALUE, semantic_types)
image_pipeline.add_step(step)
# step 4 - featurize images
step = PrimitiveStep(primitive_description=ImageTransferPrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step.add_output('produce')
image_pipeline.add_step(step)
# step 5 - extract targets
step = PrimitiveStep(primitive_description=ExtractColumnsBySemanticTypesPrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step.add_output('produce')
target_types = ('https://metadata.datadrivendiscovery.org/types/Target', 'https://metadata.datadrivendiscovery.org/types/TrueTarget')
step.add_hyperparameter('semantic_types', ArgumentType.VALUE, target_types)
image_pipeline.add_step(step)
# step 6 - Generates a random forest ensemble model.
step = PrimitiveStep(primitive_description=EnsembleForestPrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
step.add_output('produce')
step.add_hyperparameter('metric', ArgumentType.VALUE, metric)
image_pipeline.add_step(step)
# step 7 - convert predictions to expected format
step = PrimitiveStep(primitive_description=ConstructPredictionsPrimitive.metadata.query())
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.6.produce')
step.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
step.add_hyperparameter('use_columns', ArgumentType.VALUE, [0, 1])
image_pipeline.add_step(step)
# Adding output step to the pipeline
image_pipeline.add_output(name='output', data_reference='steps.7.produce')
return image_pipeline
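# Usage sketch (the metric value is an example; valid values depend on the
# EnsembleForestPrimitive hyperparameter definition):
#
#   pipeline = create_pipeline(metric='f1Macro')
#   print(pipeline.to_json())  # serialization helper availability may vary by d3m version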
|
import tempfile
from functools import partial
from pathlib import Path
import tus
from aiohttp import web
from aiohttp_tus import setup_tus
from tests.common import get_upload_url, TEST_FILE_NAME, TEST_FILE_PATH
NUMBER = 5
async def test_multiple_tus_upload_urls(aiohttp_client, loop):
upload = partial(tus.upload, file_name=TEST_FILE_NAME)
with tempfile.TemporaryDirectory(prefix="aiohttp_tus") as temp_path:
base_path = Path(temp_path)
app = web.Application()
for idx in range(1, NUMBER + 1):
setup_tus(
app, upload_path=base_path / str(idx), upload_url=f"/{idx}/uploads"
)
client = await aiohttp_client(app)
for idx in range(1, NUMBER + 1):
with open(TEST_FILE_PATH, "rb") as handler:
await loop.run_in_executor(
None, upload, handler, get_upload_url(client, f"/{idx}/uploads")
)
expected_path = base_path / str(idx) / TEST_FILE_NAME
assert expected_path.exists()
assert expected_path.read_bytes() == TEST_FILE_PATH.read_bytes()
|
import sys, os
# Import libraries
import psycopg2
# import frameworks
from flask import abort
# import config, utils
from config import db_config
class Database:
def __init__(self, table=None, columns=None):
self.conn = self.conn(db_config)
self.__table__ = table
self.__column__ = columns
self.map_fields = {
'int': 'integer',
'float': 'float',
'string': 'varchar',
'array': 'integer ARRAY',
'string_array': 'varchar ARRAY'
}
self.init_db()
def conn(self, db_config):
print('Connecting to database')
conn = psycopg2.connect(db_config)
conn.autocommit = True
return conn
def execute(self, *args):
cur = self.conn.cursor()
try: cur.execute(*args)
except Exception as e:
            print('Error', e)
return abort(400, 'Execute is wrong!')
def query(self, *args):
cur = self.conn.cursor()
try: cur.execute(*args)
except Exception as e:
            print('Error', e)
return abort(400, 'Execute is wrong!')
return cur.fetchall()
def query_one(self, *args):
return self.query(*args)[0]
def query_values(self, *args):
result = self.query(*args) # result return like ('[(1,), (2, ), ...]')
if result: return list(map(lambda r: r[0], result)) # lambda return a function and map return a object
else: return abort(400, 'Database is wrong!')
def query_value(self, *args):
result = self.query_values(*args)
return result and result[0]
def init_db(self):
if not self.__table__ or not self.__column__:
return abort(400, 'Not found tables or columns!')
fields = self.__column__.copy()
if self.__table__ == 'user': self.__table__ = '_user'
sql = "SELECT count(*) FROM information_schema.tables WHERE table_name = '{}'".format(self.__table__)
count = self.query_value(sql)
if count == 0:
self.create_table(fields)
return
sql = "SELECT column_name, data_type FROM information_schema.columns WHERE table_name ='{}'".format(self.__table__);
columns_query = self.query(sql)
columns_exists = list(map(lambda r: r[0], columns_query)) # return a list of columns
if columns_exists and len(columns_exists) > 0:
self.add_column_table(columns_exists, fields)
self.drop_column_table(columns_exists, fields)
if columns_query and len(columns_query) > 0:
self.modify_data_type_table(columns_query, fields)
# Create a table with psotgresql
def create_table(self, fields=None):
new_fields = []
if self.__table__ == 'user': self.__table__ = '_user'
if not fields:
return abort(400, 'Not found columns.')
for field in fields:
type_of_field = fields[field]
if field == 'id':
fields[field] = 'serial primary key'
if type_of_field in self.map_fields and field != 'id':
fields[field] = self.map_fields[type_of_field]
new_fields.append(field + ' ' + fields[field])
sql_field = ', '.join(new_fields)
sql = 'CREATE TABLE "{}" ({});'.format(self.__table__, sql_field)
return self.execute(sql)
# add column into table.
def add_column_table(self, columns_exists=None, fields=None):
if not columns_exists or not fields:
return
new_fields = []
# check field already in columns_exists yet?
new_columns = [field for field in fields.keys() if field not in columns_exists]
if not new_columns or len(new_columns) <= 0:
return
for field in new_columns:
type_of_field = fields[field]
if type_of_field in self.map_fields:
fields[field] = self.map_fields[type_of_field]
new_fields.append('ADD ' + field + ' ' + fields[field])
sql_field = ', '.join(new_fields)
sql = 'ALTER TABLE "{}" {};'.format(self.__table__, sql_field)
return self.execute(sql)
# drop column at the table
def drop_column_table(self, columns_exists=None, fields=None):
        if self.__table__ and columns_exists and fields:
columns_removed = [col for col in columns_exists if col not in fields.keys()]
columns_have_to_remove = []
if columns_removed and len(columns_removed) > 0:
for col in columns_removed:
columns_have_to_remove.append('DROP COLUMN IF EXISTS {}'.format(col))
sql_field = ', '.join(columns_have_to_remove)
sql = 'ALTER TABLE {} {};'.format(self.__table__, sql_field)
return self.execute(sql)
# modify type of column at the table
def modify_data_type_table(self, column_query=None, fields=None):
        if self.__table__ and column_query and fields:
map_data_type = {
'integer': 'int',
'character varying': 'string',
'ARRAY': 'array'
}
new_data_type = [field for field in column_query if map_data_type[field[1]] != fields[field[0]]]
if new_data_type and len(new_data_type) > 0:
for col, col_type in new_data_type:
sql_field = str(col) + ' TYPE ' + str(self.map_fields[fields[col]]) + ' USING ( ' + str(col) + '::' + str(self.map_fields[fields[col]]) + ' )'
sql = "ALTER TABLE {} ALTER COLUMN {};".format(self.__table__, sql_field)
self.execute(sql)
def insert_table_sql(self, fields=None, values=None):
new_values = []
        if self.__table__ and fields and values:
if self.__table__ == 'user': self.__table__ = '_user'
for field in fields:
if field == 'id':
continue
new_values.append("'" + values[field] + "'")
sql_fields = ', '.join(fields)
sql_values = ', '.join(new_values)
sql = "INSERT INTO {} ({}) VALUES ({});".format(self.__table__, sql_fields, sql_values)
return self.execute(sql)
def update_table_sql(self, fields=None, values=None, condition=None):
sql_value = ''
new_values = []
if not self.__table__:
return abort(400, 'you have to table')
if not condition:
return abort(400, "you don't have condition")
if self.__table__ == 'user': self.__table__ = '_user'
if fields:
for field in fields:
if field == 'id':
continue
new_values.append(field + '=' + "'" + values[field] + "'")
sql_value = ','.join(new_values)
sql = "UPDATE {} SET {} WHERE {}".format(self.__table__, sql_value, condition)
return self.execute(sql)
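# Usage sketch (table and column names are illustrative; column types must be keys
# of Database.map_fields):
#
#   columns = {'id': 'int', 'name': 'string', 'tags': 'string_array'}
#   db = Database(table='book', columns=columns)
#   db.insert_table_sql(fields=['name'], values={'name': 'Dune'})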
|
name = "comlib"
from comlib.lib import BackyardCom
|
#Python imports
import os
import requests
from typing import List
#Local import
from commands.utils.colors import error_message
from commands.utils.graph import wrapper_graphs
from config import IMAGE_FOLDER
def check_file_name(filename: str) -> bool:
    """Validate that the file has a supported extension (PDF or CSV)"""
    if '.' not in filename:
        return False
    format_name = filename.split('.')[-1].upper()
    if format_name not in ('PDF', 'CSV'):
        error_message("We don't support this file format")
        return False
    return True
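# Examples of the check above:
#   check_file_name('report.pdf') -> True
#   check_file_name('report.txt') -> False (after printing an error message)
#   check_file_name('report')     -> False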
def get_images()->None:
"""Download the Prometeo logo and a content divider"""
urls = [
'https://cdn.prometeoapi.com/static/img/primary%402x.png',
'https://miro.medium.com/max/1400/1*yhG7orf9lABajiMrAfF5WQ.png'
]
#Get and save every image
for i,url in enumerate(urls):
img_data = requests.get(url).content
with open(f'{IMAGE_FOLDER}/image{i}.png','wb') as image:
image.write(img_data)
def wrapper_image(data:List[List[str]])->None:
"""Execute all the images related functions"""
#Create the folder if not exist
if not os.path.exists(IMAGE_FOLDER):
os.mkdir(IMAGE_FOLDER)
#Download the Prometeo's images
get_images()
#Create and save the charts
wrapper_graphs(data)
|
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from random import randint
from tempfile import gettempdir
from uuid import uuid4
from jsonschema import ValidationError
import pytest
from reles import configure_mappings
from reles.validators import CustomDraft4Validator
@pytest.fixture
def random_name():
return uuid4().hex
@pytest.fixture
def random_age():
return randint(0, 65536)
@pytest.fixture
def random_document(datastore, test_index, test_doc_type, random_name, random_age):
return datastore.create_document(
test_index,
test_doc_type,
{'name': random_name, 'age': random_age},
refresh=True
)
class TestValidatorUniqueTogether(object):
@pytest.fixture
def validator(self, datastore, test_index, test_doc_type):
test_schema = {
'type': 'object',
'required': ['name', 'age'],
'x-es-mapping': {
'properties': {
'nickname': {'type': 'string'},
'name': {'type': 'string'},
'age': {'type': 'integer'},
'user': {'type': 'string'},
'host': {'type': 'string'},
}
},
'x-unique-together': [
['name', 'age'],
['user', 'host']
],
'x-unique': [
'nickname',
],
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'age': {'type': 'integer'},
'user': {'type': 'string'},
'host': {'type': 'string'},
}
}
configure_mappings(
test_index,
{'definitions': {test_doc_type: test_schema}},
datastore._es
)
return CustomDraft4Validator(
test_schema,
datastore=datastore,
upload_path=gettempdir(),
index=test_index,
doc_type=test_doc_type
)
def test_create_no_conflict(self, validator, random_name, random_age):
# does not raise (note also that `user` and `host` are absent, which is fine!)
validator.validate({'name': random_name, 'age': random_age})
def test_create_conflict_raises(self, validator, random_document):
with pytest.raises(ValidationError) as e:
validator.validate(
{
'name': random_document['name'],
'age': random_document['age']
}
)
assert 'unique property' in e.value.message
assert 'conflicts with existing object' in e.value.message
def test_create_missing_only_user_raises(self, validator, random_name, random_age):
with pytest.raises(ValidationError) as e:
validator.validate(
{
'name': random_name,
'age': random_age,
'user': uuid4().hex,
}
)
assert "needs all or none of it's fields" in e.value.message
def test_create_missing_only_host_raises(self, validator, random_name, random_age):
with pytest.raises(ValidationError) as e:
validator.validate(
{
'name': random_name,
'age': random_age,
'host': uuid4().hex,
}
)
assert "needs all or none of it's fields" in e.value.message
def test_update_no_conflict(self, validator, random_document):
# does not raise
validator.validate(
{
'_id': random_document['_id'],
'name': random_document['name'],
'age': random_document['age']
}
)
def test_update_different_id_conflict_raises(self, validator, random_id, random_document):
with pytest.raises(ValidationError) as e:
validator.validate(
{'id': random_id, 'name': random_document['name'], 'age': random_document['age']}
)
assert 'unique property' in e.value.message
assert 'conflicts with existing object' in e.value.message
def test_does_not_require_unique_individually(self, validator, random_document):
# 'name' is not unique individually (but doesn't have to).
# The changed 'age' makes the tuple unique together.
validator.validate({
'name': random_document['name'],
'age': random_document['age'] + 1
})
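    def test_create_with_both_user_and_host_ok(self, validator, random_name, random_age):
        # Hedged extra case (not in the original suite): 'user'/'host' are declared
        # unique-together, so providing *both* with fresh random values should
        # neither trigger the all-or-none check nor conflict with existing objects.
        validator.validate({
            'name': random_name,
            'age': random_age,
            'user': uuid4().hex,
            'host': uuid4().hex,
        })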
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 16:03:41 2020
@author: juanporras
"""
import pandas as pd
import numpy as np
import json
import urllib.request
from urllib.request import urlopen
import datetime
import time
config = {'displayModeBar': False}
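# (Presumably Plotly display options: hide the mode bar when rendering figures.)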
from cleaning_datas import df
OWData = pd.read_csv("https://covid.ourworldindata.org/data/owid-covid-data.csv")
|
# Yudha Prasetyo - Assignment for Session 13
def listdata():
    nama1 = input("Enter name #1: ")
    nilai1 = input("Enter score #1: ")
    nama2 = input("Enter name #2: ")
    nilai2 = input("Enter score #2: ")
    nama3 = input("Enter name #3: ")
    nilai3 = input("Enter score #3: ")
    return nama1, nilai1, nama2, nilai2, nama3, nilai3
def main():
    nama1, nilai1, nama2, nilai2, nama3, nilai3 = listdata()
    print("====================================")
    print("No \t Name \t\t Score \t\t")
    print("====================================")
    print("1\t", nama1, "\t", nilai1, "\n2\t", nama2, "\t", nilai2, "\n3\t", nama3, "\t", nilai3)
main()
|
import pandas
def lambda_handler(event, context):
return pandas.__version__
|
import logging
from collections import deque
from typing import Type, Dict, Iterator, Union
import pika
import scrapy
from scrapy import signals, Request
from scrapy.crawler import Crawler
from scrapy.exceptions import CloseSpider, DontCloseSpider
from scrapy.http import Response
from twisted.internet import reactor
from twisted.python.failure import Failure
from rmq.connections import PikaSelectConnection
from rmq.utils import RMQDefaultOptions
from rmq_alternative.base_rmq_spider import BaseRmqSpider
from rmq_alternative.schemas.messages.base_rmq_message import BaseRmqMessage
DeliveryTagInteger = int
CountRequestInteger = int
class RmqReaderMiddleware(object):
request_counter: Dict[DeliveryTagInteger, CountRequestInteger] = {}
@classmethod
def from_crawler(cls, crawler):
if not isinstance(crawler.spider, BaseRmqSpider):
raise CloseSpider(f"spider must have the {BaseRmqSpider.__name__} class as its parent")
o = cls(crawler)
"""Subscribe to signals which controls opening and shutdown hooks/behaviour"""
crawler.signals.connect(o.spider_idle, signal=signals.spider_idle)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
"""Subscribe to signals which controls requests scheduling and responses or error retrieving"""
crawler.signals.connect(o.on_spider_error, signal=signals.spider_error)
"""Subscribe to signals which controls item processing"""
crawler.signals.connect(o.on_item_dropped, signal=signals.item_dropped)
crawler.signals.connect(o.on_item_error, signal=signals.item_error)
crawler.signals.connect(o.on_request_dropped, signal=signals.request_dropped)
return o
def __init__(self, crawler: Crawler):
super().__init__()
self.crawler = crawler
self.__spider: BaseRmqSpider = crawler.spider
self.failed_response_deque = deque([], maxlen=crawler.settings.get('CONCURRENT_REQUESTS'))
self.message_meta_name: str = '__rmq_message'
self.init_request_meta_name: str = '__rmq_init_request'
self.logger = logging.getLogger(name=self.__class__.__name__)
self.logger.setLevel(self.__spider.settings.get("LOG_LEVEL", "INFO"))
logging.getLogger("pika").setLevel(self.__spider.settings.get("PIKA_LOG_LEVEL", "WARNING"))
self.rmq_connection = None
"""Build pika connection parameters and start connection in separate twisted thread"""
self.parameters = pika.ConnectionParameters(
host=self.__spider.settings.get("RABBITMQ_HOST"),
port=int(self.__spider.settings.get("RABBITMQ_PORT")),
virtual_host=self.__spider.settings.get("RABBITMQ_VIRTUAL_HOST"),
credentials=pika.credentials.PlainCredentials(
username=self.__spider.settings.get("RABBITMQ_USERNAME"),
password=self.__spider.settings.get("RABBITMQ_PASSWORD"),
),
heartbeat=RMQDefaultOptions.CONNECTION_HEARTBEAT.value,
)
def connect(self, parameters, queue_name):
c = PikaSelectConnection(
parameters,
queue_name,
owner=self,
options={
"enable_delivery_confirmations": False,
"prefetch_count": self.__spider.settings.get("CONCURRENT_REQUESTS", 1),
},
is_consumer=True,
)
self.logger.info("Pika threaded event start")
c.run()
self.logger.info("Pika threaded event loop stopped and exited")
def set_connection_handle(self, connection):
self.rmq_connection = connection
def spider_idle(self, spider: BaseRmqSpider):
if not self.rmq_connection:
task_queue_name = self.__spider.task_queue_name
reactor.callInThread(self.connect, self.parameters, task_queue_name)
raise DontCloseSpider
def spider_closed(self, spider: BaseRmqSpider):
if self.rmq_connection is not None and isinstance(self.rmq_connection, PikaSelectConnection):
if isinstance(self.rmq_connection.connection, pika.SelectConnection):
self.rmq_connection.connection.ioloop.add_callback_threadsafe(
self.rmq_connection.stop
)
def raise_close_spider(self):
# TODO: does it work?
if self.crawler.engine.slot is None or self.crawler.engine.slot.closing:
self.logger.critical("SPIDER ALREADY CLOSED")
return
self.crawler.engine.close_spider(self.__spider)
# SPIDER MIDDLEWARE METHOD
def process_start_requests(self, start_requests, spider: BaseRmqSpider) -> Iterator[Request]:
for request in start_requests:
request.meta[self.init_request_meta_name] = True
yield request
# SPIDER MIDDLEWARE METHOD
def process_spider_input(self, response, spider: BaseRmqSpider) -> None:
pass # raise Exception('process_spider_input exception')
# SPIDER MIDDLEWARE METHOD
def process_spider_output(self, response, result, spider: BaseRmqSpider) -> Iterator[Union[Request, dict]]:
if self.message_meta_name in response.request.meta:
rmq_message: BaseRmqMessage = response.request.meta[self.message_meta_name]
delivery_tag = rmq_message.deliver.delivery_tag
if self.is_active_message(delivery_tag):
for item_or_request in result:
if isinstance(item_or_request, scrapy.Request):
self.request_counter_increment(delivery_tag)
item_or_request.meta[self.message_meta_name] = rmq_message
yield item_or_request
if response in self.failed_response_deque:
return
self.request_counter_decrement(delivery_tag)
self.try_to_acknowledge_message(rmq_message)
else:
                self.logger.warning('skipped processing of an inactive message')
elif self.init_request_meta_name in response.request.meta:
pass
else:
            raise Exception('received response without an RMQ message')
# SPIDER MIDDLEWARE METHOD
def process_spider_exception(self, response, exception, spider):
self.crawler.signals.send_catch_log(
signal=signals.spider_error,
spider=self,
failure=exception,
response=response
)
        # the returned value is used as the process_spider_output result when an exception occurs
return []
def on_message_consumed(self, dict_message: dict) -> None:
SpiderRmqMessage: Type[BaseRmqMessage] = self.__spider.message_type
message = SpiderRmqMessage(
channel=dict_message['channel'],
deliver=dict_message['method'],
basic_properties=dict_message['properties'],
body=dict_message['body'],
_rmq_connection=self.rmq_connection,
_crawler=self.crawler,
)
request = self.__spider.next_request(message)
request.meta[self.message_meta_name] = message
self.request_counter[message.deliver.delivery_tag] = 1
if request.errback is None:
request.errback = self.default_errback
if self.crawler.crawling:
self.crawler.engine.crawl(request, spider=self.__spider)
def on_spider_error(self, failure, response: Response, spider: BaseRmqSpider, *args, **kwargs):
if isinstance(response, Response):
meta = response.meta
else:
meta = failure.request.meta
if self.message_meta_name in meta:
# TODO: What was I trying to do?
self.failed_response_deque.append(response)
rmq_message: BaseRmqMessage = meta[self.message_meta_name]
self.nack(rmq_message)
def on_item_dropped(self, item, response, exception, spider: BaseRmqSpider):
if self.message_meta_name in response.meta:
rmq_message: BaseRmqMessage = response.meta[self.message_meta_name]
self.nack(rmq_message)
def on_item_error(self, item, response, spider: BaseRmqSpider, failure):
if self.message_meta_name in response.meta:
rmq_message: BaseRmqMessage = response.meta[self.message_meta_name]
self.nack(rmq_message)
def on_request_dropped(self, request, spider: BaseRmqSpider):
"""
called when the request is filtered
"""
if self.message_meta_name in request.meta:
rmq_message: BaseRmqMessage = request.meta[self.message_meta_name]
delivery_tag = rmq_message.deliver.delivery_tag
self.logger.warning(f'request_dropped, delivery tag {delivery_tag}')
self.request_counter_decrement(delivery_tag)
self.try_to_acknowledge_message(rmq_message)
def request_counter_increment(self, delivery_tag: int):
self.request_counter[delivery_tag] += 1
def request_counter_decrement(self, delivery_tag: int):
self.request_counter[delivery_tag] -= 1
def try_to_acknowledge_message(self, rmq_message: BaseRmqMessage):
        self.logger.warning('trying to acknowledge - pending requests: {}'.format(self.request_counter[rmq_message.deliver.delivery_tag]))
if self.request_counter[rmq_message.deliver.delivery_tag] == 0:
rmq_message.ack()
def nack(self, rmq_message: BaseRmqMessage) -> None:
rmq_message.nack()
self.request_counter.pop(rmq_message.deliver.delivery_tag, None)
def is_active_message(self, delivery_tag: int) -> bool:
return delivery_tag in self.request_counter
def default_errback(self, failure: Failure, *args, **kwargs):
raise failure
|
import numpy as np
from typing import Union, Tuple, Dict
class Agent(object):
    def get_action(self, obs: np.ndarray, stochastic: bool = True) -> Tuple[Union[int, np.ndarray, Dict], float]:
        raise NotImplementedError
    def update(self, obs: np.ndarray, act: Union[int, np.ndarray, Dict], blogp: float, reward: float, obs_next: np.ndarray, terminal: bool) -> Union[np.ndarray, None]:
        raise NotImplementedError
    def new_episode(self):
        raise NotImplementedError
class FixedAgent(Agent):
    def __init__(self, policy):
        self.policy = policy
    def get_action(self, obs: np.ndarray, stochastic: bool = True) -> Tuple[Union[int, np.ndarray, Dict], float]:
        return self.policy.get_action(obs, stochastic)
    def update(self, obs: np.ndarray, act: Union[int, np.ndarray, Dict], blogp: float, reward: float, obs_next: np.ndarray, terminal: bool) -> Union[np.ndarray, None]:
        return None
    def new_episode(self):
        pass
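# Minimal sketch of a policy object FixedAgent can wrap (the RandomDiscretePolicy
# name and its uniform-random behaviour are assumptions, not part of the original code):
class RandomDiscretePolicy:
    def __init__(self, n_actions: int):
        self.n_actions = n_actions
    def get_action(self, obs: np.ndarray, stochastic: bool = True):
        # Uniform random action; log-probability of any action is log(1/n) = -log(n).
        act = int(np.random.randint(self.n_actions))
        return act, float(-np.log(self.n_actions))
# agent = FixedAgent(RandomDiscretePolicy(4))
# act, logp = agent.get_action(np.zeros(8))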
|
from __future__ import absolute_import, division, print_function, unicode_literals
from jinja2 import BytecodeCache
class InMemoryBytecodeCache(BytecodeCache):
def __init__(self):
self._storage = {}
def clear(self):
self._storage.clear()
def load_bytecode(self, bucket):
try:
cached = self._storage[bucket.key]
except KeyError:
pass
else:
bucket.bytecode_from_string(cached)
def dump_bytecode(self, bucket):
self._storage[bucket.key] = bucket.bytecode_to_string()
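# Minimal usage sketch with standard jinja2 (the template source below is hypothetical):
#   from jinja2 import Environment, DictLoader
#   env = Environment(
#       loader=DictLoader({'hello.txt': 'Hello {{ name }}!'}),
#       bytecode_cache=InMemoryBytecodeCache(),
#   )
#   env.get_template('hello.txt').render(name='world')  # compiled bytecode is now cached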
|
class StreamKey:
    '''
    Encryption info for a single stream. It has the following attributes:
    - encryption method
    - URI of the key
    - key content
    - key id
    - IV (offset)
    - other attributes, depending on the stream type
    It provides the following methods:
    - set the key content
    - set the IV
    - dump to text
    - load from text
    '''
def __init__(self):
self.method = 'AES-128' # type: str
self.uri = None # type: str
self.key = b'' # type: bytes
self.keyid = None # type: str
self.iv = '0' * 32 # type: str
def set_key(self, key: bytes):
self.key = key
return self
    def set_iv(self, iv: str):
        if iv is not None:
            self.iv = iv
        return self
def dump(self):
pass
def load(self):
pass
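# Minimal usage sketch (the key bytes and IV below are made-up placeholders):
# sk = StreamKey()
# sk.set_key(bytes.fromhex('000102030405060708090a0b0c0d0e0f')).set_iv('0' * 32)
# sk.method, sk.uri and sk.keyid can then be filled in from the playlist metadata.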
|
import re
from typing import Tuple
from urllib.parse import ParseResult, urlparse
from api2ch.config import BOARDS, hostname_mirrors
def prettify_bytes(size: float) -> str:
for unit in ('Б', 'Кб', 'Мб', 'Гб', 'Тб'):
if size < 1024.0:
break
size /= 1024.0
return f'{size:.0f} {unit}' if unit in ('Б', 'Кб') else f'{size:.2f} {unit}'
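# Worked examples for prettify_bytes (units are the Russian abbreviations used above):
# prettify_bytes(512)        -> '512 Б'
# prettify_bytes(2048)       -> '2 Кб'
# prettify_bytes(5_000_000)  -> '4.77 Мб'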
def parse_url(url: str) -> Tuple[bool, str, int]:
"""
Parse url with checks
:param url: example: 'https://2ch.hk/api/res/1.html'
:return: is_valid, board, thread_id
"""
result: ParseResult = urlparse(url)
bad = False, '', 0
if result.hostname not in hostname_mirrors:
return bad
split = re.split('[/.]', result.path)
    if len(split) < 4:
return bad
board, method, thread = split[1], split[2], split[3]
if board not in BOARDS or method != 'res' or not thread.isdigit():
return bad
thread_id = int(thread)
return True, board, thread_id
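# Hedged examples (assuming '2ch.hk' is in hostname_mirrors and 'b' is in BOARDS):
# parse_url('https://2ch.hk/b/res/123456.html')  -> (True, 'b', 123456)
# parse_url('https://example.com/b/res/1.html')  -> (False, '', 0)   # unknown host
# parse_url('https://2ch.hk/b/catalog.html')     -> (False, '', 0)   # not a thread URL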
def convert_html(text: str) -> str:
"""Telegram acceptable HTML code"""
text = re.sub(r'<br>', '\n', text)
    text = re.sub(r'&nbsp;', ' ', text)
    text = re.sub(r'&quot;', '\'', text)
    text = re.sub(r'&#47;', '/', text)
text = re.sub(r'<(/?)strong>', r'<\1b>', text)
text = re.sub(r'<(/?)em>', r'<\1i>', text)
text = re.sub(r'</?span.*?>', '', text)
text = re.sub(r'</?sup>', '', text)
text = re.sub(r'</?sub>', '', text)
return text
def clear_html(text: str) -> str:
"""Clear text from HTML tags"""
text = re.sub(r'<br>', '\n', text)
    text = re.sub(r'&nbsp;', ' ', text)
    text = re.sub(r'&quot;', '\'', text)
    text = re.sub(r'&#47;', '/', text)
text = re.sub(r'<.*?>', '', text)
return text
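# Example of the two helpers on a small 2ch-style snippet:
# convert_html('<strong>OP</strong><br><span class="unkfunc">&gt;quote</span>')
#   -> '<b>OP</b>\n&gt;quote'
# clear_html('<strong>OP</strong><br>line two')
#   -> 'OP\nline two'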
|
import sys
import operator
class Solution:
def layerbylayer(self, nlayer):
layerlength = self.n - nlayer * 2 # layer starts from 0
if not layerlength:
return None
if layerlength == 1:
self.ans[nlayer][nlayer] = self.nownum
else:
nowx, nowy = nlayer, nlayer
for dire in self.directions:
for i in range(layerlength - 1):
self.ans[nowx][nowy] = self.nownum
nowx, nowy = tuple(map(operator.add, (nowx, nowy), dire))
self.nownum += 1
self.layerbylayer(nlayer + 1)
def generateMatrix(self, n):
self.n = n
self.nownum = 1
self.ans = [[0] * n for i in range(n)]
self.directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
self.layerbylayer(0)
return self.ans
s = Solution()
if __name__ == '__main__':
    print(s.generateMatrix(int(sys.argv[1])))
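# Example: running this script with argument 3 prints the 3x3 spiral
# [[1, 2, 3], [8, 9, 4], [7, 6, 5]]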
|
#! /usr/local/Python_envs/Python3/bin/python3
import paramiko
import time
from getpass import getpass
import re
host = 'csr1.test.lab'
username = 'admin'
password = 'admin'
# host = 'ios-xe-mgmt-latest.cisco.com'
# username = 'developer'
# password = 'C1sco12345'
print(f"\n{'#' * 55}\nConnecting to the Device {host}\n{'#' * 55} ")
SESSION = paramiko.SSHClient()
SESSION.set_missing_host_key_policy(paramiko.AutoAddPolicy())
SESSION.connect(host, port=22,
username=username,
password=password,
look_for_keys=False,
allow_agent=False)
DEVICE_ACCESS = SESSION.invoke_shell()
DEVICE_ACCESS.send(b'term length 0\n')
DEVICE_ACCESS.send(b'show run\n')
time.sleep(1)
output = (DEVICE_ACCESS.recv(65000).decode('ascii'))
print(output)
print(f"\n{'#' * 55}\nFinished Executing Script\n{'#' * 55} ")
SESSION.close()
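# Hedged variant: since getpass is already imported, the credentials could be
# prompted for instead of hard-coded, e.g.:
#   username = input('Username: ')
#   password = getpass('Password: ')
# (the rest of the connection and command-sending code stays the same)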
|