import pyglet
from grid import Terrain
from pyglet.window import key
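# Expected interface of grid.Terrain, as used below (inferred from the calls in this
# file): Terrain(width, height, cell_size), draw(), recalc(), moveup(), movedown(),
# moveleft(), moveright(), and heightmaptoggle().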
class Window(pyglet.window.Window):
def __init__(self):
super(Window,self).__init__(800, 800)
self.terrain = Terrain(self.get_size()[0], self.get_size()[1], 10)
def on_draw(self):
self.clear()
self.terrain.draw()
# resketch = input("Redraw?(y/n)")
# if resketch == "y":
# self.terrain.recalc()
# self.terrain.draw()
def on_key_press(self, symbol, modification):
if symbol == key.SPACE:
self.terrain.recalc()
print("New World Generated.")
if symbol == key.UP:
self.terrain.moveup()
if symbol == key.DOWN:
self.terrain.movedown()
if symbol == key.LEFT:
self.terrain.moveleft()
if symbol == key.RIGHT:
self.terrain.moveright()
if symbol == key.H:
self.terrain.heightmaptoggle()
# Standalone debug handler. Note that `@Window.event` on the class does not register
# anything; a module-level handler like this would need to be attached to an instance
# instead, e.g. `window.push_handlers(on_key_press)` after the window is created.
def on_key_press(symbol, modifiers):
    if symbol == key.SPACE:
        print("G!!!!!!!")
    else:
        print("A Key!!!")
if __name__ == '__main__':
window = Window()
pyglet.app.run()
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from kit.dtypes import KustoType
from kit.models.serializable import SerializableModel
@dataclass
class Column(SerializableModel):
dtype: str = KustoType.STRING.value
name: Optional[str] = None
index: Optional[int] = None
def __init__(self, name: str = None, dtype: str = None, index: int = None, data_type: KustoType = None):
self.name = name
if name is None and index is None:
raise ValueError("Must explicitly specify name or index")
if data_type:
self.dtype = data_type.value
elif dtype:
self.dtype = dtype
else:
raise ValueError("Missing data type property")
self.index = index
@property
def moniker(self):
return self.name if self.name else f"Col_{self.index}"
@property
def data_type(self):
return KustoType(self.dtype)
@data_type.setter
def data_type(self, v: KustoType):
self.dtype = v.value
|
import tvm
from tvm.tensor_graph.core2.graph.concrete import Compute, Tensor
######################################################################
# for functional, all states are inputs, data from inside functionals
# can only be constants
######################################################################
def mse_loss(outputs, labels, output_dtype="float32", requires_grad=False):
"""MSE Loss function
Args:
-----------------------------
outputs : Tensor
shape [batch, length]
labels : Tensor
shape [batch, length]
output_dtype : str
requires_grad : bool
-----------------------------
Returns:
-----------------------------
Tensor
shape [1]
-----------------------------
"""
assert len(outputs.shape) == len(labels.shape) and outputs.shape[0] == labels.shape[0] and outputs.shape[1] == labels.shape[1]
def _inner_mse(_out, _label):
def _for_spatial(i):
def _for_reduce(b, l):
return tvm.tir.power(_label[i+b, l] - _out[i+b, l], 2) / outputs.shape[1]
return _for_reduce, [*outputs.shape], "sum"
return _for_spatial
return Compute([1], output_dtype, outputs, labels, fhint=_inner_mse, name="mse", requires_grad=requires_grad)
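
# For intuition: a plain-NumPy reference of what the kernel above computes (a sketch
# for sanity-checking only, not used by the graph). The squared error is divided by
# the feature length and summed over every (batch, length) element, i.e. the
# per-sample MSE summed over the batch.
def _mse_loss_reference(outputs_np, labels_np):
    import numpy as np  # local import; this helper is only an illustrative cross-check
    return np.sum((labels_np - outputs_np) ** 2 / outputs_np.shape[1])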
|
import datetime as dt
import json
from bson import json_util
import pygal
import time
from pygal.style import BlueStyle
# These functions work on the GroupB totalStorage collection.
# As the dataset grows, these per-call sort/limit scans will take too long; in the
# future we may want a "group by" equivalent (aggregation pipeline or map-reduce),
# as in the sketch below.
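
# A minimal sketch of that aggregation-based alternative (assumes a pymongo
# collection; `storage_extremes` is a hypothetical helper, not used elsewhere):
# a single $group pass replaces the four separate sort/limit scans below.
def storage_extremes(collection):  # pragma: no cover
    pipeline = [{'$group': {
        '_id': None,
        'max_TB': {'$max': '$total_TB'},
        'min_TB': {'$min': '$total_TB'},
        'max_farmers': {'$max': '$total_farmers'},
        'min_farmers': {'$min': '$total_farmers'},
    }}]
    results = list(collection.aggregate(pipeline))
    return results[0] if results else None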
def max_tb(collection): # pragma: no cover
"""Returns the maximum number of TB recorded in the collection"""
max_TB = 0
for doc in collection.find({}).sort([('total_TB',-1)]).limit(1):
max_TB = doc['total_TB']
return max_TB
def min_tb(collection): # pragma: no cover
"""Returns the minimum number of TB recorded in the collection"""
min_TB = 0
for doc in collection.find({}).sort([('total_TB',1)]).limit(1):
min_TB = doc['total_TB']
return min_TB
def min_farmers(collection): # pragma: no cover
"""Returns the minimum number of farmers recorded in the collection"""
min_farmers = 0
for doc in collection.find({}).sort([('total_farmers',1)]).limit(1):
min_farmers = doc['total_farmers']
return min_farmers
def max_farmers(collection): # pragma: no cover
"""Returns the maximum number of farmers recorded in the collection"""
max_farmers = 0
for doc in collection.find({}).sort([('total_farmers',-1)]).limit(1):
max_farmers = doc['total_farmers']
return max_farmers
def avg_gb_farmer(collection):
"""Returns the average storage capacity of a farmer in gigabytes."""
avg_gb_farmer = 0
for doc in collection.find({}).sort([('time', -1)]).limit(1):
avg_gb_farmer = doc['total_TB'] / doc['total_farmers'] * 1000
avg_gb_farmer = format(avg_gb_farmer, '.2f')
return avg_gb_farmer
def is_noon_time(dt_time): # pragma: no cover
"""
Returns True if the time is between 12:00-12:05, False otherwise.
Args:
dt_time: datetime.datetime
"""
    return dt.time(12, 0) < dt_time.time() < dt.time(12, 5)
def create_stats_table(conn, cursor):
cursor.execute('''CREATE TABLE stats
(date REAL PRIMARY KEY NOT NULL,
tb REAL,
farmers REAL);''')
conn.commit()
def init_stats_table(conn, cursor, collection):
for doc in collection.find({}, {'_id': False, 'total_TB': True,
'time': True, 'total_farmers': True}):
tb = doc['total_TB']
farmers = doc['total_farmers']
date = time.mktime(doc['time'].timetuple())
cursor.execute('INSERT INTO stats(date, tb, farmers) VALUES (?, ?, ?)',
(date, tb, farmers))
conn.commit()
def update_stats_table(conn, cursor, collection):
cursor.execute('SELECT MAX(date) from stats')
last_date = dt.datetime.fromtimestamp(int(cursor.fetchone()[0]))
for doc in collection.find({'time': {'$gt': last_date}}):
tb = doc['total_TB']
farmers = doc['total_farmers']
date = time.mktime(doc['time'].timetuple())
cursor.execute('INSERT INTO stats(date, tb, farmers) VALUES (?, ?, ?)',
(date, tb, farmers))
conn.commit()
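
# Rough wiring sketch for the SQLite helpers above (assumes a sqlite3 connection and
# the same pymongo collection; names are illustrative only):
#   conn = sqlite3.connect('stats.db')
#   cursor = conn.cursor()
#   create_stats_table(conn, cursor)              # first run only
#   init_stats_table(conn, cursor, collection)    # first run only
#   update_stats_table(conn, cursor, collection)  # subsequent runs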
def total_storage_graph(collection):
totals = collection.find({}, {'_id': False, 'total_TB': True,
'time': True, 'total_farmers': True})
json_totals = []
for total in totals:
json_totals.append(total)
json_totals = json.dumps(json_totals, default=json_util.default)
parsed = json.loads(json_totals)
terabytes = []
times = []
    for i in range(1, len(parsed) // 12):  # take one data point per hour
j = i * 12
terabytes.append(float(parsed[j]['total_TB']))
d = dt.datetime.fromtimestamp(parsed[j]['time']['$date']/1e3)
times.append('%i/%i %i:%i' % (d.month, d.day, d.hour, d.minute))
tb_title = 'Total Storage Available Over Time'
tb_min_range = min_tb(collection)
tb_max_range = max_tb(collection) + 100
tb_chart = pygal.Line(width=1000, height=600, explicit_size=True,
x_label_rotation=35, show_minor_x_labels=False,
x_labels_major_count=12, title=tb_title,
range=(tb_min_range, tb_max_range), dots_size=0.2,
style=BlueStyle, disable_xml_declaration=True)
tb_chart.x_labels = times
tb_chart.add('TB', terabytes)
return tb_chart
def total_farmers_graph(collection):
totals = collection.find({}, {'_id': False, 'total_TB': True,
'time': True, 'total_farmers': True})
json_totals = []
for total in totals:
json_totals.append(total)
json_totals = json.dumps(json_totals, default=json_util.default)
parsed = json.loads(json_totals)
farmers = []
times = []
    for i in range(1, len(parsed) // 12):  # take one data point per hour
j = i * 12
farmers.append(int(parsed[j]['total_farmers']))
d = dt.datetime.fromtimestamp(parsed[j]['time']['$date']/1e3)
times.append('%i/%i %i:%i' % (d.month, d.day, d.hour, d.minute))
farmers_title = 'Number of Farmers Over Time'
farmers_min_range = min_farmers(collection)
farmers_max_range = max_farmers(collection) + 50
farmers_chart = pygal.Line(width=1000, height=600, explicit_size=True,
x_label_rotation=35, show_minor_x_labels=False,
x_labels_major_count=12, title=farmers_title,
range=(farmers_min_range, farmers_max_range),
dots_size=0.2, style=BlueStyle,
disable_xml_declaration=True)
farmers_chart.x_labels = times
farmers_chart.add('farmers', farmers)
return farmers_chart
|
from datetime import datetime, date
import json
from unittest.mock import patch
import tempfile
import os
from delphi_hhs.run import _date_to_int, int_date_to_previous_day_datetime, generate_date_ranges, \
make_signal, make_geo, run_module, pop_proportion
from delphi_hhs.constants import SMOOTHERS, GEOS, SIGNALS, \
CONFIRMED, SUM_CONF_SUSP, CONFIRMED_FLU, CONFIRMED_PROP, SUM_CONF_SUSP_PROP, CONFIRMED_FLU_PROP
from delphi_utils.geomap import GeoMapper
from freezegun import freeze_time
import numpy as np
import pandas as pd
import pytest
def test__date_to_int():
"""Check that dates are converted to the right int."""
assert _date_to_int(date(2020, 5, 1)) == 20200501
def test_date_conversion():
"""Check that we convert dates properly between Epidata and datetime format."""
data = pd.DataFrame({"date": [20200101, 20201231]})
result = int_date_to_previous_day_datetime(data.date)
expected_result = [
datetime(year=2019, month=12, day=31),
datetime(year=2020, month=12, day=30)
]
for got, expected in zip(result, expected_result):
assert isinstance(got, datetime), f"Bad type: {type(got)}\n{result}"
assert got == expected
def test_generate_date_ranges():
"""Check ranges generated partition the specified inputs."""
assert generate_date_ranges(date(2020, 1, 1), date(2020, 1, 1)) == \
[{'from': 20200101, 'to': 20200101}]
assert generate_date_ranges(date(2020, 1, 1), date(2020, 1, 31)) == \
[{'from': 20200101, 'to': 20200131}]
assert generate_date_ranges(date(2020, 1, 1), date(2020, 2, 1)) == \
[{'from': 20200101, 'to': 20200131},
{'from': 20200201, 'to': 20200201}]
assert generate_date_ranges(date(2020, 1, 1), date(2020, 5, 12)) == \
[{'from': 20200101, 'to': 20200131},
{'from': 20200201, 'to': 20200302},
{'from': 20200303, 'to': 20200402},
{'from': 20200403, 'to': 20200503},
{'from': 20200504, 'to': 20200512}]
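    # Note: the expected values above show the chunking rule used by generate_date_ranges:
    # each interval is split into consecutive 31-day windows, with any remainder in a
    # shorter final window.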
def test_make_signal():
"""Check that constructed signals sum the correct columns."""
data = pd.DataFrame({
'state': ['NA'],
'date': [20200102],
'previous_day_admission_adult_covid_confirmed': [1],
'previous_day_admission_adult_covid_suspected': [2],
'previous_day_admission_pediatric_covid_confirmed': [4],
'previous_day_admission_pediatric_covid_suspected': [8],
'previous_day_admission_influenza_confirmed': [16]
})
expected_confirmed = pd.DataFrame({
'state': ['na'],
'timestamp': [datetime(year=2020, month=1, day=1)],
'val': [5.],
})
pd.testing.assert_frame_equal(expected_confirmed, make_signal(data, CONFIRMED))
pd.testing.assert_frame_equal(expected_confirmed, make_signal(data, CONFIRMED_PROP))
expected_sum = pd.DataFrame({
'state': ['na'],
'timestamp': [datetime(year=2020, month=1, day=1)],
'val': [15.],
})
pd.testing.assert_frame_equal(expected_sum, make_signal(data, SUM_CONF_SUSP))
pd.testing.assert_frame_equal(expected_sum, make_signal(data, SUM_CONF_SUSP_PROP))
expected_flu = pd.DataFrame({
'state': ['na'],
'timestamp': [datetime(year=2020, month=1, day=1)],
'val': [16.],
})
pd.testing.assert_frame_equal(expected_flu, make_signal(data, CONFIRMED_FLU))
pd.testing.assert_frame_equal(expected_flu, make_signal(data, CONFIRMED_FLU_PROP))
with pytest.raises(Exception):
make_signal(data, "zig")
def test_pop_proportion():
geo_mapper = GeoMapper()
state_pop = geo_mapper.get_crosswalk("state_code", "pop")
test_df = pd.DataFrame({
'state': ['PA'],
'state_code': [42],
'timestamp': [datetime(year=2020, month=1, day=1)],
'val': [15.],})
pa_pop = int(state_pop.loc[state_pop.state_code == "42", "pop"])
pd.testing.assert_frame_equal(
pop_proportion(test_df, geo_mapper),
pd.DataFrame({
'state': ['PA'],
'state_code': [42],
'timestamp': [datetime(year=2020, month=1, day=1)],
'val': [15/pa_pop*100000],})
)
test_df= pd.DataFrame({
'state': ['WV'],
'state_code': [54],
'timestamp': [datetime(year=2020, month=1, day=1)],
'val': [150.],})
wv_pop = int(state_pop.loc[state_pop.state_code == "54", "pop"])
pd.testing.assert_frame_equal(
pop_proportion(test_df, geo_mapper),
pd.DataFrame({
'state': ['WV'],
'state_code': [54],
'timestamp': [datetime(year=2020, month=1, day=1)],
'val': [150/wv_pop*100000],})
)
def test_make_geo():
"""Check that geographies transform correctly."""
test_timestamp = datetime(year=2020, month=1, day=1)
geo_mapper = GeoMapper()
data = pd.DataFrame({
'state': ['PA', 'WV', 'OH'],
'state_code': [42, 54, 39],
'timestamp': [test_timestamp] * 3,
'val': [1., 2., 4.],
})
template = {
'se': np.nan,
'sample_size': np.nan,
}
expecteds = {
"state": pd.DataFrame(
dict(template,
geo_id=data.state,
timestamp=data.timestamp,
val=data.val)),
"hhs": pd.DataFrame(
dict(template,
geo_id=['3', '5'],
timestamp=[test_timestamp] * 2,
val=[3., 4.])),
"nation": pd.DataFrame(
dict(template,
geo_id=['us'],
timestamp=[test_timestamp],
val=[7.]))
}
for geo, expected in expecteds.items():
result = make_geo(data, geo, geo_mapper)
for series in ["geo_id", "timestamp", "val", "se", "sample_size"]:
pd.testing.assert_series_equal(expected[series], result[series], obj=f"{geo}:{series}")
@freeze_time("2020-01-01")
@patch("delphi_epidata.Epidata.covid_hosp")
def test_output_files(mock_covid_hosp):
with open("test_response.json", "r") as f:
test_response = json.load(f)
mock_covid_hosp.return_value = test_response
with tempfile.TemporaryDirectory() as tmpdir:
params = {
"common": {
"export_dir": tmpdir
}
}
run_module(params)
# 9 days in test data, so should be 9 days of unsmoothed and 3 days for smoothed
expected_num_files = len(GEOS) * len(SIGNALS) * 9 + len(GEOS) * len(SIGNALS) * 3
assert len(os.listdir(tmpdir)) == expected_num_files
@freeze_time("2020-02-03")
@patch("delphi_hhs.run.create_export_csv")
@patch("delphi_epidata.Epidata.covid_hosp")
def test_ignore_last_range_no_results(mock_covid_hosp, mock_export):
mock_covid_hosp.side_effect = [
{"result": 1,
"epidata":
{"state": ["placeholder"],
"date": ["20200101"],
"previous_day_admission_adult_covid_confirmed": [0],
"previous_day_admission_adult_covid_suspected": [0],
"previous_day_admission_pediatric_covid_confirmed": [0],
"previous_day_admission_pediatric_covid_suspected": [0],
"previous_day_admission_influenza_confirmed": [0]
}
},
{"result": -2, "message": "no results"}
]
mock_export.return_value = None
params = {
"common": {
"export_dir": "./receiving"
}
}
assert not run_module(params) # function should not raise value error and has no return value
|
import textwrap
sample_text = "fasdfsa' f sf af asfsadfsadfas as sdf asfsdf sdafsadfs s sa fsadf sadfsadasfdsadfsd f sdfasd fsd"
# Wrap this text.
wrapper = textwrap.TextWrapper(width=20)
word_list = wrapper.wrap(text=sample_text)
# Print each line.
for i, element in enumerate(word_list):
print(i, element)
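# textwrap.fill() gives the same wrapping as a single newline-joined string.
print(textwrap.fill(sample_text, width=20))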
|
import functools
from typing import TypeVar, Union, Callable, Any, List, Optional
from purpleserver.core import datatypes
from purpleserver.core import serializers
T = TypeVar('T')
def identity(value: Union[Any, Callable]) -> T:
"""
:param value: function or value desired to be wrapped
:return: value or callable return
"""
return value() if callable(value) else value
def post_processing(methods: List[str] = None):
    def class_wrapper(klass):
        setattr(klass, 'post_process_functions', getattr(klass, 'post_process_functions', None) or [])
        for name in methods:
            method = getattr(klass, name)

            # Bind `method` as a default argument so each wrapper keeps its own target
            # method instead of all wrappers closing over the last value of the loop.
            @functools.wraps(method)
            def wrapper(*args, method=method, **kwargs):
                result = method(*args, **kwargs)
                processes = klass.post_process_functions
                context = kwargs.get('context')
                return functools.reduce(
                    lambda cummulated_result, process: process(context, cummulated_result), processes, result
                )
            setattr(klass, name, wrapper)
        return klass
    return class_wrapper
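
# A minimal usage sketch (hypothetical names, for illustration only): every function
# registered in `post_process_functions` is chained, in order, over the wrapped
# method's return value.
#
# @post_processing(methods=['create'])
# class ShipmentService:
#     post_process_functions = [lambda context, result: {**result, "processed": True}]
#
#     def create(self, payload: dict, context=None) -> dict:
#         return payload
#
# ShipmentService().create({"id": "123"}, context=None)  # -> {"id": "123", "processed": True}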
def upper(value_str: Optional[str]) -> Optional[str]:
if value_str is None:
return None
return value_str.upper().replace('_', ' ')
def compute_tracking_status(details: datatypes.Tracking) -> serializers.TrackerStatus:
if details.delivered:
return serializers.TrackerStatus.delivered
elif (len(details.events) == 0) or (len(details.events) == 1 and details.events[0].code == 'CREATED'):
return serializers.TrackerStatus.pending
return serializers.TrackerStatus.in_transit
|
# Analyze distribution of RGZ counterparts in WISE color-color space
#
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
paper_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/radiogalaxyzoo/paper'
from astropy.io import fits
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
from numpy import ma
from scipy.ndimage.filters import gaussian_filter
wise_snr = 5.0
def compare_density():
# WISE All-sky sample
#
filenames = ['%s/%s.fits' % (rgz_dir,x) for x in ('wise_allsky_2M','gurkan/gurkan_all','rgz_75_wise')]
labels = ('WISE all-sky sources','Gurkan+14 radio galaxies','RGZ 75% radio galaxies')
print ''
for fname,label in zip(filenames,labels):
with fits.open(fname) as f:
d = f[1].data
        # Restrict the RGZ-WISE matches to 75% consensus and apply the SNR cut
        if label == 'RGZ 75% radio galaxies':
            rgz75 = d['ratio'] >= 0.75
            snr_w1 = d['snr1'] >= wise_snr
            snr_w2 = d['snr2'] >= wise_snr
            snr_w3 = d['snr3'] >= wise_snr
            d = d[rgz75 & snr_w1 & snr_w2 & snr_w3]
w1 = d['w1mpro']
w2 = d['w2mpro']
w3 = d['w3mpro']
w4 = d['w4mpro']
x = w2-w3
y = w1-w2
# AGN wedge is INCORRECTLY cited in Gurkan+14; check original Mateos+12 for numbers
#
wedge_lims = (y > -3.172*x + 7.624) & (y > (0.315*x - 0.222)) & (y < (0.315*x + 0.796))
#
# Very rough loci from Wright et al. (2010)
stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)
agn_frac = wedge_lims.sum()/float(len(d))
stars_frac = stars_lims.sum()/float(len(d))
el_frac = el_lims.sum()/float(len(d))
sp_frac = sp_lims.sum()/float(len(d))
print 'Fraction of %25s in AGN wedge: %4.1f percent' % (label,agn_frac*100)
print 'Fraction of %25s in stars locus: %4.1f percent' % (label,stars_frac*100)
print 'Fraction of %25s in elliptical locus: %4.1f percent' % (label,el_frac*100)
print 'Fraction of %25s in spiral locus: %4.1f percent' % (label,sp_frac*100)
print ''
print ''
'''
# Make empty arrays for TOPCAT
#
xb,yb = 2.250,0.487
xt,yt = 1.958,1.413
xab = np.linspace(xb,6,100)
xat = np.linspace(xt,6,100)
xal = np.linspace(xb,xt,100)
yab = 0.315*xab - 0.222
yat = 0.315*xat + 0.796
yal =-3.172*xal + 7.624
xall = np.append(xab,np.append(xat,xal))
yall = np.append(yab,np.append(yat,yal))
with open('%s/csv/agn_wedge.csv' % rgz_dir,'w') as f:
for x,y in zip(xall,yall):
print >> f,x,y
'''
# Bin data and look at differences?
#
with fits.open(filenames[0]) as f:
wise = f[1].data
with fits.open(filenames[2]) as f:
rgz = f[1].data
bins_w2w3 = np.linspace(-1,7,25)
bins_w1w2 = np.linspace(-0.5,3,25)
hw,xedges,yedges = np.histogram2d(wise['w2mpro']-wise['w3mpro'],wise['w1mpro']-wise['w2mpro'],bins=(bins_w2w3,bins_w1w2))
hr,xedges,yedges = np.histogram2d(rgz['w2mpro']-rgz['w3mpro'],rgz['w1mpro']-rgz['w2mpro'],bins=(bins_w2w3,bins_w1w2))
from matplotlib import pyplot as plt
from matplotlib import cm
fig = plt.figure(1,(10,5))
fig.clf()
hw_norm = hw/float(np.max(hw))
hr_norm = hr/float(np.max(hr))
from numpy import ma
hw_norm_masked = ma.masked_array(hw_norm,mask=(hw <= 10))
hr_norm_masked = ma.masked_array(hr_norm,mask=(hr <= 10))
extent = [bins_w2w3[0],bins_w2w3[-1],bins_w1w2[0],bins_w1w2[-1]]
ax1 = fig.add_subplot(121)
cmap = cm.jet
cmap.set_bad('w')
im1 = ax1.imshow(hw_norm_masked.T, alpha=1.0, extent=extent, vmin = 0., vmax = 1., interpolation='nearest', origin='lower')
ax1.set_title('WISE All-Sky')
ax1.set_xlabel('(W2-W3)')
ax1.set_ylabel('(W1-W2)')
ax1.set_aspect('auto')
ax2 = fig.add_subplot(122)
cmap = cm.jet
im2 = ax2.imshow(hr_norm_masked.T, alpha=1.0, extent=extent,vmin = 0., vmax = 1., interpolation='nearest', origin='lower')
ax2.set_title('RGZ 75%')
ax2.set_xlabel('(W2-W3)')
ax2.set_aspect('auto')
position=fig.add_axes([0.92,0.1,0.02,0.80])
cb = plt.colorbar(im2,cax=position,orientation='vertical')
cb.set_label('Normalized ratio',fontsize=16)
'''
ax3 = fig.add_subplot(133)
cmap = cm.jet
im3 = ax3.imshow((np.log10(hr_norm/hw_norm)).T, alpha=1.0, extent=extent,interpolation='nearest', origin='lower')
ax3.set_title('RGZ/WISE ratio')
ax3.set_aspect('auto')
position=fig.add_axes([0.92,0.1,0.02,0.80])
cb = plt.colorbar(im3,cax=position,orientation='vertical')
cb.set_label('log(ratio)',fontsize=16)
'''
#plt.show()
fig.savefig('%s/wise_rgz_fractions.png' % rgz_dir)
return None
def wise_rgz_gurkan():
plt.ion()
# WISE All-sky sample
#
filenames = ['%s/fits/%s.fits' % (rgz_dir,x) for x in ('wise_allsky_2M','gurkan_all','rgz_75_wise')]
labels = ('WISE all-sky sources','Gurkan+14 radio galaxies','RGZ 75% radio galaxies')
print ''
for fname,label in zip(filenames,labels):
with fits.open(fname) as f:
d = f[1].data
# Restrict the RGZ-WISE matches to 75% consensus
if label == 'RGZ 75% radio galaxies':
d = f[1].data
rgz75 = d['ratio'] >= 0.75
snr_w1 = d['snr1'] >= wise_snr
snr_w2 = d['snr2'] >= wise_snr
snr_w3 = d['snr3'] >= wise_snr
d = d[rgz75 & snr_w1 & snr_w2 & snr_w3]
w1 = d['w1mpro']
w2 = d['w2mpro']
w3 = d['w3mpro']
w4 = d['w4mpro']
x = w2-w3
y = w1-w2
# AGN wedge is INCORRECTLY cited in Gurkan+14; check original Mateos+12 for numbers
#
wedge_lims = (y > -3.172*x + 7.624) & (y > (0.315*x - 0.222)) & (y < (0.315*x + 0.796))
#
# Very rough loci from Wright et al. (2010)
stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)
agn_frac = wedge_lims.sum()/float(len(d))
stars_frac = stars_lims.sum()/float(len(d))
el_frac = el_lims.sum()/float(len(d))
sp_frac = sp_lims.sum()/float(len(d))
print 'Fraction of %25s in AGN wedge: %4.1f percent' % (label,agn_frac*100)
print 'Fraction of %25s in stars locus: %4.1f percent' % (label,stars_frac*100)
print 'Fraction of %25s in elliptical locus: %4.1f percent' % (label,el_frac*100)
print 'Fraction of %25s in spiral locus: %4.1f percent' % (label,sp_frac*100)
print ''
print ''
# Bin data and look at differences?
#
with fits.open(filenames[0]) as f:
d = f[1].data
maglim_w1 = d['w1snr'] > wise_snr
maglim_w2 = d['w2snr'] > wise_snr
maglim_w3 = d['w3snr'] > wise_snr
wise = d[maglim_w1 & maglim_w2 & maglim_w3]
with fits.open(filenames[2]) as f:
d = f[1].data
rgz75 = d['ratio'] >= 0.75
snr_w1 = d['snr1'] >= wise_snr
snr_w2 = d['snr2'] >= wise_snr
snr_w3 = d['snr3'] >= wise_snr
rgz = d[rgz75 & snr_w1 & snr_w2 & snr_w3]
xmin,xmax = -1,6
ymin,ymax = -0.5,3
bins_w2w3 = np.linspace(xmin,xmax,40)
bins_w1w2 = np.linspace(ymin,ymax,40)
hw,xedges,yedges = np.histogram2d(wise['w2mpro']-wise['w3mpro'],wise['w1mpro']-wise['w2mpro'],bins=(bins_w2w3,bins_w1w2))
hr,xedges,yedges = np.histogram2d(rgz['w2mpro']-rgz['w3mpro'],rgz['w1mpro']-rgz['w2mpro'],bins=(bins_w2w3,bins_w1w2))
fig = plt.figure(1,(9,8))
fig.clf()
hw_norm = hw/float(np.max(hw))
hr_norm = hr/float(np.max(hr))
hw_norm_masked = ma.masked_array(hw,mask=(hw < 10))
hr_norm_masked = ma.masked_array(hr_norm,mask=(hr <= 10))
extent = [bins_w2w3[0],bins_w2w3[-1],bins_w1w2[0],bins_w1w2[-1]]
ax1 = fig.add_subplot(111,position=(0.10,0.10,0.75,0.85))
# WISE all-sky
cmap = cm.cubehelix_r
cmap.set_bad('w')
Z = hw_norm_masked
im1 = ax1.imshow(Z.T, cmap=cmap, alpha=1.0, extent=extent, interpolation='nearest', origin='lower', norm=LogNorm())
'''
fi = gaussian_filter(hw.T,0.5)
levels=np.linspace(10,20000,10)
CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='r',linewidths=1)
'''
# RGZ 75% catalog
fi = gaussian_filter(hr.T,0.5)
levels=np.linspace(3,50,8)
CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='k',linewidths=1.5)
CS.collections[0].set_label('RGZ 75%')
# Gurkan
with fits.open(filenames[1]) as f:
gurkan = f[1].data
ax1.scatter(gurkan['w2mpro']-gurkan['w3mpro'],gurkan['w1mpro']-gurkan['w2mpro'],color='g',s=10,label='PRGs (Gurkan+14)')
xb,yb = 2.250,0.487
xt,yt = 1.958,1.413
xab = np.linspace(xb,6,100)
xat = np.linspace(xt,6,100)
xal = np.linspace(xb,xt,100)
yab = 0.315*xab - 0.222
yat = 0.315*xat + 0.796
yal =-3.172*xal + 7.624
ax1.plot(xab,yab,color='r',linestyle='--',label='AGN "wedge"')
ax1.plot(xat,yat,color='r',linestyle='--')
ax1.plot(xal,yal,color='r',linestyle='--')
ax1.set_xlabel(r'$(W2-W3)$',fontsize=20)
ax1.set_ylabel(r'$(W1-W2)$',fontsize=20)
ax1.set_xlim(xmin,xmax)
ax1.set_ylim(ymin,ymax)
ax1.set_aspect('auto')
cb_position=fig.add_axes([0.88,0.1,0.02,0.85])
cb = plt.colorbar(im1,cax=cb_position,orientation='vertical')
cb.set_label('WISE all-sky sources',fontsize=16)
h,l = ax1.get_legend_handles_labels()
ax1.legend(h,l,loc='upper left',scatterpoints=2)
plt.show()
# Measure number of galaxies in the new loci
#
locus1 = ((rgz['w1mpro'] - rgz['w2mpro']) > -0.2) & ((rgz['w1mpro'] - rgz['w2mpro']) < 0.3) & ((rgz['w2mpro'] - rgz['w3mpro']) > -0.2) & ((rgz['w2mpro'] - rgz['w3mpro']) < 1.0)
locus2 = ((rgz['w1mpro'] - rgz['w2mpro']) > 0.1) & ((rgz['w1mpro'] - rgz['w2mpro']) < 0.5) & ((rgz['w2mpro'] - rgz['w3mpro']) > 3.5) & ((rgz['w2mpro'] - rgz['w3mpro']) < 4.8)
locus3 = ((rgz['w1mpro'] - rgz['w2mpro']) > 0.8) & ((rgz['w1mpro'] - rgz['w2mpro']) < 1.5) & ((rgz['w2mpro'] - rgz['w3mpro']) > 2.2) & ((rgz['w2mpro'] - rgz['w3mpro']) < 3.6)
print 'Locus 1 (stars): %i, %.1f' % (locus1.sum(),locus1.sum() / float(len(rgz))*100)
print 'Locus 2 (LIRGs): %i, %.1f' % (locus2.sum(),locus2.sum() / float(len(rgz))*100)
print 'Locus 3 (QSOs): %i, %.1f' % (locus3.sum(),locus3.sum() / float(len(rgz))*100)
fig.savefig('%s/figures/wise_colorcolor_sn5.eps' % paper_dir)
return None
def wise_rgz_gurkan_lowsn():
plt.ion()
# WISE All-sky sample
#
filenames = ['%s/%s.fits' % (rgz_dir,x) for x in ('wise_allsky_2M','gurkan/gurkan_all','rgz_75_wise_16jan')]
labels = ('WISE all-sky sources','Gurkan+14 radio galaxies','RGZ 75% radio galaxies')
print ''
for fname,label in zip(filenames,labels):
with fits.open(fname) as f:
d = f[1].data
# Restrict the RGZ-WISE matches to 75% consensus
if label == 'RGZ 75% radio galaxies':
rgz75 = d['ratio'] >= 0.75
snr_w1 = d['snr1'] >= wise_snr
snr_w2 = d['snr2'] >= wise_snr
snr_w3 = d['snr3'] >= wise_snr
d = d[np.logical_not(rgz75 & snr_w1 & snr_w2 & snr_w3)]
w1 = d['w1mpro']
w2 = d['w2mpro']
w3 = d['w3mpro']
w4 = d['w4mpro']
x = w2-w3
y = w1-w2
# AGN wedge is INCORRECTLY cited in Gurkan+14; check original Mateos+12 for numbers
#
wedge_lims = (y > -3.172*x + 7.624) & (y > (0.315*x - 0.222)) & (y < (0.315*x + 0.796))
#
# Very rough loci from Wright et al. (2010)
stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)
agn_frac = wedge_lims.sum()/float(len(d))
stars_frac = stars_lims.sum()/float(len(d))
el_frac = el_lims.sum()/float(len(d))
sp_frac = sp_lims.sum()/float(len(d))
print 'Fraction of %25s in AGN wedge: %4.1f percent' % (label,agn_frac*100)
print 'Fraction of %25s in stars locus: %4.1f percent' % (label,stars_frac*100)
print 'Fraction of %25s in elliptical locus: %4.1f percent' % (label,el_frac*100)
print 'Fraction of %25s in spiral locus: %4.1f percent' % (label,sp_frac*100)
print ''
print ''
# Bin data and look at differences?
#
with fits.open(filenames[0]) as f:
d = f[1].data
maglim_w1 = d['snr1'] > wise_snr
maglim_w2 = d['snr2'] > wise_snr
maglim_w3 = d['snr3'] < wise_snr
wise = d[maglim_w1 & maglim_w2 & maglim_w3]
with fits.open(filenames[2]) as f:
d = f[1].data
rgz75 = d['ratio'] >= 0.75
snr_w1 = d['snr1'] >= wise_snr
snr_w2 = d['snr2'] >= wise_snr
snr_w3 = d['snr3'] <= wise_snr
rgz = d[rgz75 & snr_w1 & snr_w2 & snr_w3]
xmin,xmax = -1,6
ymin,ymax = -0.5,3
bins_w2w3 = np.linspace(xmin,xmax,40)
bins_w1w2 = np.linspace(ymin,ymax,40)
hw,xedges,yedges = np.histogram2d(wise['w2mpro']-wise['w3mpro'],wise['w1mpro']-wise['w2mpro'],bins=(bins_w2w3,bins_w1w2))
hr,xedges,yedges = np.histogram2d(rgz['w2mpro']-rgz['w3mpro'],rgz['w1mpro']-rgz['w2mpro'],bins=(bins_w2w3,bins_w1w2))
fig = plt.figure(1,(9,8))
fig.clf()
hw_norm = hw/float(np.max(hw))
hr_norm = hr/float(np.max(hr))
hw_norm_masked = ma.masked_array(hw,mask=(hw < 10))
hr_norm_masked = ma.masked_array(hr_norm,mask=(hr <= 10))
extent = [bins_w2w3[0],bins_w2w3[-1],bins_w1w2[0],bins_w1w2[-1]]
ax1 = fig.add_subplot(111,position=(0.10,0.10,0.75,0.85))
# WISE all-sky
cmap = cm.YlOrRd
cmap.set_bad('w')
Z = hw_norm_masked
im1 = ax1.imshow(Z.T, cmap=cmap, alpha=1.0, extent=extent, interpolation='nearest', origin='lower')
'''
fi = gaussian_filter(hw.T,0.5)
levels=np.linspace(10,20000,10)
CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='r',linewidths=1)
'''
# RGZ 75% catalog
fi = gaussian_filter(hr.T,0.5)
levels=np.linspace(3,hr.max(),10)
CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='b',linewidths=1.5)
CS.collections[0].set_label('RGZ 75%')
# Gurkan
with fits.open(filenames[1]) as f:
gurkan = f[1].data
ax1.scatter(gurkan['w2mpro']-gurkan['w3mpro'],gurkan['w1mpro']-gurkan['w2mpro'],color='g',s=10,label='PRGs (Gurkan+14)')
xb,yb = 2.250,0.487
xt,yt = 1.958,1.413
xab = np.linspace(xb,6,100)
xat = np.linspace(xt,6,100)
xal = np.linspace(xb,xt,100)
yab = 0.315*xab - 0.222
yat = 0.315*xat + 0.796
yal =-3.172*xal + 7.624
ax1.plot(xab,yab,color='k',linestyle='--',label='AGN "wedge"')
ax1.plot(xat,yat,color='k',linestyle='--')
ax1.plot(xal,yal,color='k',linestyle='--')
ax1.set_xlabel(r'$(W2-W3)$',fontsize=20)
ax1.set_ylabel(r'$(W1-W2)$',fontsize=20)
ax1.set_xlim(xmin,xmax)
ax1.set_ylim(ymin,ymax)
ax1.set_aspect('auto')
cb_position=fig.add_axes([0.88,0.1,0.02,0.85])
cb = plt.colorbar(im1,cax=cb_position,orientation='vertical')
cb.set_label('WISE all-sky sources',fontsize=16)
h,l = ax1.get_legend_handles_labels()
ax1.legend(h,l,loc='upper left',scatterpoints=2)
plt.show()
fig.savefig('%s/figures/wise_colorcolor_lowsn.eps' % paper_dir)
return None
wise_rgz_gurkan()
|
# -*- coding: utf-8 -*-
import math
# Exercise 6 ---------------------------------
rad = float(input('Please enter the radius of the circle: '))
circ = 2 * rad * math.pi
area = rad**2 * math.pi
print('\nA circle with radius', rad, 'has: \n circumference equal to', circ, '\n area equal to', area)
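# Quick check: for a radius of 1.0 the circumference is 2*pi ~= 6.2832 and the area is pi ~= 3.1416.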
|
from PIL import ImageTk
from tkinter import END
from PIL import Image
from cv2 import imread, imwrite
import processImage as pI
import os
import popupWindows as pW
from tkinter.colorchooser import askcolor
exePath = os.getcwd()
def readScale(self):
# Disable manual checkbox
self.parent.ch1.config(state='disable')
# Update progress bar to 0
valueStateChanger(self, self.p_bar, 0)
# Open image
self.img = imread(self.parent.files[self.parent.i - 1])
# Update progress bar to 25 and update GUI
valueStateChanger(self, self.p_bar, 25)
    # Send the image to pI.getBar and obtain information about the white bar in SEM images
self.crop_img, self.bar_img, barSize = pI.getBar(self.img)
if barSize != 0:
        # Update entry widgets with the values obtained
valueStateChanger(self, self.e3, round(barSize))
# Update progress bar to 50 and update GUI
valueStateChanger(self, self.p_bar, 50)
        # Save the white bar image, resize it (for better tesseract readability), and read it back in
height, width, channels = self.bar_img.shape
imwrite(exePath + "\\images\\HoldImages\\bar.tif", self.bar_img)
img = Image.open(exePath + "\\images\\HoldImages\\bar.tif")
img1 = img.resize((width * 3, height * 3), Image.ANTIALIAS)
img1.save(exePath + "\\images\\HoldImages\\resize_im.tif", dpi=(600, 600), quality=100)
self.bar_img_res = imread(exePath + "\\images\\HoldImages\\resize_im.tif")
        # Measure the scale bar (in pixels) from the white bar image
scale = pI.getScale(self.bar_img)
if scale is not None:
self.scale = len(scale)
# Update entry widgets with values obtained
valueStateChanger(self, self.parent.e2, self.scale)
# Update progress bar to 75 and update GUI
valueStateChanger(self, self.p_bar, 75)
            # Get the scale number and its units
self.scaleNumb, self.units = pI.getNumber(self.bar_img, self.bar_img_res, exePath)
# Update entry widgets with values obtained
valueStateChanger(self, self.e1, self.scaleNumb)
# Update combobox widgets (units) with value obtained
valueStateChanger(self, self.c1, self.units)
# Update progress bar to 100 and update GUI
valueStateChanger(self, self.p_bar, 100)
# If manual checkbox is checked return widgets to normal state
if self.parent.var.get() == 1:
widgets = [self.e1, self.parent.e2, self.e3]
for wg in widgets:
wg.configure(state='normal')
self.c1.configure(state="readonly")
self.c2.configure(state="readonly")
self.parent.ch1.config(state='normal')
elif self.parent.save < 1:
# If scale bar is not found, user must use manual
self.parent.ch1.config(state='normal')
pW.Error(self, "Scale Bar (usually on top of scale number) could not be determined. Use manual instead.",
"error", "no")
valueStateChanger(self, self.p_bar, 0)
# If the program is processing several files don't show error window
elif self.parent.save < 1:
        # If a white bar is not found, the user must use manual mode
self.parent.ch1.config(state='normal')
pW.Error(self, "White Bar (usually where scale is) could not be determined. Use manual instead.", "error",
"no")
valueStateChanger(self, self.p_bar, 0)
def preview(self):
self.choice = None
# Check if an image was imported
if not hasattr(self.parent, 'img3open'):
pW.Error(self, "Please import a image first.", "error", "no")
return 0
    # Check that all parameters are filled; the code only continues if the contrast is
    # at least 7 or the user explicitly chooses to ignore the warning
if self.e1.get() == '' or self.parent.e2.get() == '' or self.e3.get() == '':
pW.Error(self, "Please have all parameters with values, or click Read Scale.", "warning", "no")
return 0
elif self.contrast < 7 and self.parent.save == 0:
errormessage = "Contrast is less than 7. This means that visibility/readability can be compromised. \n " \
"We sugest a contrast higher than 7 for better scale color set :)"
error = pW.Error(self, errormessage, "warning", "yes")
error.wait_window()
if self.choice != "ignore":
return 0
self.img = imread(self.parent.files[self.parent.i - 1])
# Check if target value was specified
if self.parent.var.get() == 0:
self.targetValue = 0
self.targetUnit = ''
elif self.e4.get() != "":
self.targetValue = int(self.e4.get())
self.targetUnit = self.c2.get()
else:
self.targetValue = 0
self.targetUnit = ''
# Check the new scale position
position_dict = {"Bottom Left": 0, "Bottom Right": 1, "Top Left": 2, "Top Right": 3}
self.position = position_dict[self.c3.get()]
# Get parameters: Scale number, Scale units, Number of pixels, Size of the new scale.
self.scale = int(self.parent.e2.get())
self.sizeOfScale = int(self.spin.get())
self.scaleNumb = int(self.e1.get())
self.units = self.c1.get()
    # Convert the scale colours from lists of floats to tuples of integers
self.bgColour = tuple([int(i) for i in self.bgColour_rgb])
self.ftColour = tuple([int(i) for i in self.ftColour_rgb])
# Check if crop is from top or from bottom
self.cropbeg = self.c4.get()
# Obtain image without white bar
self.crop_img = pI.cropImage(self.img, int(self.e3.get()), self.cropbeg)
# Draw scale in cropped image
self.finalImage = pI.drawScale(self.crop_img, self.scale, int(self.scaleNumb), self.units,
exePath, self.position, self.sizeOfScale, self.ftColour,
self.bgColour, self.targetValue, self.targetUnit)
if isinstance(self.finalImage, str):
message = "Value of target value to high. For the scale number and pixels provided, " + self.finalImage
pW.Error(self, message, "error", "no")
return 0
self.parent.img4open = self.finalImage
# Resize image
self.parent.img4 = ImageTk.PhotoImage(
self.parent.img4open.resize((int(self.parent.panel2.winfo_width()) - 5,
int(self.parent.panel2.winfo_height()) - 5), Image.ANTIALIAS))
# Put image on canvas
self.parent.panel2.itemconfig(self.parent.image_on_panel2, image=self.parent.img4)
def manual(self):
# Change widgets from disabled to normal
widgets = [self.e1, self.parent.e2, self.e3, self.e4, self.b5]
if self.parent.var.get() == 1:
for wg in widgets:
wg.configure(state='normal')
self.c1.configure(state="readonly")
self.c2.configure(state="readonly")
else:
for wg in widgets:
wg.configure(state='disabled')
def contrastChecker(self, rgb, rgb1):
# Calculates the contrast between the font and background color chosen
# For more information: https://www.w3.org/TR/WCAG20-TECHS/G17#G17-procedure
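    # Formula implemented below: each sRGB channel c in [0, 255] is linearized as
    # (c/255)/12.92 if c/255 <= 0.03928, else (((c/255) + 0.055)/1.055)**2.4; relative
    # luminance is L = 0.2126*R + 0.7152*G + 0.0722*B of the linearized channels; the
    # contrast ratio is (L_lighter + 0.05)/(L_darker + 0.05), and WCAG AAA asks for >= 7.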
lumi = [0, 0]
rgb_list = [rgb, rgb1]
rgb_math = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
for j in range(0, 2):
for i in range(0, 3):
temp = rgb_list[j][i] / 255.0
if temp <= 0.03928:
rgb_math[j][i] = temp / 12.92
else:
rgb_math[j][i] = ((temp + 0.055) / 1.055) ** 2.4
lumi[j] = 0.2126 * rgb_math[j][0] + 0.7152 * rgb_math[j][1] + 0.0722 * rgb_math[j][2]
if lumi[0] > lumi[1]:
self.contrast = (lumi[0] + 0.05) / (lumi[1] + 0.05)
else:
self.contrast = (lumi[1] + 0.05) / (lumi[0] + 0.05)
self.text.set("Contrast = %.2f" % self.contrast)
if self.contrast >= 7:
self.l11.config(bg="#008000")
else:
self.l11.config(bg="#FF0000")
def chooseColour(self, pick):
    # Check whether the background colour or the font colour is being set (pick == "bg" means background)
if pick == "bg":
# Window to choose color
self.bgColour = askcolor()
# askcolor returns a list with the color rgb and hex codes [[rgb], hex]
if self.bgColour[0] is not None:
self.bgColour_rgb = list(self.bgColour[0])
# Change label background color
self.l10.config(bg=self.bgColour[1])
# Calculate contrast
contrastChecker(self, self.bgColour_rgb, self.ftColour_rgb)
else:
self.ftColour = askcolor()
if self.ftColour[0] is not None:
self.ftColour_rgb = list(self.ftColour[0])
self.l10.config(fg=self.ftColour[1])
contrastChecker(self, self.bgColour_rgb, self.ftColour_rgb)
def reset(self):
# Resets GUI to original state
self.parent.i = 1
for j in range(1, len(self.parent.files) + 1):
self.parent.img3open.close()
os.remove(self.parent.files[j - 1])
valueStateChanger(self, self.p_bar, 0)
widgets = [self.e1, self.parent.e2, self.e3, self.e4]
for wg in widgets:
wg.configure(state='normal')
wg.delete(0, END)
wg.configure(state='disabled')
self.b5.configure(state='disabled')
self.c1.current(1)
self.c2.current(1)
self.parent.var.set(0)
self.c1.configure(state='disabled')
self.c2.configure(state='disabled')
self.parent.img3open.close()
if hasattr(self.parent, "img4open"):
self.parent.img4open.close()
del self.parent.img4open
del self.parent.img4
self.parent.panel2.delete("IMG2")
del self.parent.img3open
del self.parent.img3
self.parent.panel.delete("IMG1")
self.parent.image_on_panel = self.parent.panel.create_image(0, 0, anchor='nw', image=self.parent.img1,
tags="IMG1")
self.parent.image_on_panel2 = self.parent.panel.create_image(0, 0, anchor='nw', image=self.parent.img2,
tags="IMG2")
self.update_idletasks()
def valueStateChanger(self, widget, value):
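    # Tk widget path names contain the widget class (e.g. ".!entry", ".!combobox",
    # ".!progressbar" for ttk widgets), so str(widget) is used to dispatch on the
    # widget type without importing the widget classes here.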
str_widget = str(widget)
if str_widget.find("entry") > -1:
widget.configure(state='normal')
widget.delete(0, END)
widget.insert(END, value)
widget.configure(state='disabled')
elif str_widget.find("combobox") > -1:
widget.current(value)
elif str_widget.find("progressbar") > -1:
widget['value'] = value
self.update_idletasks()
|
import core.utils as utils
def test_should_get_domain(db):
assert utils.get_domain() == 'http://example.com'
def test_should_get_domain_raise_error(db, mocker):
mock = mocker.patch.object(utils, 'Site')
mock.objects.get_current.side_effect = Exception
assert utils.get_domain() == ''
|
import pytest
from zoo.datacenters import utils as uut
@pytest.mark.parametrize(
"email,expected",
[
("jon.doe@kiwi.com", "Jon Doe"),
("platform@kiwi.com", "Platform"),
("something", "Something"),
],
)
def test_email_to_full_name(email, expected):
assert uut.email_to_full_name(email) == expected
|
# Generated by Django 3.0.2 on 2020-02-13 03:47
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AdminUpdate',
fields=[
('admin_id', models.AutoField(primary_key=True, serialize=False)),
('fullname', models.CharField(max_length=50)),
('email', models.CharField(max_length=100)),
('phone', models.CharField(max_length=15)),
('position', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Applying',
fields=[
('app_id', models.AutoField(primary_key=True, serialize=False)),
('date', models.DateField(default=datetime.datetime.now)),
('firstname', models.CharField(default='', max_length=50)),
('lastname', models.CharField(default='', max_length=50)),
('phone', models.CharField(default='', max_length=15)),
('email', models.EmailField(default='', max_length=50)),
('position', models.CharField(default='', max_length=50)),
('linkedin', models.CharField(default='', max_length=150)),
('github', models.CharField(default='', max_length=150)),
('portfolio', models.CharField(default='', max_length=150)),
('other', models.CharField(default='', max_length=150)),
('twitter', models.CharField(default='', max_length=150)),
('aspiration', models.CharField(default='', max_length=500)),
('skills', models.CharField(default='', max_length=500)),
('project', models.CharField(default='', max_length=500)),
('techstack', models.CharField(default='', max_length=500)),
('education', models.CharField(default='', max_length=500)),
('availablity', models.CharField(default='', max_length=500)),
('protfoliolink', models.CharField(default='', max_length=500)),
('opensourcecommit', models.CharField(default='', max_length=1000)),
('resume', models.CharField(default='', max_length=1000)),
],
),
migrations.CreateModel(
name='SelectIntern1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_id', models.IntegerField()),
('task1name', models.CharField(default='', max_length=500)),
('task1link', models.CharField(default='', max_length=1000)),
('applicantEmail', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='SelectIntern2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_id', models.IntegerField()),
('task2name', models.CharField(default='', max_length=500)),
('task2link', models.CharField(default='', max_length=1000)),
('applicantEmail', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='SelectIntern3',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_id', models.IntegerField()),
('task3name', models.CharField(default='', max_length=500)),
('task3link', models.CharField(default='', max_length=1000)),
('applicantEmail', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='Solution1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('solution1', models.CharField(default='', max_length=500)),
('appid', models.CharField(default='', max_length=10)),
],
),
migrations.CreateModel(
name='Solution2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('solution2', models.CharField(default='', max_length=500)),
('appid', models.CharField(default='', max_length=10)),
],
),
migrations.CreateModel(
name='Solution3',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('solution2', models.CharField(default='', max_length=500)),
('appid', models.CharField(default='', max_length=10)),
],
),
]
|
from cpselect import cpselect
|
# Generated by Django 3.2.7 on 2022-01-22 07:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20220122_1437'),
]
operations = [
migrations.AddField(
model_name='post',
name='is_editor_pick',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='post',
name='is_trending',
field=models.BooleanField(default=False),
),
]
|
"""
Credits:
Copyright (c) 2019-2022 Matej Aleksandrov, Matej Batič, Grega Milčinski, Domagoj Korais, Matic Lubej (Sinergise)
Copyright (c) 2019-2022 Žiga Lukšič, Devis Peressutti, Nejc Vesel, Jovan Višnjić, Anže Zupanc (Sinergise)
Copyright (c) 2019-2021 Beno Šircelj
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import datetime as dt
import os
import shutil
import dataclasses
from concurrent import futures
from typing import Optional, Any
import pytest
from pytest import approx
import numpy as np
from sentinelhub import BBox, CRS, DataCollection, SHConfig, Band, Unit
from eolearn.core import EOPatch, FeatureType, EOTask
from eolearn.io import (
SentinelHubDemTask,
SentinelHubEvalscriptTask,
SentinelHubInputTask,
SentinelHubSen2corTask,
get_available_timestamps,
)
@pytest.fixture(name="cache_folder")
def cache_folder_fixture():
test_dir = os.path.dirname(os.path.realpath(__file__))
cache_folder = os.path.join(test_dir, "cache_test")
if os.path.exists(cache_folder):
shutil.rmtree(cache_folder)
yield cache_folder
shutil.rmtree(cache_folder)
@dataclasses.dataclass
class IoTestCase:
name: str
task: EOTask
bbox: BBox
time_interval: tuple
feature: str = "BANDS"
feature_type: FeatureType = FeatureType.DATA
data_size: Optional[int] = None
timestamp_length: Optional[int] = None
stats: Any = None
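
# calculate_stats fingerprints a (time, height, width, band) array: it takes the
# nanmean of three fixed slices and rounds to 4 decimals, and the tests compare
# these fingerprints against reference values with pytest.approx.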
def calculate_stats(array):
time, height, width, _ = array.shape
slices = [
array[int(time / 2) :, 0, 0, :],
array[: max(int(time / 2), 1), -1, -1, :],
array[:, int(height / 2), int(width / 2), :],
]
values = [(np.nanmean(slice) if not np.isnan(slice).all() else np.nan) for slice in slices]
return np.round(np.array(values), 4)
@pytest.mark.sh_integration
class TestProcessingIO:
"""Test cases for SentinelHubInputTask"""
size = (99, 101)
bbox = BBox(bbox=[268892, 4624365, 268892 + size[0] * 10, 4624365 + size[1] * 10], crs=CRS.UTM_33N)
time_interval = ("2017-12-15", "2017-12-30")
maxcc = 0.8
time_difference = dt.timedelta(minutes=60)
max_threads = 3
def test_s2l1c_float32_uint16(self, cache_folder):
task = SentinelHubInputTask(
bands_feature=(FeatureType.DATA, "BANDS"),
additional_data=[(FeatureType.MASK, "dataMask")],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L1C,
max_threads=self.max_threads,
cache_folder=cache_folder,
)
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
bands = eopatch[(FeatureType.DATA, "BANDS")]
is_data = eopatch[(FeatureType.MASK, "dataMask")]
assert calculate_stats(bands) == approx([0.0233, 0.0468, 0.0252])
width, height = self.size
assert bands.shape == (4, height, width, 13)
assert is_data.shape == (4, height, width, 1)
assert len(eopatch.timestamp) == 4
assert bands.dtype == np.float32
assert os.path.exists(cache_folder)
        # change the task's bands_dtype and run it again
task.bands_dtype = np.uint16
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
bands = eopatch[(FeatureType.DATA, "BANDS")]
assert calculate_stats(bands) == approx([232.5769, 467.5385, 251.8654])
assert bands.dtype == np.uint16
def test_specific_bands(self):
task = SentinelHubInputTask(
bands_feature=(FeatureType.DATA, "BANDS"),
bands=["B01", "B02", "B03"],
additional_data=[(FeatureType.MASK, "dataMask")],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L1C,
max_threads=self.max_threads,
)
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
bands = eopatch[(FeatureType.DATA, "BANDS")]
assert calculate_stats(bands) == approx([0.0648, 0.1193, 0.063])
width, height = self.size
assert bands.shape == (4, height, width, 3)
def test_scl_only(self):
"""Download just SCL, without any other bands"""
task = SentinelHubInputTask(
bands_feature=None,
additional_data=[(FeatureType.DATA, "SCL")],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L2A,
max_threads=self.max_threads,
)
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
scl = eopatch[(FeatureType.DATA, "SCL")]
width, height = self.size
assert scl.shape == (4, height, width, 1)
def test_single_scene(self):
"""Download S2L1C bands and dataMask"""
task = SentinelHubInputTask(
bands_feature=(FeatureType.DATA, "BANDS"),
additional_data=[(FeatureType.MASK, "dataMask")],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L1C,
max_threads=self.max_threads,
single_scene=True,
mosaicking_order="leastCC",
)
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
bands = eopatch[(FeatureType.DATA, "BANDS")]
is_data = eopatch[(FeatureType.MASK, "dataMask")]
width, height = self.size
assert bands.shape == (1, height, width, 13)
assert is_data.shape == (1, height, width, 1)
assert len(eopatch.timestamp) == 1
def test_additional_data(self):
task = SentinelHubInputTask(
bands_feature=(FeatureType.DATA, "BANDS"),
bands=["B01", "B02", "B05"],
additional_data=[
(FeatureType.MASK, "dataMask", "IS_DATA"),
(FeatureType.MASK, "CLM"),
(FeatureType.MASK, "SCL"),
(FeatureType.MASK, "SNW"),
(FeatureType.MASK, "CLD"),
(FeatureType.DATA, "CLP"),
(FeatureType.DATA, "viewAzimuthMean", "view_azimuth_mean"),
(FeatureType.DATA, "sunAzimuthAngles"),
(FeatureType.DATA, "sunZenithAngles"),
],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L2A,
max_threads=self.max_threads,
)
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
bands = eopatch[(FeatureType.DATA, "BANDS")]
is_data = eopatch[(FeatureType.MASK, "IS_DATA")]
clm = eopatch[(FeatureType.MASK, "CLM")]
scl = eopatch[(FeatureType.MASK, "SCL")]
snw = eopatch[(FeatureType.MASK, "SNW")]
cld = eopatch[(FeatureType.MASK, "CLD")]
clp = eopatch[(FeatureType.DATA, "CLP")]
view_azimuth_mean = eopatch[(FeatureType.DATA, "view_azimuth_mean")]
sun_azimuth_angles = eopatch[(FeatureType.DATA, "sunAzimuthAngles")]
sun_zenith_angles = eopatch[(FeatureType.DATA, "sunZenithAngles")]
assert calculate_stats(bands) == approx([0.027, 0.0243, 0.0162])
width, height = self.size
assert bands.shape == (4, height, width, 3)
assert is_data.shape == (4, height, width, 1)
assert is_data.dtype == bool
assert clm.shape == (4, height, width, 1)
assert clm.dtype == np.uint8
assert scl.shape == (4, height, width, 1)
assert snw.shape == (4, height, width, 1)
assert cld.shape == (4, height, width, 1)
assert clp.shape == (4, height, width, 1)
assert view_azimuth_mean.shape == (4, height, width, 1)
assert sun_azimuth_angles.shape == (4, height, width, 1)
assert sun_zenith_angles.shape == (4, height, width, 1)
assert len(eopatch.timestamp) == 4
def test_aux_request_args(self):
"""Download low resolution data with `PREVIEW` mode"""
task = SentinelHubInputTask(
bands_feature=(FeatureType.DATA, "BANDS"),
resolution=260,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L1C,
max_threads=self.max_threads,
aux_request_args={"dataFilter": {"previewMode": "PREVIEW"}},
)
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
bands = eopatch[(FeatureType.DATA, "BANDS")]
assert bands.shape == (4, 4, 4, 13)
assert calculate_stats(bands) == approx([0.0, 0.0493, 0.0277])
def test_dem(self):
task = SentinelHubDemTask(resolution=10, feature=(FeatureType.DATA_TIMELESS, "DEM"), max_threads=3)
eopatch = task.execute(bbox=self.bbox)
dem = eopatch.data_timeless["DEM"]
width, height = self.size
assert dem.shape == (height, width, 1)
def test_dem_cop(self):
task = SentinelHubDemTask(
data_collection=DataCollection.DEM_COPERNICUS_30,
resolution=10,
feature=(FeatureType.DATA_TIMELESS, "DEM_30"),
max_threads=3,
)
eopatch = task.execute(bbox=self.bbox)
dem = eopatch.data_timeless["DEM_30"]
width, height = self.size
assert dem.shape == (height, width, 1)
def test_dem_wrong_feature(self):
with pytest.raises(ValueError):
SentinelHubDemTask(resolution=10, feature=(FeatureType.DATA, "DEM"), max_threads=3)
def test_sen2cor(self):
task = SentinelHubSen2corTask(
sen2cor_classification=["SCL", "CLD"],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L2A,
max_threads=self.max_threads,
)
eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)
scl = eopatch[(FeatureType.MASK, "SCL")]
cld = eopatch[(FeatureType.DATA, "CLD")]
width, height = self.size
assert scl.shape == (4, height, width, 1)
assert cld.shape == (4, height, width, 1)
def test_metadata(self):
evalscript = """
//VERSION=3
function setup() {
return {
input: [{
bands:["B02","dataMask"],
units: "DN"
}],
output:[
{
id:'bands',
bands: 1,
sampleType: SampleType.UINT16
}
]
}
}
function updateOutputMetadata(scenes, inputMetadata, outputMetadata) {
outputMetadata.userData = { "metadata": JSON.stringify(scenes) }
}
function evaluatePixel(sample) {
return {
'bands': [sample.B02]
};
}
"""
task = SentinelHubEvalscriptTask(
evalscript=evalscript,
data_collection=DataCollection.SENTINEL2_L1C,
features=[(FeatureType.DATA, "bands"), (FeatureType.META_INFO, "meta_info")],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
max_threads=self.max_threads,
)
eop = task.execute(bbox=self.bbox, time_interval=self.time_interval)
width, height = self.size
assert eop.data["bands"].shape == (4, height, width, 1)
assert len(eop.meta_info["meta_info"]) == 4
def test_multi_processing(self):
task = SentinelHubInputTask(
bands_feature=(FeatureType.DATA, "BANDS"),
bands=["B01", "B02", "B03"],
additional_data=[(FeatureType.MASK, "dataMask")],
size=self.size,
maxcc=self.maxcc,
time_difference=self.time_difference,
data_collection=DataCollection.SENTINEL2_L1C,
max_threads=self.max_threads,
)
time_intervals = [
("2017-01-01", "2017-01-30"),
("2017-02-01", "2017-02-28"),
("2017-03-01", "2017-03-30"),
("2017-04-01", "2017-04-30"),
("2017-05-01", "2017-05-30"),
("2017-06-01", "2017-06-30"),
]
with futures.ProcessPoolExecutor(max_workers=3) as executor:
tasks = [executor.submit(task.execute, None, self.bbox, interval) for interval in time_intervals]
eopatches = [task.result() for task in futures.as_completed(tasks)]
array = np.concatenate([eop.data["BANDS"] for eop in eopatches], axis=0)
width, height = self.size
assert array.shape == (20, height, width, 3)
def test_get_available_timestamps_with_missing_data_collection_service_url(self):
collection = DataCollection.SENTINEL2_L1C.define_from("COLLECTION_WITHOUT_URL", service_url=None)
timestamps = get_available_timestamps(
bbox=self.bbox,
config=SHConfig(),
data_collection=collection,
time_difference=self.time_difference,
time_interval=self.time_interval,
maxcc=self.maxcc,
)
assert len(timestamps) == 4
assert all(timestamp.tzinfo is not None for timestamp in timestamps)
def test_no_data_input_task_request(self):
task = SentinelHubInputTask(
bands_feature=(FeatureType.DATA, "BANDS"),
additional_data=[(FeatureType.MASK, "dataMask")],
size=self.size,
maxcc=0.0,
data_collection=DataCollection.SENTINEL2_L1C,
)
eopatch = task.execute(bbox=self.bbox, time_interval=("2021-01-01", "2021-01-20"))
bands = eopatch[FeatureType.DATA, "BANDS"]
assert bands.shape == (0, 101, 99, 13)
masks = eopatch[FeatureType.MASK, "dataMask"]
assert masks.shape == (0, 101, 99, 1)
def test_no_data_evalscript_task_request(self):
evalscript = """
//VERSION=3
function setup() {
return {
input: [{
bands:["B02", "dataMask"],
units: "DN"
}],
output:[
{
id:'bands',
bands: 2,
sampleType: SampleType.UINT16
},
{
id:'mask',
bands: 1,
sampleType: SampleType.UINT8
}
]
}
}
function evaluatePixel(sample) {
return {
'bands': [sample.B02, sample.B02],
'mask': [sample.dataMask]
};
}
"""
task = SentinelHubEvalscriptTask(
evalscript=evalscript,
data_collection=DataCollection.SENTINEL2_L1C,
features=[(FeatureType.DATA, "bands"), (FeatureType.MASK, "mask")],
size=self.size,
maxcc=0.0,
)
eopatch = task.execute(bbox=self.bbox, time_interval=("2021-01-01", "2021-01-20"))
bands = eopatch[FeatureType.DATA, "bands"]
assert bands.shape == (0, 101, 99, 2)
masks = eopatch[FeatureType.MASK, "mask"]
assert masks.shape == (0, 101, 99, 1)
@pytest.mark.sh_integration
class TestSentinelHubInputTaskDataCollections:
"""Integration tests for all supported data collections"""
bbox = BBox(bbox=(-5.05, 48.0, -5.00, 48.05), crs=CRS.WGS84)
bbox2 = BBox(bbox=(-72.2, -70.4, -71.6, -70.2), crs=CRS.WGS84)
size = (50, 40)
time_interval = ("2020-06-1", "2020-06-10")
time_difference = dt.timedelta(minutes=60)
data_feature = FeatureType.DATA, "BANDS"
mask_feature = FeatureType.MASK, "dataMask"
s3slstr_500m = DataCollection.SENTINEL3_SLSTR.define_from(
"SENTINEL3_SLSTR_500m",
bands=(
Band("S2", (Unit.REFLECTANCE,), (np.float32,)),
Band("S3", (Unit.REFLECTANCE,), (np.float32,)),
Band("S6", (Unit.REFLECTANCE,), (np.float32,)),
),
)
s5p_co = DataCollection.SENTINEL5P.define_from("SENTINEL5P_CO", bands=(Band("CO", (Unit.DN,), (np.float32,)),))
ndvi_evalscript = """
//VERSION=3
function setup() {
return {
input: [{
bands: ["B04", "B08", "dataMask"],
units: ["REFLECTANCE", "REFLECTANCE", "DN"]
}],
output: [
{ id:"ndvi", bands:1, sampleType: SampleType.FLOAT32 },
{ id:"dataMask", bands:1, sampleType: SampleType.UINT8 }
]
}
}
function evaluatePixel(sample) {
return {
ndvi: [index(sample.B08, sample.B04)],
dataMask: [sample.dataMask]};
}
"""
test_cases = [
IoTestCase(
name="Sentinel-2 L2A",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=DataCollection.SENTINEL2_L2A,
),
bbox=bbox,
time_interval=time_interval,
data_size=12,
timestamp_length=2,
stats=[0.4676, 0.6313, 0.7688],
),
IoTestCase(
name="Sentinel-2 L2A - NDVI evalscript",
task=SentinelHubEvalscriptTask(
features={
FeatureType.DATA: [("ndvi", "NDVI-FEATURE")],
FeatureType.MASK: ["dataMask"],
},
evalscript=ndvi_evalscript,
size=size,
time_difference=time_difference,
data_collection=DataCollection.SENTINEL2_L2A,
),
feature="NDVI-FEATURE",
bbox=bbox,
time_interval=time_interval,
data_size=1,
timestamp_length=2,
stats=[0.0088, 0.0083, 0.0008],
),
IoTestCase(
name="Landsat8",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=DataCollection.LANDSAT_OT_L1,
),
bbox=bbox,
time_interval=time_interval,
data_size=11,
timestamp_length=1,
stats=[48.7592, 48.726, 48.9168],
),
IoTestCase(
name="MODIS",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=DataCollection.MODIS,
),
bbox=bbox,
time_interval=time_interval,
data_size=7,
timestamp_length=10,
stats=[0.0073, 0.0101, 0.1448],
),
IoTestCase(
name="Sentinel-1 IW",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=DataCollection.SENTINEL1_IW,
),
bbox=bbox,
time_interval=time_interval,
data_size=2,
timestamp_length=5,
stats=[0.016, 0.0022, 0.0087],
),
IoTestCase(
name="Sentinel-1 IW ASCENDING",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=DataCollection.SENTINEL1_IW_ASC,
),
bbox=bbox,
time_interval=time_interval,
data_size=2,
timestamp_length=1,
stats=[0.0407, 0.0206, 0.0216],
),
IoTestCase(
name="Sentinel-1 EW DESCENDING",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=DataCollection.SENTINEL1_EW_DES,
),
bbox=bbox2,
time_interval=time_interval,
data_size=2,
timestamp_length=1,
stats=[np.nan, 0.1944, 0.3800],
),
IoTestCase(
name="Sentinel-3 OLCI",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=DataCollection.SENTINEL3_OLCI,
),
bbox=bbox,
time_interval=time_interval,
data_size=21,
timestamp_length=11,
stats=[0.317, 0.1946, 0.2884],
),
IoTestCase(
name="Sentinel-3 SLSTR 500m resolution",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=s3slstr_500m,
),
bbox=bbox,
time_interval=("2021-02-10", "2021-02-15"),
data_size=3,
timestamp_length=13,
stats=[0.3173, 0.4804, 0.4041],
),
IoTestCase(
name="Sentinel-5P",
task=SentinelHubInputTask(
bands_feature=data_feature,
additional_data=[mask_feature],
size=size,
time_difference=time_difference,
data_collection=s5p_co,
),
bbox=bbox,
time_interval=("2020-06-1", "2020-06-1"),
data_size=1,
timestamp_length=1,
stats=[0.0351, 0.034, 0.0351],
),
]
@pytest.mark.parametrize("test_case", test_cases)
def test_data_collections(self, test_case):
eopatch = test_case.task.execute(bbox=test_case.bbox, time_interval=test_case.time_interval)
assert isinstance(eopatch, EOPatch), "Expected return type is EOPatch"
width, height = self.size
data = eopatch[(test_case.feature_type, test_case.feature)]
assert data.shape == (test_case.timestamp_length, height, width, test_case.data_size)
timestamps = eopatch.timestamp
assert all(timestamp.tzinfo is None for timestamp in timestamps), f"`tzinfo` present in timestamps {timestamps}"
assert len(timestamps) == test_case.timestamp_length
stats = calculate_stats(data)
assert stats == approx(test_case.stats, nan_ok=True), f"Expected stats {test_case.stats}, got {stats}"
|
"""
Loading MNIST data
"""
from mnist import MNIST
import numpy as np
def MNIST_load(N):
mndata = MNIST('samples')
y,z = mndata.load_training()
z = np.array(z[0:N])
y = np.array(y[0:N])/256
D = np.shape(y)[1]
zvalues = np.arange(0,10)
K = 10
gmm_data= {'y': y, 'z': z, 'N':N,'K': K,'D':D, 'zvals': zvalues}
return gmm_data
def MNIST_test_load(N):
mndata = MNIST('samples')
y,z = mndata.load_testing()
z = np.array(z[0:N])
y = np.array(y[0:N])/256
D = np.shape(y)[1]
zvalues = np.arange(0,10)
K = 10
gmm_data= {'y': y, 'z': z, 'N':N,'K': K,'D':D, 'zvals': zvalues}
return gmm_data
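# A minimal usage sketch (assumes the python-mnist package and a local 'samples'
# directory containing the raw MNIST files, as used above); it only illustrates
# the layout of the returned dictionary.
if __name__ == "__main__":
    data = MNIST_load(100)
    print(data['y'].shape)   # (100, 784): flattened 28x28 images scaled to [0, 1)
    print(data['z'].shape)   # (100,): integer class labels 0..9
    print(data['N'], data['K'], data['D'])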
|
"""
Revision ID: 0143_remove_reply_to
Revises: 0142_validate_constraint
Create Date: 2017-11-21 10:42:25.045444
"""
import sqlalchemy as sa
from alembic import op
revision = "0143_remove_reply_to"
down_revision = "0142_validate_constraint"
def upgrade():
op.drop_column("services", "letter_contact_block")
op.drop_column("services", "reply_to_email_address")
op.drop_column("services_history", "letter_contact_block")
op.drop_column("services_history", "reply_to_email_address")
def downgrade():
op.add_column(
"services_history",
sa.Column("reply_to_email_address", sa.TEXT(), autoincrement=False, nullable=True),
)
op.add_column(
"services_history",
sa.Column("letter_contact_block", sa.TEXT(), autoincrement=False, nullable=True),
)
op.add_column(
"services",
sa.Column("reply_to_email_address", sa.TEXT(), autoincrement=False, nullable=True),
)
op.add_column(
"services",
sa.Column("letter_contact_block", sa.TEXT(), autoincrement=False, nullable=True),
)
|
import numpy as np
import timeit
def Buffering_Array(Thresholded_Hologram,Initial_Hologram,Pixel_Size,Buffer_Size,Begin_Time,time_slices):
    buffer_elements_x = int(np.floor(Buffer_Size[0]/Pixel_Size[0]))
    buffer_elements_y = int(np.floor(Buffer_Size[1]/Pixel_Size[1]))
    buffer_elements_z = int(np.floor(Buffer_Size[2]/Pixel_Size[2]))
threshold_array_time_slice_region_of_interest = np.copy(Thresholded_Hologram)
Thresholded_Hologram = []
buffered_array = np.zeros(Initial_Hologram.shape,'<f4')
threshold_array_time_slice_inside_buffer = np.zeros(Initial_Hologram.shape).astype(int)
threshold_array_time_slice_inside_buffer[buffer_elements_z:Initial_Hologram.shape[0]-buffer_elements_z,buffer_elements_y:Initial_Hologram.shape[1]-buffer_elements_y,buffer_elements_x:Initial_Hologram.shape[2]-buffer_elements_x] = 1
threshold_array_time_slice_region_of_interest[threshold_array_time_slice_inside_buffer != 1] = 0
threshold_array_time_slice_non_zero = np.argwhere(threshold_array_time_slice_region_of_interest!=0)
print(f'[Time Slice #{time_slices+1}] Buffering the array ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
timer_check = 0
timer_check_check = 0
for non_zero in range(len(threshold_array_time_slice_non_zero[:,0])):
if non_zero >0:
if non_zero%(np.floor(0.25*len(threshold_array_time_slice_non_zero[:,0])))==0:
if timer_check == 0:
#print(f'{np.round((np.floor(0.25*len(threshold_array_time_slice_non_zero[:,0]))/non_zero,1))*25} percent done buffering')
timer_check = 1
if timer_check == 1:
if non_zero%(np.floor(0.25*len(threshold_array_time_slice_non_zero[:,0])))!=0:
timer_check = 0
        # Copy the buffer box around this non-zero voxel from the initial hologram.
        z_idx, y_idx, x_idx = threshold_array_time_slice_non_zero[non_zero]
        z_slice = slice(z_idx - buffer_elements_z, z_idx + buffer_elements_z + 1)
        y_slice = slice(y_idx - buffer_elements_y, y_idx + buffer_elements_y + 1)
        x_slice = slice(x_idx - buffer_elements_x, x_idx + buffer_elements_x + 1)
        buffered_array[z_slice, y_slice, x_slice] = Initial_Hologram[z_slice, y_slice, x_slice]
print(f'[Time Slice #{time_slices+1}] Done buffering the array ({np.round(timeit.default_timer() - Begin_Time,3)} seconds)')
return buffered_array
'''
Thresholding - 2046 (Deriv taken)
Initial - 2048
'''
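# A minimal usage sketch (array sizes and the physical pixel/buffer extents below
# are made up): both holograms share one shape, Pixel_Size and Buffer_Size are
# (x, y, z) extents, and the result keeps Initial_Hologram values only inside a
# buffer box around every non-zero thresholded voxel.
#
#     thresholded = np.zeros((64, 64, 64), dtype='<f4'); thresholded[32, 32, 32] = 1.0
#     initial = np.random.rand(64, 64, 64).astype('<f4')
#     buffered = Buffering_Array(thresholded, initial, (1.0, 1.0, 1.0),
#                                (3.0, 3.0, 3.0), timeit.default_timer(), 0)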
|
'''
Brainfuck Interpreter, Python Edition
Requires Python 3.10.0 or newer (uses structural pattern matching).
Author: Mux
Copyright: Mux, 2021
'''
try:
import sys
    if sys.version_info < (3, 10):
        print("Python version should be at least 3.10")
        exit(-1)
except ImportError as ierr:
    print("Python is not installed correctly; reinstall it and run this again.")
exit(-1)
import os
import re
__all__ = [
"BrainFuckInterpreter"
]
import sys
IGNORE_CHARACTER = {
'\n', '\t', ' ', '\0', '\r'
}
SYNTAX = {
'+': "Increase byte at the data pointer by one.",
'-': "Decrease byte at the data pointer by one.",
'.': "Output the byte at the data pointer.",
',': "Accept one byte of input, storing its value in the byte at the data pointer.",
'<': "Move the data pointer to point to the next cell to the left.",
'>': "Move the data pointer to point to the next cell to the right.",
    '[': "If the byte at the data pointer is 0, jump forward to the instruction after the matching ]",
    ']': "If the byte at the data pointer is not 0, jump back to the instruction after the matching [",
# Non-standard feature, provided and implemented by Mux.
# For simplifying the debug process of brainfuck programs
';': "Display the entire run stack",
':': "Display the current position of the data pointer",
'%': "Display the contents of the runtime stack from the start to the data pointer position",
'#': "Comment"
}
class SourceScanner:
def __init__(self, source_input: str):
if not os.path.exists(source_input):
            raise IOError(f"Source file {source_input} does not exist. Use --help to see the documentation")
if not source_input.endswith(".bf"):
            raise IOError(f"Source file {source_input} is not a valid brainfuck script. Use --help to see the documentation")
with open(source_input, "r") as f:
source = f.read()
# To skip comment, only allowed single line.
source = re.sub(r'(?m)^ *#.*\n?', '', source)
self.processed_ = ""
for index, char in enumerate(source):
if char in IGNORE_CHARACTER:
continue
if char not in SYNTAX.keys():
                raise SyntaxError(f"Character {index} (value '{char}') is not a valid instruction")
self.processed_ += char
# loop check:
stk = []
for char in self.processed_:
if char == '[':
stk.append('[')
            elif char == ']':
                if not stk:
                    raise SyntaxError("Unmatched ']'")
                stk.pop()
if len(stk) != 0:
raise SyntaxError("Unclosed loop")
def get_instructions(self):
return self.processed_
class Executor:
def __init__(self, instructions: str, runtime_stk_size=300):
self.instructions_ = instructions
self._runtime_stk = [0] * runtime_stk_size
# build jump map
self.lr_map = dict()
self.rl_map = dict()
stk = []
for index, ins in enumerate(instructions):
if ins == '[':
stk.append(index)
            if ins == ']':
self.rl_map[index] = stk.pop()
for k, v in self.rl_map.items():
self.lr_map[v] = k
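        # For example, for the instruction string "[+[-]]" these maps come out as
        # rl_map == {4: 2, 5: 0} and lr_map == {0: 5, 2: 4}: a '[' whose cell is 0
        # jumps to its matching ']' and a ']' whose cell is non-zero jumps back to
        # its matching '[' (execution then advances one step past that position).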
def show_rt_stk(self, i, j):
# print("".join([f"\n{index}:[{content}], " if index % 10 == 0 and index > 9 else f"{index}:[{content}], "
# for index, content in enumerate(self._runtime_stk)]))
print("".join([
f"\n{idx}:[{self._runtime_stk[idx]}], " if idx % 10 == 0 and idx > 9 else f"{idx}:[{self._runtime_stk[idx]}], "
for idx in range(i, j)]))
def execute(self) -> int:
dp, ins_position = 0, 0
while ins_position < len(self.instructions_):
ins = self.instructions_[ins_position]
match ins:
case '+':
self._runtime_stk[dp] += 1
case '-':
self._runtime_stk[dp] -= 1
case '<':
if dp == 0:
raise IndexError("Cannot move data pointer any more.")
dp -= 1
case '>':
                    if dp == len(self._runtime_stk) - 1:
raise IndexError("Cannot move data pointer any more")
dp += 1
case '[':
ins_position = self.lr_map[ins_position] if self._runtime_stk[dp] == 0 else ins_position
case ']':
ins_position = self.rl_map[ins_position] if self._runtime_stk[dp] != 0 else ins_position
case ',':
self._runtime_stk[dp] = ord(sys.stdin.read(1))
case '.':
sys.stdout.write(chr(self._runtime_stk[dp]))
case ';':
self.show_rt_stk(0, len(self._runtime_stk))
case ':':
print(dp, end='')
case '%':
self.show_rt_stk(0, dp)
ins_position += 1
return 0
class BrainFuckInterpreter:
def __call__(self, path: str, runtime_stk_size=300, *args, **kwargs):
return Executor(SourceScanner(path).get_instructions(), runtime_stk_size).execute()
if __name__ == "__main__":
try:
exit(BrainFuckInterpreter()(sys.argv[1]))
except IndexError as idxerr:
print("No file was inputted.")
print("usage: python BrainFuck.py [Brain Fuck Module].bf")
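# A minimal usage sketch (the file name below is hypothetical): besides the
# command-line entry point above, the interpreter can be driven programmatically.
#
#     interpreter = BrainFuckInterpreter()
#     exit_code = interpreter("hello.bf", runtime_stk_size=300)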
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import ezgal
class weight(object):
""" ezgal.weight class
Used to apply weights to EzGal model objects through multiplication """
weight = 1
ezgal_type = ''
def __init__(self, weight):
self.weight = float(weight)
self.ezgal_type = type(ezgal.ezgal(skip_load=True))
def __mul__(self, obj):
if type(obj) == type(self):
return weight(self.weight * obj.weight)
elif type(obj) == self.ezgal_type:
return obj.weight(self.weight)
else:
return weight(self.weight * obj)
def __imul__(self, obj):
if type(obj) == type(self):
self.weight *= obj.weight
elif type(obj) == self.ezgal_type:
self.weight *= obj.model_weight
else:
self.weight *= obj
return self
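# A minimal usage sketch (pure weight-on-weight arithmetic only; multiplying by an
# actual ezgal model object instead dispatches to that model's own weighting method):
#
#     w = weight(2.0) * weight(3.0)   # -> weight instance with .weight == 6.0
#     w *= 4                          # -> .weight == 24.0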
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from libica.openapi.libtes.api.task_runs_api import TaskRunsApi
from libica.openapi.libtes.api.task_versions_api import TaskVersionsApi
from libica.openapi.libtes.api.tasks_api import TasksApi
|
from clld.db.meta import DBSession
from clld.db.models.common import Source
from clldutils.misc import slug
from dogonlanguages.models import Document
def bangime(req):
docs = {
'memorial': 'eldersmemorialcall07',
'bangerimevocabulaire': 'bangerimevocabulaire',
'bangerimephrases': 'bangerimephrases',
'bangerimepres': 'elders2006',
'blacksmith': 'blacksmithvocabulary',
}
return {
'docs': {k: Source.get(sid) for k, sid in docs.items()}
}
def florafauna(req):
note_ids = [
'fish_notes_Mali_JH',
'flora_notes_Mali_JH',
'insect_arthropod_mollusc_notes_Mali_JH',
'mammal_notes_Mali_JH',
'reptile_notes_Mali_JH',
'bird_notes_Mali_JH',
]
return {
'notes': [Source.get(slug(sid)) for sid in note_ids]
}
def typology(req):
docids = [
"dogonnominalclasses",
"dogonatrharmony",
"dogonexistentialparticleyv",
"dogonidentificationalitisx",
"dogonmediopassiveandcausative",
"dogonadpositions",
"dogoncasemarking",
"dogondoubleheadedrelativeclauses",
"dogondynamicandstativeverbs",
"dogonfocalization",
"dogonimperativeandhortativeverbs",
"dogonlexicaltonesofverbsandotherstemclasses",
"dogonlogophorics",
"dogonnasalizedsonorantsandnasalizationspreading",
"dogonrelativeclauses",
"dogonreversiveverbs",
"dogonsyntactictonologyofnp",
"dogonverbserialization",
"dogonvowelsymbolism",
]
return {'docs': {d.id: d
for d in DBSession.query(Document).filter(Document.id.in_(docids))}}
def other(req):
jenaama = 'Heath2016-Jenaama-lexicon Heath2016-JenaamaBozo'.split()
rows = [
["Tieyaxo", "Tigemaxo", "boz", "tiey1235"],
["Tiema Cewe", "Tiema Ce", "boo", "tiem1235"],
["Kelenga", "Hainyaxo", "bsx", "hain1253"],
["Jenaama", "Sorogaana", "bze", "jena1242"],
]
return {
'rows': rows,
'jenaama': [Source.get(slug(sid)) for sid in jenaama]
}
|
#!/bin/python3
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
""" The bittensor base validator
Example:
$ python3 miners/text/core_validator.py --logging.debug
"""
import sys
import argparse
import time
from types import SimpleNamespace
import bittensor
import torch
import os
import wandb
import math
import pandas
import traceback
from rich import print
from rich.console import Console
from rich.traceback import install
from ..neuron_utilities import joining_context, partial_contexts, ThreadQueue
import torch.nn as nn
import random
from torch.nn.utils import clip_grad_norm_
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from loguru import logger
import cProfile
from threading import Lock
logger = logger.opt( colors=True )
console = Console()
install(show_locals=True)
class neuron:
r"""
    Creates a bittensor neuron that specializes in validating other peers. The core validator
    finetunes on the bittensor network with a mixture-of-experts model and Shapley-value scoring.
    The validator's main jobs are to identify important/useful peers in the network and to weight
    them correctly. To achieve this, the validator sends requests to different peers on the network
    and evaluates their responses.
Args:
config (:obj:`bittensor.Config`, `optional`):
bittensor.server.config()
subtensor (:obj:bittensor.subtensor , `optional`):
bittensor subtensor connection
dataset (:obj:bittensor.dataset , `optional`):
bittensor dataset
wallet (:obj:bittensor.wallet, `optional`):
bittensor wallet object
metagraph (:obj:bittensor.metagraph, `optional`):
bittensor metagraph object
dendrite (:obj:bittensor.dendrite, `optional`):
bittensor dendrite object
Examples::
>>> subtensor = bittensor.subtensor(network='nakamoto')
>>> validator = bittensor.neuron.text.core_validator.neuron(subtensor=subtensor)
>>> validator.run()
"""
def __init__(
self,
config: 'bittensor.Config' = None,
wallet: 'bittensor.Wallet' = None,
subtensor: 'bittensor.Subtensor' = None,
metagraph: 'bittensor.Metagraph' = None,
dendrite: 'bittensor.Dendrite' = None,
dataset: 'bittensor.dataset' = None
):
# === Set up Config ===
if config == None: config = neuron.config()
self.config = config
neuron.check_config( self.config )
self.config.to_defaults()
if self.config.neuron._mock == True:
self.config.subtensor._mock = True
self.config.wallet._mock = True
self.config.dataset._mock = True
self.config.dendrite._mock = True
self.config.metagraph._mock = True
self.config.subtensor._mock = True
print ( self.config )
# === Create Bittensor objects ===
bittensor.logging( config = self.config, logging_dir = self.config.neuron.full_path )
self.wallet = bittensor.wallet ( config = self.config ) if wallet == None else wallet
self.subtensor = bittensor.subtensor ( config = self.config ) if subtensor == None else subtensor
self.metagraph = bittensor.metagraph ( config = config, subtensor = self.subtensor ) if metagraph == None else metagraph
self.dendrite = bittensor.dendrite ( config = self.config, wallet = self.wallet ) if dendrite == None else dendrite
self.device = torch.device ( device = self.config.neuron.device )
self.nucleus = nucleus ( config = self.config, device = self.device, subtensor = self.subtensor ).to( self.device )
self.dataset = bittensor.dataset ( config = self.config, batch_size = self.subtensor.validator_batch_size, block_size = self.subtensor.validator_sequence_length ) if dataset == None else dataset
# === Create thread queue ===
self.forward_thread_queue = ThreadQueue(num_jobs = self.config.neuron.forward_num, target = self.forward)
self.loss = None
self.loss_agg_mutex = Lock()
self.moving_avg_scores = None
@classmethod
def check_config( cls, config: 'bittensor.Config' ):
r""" Checks/validates the config namespace object.
"""
nucleus.check_config( config )
bittensor.logging.check_config( config )
bittensor.wallet.check_config( config )
bittensor.subtensor.check_config( config )
bittensor.metagraph.check_config( config )
bittensor.dataset.check_config( config )
bittensor.dendrite.check_config( config )
bittensor.wandb.check_config( config )
full_path = os.path.expanduser('{}/{}/{}/{}'.format( config.logging.logging_dir, config.wallet.name, config.wallet.hotkey, config.neuron.name ))
config.neuron.full_path = os.path.expanduser(full_path)
config.using_wandb = config.wandb.api_key != 'default'
if not os.path.exists(config.neuron.full_path):
os.makedirs(config.neuron.full_path)
@classmethod
def add_args( cls, parser ):
parser.add_argument('--neuron.name', type=str, help='Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name ', default='core_validator')
parser.add_argument('--neuron.learning_rate', type=float, help='Training initial learning rate.', default=0.1 )
parser.add_argument('--neuron.momentum', type=float, help='optimizer momentum.', default=0.8 )
parser.add_argument('--neuron.blocks_per_epoch', type=int, help='Blocks per epoch, -1 value means we use the chain value.', default = -1 )
parser.add_argument('--neuron.epochs_until_reset', type=int, help='Number of epochs before weights are reset.', default = -1 )
parser.add_argument('--neuron.device', type=str, help='miner default training device cpu/cuda', default=("cuda" if torch.cuda.is_available() else "cpu"))
parser.add_argument('--neuron.clip_gradients', type=float, help='Implement gradient clipping to avoid exploding loss on smaller architectures.', default=1.0 )
parser.add_argument('--neuron.restart_on_failure', action='store_true', help='''Restart neuron on unknown error.''', default=True )
parser.add_argument('--neuron._mock', action='store_true', help='To turn on neuron mocking for testing purposes.', default=False )
        parser.add_argument('--neuron.wait_for_finalization', action='store_true', help='''when setting weights the miner waits for transaction finalization.''', default=False)
        parser.add_argument('--neuron.forward_num', type=int, help='''How many forward requests before a backward call.''', default=3)
@classmethod
def config ( cls ):
parser = argparse.ArgumentParser()
cls.add_args( parser )
nucleus.add_args( parser )
bittensor.wallet.add_args( parser )
bittensor.dendrite.add_args( parser )
bittensor.subtensor.add_args( parser )
bittensor.metagraph.add_args( parser )
bittensor.logging.add_args( parser )
bittensor.dataset.add_args( parser )
bittensor.wandb.add_args(parser)
return bittensor.config( parser )
def __del__(self):
self.__exit__()
def __exit__ ( self, exc_type, exc_value, exc_traceback ):
r""" Close down neuron.
"""
print(exc_type, exc_value, exc_traceback)
self.dataset.close()
self.dendrite.__del__()
self.forward_thread_queue.stop()
self.forward_thread_queue.join()
def __enter__(self):
r""" Sanity checks and begin validator.
"""
# === Wallet ===
        # Connects the wallet to the network.
# NOTE: This registration step should likely be solved offline first.
self.wallet.create().register( subtensor = self.subtensor )
# === UID ===
# Get our uid from the chain.
# At this point we should have a uid because we are already registered.
self.uid = self.wallet.get_uid( subtensor = self.subtensor )
# === Monitoring ===
# Optionally set up wandb logging.
if self.config.using_wandb:
bittensor.wandb(
config = self.config,
cold_pubkey = self.wallet.coldkeypub.ss58_address,
hot_pubkey = self.wallet.hotkey.ss58_address,
root_dir = self.config.neuron.full_path
)
def forward(self):
r""" Run the nucleus forward request
        This function is designed to be run from multiple threads.
"""
result = self.nucleus( next(self.dataset) , self.metagraph, self.dendrite )
# === Backward ===
# Backwards gradients through model to train gating and remote endpoints.
(result.loss / self.config.neuron.forward_num).backward()
return result
def run ( self ):
r""" Run the validator and terminate on Keyboard interrupt.
"""
# === Setup ===
# Checks wallet and starts monitoring with wandb.
with self:
# === Start forward requests ===
self.metagraph_sync()
self.forward_thread_queue.start()
# === Run ===
# Iterates through epochs.
self.epoch = 0
self.global_step = 0
while True:
try:
# === Epoch ===
# Each epoch runs for blocks_per_epoch and resets
# the model every epochs_until_reset.
self.run_epoch()
# === Stops on interrupt otherwise restarts ===
except KeyboardInterrupt:
break
except Exception as e:
console.print_exception(show_locals=False)
print( traceback.format_exc() )
                    print( 'Unknown exception: {}'.format(e) )
if not self.config.neuron.restart_on_failure:
break
def run_epoch( self ):
r""" Runs a validator epoch. We apply batches until the epoch length is exhausted.
        Occasionally the validator nucleus is completely reset to ensure we don't converge too far.
At the end of the epoch we set weights on the chain and optionally log to wandb.
"""
# === Get params for epoch ===
# Pulling the latest chain parameters.
current_block = self.subtensor.block
batch_size = self.subtensor.validator_batch_size
sequence_length = self.subtensor.validator_sequence_length
n_topk_peer_weights = self.subtensor.min_allowed_weights
max_allowed_ratio = self.subtensor.max_allowed_min_max_ratio
blocks_per_epoch = self.subtensor.validator_epoch_length if self.config.neuron.blocks_per_epoch == -1 else self.config.neuron.blocks_per_epoch
epochs_until_reset = self.subtensor.validator_epochs_per_reset if self.config.neuron.epochs_until_reset == -1 else self.config.neuron.epochs_until_reset
# === Logs ===
print ( '\nEra:', '\n\t batch_size:', batch_size, '\n\t sequence_length:', sequence_length, '\n\t n_topk_peer_weights:', n_topk_peer_weights,
'\n\t max_allowed_ratio:', max_allowed_ratio, '\n\t blocks_per_epoch:', blocks_per_epoch, '\n\t epochs_until_reset:', epochs_until_reset,
'\n\t until_reset:', self.epoch % epochs_until_reset, '\n\t current_block:', current_block, '\n')
if self.config.using_wandb:
wandb.log( { 'era/batch_size': batch_size, 'era/sequence_length': sequence_length, 'era/n_topk_peer_weights': n_topk_peer_weights,
'era/max_allowed_ratio': max_allowed_ratio, 'era/blocks_per_epoch': blocks_per_epoch, 'era/epochs_until_reset': epochs_until_reset,
}, step = current_block )
# === Run Epoch ===
# Each block length lasts blocks_per_epoch blocks.
# This gives us a consistent network wide timer.
# Here we run until blocks_per_epochs have progressed.
self.metagraph_sync() # Reset metagraph.
epoch_steps = 0
# === Reset Epochs with new params. ===
# Pulls new default validator training parameters and resets
# the model and dataset for the following epoch.
if self.epoch % epochs_until_reset == 0:
print ('\n\n=== Reset ===\n\n')
# === Resetting model + dataset ===
if (batch_size != self.dataset.batch_size) or (sequence_length != self.dataset.block_size):
self.dataset.set_data_size(batch_size, sequence_length)
self.nucleus = nucleus ( config = self.config, device = self.device, subtensor = self.subtensor ).to( self.device )
self.optimizer = torch.optim.SGD (
self.nucleus.parameters(), lr = self.config.neuron.learning_rate, momentum = self.config.neuron.momentum
)
# === Reset Scores ===
self.moving_avg_scores = torch.ones_like( self.metagraph.S ) * -1
        # Checks if the moving average has been initialised.
        if self.moving_avg_scores is None:
            self.moving_avg_scores = torch.ones_like( self.metagraph.S ) * -1
start_block = self.subtensor.block
while self.subtensor.block < start_block + blocks_per_epoch:
start_time = time.time()
# === Forward ===
# Forwards inputs through the network and returns the loss
            # and endpoint scores using a Shapley-value approximation of salience.
forward_results = self.forward_thread_queue.get()
print(f'Run\t| Got forward result in {round(time.time() - start_time, 3)}')
loss, scores, uids = self.nucleus.compute_shapely_scores(forward_results)
# === Scoring ===
# Updates moving averages and history.
self.moving_avg_scores[uids] = self.moving_avg_scores[uids]*(0.99) + scores*(0.01)
# === State update ===
# Prints step logs to screen.
epoch_steps += 1
self.global_step += 1
current_block = self.subtensor.block
step_time = time.time() - start_time
# === Logs ===
print( '\nStep:', '\n\t epoch:', self.epoch, '\n\t epoch_steps:', epoch_steps, '\n\t global_steps:', self.global_step, '\n\t step_time:', step_time, '\n\t loss:', loss.item(),
'\n\t current_block', current_block, '\n\t blocks remaining:', current_block - start_block, '/', blocks_per_epoch, '\n')
if self.config.using_wandb:
wandb.log( { 'epoch/epoch': self.epoch, 'epoch/epoch_steps': epoch_steps, 'epoch/global_steps': self.global_step, 'epoch/loss': loss.item(), 'epoch/time': step_time }, step = current_block )
step_topk_scores, step_topk_uids = bittensor.unbiased_topk( self.moving_avg_scores, k = n_topk_peer_weights )
step_topk_normalized = bittensor.utils.weight_utils.normalize_max_multiple( x = step_topk_scores, multiple = max_allowed_ratio )
for i, w in list(zip(step_topk_uids.tolist(), step_topk_normalized.tolist()) ):
wandb.log( {'weights/w_{}'.format( i ): w }, step = current_block )
            # Do the backward pass after a queue of forward requests has finished.
if self.forward_thread_queue.paused() and self.forward_thread_queue.is_empty():
print('Run\t| Model update')
# === Apply gradients ===
# Applies local gradients to parameters.
clip_grad_norm_(self.nucleus.parameters(), self.config.neuron.clip_gradients)
self.optimizer.step()
self.optimizer.zero_grad()
# === Get another round of forward requests ===
self.forward_thread_queue.resume()
# Iterate epochs.
self.epoch += 1
# === Set weights ===
# Find the n_topk_peer_weights peers to set weights to.
# We use the mean of the epoch weights.
topk_scores, topk_uids = bittensor.unbiased_topk(self.moving_avg_scores, k = n_topk_peer_weights )
topk_scores = bittensor.utils.weight_utils.normalize_max_multiple( x = topk_scores, multiple = max_allowed_ratio )
print( '\nScores:', '\n\t weights:', topk_scores.sort()[0].tolist(), '\n\t sum:', topk_scores.sum().item(),
'\n\t min:', topk_scores.min().item(), '\n\t max:', topk_scores.max().item(), '\n\t max/min:', (topk_scores.max()/topk_scores.min()).item() )
self.subtensor.set_weights(
uids = topk_uids.detach().to('cpu'),
weights = topk_scores.detach().to('cpu'),
wallet = self.wallet,
wait_for_finalization = self.config.neuron.wait_for_finalization,
)
# === Wandb Logs ===
# Optionally send validator logs to wandb.
if self.config.using_wandb:
# Logging history to wandb.
df = pandas.concat( [
bittensor.utils.indexed_values_to_dataframe( prefix = 'weights', index = topk_uids, values = torch.zeros( self.metagraph.n ).scatter( dim = 0, src = topk_scores, index = topk_uids ) ),
self.dendrite.to_dataframe( metagraph = self.metagraph )
], axis = 1); df['uid'] = df.index
wandb_data_dend = self.dendrite.to_wandb()
wandb_data = { 'stake': self.metagraph.S[ self.uid ].item(), 'dividends': self.metagraph.D[ self.uid ].item() }
wandb.log( { 'stats': wandb.Table( dataframe = df ) }, step = current_block )
wandb.log( { **wandb_data, **wandb_data_dend }, step = current_block )
def metagraph_sync(self):
r""" Syncing metagraph together with other metagraph-size related objects
"""
self.metagraph.sync()
        if self.moving_avg_scores is None:
            self.moving_avg_scores = torch.ones_like( self.metagraph.S ) * -1
if self.metagraph.n > len(self.moving_avg_scores):
            size_increase = self.metagraph.n - len(self.moving_avg_scores)
            self.moving_avg_scores = torch.concat([self.moving_avg_scores, torch.ones(size_increase) * -1])
class PositionalEncoding(nn.Module):
r""" Positional Encoder which adds information based on the relative position of each token
"""
def __init__(self, d_model: int, dropout: float, max_len: int = 5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
# === Create position matrix ===
# Creates a positional matrix with alternating frequencies
# pe: (torch.FloatTensor) positional encoding matrix
# pe.shape: [1, max_len, network_dim]
pe = torch.zeros(1, max_len, d_model)
pe[0, :, 0::2] = torch.sin(position * div_term)
pe[0, : , 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe)
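        # In closed form this is the standard sinusoidal transformer encoding:
        #   PE[pos, 2i]   = sin(pos / 10000^(2i / d_model))
        #   PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model))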
def forward(self, x: torch.tensor) -> torch.tensor:
"""
Args:
x: Tensor, shape [batch_size, seq_len, embedding_dim]
"""
# === Positional Encoding ===
# Inject some information of the relative position of the token in the sequence.
# Finally, Dropout is applied to tokens
# x: (torch.FloatTensor) input sequence tokens with position information injected
# x.shape: [batch_size, seq_len, network_dim]
x = x + self.pe[0, :x.size(1)]
return self.dropout(x)
class nucleus( torch.nn.Module ):
""" Nucleus class which holds the validator model.
"""
def __init__( self, config, device, subtensor ):
super(nucleus, self).__init__()
self.config = config
self.device = device
self.max_n = subtensor.max_n
# Token embeddings project int64 tokens onto representations.
self.token_embedding = torch.nn.Embedding( bittensor.__vocab_size__, bittensor.__network_dim__ )
# Routing encoder, projects token embeddings onto context for routing inputs.
self.routing_encoder_layers = TransformerEncoderLayer( bittensor.__network_dim__, config.nucleus.nhead, config.nucleus.nhid, config.nucleus.dropout, batch_first=True)
self.routing_encoder = TransformerEncoder( self.routing_encoder_layers, 1 )
# Encoder projects response representations onto hidden units.
self.encoder_layers = TransformerEncoderLayer( bittensor.__network_dim__, config.nucleus.nhead, config.nucleus.nhid, config.nucleus.dropout, batch_first=True)
self.encoder = TransformerEncoder( self.encoder_layers, config.nucleus.nlayers )
# Decoder which projects hidden unit representations on to the token dimension.
self.decoder = torch.nn.Linear( bittensor.__network_dim__, bittensor.__vocab_size__ , bias=False)
# Positional Encoding
self.local_pos_encoder = PositionalEncoding( bittensor.__network_dim__, self.config.nucleus.dropout )
        # Cross-entropy loss for next-token prediction (NTP).
self.loss_fct = torch.nn.CrossEntropyLoss()
# SGMOE Gates: Instantiating the gates per expert.
self.gates = torch.nn.Linear( bittensor.__network_dim__, self.max_n, bias=True ).to( self.device )
self.reset_weights()
@classmethod
def add_args( cls, parser ):
parser.add_argument('--nucleus.topk', type=int, help='the number of peers queried during each remote forward call', default = 20 )
parser.add_argument('--nucleus.nhid', type=int, help='the dimension of the feedforward network model in nn.TransformerEncoder', default=200 )
parser.add_argument('--nucleus.nhead', type=int, help='the number of heads in the multiheadattention models', default = 2 )
parser.add_argument('--nucleus.nlayers', type=int, help='the number of nn.TransformerEncoderLayer in nn.TransformerEncoder', default=2 )
parser.add_argument('--nucleus.dropout', type=float, help='the dropout value', default=0.2)
parser.add_argument('--nucleus.importance', type=float, help='hyperparameter for the importance loss', default=3)
        parser.add_argument('--nucleus.noise_multiplier', type=float, help='Standard deviation multiplier on weights', default=2 )
@classmethod
def config ( cls ):
parser = argparse.ArgumentParser()
cls.add_args( parser )
return bittensor.config( parser )
@classmethod
def check_config( cls, config: 'bittensor.Config' ):
pass
def reset_weights ( self ):
r""" Resets the validator weights.
"""
# === Resets all the weights using xavier initialization. ===
torch.nn.init.xavier_uniform_ ( self.token_embedding.weight )
torch.nn.init.xavier_uniform_ ( self.decoder.weight )
torch.nn.init.xavier_uniform_( self.gates.weight )
def init_xavier( component ):
try:
torch.nn.init.xavier_uniform_( component.weight )
except: pass
self.routing_encoder.apply( init_xavier )
self.encoder.apply( init_xavier )
torch.nn.init.xavier_uniform_( self.gates.weight )
# === Compute loss given joined responses ===
# This function computes target loss for next token prediction given
# the joined responses as a hidden unit input.
# target_loss: (torch.float64): loss after decoding responses to targets.
# target_loss.shape = [ 1 ]
def get_target_loss ( self, hidden, targets ):
# hidden: (torch.float64): [ batch_size, sequence_len, __network_dim__ ]
# Hidden units which are encoded and decoded onto targets for loss computation.
# targets: (torch.float64): [n]
# Token targets,
src_mask = torch.triu(torch.ones(hidden.size(1), hidden.size(1)) * float('-inf'), diagonal=1)
src_mask = src_mask.to(self.config.neuron.device)
encoded_hidden = self.encoder( hidden, mask = src_mask )
decoded_targets = self.decoder( encoded_hidden )
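        # Standard causal language-model shift: the logits at position t are scored
        # against the token at position t+1, so the last logit and the first target are dropped.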
shift_logits = decoded_targets[..., :-1, :].contiguous()
shift_labels = targets[..., 1:].contiguous()
return self.loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) )
def forward (
self,
inputs: torch.FloatTensor,
metagraph: 'bittensor.Metagraph',
dendrite: 'bittensor.Dendrite',
):
r""" Forward validator pass. Selects peer to query, joins results and computes scoring.
Args:
inputs (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, *-1*)`, `required`):
Tensor inputs to distribute to neurons using query context.
metagraph (bittensor.Metagraph):
Metagraph object used to query network information.
dendrite (bittensor.Dendrite):
Dendrite RPC client used to make network queries.
Returns:
global_loss (torch.FloatTensor, [1] ):
Loss for training validator nucleus.
scores (torch.FloatTensor, [ metagraph.n ]):
Scores per endpoint for this batch.
"""
# === Create the local context used to select endpoints ===
# The context tensor returns a hidden unit representation for the text inputs
# this context can be used as input to the gates in the next step.
# embedding: retrieve learned representation vectors for input vocabulary tokens.
# inputs.shape = [batch_size, sequence_len]
# embedding.shape = [batch_size, sequence_len, bittensor.__network_dim__]
embedding = self.token_embedding( inputs )* math.sqrt( bittensor.__network_dim__ )
# === Create an attention mask ===
# The attention mask will mask out parts of the context
# This prevents cheating and forward-looking when predicting each token in the sequence.
# src_mask: (torch.FloatTensor) attention mask adds -inf to positions not allowed to attend
# src_mask.shape = [sequence_len, sequence_len]
src_mask = torch.triu(torch.ones(embedding.size(1), embedding.size(1)) * float('-inf'), diagonal=1)
src_mask = src_mask.to(self.config.neuron.device)
# === Apply the positional encoding to help select endpoints ===
        # The positional encoder provides information based on the relative position of each token.
# embedding.shape = [batch_size, sequence_len, bittensor.__network_dim__]
# pos_embedding: (torch.FloatTensor) positional encoded embedding.
# pos_embedding.shape = [batch_size, sequence_len, bittensor.__network_dim__]
pos_embedding = self.local_pos_encoder(embedding)
# routing_context: (torch.FloatTensor): context tensor which is used to select endpoints.
# routing_context.shape = [ batch size, __network_dim__ ]
routing_context = self.routing_encoder( pos_embedding, mask = src_mask )
# === Get weights for uids. ===
# We iterate over each of the network uids and compute a querying score for each
# using the gating function. This returns a score per endpoint per example.
# routing_weights: (torch.FloatTensor): score per example, per endpoint.
# routing_weights.shape = [ batch size, __network_n__ ]
# The gates act over the last embedding of the routing_context.
routing_weights = self.gates( routing_context[:,-1,:] )
# === Normalize routing_weights across batch dimension and add noise. ===
        # We average across the batch dimension to create a single score per endpoint.
# The resulting routing_weights tensor is a score per expert.
# routing_weights: (torch.FloatTensor): normalized weights across batch dimension with noise.
# routing_weights.shape = [ n_filtered ]
batchwise_routing_weights = torch.mean(routing_weights, axis = 0)[:metagraph.n]
noisy_routing_weights = torch.normal( 0, torch.std(batchwise_routing_weights).item(), size=( batchwise_routing_weights.size())).to( self.config.neuron.device )
noisy_routing_weights = batchwise_routing_weights + noisy_routing_weights * self.config.nucleus.noise_multiplier
# === Get indices and values for uids with highest scores ===
# We are taking the topk routing weights and returning their uids.
# First we ensure topk is smaller than the network size then use the torch.topk.
# topk_routing_weights: (torch.float64): scores of uids with highest scores.
# topk_routing_weights.shape = [ self.config.nucleus.topk ]
# topk_routing_uids: (torch.LongTensor): uids with highest scores.
# topk_routing_uids.shape = [ self.config.nucleus.topk ]
top_k_routing_weights, routing_uids = torch.topk( noisy_routing_weights, self.config.nucleus.topk, dim=0)
# === Get endpoint information for the highest scoring uids ===
# We index into the metagraph's endpoints and return a list of the filtered set of endpoints we wish to query.
# routing_endpoints: List[bittensor.endpoints]: endpoint information for filtered uids.
# len(neurons) == self.config.nucleus.topk
routing_endpoints = [ metagraph.endpoints[ uid ] for uid in routing_uids ]
# === Query the endpoints ===
# Makes the dendrite call into the network returning the representations
# for each of the endpoints. The return ops can be used to filter weights and outputs.
# query_responses: (List[torch.float64]): responses from each endpoint.
# query_responses.shape = self.config.nucleus.topk * [ batch_size, sequence_len, __network_dim__ ]
# return_ops: (torch.int64): Return ops.
# return_ops.shape = [ self.config.nucleus.topk ]
query_responses, return_ops, times = dendrite.forward_text (
endpoints = routing_endpoints,
inputs = inputs
)
# Send responses to device. This is required to ensure we move the responses
# Onto the correct device.
        query_responses = [ response.to( self.device ) for response in query_responses ]
# === Compute global loss ===
# Computes the global training loss for the nucleus by decoding all the responses
# onto the targets.
# target_loss: (torch.float64): loss after decoding all responses and a variance loss.
# target_loss.shape = [ 1 ]
responses_hidden, _ = joining_context( return_ops, batchwise_routing_weights[routing_uids], query_responses)
target_loss = self.get_target_loss ( responses_hidden, inputs )
print ('Loss\t|\t{}'.format( target_loss.item() ))
# === Compute Importance loss ===
        # Computes the importance loss based on the squared coefficient of variation (std/mean) of batchwise_routing_weights.
        # This ensures that the gates do not converge onto a few experts.
        # importance_loss: (torch.float64) the importance loss based on the coefficient of variation
# target_loss: (torch.float64): the total loss (global training loss + importance loss)
# target_loss.shape = [ 1 ]
importance_loss = self.config.nucleus.importance * (torch.std(batchwise_routing_weights)/torch.mean(batchwise_routing_weights))**2
loss = target_loss + importance_loss
state_dict = SimpleNamespace(
inputs = inputs,
batchwise_routing_weights = batchwise_routing_weights,
routing_uids = routing_uids,
query_responses = query_responses,
return_ops = return_ops,
responses_hidden = responses_hidden,
loss = loss,
n = metagraph.n.item()
)
return state_dict
def compute_shapely_scores(self, state_dict):
        # === Compute Shapley scores ===
        # Computes a Shapley-style score for each endpoint by masking its response and
        # measuring the change in loss that this induces.
        # shapely_scores: (torch.float32): Shapley scores per query_response
# shapely_scores.shape = [ metagraph.n ]
masked_contexts = partial_contexts(
state_dict.return_ops,
state_dict.routing_uids,
state_dict.batchwise_routing_weights[state_dict.routing_uids],
state_dict.query_responses
)
# Turn off gradient computation for shapely scores.
# shapely_scores.shape = [ nucleus.topk ]
# This sets non queried peers as if non-responsive
shapely_scores = torch.zeros( state_dict.routing_uids.size())
# Turn off gradient computation for shapely scores.
with torch.no_grad():
self.eval()
unmasked_loss = self.get_target_loss(state_dict.responses_hidden, state_dict.inputs)
# Iterate over all responses creating a masked context.
for i, uid in enumerate(masked_contexts):
# Create mask by zeroing out the response at index.
masked_loss = self.get_target_loss ( masked_contexts[uid], state_dict.inputs )
shapely_score = unmasked_loss - masked_loss
print ('Shapely\t|\tuid: {}\tweight: {}\tscore: {}\tcode: {}\tsum: {}'.format( uid, state_dict.batchwise_routing_weights[state_dict.routing_uids][i], -shapely_score.item(), state_dict.return_ops[i], state_dict.query_responses[i].sum()))
shapely_scores[ i ] = -shapely_score if not torch.abs(1 - state_dict.query_responses[i].std()).item() < 0.05 else -1
# Ensures that the nonresponsive peers are not rewarded
shapely_scores[state_dict.return_ops != 1 ] = -1
# === Done ===
return state_dict.loss, shapely_scores, state_dict.routing_uids
|
version = '2.1.4'
version_cmd = 'riak version'
download_url = 'http://s3.amazonaws.com/downloads.basho.com/riak/2.1/2.1.1/ubuntu/trusty/riak_2.1.1-1_amd64.deb'
install_script = """
dpkg -i riak_2.1.1-1_amd64.deb
"""
|
def verificar_repeticao(numero):
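    """Return True if `numero` (assumed to be at most four digits) has no repeated digits."""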
if numero < 10:
return True
if 10 <= numero < 100:
d1 = numero // 10
d2 = numero - d1 * 10
if d1 == d2:
return False
else:
return True
if 100 <= numero < 1000:
d1 = numero // 100
d2 = (numero - d1 * 100) // 10
d3 = numero - (d1 * 100 + d2 * 10)
if d1 == d2 or d1 == d3 or d2 == d3:
return False
else:
return True
if numero >= 1000:
d1 = numero // 1000
d2 = (numero - d1 * 1000) // 100
d3 = (numero - (d1 * 1000 + d2 *100)) // 10
d4 = numero - (d1 * 1000 + d2 * 100 + d3 * 10)
if d1 == d2 or d1 == d3 or d1 == d4 or d2 == d3 or d2 == d4 or d3 == d4:
return False
else:
return True
n1 = int(input())
n2 = int(input())
possiveis = 0
while n1 <= n2:
if verificar_repeticao(n1):
possiveis += 1
n1 += 1
print(possiveis)
|
"""ppm docker compose new命令的处理."""
from typing import Dict
from pmfp.utils.endpoint import EndPoint
from ..core import (
dockercompose,
common_schema_properties
)
properties: Dict[str, object] = {
"dockercompose_name": {
"type": "string",
"title": "f",
"description": "指定docker-compose文件名字",
"default": "docker-compose.yml"
},
"dockerfile_dir": {
"type": "string",
"description": "dockerfile文件所在的文件夹,如果指定则会构造`build`段,在未指定`docker_register_namespace`时会被默认指定为`.`"
},
"dockerfile_name": {
"type": "string",
"description": "dockerfile文件名字,只有在dockerfile_dir有值时才会生效"
}
}
properties.update(common_schema_properties)
class New(EndPoint):
"""创建一个docker-compose文件.
当指定的dockercompose文件存在时创建全新内容并覆盖原来老的compose文件,老的会被重新保存为`原名.{timestamp}_bak`;
当指定的dockercompose文件不存在时创建新的compose文件.
更新操作只能更新如下内容:
1. service
2. 外部networks声明
3. 外部volumes声明
4. 外部configs声明
5. 外部secrits声明
"""
argparse_noflag = "compose_version"
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": properties
}
dockercompose_new = dockercompose.regist_sub(New)
|
from domain.route import entity
from flask_mongoengine import BaseQuerySet
from domain.airport.service import get_airport
from domain.airport.dtos import json_from_airport, json_from_airports
def args_to_origin(args):
origin_name = args.get('origin', None)
return None if origin_name is None else get_airport(origin_name)
def args_to_destination(args):
destination_name = args.get('destination', None)
return None if destination_name is None else get_airport(destination_name)
def json_to_update_route(route_json):
origin_name = route_json.get('origin', {'name': None}).get('name')
destination_name = route_json.get('destination', {'name': None}).get('name')
return {
'origin': get_airport(origin_name) if origin_name else None,
'destination': get_airport(destination_name) if destination_name else None
}
def json_from_route(route):
return {
"origin": json_from_airport(route.origin),
"destination": json_from_airport(route.destination)
}
def json_from_routes(routes):
return {'routes': list(map(json_from_route, routes))} if isinstance(routes, BaseQuerySet) else json_from_route(routes)
def json_from_destination(route):
return json_from_airports(route.destination)
def json_from_destinations(routes):
return {'destinations': list(map(json_from_destination, routes))} if isinstance(routes, BaseQuerySet) else json_from_destination(routes)
def route_from_json(route_json):
return entity.Route(
origin=get_airport(route_json.get('origin', None).get('name', None)),
destination=get_airport(route_json.get('destination', None).get('name', None)))
|
from django.db import models
from django.conf import settings
from SocialNetwork_API.models.timestamped import TimeStampedModel
from SocialNetwork_API.models.user_types import PositiveTinyIntegerField
class Api(TimeStampedModel):
expired_at = models.DateTimeField(auto_now=False, default=settings.REST_FRAMEWORK['EXPIRED_FOREVER'])
# id = models.AutoField(primary_key=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)  # why does it fail when this is used?
# user_id = models.PositiveIntegerField(default=0)
# device = models.CharField(max_length=64)
# ip = models.GenericIPAddressField()
token = models.CharField(max_length=255, primary_key=True)
# version = models.CharField(max_length=40)
# type = PositiveTinyIntegerField(default=0)
# app_id = models.CharField(max_length=64, default='')
class Meta:
db_table = 'sn_apis'
|
# coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the cloning network."""
import copy
import ibc.ibc.utils.constants as constants
from ibc.networks import mlp_ebm
from ibc.networks import mlp_mdn
from ibc.networks import mlp_mse
from ibc.networks import pixel_ebm
from ibc.networks import pixel_mdn
from ibc.networks import pixel_mse
import tensorflow as tf
from tf_agents.specs import tensor_spec
def adjust_img_spec_to_float(obs_tensor_spec):
"""If there are images, adjust spec to be float32."""
float_obs_tensor_spec = obs_tensor_spec
for img_key in constants.IMG_KEYS:
if isinstance(obs_tensor_spec, dict) and img_key in obs_tensor_spec:
img_spec = obs_tensor_spec[img_key]
float_obs_tensor_spec = copy.deepcopy(obs_tensor_spec)
float_obs_tensor_spec[img_key] = tensor_spec.BoundedTensorSpec(
img_spec.shape,
dtype=tf.float32,
minimum=img_spec.minimum,
maximum=1.0)
return float_obs_tensor_spec
def get_cloning_network(
name,
obs_tensor_spec,
action_tensor_spec,
obs_norm_layer,
act_norm_layer,
sequence_length,
act_denorm_layer):
"""Chooses a cloning network based on config.
Args:
name: Name of the network to build.
obs_tensor_spec: A nest of `tf.TypeSpec` representing the
input observations.
action_tensor_spec: A nest of `tf.TypeSpec` representing the actions.
obs_norm_layer: Keras layer to normalize observations.
act_norm_layer: Keras layer to normalize actions.
sequence_length: Length of the observation sequence.
act_denorm_layer: Layer mapping zmuv-normalized actions back to original
spec.
Returns:
A cloning network.
"""
del obs_norm_layer # placeholder
del act_norm_layer
del sequence_length
obs_tensor_spec = adjust_img_spec_to_float(obs_tensor_spec)
if name == 'MLPEBM':
cloning_network = mlp_ebm.MLPEBM(
obs_spec=(obs_tensor_spec, action_tensor_spec),
action_spec=tf.TensorSpec([1]))
elif name == 'MLPMSE':
cloning_network = mlp_mse.MLPMSE(
obs_spec=obs_tensor_spec,
action_spec=action_tensor_spec,
act_denorm_layer=act_denorm_layer)
elif name == 'MLPMDN':
cloning_network = mlp_mdn.MLPMDN(
obs_spec=obs_tensor_spec,
action_spec=action_tensor_spec,
act_denorm_layer=act_denorm_layer)
elif name == 'ConvMLPMSE':
cloning_network = pixel_mse.ConvMLPMSE(
obs_spec=obs_tensor_spec,
action_spec=action_tensor_spec,
act_denorm_layer=act_denorm_layer)
elif name == 'ConvMLPMDN':
cloning_network = pixel_mdn.ConvMLPMDN(
obs_spec=obs_tensor_spec,
action_spec=action_tensor_spec,
act_denorm_layer=act_denorm_layer)
elif name == 'PixelEBM':
cloning_network = pixel_ebm.PixelEBM(
obs_spec=obs_tensor_spec,
action_spec=action_tensor_spec)
else:
raise ValueError('Unsupported cloning network %s' % name)
return cloning_network
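# A minimal usage sketch (the spec shapes below are made up; the normalization
# layers are unused by this factory and may be passed as None):
#
#     obs_spec = tensor_spec.TensorSpec([10], tf.float32, name='obs')
#     act_spec = tensor_spec.BoundedTensorSpec([2], tf.float32, minimum=-1.0, maximum=1.0)
#     net = get_cloning_network('MLPMSE', obs_spec, act_spec,
#                               obs_norm_layer=None, act_norm_layer=None,
#                               sequence_length=2, act_denorm_layer=None)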
|
import math
def structure_solution(solution):
res = {
# List of classes with their exams
'classes': {},
# List of exams with their teachers
'teachers': {},
# Number of days
'days': 0
}
for block in solution:
day = math.floor(block[2] / 4)
# check if is class
if block[1].startswith('class_'):
if not block[1] in res['classes']:
res['classes'][block[1]] = {}
if not day in res['classes'][block[1]]:
res['classes'][block[1]][day] = []
res['classes'][block[1]][day].append({
'label': block[0],
'day': day,
'start_hour': block[2] % 4,
'length': block[3] - block[2],
'teachers': []
})
if day + 1 > res['days']:
res['days'] = day + 1
elif block[1].startswith('teacher_'):
if not block[1] in res['teachers']:
res['teachers'][block[1]] = []
res['teachers'][block[1]].append({
'label': block[0],
'day': day,
'start_hour': block[2] % 4,
'length': block[3] - block[2]
})
# add teachers to each class
for cls in res['classes'].values():
for day in cls.values():
for exam in day:
if exam['label'] in res['teachers']:
exam['teachers'] = res['teachers'][exam['label']]
return res
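# A minimal, self-contained usage sketch (the labels below are made up): each
# solution block is expected to look like (label, resource_name, start_slot,
# end_slot), with four slots per day, so slot 5 is day 1, hour 1.
if __name__ == "__main__":
    example_solution = [
        ("math", "class_1A", 5, 7),
        ("math", "teacher_smith", 5, 7),
    ]
    structured = structure_solution(example_solution)
    assert structured['days'] == 2
    assert structured['classes']['class_1A'][1][0]['start_hour'] == 1
    print(structured)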
|
from arm.logicnode.arm_nodes import *
class LoopNode(ArmLogicTreeNode):
"""Resembles a for-loop (`for (i in from...to)`) that is executed at
once when this node is activated.
@seeNode While
@seeNode Loop Break
@input From: The value to start the loop from (inclusive)
@input To: The value to end the loop at (exclusive)
@output Loop: Active at every iteration of the loop
@output Index: The index for the current iteration
@output Done: Activated once when the looping is done
"""
bl_idname = 'LNLoopNode'
bl_label = 'Loop'
    bl_description = 'Resembles a for-loop that is executed all at once when this node is activated'
arm_section = 'flow'
arm_version = 1
def init(self, context):
super(LoopNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('NodeSocketInt', 'From')
self.add_input('NodeSocketInt', 'To')
self.add_output('ArmNodeSocketAction', 'Loop')
self.add_output('NodeSocketInt', 'Index')
self.add_output('ArmNodeSocketAction', 'Done')
|
from django.contrib import admin
from .models import Category, Post, Tag
class PostAdmin(admin.ModelAdmin):
list_display = ['id', 'title', 'created_at', ]
list_display_links = ['id', 'title', ]
ordering = ['-created_at', 'id', ]
search_fields = ['title', 'content', ]
date_hierarchy = 'created_at'
list_filter = ['tags', ]
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
|
from stix_shifter.stix_translation import stix_translation
from stix_shifter_utils.utils.module_discovery import modules_list
translation = stix_translation.StixTranslation()
class TestTranslationDialects(object):
def test_supported_dialects(self):
modules = modules_list()
for module in modules:
result = translation.translate(module, stix_translation.DIALECTS, None, None)
for dialect, data in result.items():
assert len(data) == 2
assert 'language' in data
assert 'default' in data
|
import ccxt.async_support as ccxt
import asyncio
import json
import networkx as nx
from .utils.general import ExchangeNotInCollectionsError
class CollectionBuilder:
def __init__(self):
self.exchanges = ccxt.exchanges
# keys are market names and values are an array of names of exchanges which support that market
self.collections = {}
# stores markets which are only available on one exchange: keys are markets names and values are exchange names
self.singularly_available_markets = {}
async def async_build_all_collections(self, write=True, ccxt_errors=False):
"""
Refer to glossary.md for the definition of a "collection"
:param write: If true, will write collections and singularly_available_markets to json files in /collections
:param ccxt_errors: If true, this method will raise the errors ccxt raises
:return: A dictionary where keys are market names and values are lists of exchanges which support the respective
market name
"""
tasks = [self._add_exchange_to_collections(exchange_name, ccxt_errors) for exchange_name in self.exchanges]
await asyncio.wait(tasks)
if write:
with open('./collections/collections.json', 'w') as outfile:
json.dump(self.collections, outfile)
with open('./collections/singularly_available_markets.json', 'w') as outfile:
json.dump(self.singularly_available_markets, outfile)
return self.collections
def build_all_collections(self, write=True, ccxt_errors=False):
"""
A synchronous version of async_build_all_collections
Refer to glossary.md for the definition of a "collection"
:param write: If true, will write collections and singularly_available_markets to json files in /collections
:param ccxt_errors: If true, this method will raise the errors ccxt raises
:return: A dictionary where keys are market names and values are lists of exchanges which support the respective
market name
"""
asyncio.get_event_loop().run_until_complete(self.async_build_all_collections(write, ccxt_errors))
return self.collections
async def _add_exchange_to_collections(self, exchange_name: str, ccxt_errors=False):
exchange = getattr(ccxt, exchange_name)()
if ccxt_errors:
await exchange.load_markets()
await exchange.close()
else:
try:
await exchange.load_markets()
await exchange.close()
except ccxt.BaseError:
await exchange.close()
return
for market_name in exchange.symbols:
if market_name in self.collections:
self.collections[market_name].append(exchange_name)
elif market_name in self.singularly_available_markets:
self.collections[market_name] = [self.singularly_available_markets[market_name], exchange_name]
del self.singularly_available_markets[market_name]
else:
self.singularly_available_markets[market_name] = exchange_name
class SpecificCollectionBuilder(CollectionBuilder):
def __init__(self, blacklist=False, **kwargs):
"""
**kwargs should restrict acceptable exchanges. Only acceptable keys and values are strings. Look at this part of
the ccxt manual: https://github.com/ccxt/ccxt/wiki/Manual#user-content-exchange-structure for insight into what
are acceptable rules.
When a value in kwargs is a list x, SpecificCollectionBuilder builds a collection of exchanges in which
the property (designated by the key corresponding to x) contains all elements in x.
When a value in kwargs is a dict x, SpecificCollectionBuilder builds a collection of exchanges in which the
specified property (designated by the key corresponding to x) is a dict and contains all key/ value pairs in x.
Typical use case for **kwargs is 'countries' as a key and Australia, Bulgaria, Brazil, British Virgin
Islands, Canada, China, Czech Republic, EU, Germany, Hong Kong, Iceland, India, Indonesia, Israel, Japan,
Mexico, New Zealand, Panama, Philippines, Poland, Russia, Seychelles, Singapore, South Korea,
St. Vincent & Grenadines, Sweden, Tanzania, Thailand, Turkey, US, UK, Ukraine, or Vietnam as a value.
"""
super().__init__()
self.rules = kwargs
self.blacklist = blacklist
async def _add_exchange_to_collections(self, exchange_name: str, ccxt_errors=False):
exchange = getattr(ccxt, exchange_name)()
if ccxt_errors:
await exchange.load_markets()
await exchange.close()
else:
try:
await exchange.load_markets()
await exchange.close()
except ccxt.BaseError:
await exchange.close()
return
        # Implicitly (and intentionally) does not catch ValueErrors raised by _check_exchange_meets_criteria
if self._check_exchange_meets_criteria(exchange):
# Having reached this, it is known that exchange meets the criteria given in **kwargs.
for market_name in exchange.symbols:
if market_name in self.collections:
self.collections[market_name].append(exchange_name)
elif market_name in self.singularly_available_markets:
self.collections[market_name] = [self.singularly_available_markets[market_name], exchange_name]
del self.singularly_available_markets[market_name]
else:
self.singularly_available_markets[market_name] = exchange_name
def _check_exchange_meets_criteria(self, exchange):
for key, desired_value in self.rules.items():
try:
actual_value = getattr(exchange, key)
except AttributeError:
raise ValueError("{} is not a valid property of {}".format(key, exchange.name))
if isinstance(actual_value, list):
# in all cases where an attribute of an exchange is a list, that list's elements' types are uniform
# so type of the first element == type of all elements
type_of_actual_value = type(actual_value[0])
# this will not work for any Exchange property which is a list of lists (there currently are no such
# properties)
if isinstance(desired_value, list):
for element in desired_value:
if not self._element_of_type_in_list(element, type_of_actual_value, actual_value, key):
return False
else:
return self._element_of_type_in_list(desired_value, type_of_actual_value, actual_value, key)
elif isinstance(actual_value, dict):
# When given a dict as a desired value, this checks that the values in the actual value are equal to
# the values in desired value
if not isinstance(desired_value, dict):
raise ValueError("Exchange attribute {} is a dict but supplied preferred value {} is not a dict"
.format(key, desired_value))
desired_value_items = desired_value.items()
for key_a, value_a in desired_value_items:
                    # this line is A XOR B, where A is self.blacklist and B is (actual_value[key_a] != value_a)
if self.blacklist != (actual_value[key_a] != value_a):
return False
else:
# if desired_value is a list of length 1 and its only element == actual_value (or != if self.blacklist)
if isinstance(desired_value, list):
if len(desired_value) == 1 and (self.blacklist != (actual_value != desired_value[0])):
return False
elif self.blacklist != (actual_value != desired_value):
return False
return True
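    # Truth table for the XOR-style checks used above (an exchange is rejected iff
    # self.blacklist XOR "the property does not match the desired value"):
    #   blacklist=False, property matches    -> exchange kept
    #   blacklist=False, property mismatches -> exchange rejected
    #   blacklist=True,  property matches    -> exchange rejected
    #   blacklist=True,  property mismatches -> exchange kept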
def _element_of_type_in_list(self, element, actual_value_type, actual_value, key):
"""
:param actual_value: A list
:param actual_value_type: Type of all elements in actual_value
:param key: The name of the Exchange property
:return: a boolean
"""
if not isinstance(element, actual_value_type):
raise ValueError("Exchange attribute {} is a list of {}s. "
"A non-{} object was passed.".format(key, str(actual_value_type),
str(actual_value_type)))
        # this line is A XOR B, where A is self.blacklist and B is (element not in actual_value)
if self.blacklist != (element not in actual_value):
return False
return True
class ExchangeMultiGraphBuilder:
def __init__(self, exchanges: list):
self.exchanges = exchanges
self.graph = nx.MultiGraph()
def build_multi_graph(self, write=False, ccxt_errors=False):
futures = [asyncio.ensure_future(self._add_exchange_to_graph(exchange_name, ccxt_errors)) for
exchange_name in self.exchanges]
asyncio.get_event_loop().run_until_complete(asyncio.gather(*futures))
if write:
with open('collections/graph.json', 'w') as outfile:
                # a MultiGraph is not directly JSON-serializable; dump it in node-link format instead
                json.dump(nx.readwrite.json_graph.node_link_data(self.graph), outfile)
return self.graph
async def _add_exchange_to_graph(self, exchange_name: str, ccxt_errors=False):
"""
:param ccxt_errors: if true, raises errors ccxt raises when calling load_markets. The common ones are
RequestTimeout and ExchangeNotAvailable, which are caused by problems with exchanges' APIs.
"""
exchange = getattr(ccxt, exchange_name)()
if ccxt_errors:
await exchange.load_markets()
await exchange.close()
else:
try:
await exchange.load_markets()
await exchange.close()
except ccxt.BaseError:
await exchange.close()
return
for market_name in exchange.symbols:
currencies = market_name.split('/')
try:
self.graph.add_edge(currencies[0], currencies[1], exchange_name=exchange_name, market_name=market_name)
            # certain exchanges (lykke, possibly more) list symbols that are not in BASE/QUOTE form,
            # so the split above yields fewer than two currencies; skip those markets
            except IndexError:
pass
def build_multi_graph_for_exchanges(exchanges: list):
"""
    A wrapper around the ExchangeMultiGraphBuilder class which returns the networkx MultiGraph built by
    build_multi_graph, with one edge per market on each of the given exchanges.
    :param exchanges: A list of exchange names (e.g. ['bittrex', 'poloniex', 'bitstamp', 'anxpro'])
"""
return ExchangeMultiGraphBuilder(exchanges).build_multi_graph()
def build_arbitrage_graph_for_exchanges(exchanges: list, k_core=2):
"""
This function is currently inefficient as it finds the entire graph for the given exchanges then finds the k-core
for that graph. todo: It would be great if someone could improve the efficiency of it but this is not a priority.
IMPORTANT: For this function to work, the @not_implemented_for('multigraph') decorator above the core_number
function in networkx.algorithms.core.py must be removed or commented out.
Todo: Improve this project so that the above does not have to be done.
    :param exchanges: A list of exchanges (e.g. ['bittrex', 'poloniex', 'bitstamp', 'anxpro'])
"""
return nx.k_core(build_multi_graph_for_exchanges(exchanges), k_core)
def build_collections(blacklist=False, write=True, ccxt_errors=False):
return build_specific_collections(blacklist, write,
ccxt_errors, has={'fetchOrderBook': True})
def build_specific_collections(blacklist=False, write=False, ccxt_errors=False, **kwargs):
builder = SpecificCollectionBuilder(blacklist, **kwargs)
return builder.build_all_collections(write, ccxt_errors)
def build_all_collections(write=True, ccxt_errors=False):
"""
Be careful when using this. build_collections is typically preferred over this method because build_collections only
accounts for exchanges which have a private API (and thus can be traded on).
:param write:
:param ccxt_errors:
:return:
"""
builder = CollectionBuilder()
return builder.build_all_collections(write=write, ccxt_errors=ccxt_errors)
def get_exchanges_for_market(market_ticker):
"""
Returns the list of exchanges on which a market is traded
"""
try:
with open('./collections/collections.json') as f:
collections = json.load(f)
for market_name, exchanges in collections.items():
if market_name == market_ticker:
return exchanges
except FileNotFoundError:
return build_specific_collections(symbols=[market_ticker])
with open('./collections/singularly_available_markets.json') as f:
singularly_available_markets = json.load(f)
        for market_name, exchange in singularly_available_markets.items():
if market_name == market_ticker:
return [exchange]
raise ExchangeNotInCollectionsError(market_ticker)
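# A minimal usage sketch (illustrative only, not part of the original module): build a collection
# restricted to exchanges operating in the US, then look up which exchanges trade a given market.
# The 'countries' rule follows the docstring of SpecificCollectionBuilder; the 'BTC/USD' ticker and
# the function name _example_usage are assumptions made for this example.
def _example_usage():
    us_collections = build_specific_collections(countries=['US'], write=True)
    print('%d markets are traded on at least two US exchanges' % len(us_collections))
    print(get_exchanges_for_market('BTC/USD'))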
|
import keras
import json
import os.path
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.optimizers import RMSprop
import os
from os import environ
from keras.callbacks import TensorBoard
from emetrics import EMetrics
import pandas as pd
from math import sqrt
import numpy as np
# from sklearn.preprocessing import MinMaxScaler
###############################################################################
# Set up working directories for data, model and logs.
###############################################################################
model_filename = "oilprice_rnn.h5"
data_filename = "WCOILWTICO.csv"
# writing the train model and getting input data
if environ.get('DATA_DIR') is not None:
input_data_folder = environ.get('DATA_DIR')
input_data_path = os.path.join(input_data_folder, data_filename)
else:
input_data_path= data_filename
if environ.get('RESULT_DIR') is not None:
output_model_folder = os.path.join(os.environ["RESULT_DIR"], "model")
output_model_path = os.path.join(output_model_folder, model_filename)
else:
output_model_folder = "model"
output_model_path = os.path.join("model", model_filename)
os.makedirs(output_model_folder, exist_ok=True)
#writing metrics
if environ.get('JOB_STATE_DIR') is not None:
tb_directory = os.path.join(os.environ["JOB_STATE_DIR"], "logs", "tb", "test")
else:
tb_directory = os.path.join("logs", "tb", "test")
os.makedirs(tb_directory, exist_ok=True)
tensorboard = TensorBoard(log_dir=tb_directory)
###############################################################################
###############################################################################
# Set up HPO.
###############################################################################
config_file = "config.json"
if os.path.exists(config_file):
with open(config_file, 'r') as f:
json_obj = json.load(f)
prev_periods = int(json_obj["prev_periods"])
dropout_rate = float(json_obj["dropout_rate"])
else:
prev_periods = 1
dropout_rate = 0.2
def getCurrentSubID():
if "SUBID" in os.environ:
return os.environ["SUBID"]
else:
return None
def gen_datasets(dataset, prev_periods=1):
dataX, dataY = [], []
for i in range(len(dataset) - prev_periods):
a = dataset[i:(i + prev_periods), 0]
dataX.append(a)
dataY.append(dataset[i + prev_periods, 0])
print(len(dataY))
return np.array(dataX), np.array(dataY)
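# Illustration of the sliding window built by gen_datasets above (hypothetical prices, prev_periods=2):
#   dataset = [[10.], [11.], [12.], [13.]]
#   dataX   = [[10., 11.], [11., 12.]]   # each row holds the previous prev_periods prices
#   dataY   = [12., 13.]                 # the price immediately following each window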
class HPOMetrics(keras.callbacks.Callback):
def __init__(self):
self.emetrics = EMetrics.open(getCurrentSubID())
def on_epoch_end(self, epoch, logs={}):
train_results = {}
test_results = {}
for key, value in logs.items():
if 'val_' in key:
test_results.update({key: value})
else:
train_results.update({key: value})
print('EPOCH ' + str(epoch))
self.emetrics.record("train", epoch, train_results)
self.emetrics.record(EMetrics.TEST_GROUP, epoch, test_results)
def close(self):
self.emetrics.close()
###############################################################################
# data_url = "https://ibm.box.com/shared/static/ojkntksc9rdbrj52yzkqfhbc1c9kv833.csv"
data = pd.read_csv(input_data_path, index_col='DATE')
# Create a scaled version of the data with oil prices normalized between 0 and 1
values = data['WCOILWTICO'].values.reshape(-1,1)
values = values.astype('float32')
#scaler = MinMaxScaler(feature_range=(0, 1))
#scaled = scaler.fit_transform(values)
# turn off scaler to simplify running model on future data
scaled = values
# Split the data between training and testing
# The first 70% of the data is used for training while the remaining 30% is used for validation
train_size = int(len(scaled) * 0.7)
test_size = len(scaled) - train_size
train, test = scaled[0:train_size,:], scaled[train_size:len(scaled),:]
# Generate testing and validation data
trainX, trainY = gen_datasets(train, prev_periods)
testX, testY = gen_datasets(test, prev_periods)
# Reshape into a numpy array of shape (m, 1, prev_periods) where m is the number of training or testing values
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# Build model
lstm_units = 1000
epochs = 50
batch_size = 32
model = Sequential()
model.add(LSTM(lstm_units, input_shape=(trainX.shape[1], trainX.shape[2])))
if dropout_rate > 0.0:
model.add(Dropout(dropout_rate))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])
hpo = HPOMetrics()
history = model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, validation_data=(testX, testY), callbacks=[tensorboard, hpo], shuffle=False)
hpo.close()
print("Training history:" + str(history.history))
# Check out MSE, RMSE, MAE for testing data
testing_error = model.evaluate(testX, testY, verbose=0)
print('Testing error: %.5f MSE (%.5f RMSE) %.5f MAE' % (testing_error[0], sqrt(testing_error[0]), testing_error[1]))
# save the model
model.save(output_model_path)
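# A minimal sketch (an assumption, not part of the original training flow): reload the saved model
# and forecast the next oil price from the most recent test window. keras.models.load_model and
# model.predict are standard Keras APIs; the function name _example_reload_and_predict is hypothetical.
def _example_reload_and_predict():
    reloaded = keras.models.load_model(output_model_path)
    # testX was already reshaped above to (m, 1, prev_periods), so the last slice has shape (1, 1, prev_periods)
    next_price = reloaded.predict(testX[-1:])
    print("Predicted next WTI price: %.2f" % next_price[0][0])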
|
"""This module contains the general information for AdaptorRssProfile ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class AdaptorRssProfileConsts:
pass
class AdaptorRssProfile(ManagedObject):
"""This is AdaptorRssProfile class."""
consts = AdaptorRssProfileConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("AdaptorRssProfile", "adaptorRssProfile", "rss", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'adaptorHostEthIf'], [], ["Get", "Set"]),
"modular": MoMeta("AdaptorRssProfile", "adaptorRssProfile", "rss", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'adaptorHostEthIf'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"receive_side_scaling": MoPropertyMeta("receive_side_scaling", "receiveSideScaling", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
"modular": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"receive_side_scaling": MoPropertyMeta("receive_side_scaling", "receiveSideScaling", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"dn": "dn",
"receiveSideScaling": "receive_side_scaling",
"rn": "rn",
"status": "status",
},
"modular": {
"childAction": "child_action",
"dn": "dn",
"receiveSideScaling": "receive_side_scaling",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.receive_side_scaling = None
self.status = None
ManagedObject.__init__(self, "AdaptorRssProfile", parent_mo_or_dn, **kwargs)
|
from arxiv.submission.domain.agent import User
TITLES = [
(2344371, 'Maximally Rotating Supermassive Stars at the Onset of Collapse: The Perturbative Effects of Gas Pressure, Magnetic Fields, Dark Matter and Dark Energy', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344372, 'Deep learning approach for Fourier ptychography microscopy', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344378, 'Implementing nonlinear Compton scattering beyond the local constant field approximation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344394, 'The role of gravity in the pair creation induced by electric fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344409, 'An analysis of high-frequency cryptocurrencies prices dynamics using permutation-information-theory quantifiers', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344417, 'Floer theory and flips', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344461, 'An Effect of The Radiofrequency Fields Over Saccharomyces Cerevisiae', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344489, 'On the field of moduli of superelliptic curves', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344491, 'Toward an Optimal Quantum Algorithm for Polynomial Factorization over Finite Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344506, 'Are 10 EeV cosmic rays extragalactic? Theory of cosmic ray diffusion at high energy', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344557, 'C-metric solution for conformal gravity with a conformally coupled scalar field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344572, 'On the theory of high-Tc superconductivity of doped cuprates', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344583, 'Controlling spin-orbit interactions in silicon quantum dots using magnetic field direction', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344585, 'An ostentatious model of cosmological scalar-tensor theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344593, 'Measurements and atomistic theory of electron $g$ factor anisotropy for phosphorus donors in strained silicon', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344618, 'Sending or not sending: twin-field quantum key distribution with large misalignment error', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344662, 'Density Estimation with Contaminated Data: Minimax Rates and Theory of Adaptation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344670, 'Non-linear charge oscillation driven by single-cycle light-field in an organic superconductor', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344743, 'Toward a quantitative theory of the hydrodynamic limit', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344768, 'The Needle in the Haystack for Theory of High Temperature Superconductivity', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344792, 'A transition between bouncing hyper-inflation to {\\Lambda}CDM from diffusive scalar fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344794, 'The quantum theory of a closed string', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344796, 'Probing Aqueous Electrolytes with Fourier Spectrum Pulse-Echo Technique', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344798, 'Confronting nuclear equation of state in the presence of dark matter using GW170817 observation in relativistic mean field theory approach', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344873, 'Dynamically probing strongly-coupled field theories with critical point', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344929, 'Galactic Magnetic Field Reconstruction I. Constraints from polarized diffuse emission: Methodology and simulations', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344950, 'Galactic Magnetic Field Reconstruction II. Constraints from polarized thermal dust sky as seen by $Planck$', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2344970, 'Theory of Coulomb Drag in Spatially Inhomogeneous Materials', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345010, 'Efficient Gauss-Newton-Krylov momentum conservation constrained PDE-LDDMM using the band-limited vector field parameterization', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345014, 'Nonrelativistic String Theory and T-Duality', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345067, 'Decay of a Thermofield-Double State in Chaotic Quantum Systems', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345109, 'Convex Optimization Based Bit Allocation for Light Field Compression under Weighting and Consistency Constraints', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345110, 'Smoothness of correlation functions in Liouville Conformal Field Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345129, 'Towards a directed homotopy type theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345148, 'Collective Coordinate Descriptions of Magnetic Domain Wall Motion in Perpendicularly Magnetized Nanostructures under the Application of In-plane Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345150, 'Collective Coordinate Descriptions of Magnetic Domain Wall Motion in Perpendicularly Magnetized Nanostructures under the Application of In-plane Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345209, 'Construction of MDS Self-dual Codes over Finite Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345229, 'Theta and eta polynomials in geometry, Lie theory, and combinatorics', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345238, 'Weak Cosmic Censorship Conjecture in Kerr-(Anti-)de Sitter Black Hole with Scalar Field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345248, 'Contramodules over pro-perfect topological rings, the covering property in categorical tilting theory, and homological ring epimorphisms', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345299, 'Learnable: Theory vs Applications', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345317, 'The importance of scalar fields as extradimensional metric components in Kaluza-Klein models', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345322, 'Theory of Single Susceptibility for Near-field Optics Equally Associated with Scalar and Vector Potentials', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345324, 'Spatially Inhomogeneous Population Dynamics: Beyond the Mean Field Approximation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345404, 'Analytical treatment of the wakefields driven by transversely shaped beams in a planar slow-wave structure', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345420, 'On s-injective and injective ray transforms of tensor fields on surfaces', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345425, 'Solar system science with the Wide-Field InfraRed Survey Telescope (WFIRST)', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345439, 'On the use of machine learning algorithms in the measurement of stellar magnetic fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345446, 'Circuit theory in projective space and homogeneous circuit models', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345447, 'Logical Fallacy of using the Electric Field in Non-resonant Near-field Optics', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345450, 'Generalized Lennard-Jones Potentials, SUSYQM and Differential Galois Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345475, 'A general framework for SPDE-based stationary random fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345482, 'Towards the Theory of the Yukawa Potential', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345521, 'One-Shot Optimal Topology Generation through Theory-Driven Machine Learning', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345617, 'In situ Electric Field Skyrmion Creation in Magnetoelectric Cu$_2$OSeO$_3$', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345619, 'Convergence With Probability 1 in the Method of Expansion of Multiple Ito Stochastic Integrals, Based on Multiple Fourier-Legendre Series', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345690, 'Theory of cavity-modified ground-state chemical reactivity', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345748, 'Estimates on Functional Integrals of Quantum Mechanics and Non-Relativistic Quantum Field Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345750, 'Construction of general symplectic field theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345758, 'A non-perturbative field theory approach for the Kondo effect: Emergence of an extra dimension and its implication for the holographic duality conjecture', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345760, 'Visible -Light-Gated Reconfigurable Rotation of Nanomotors in Electric Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345761, 'Energy condition respecting warp drives: The role of spin in Einstein-Cartan theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345856, 'A user model for JND-based video quality assessment: theory and applications', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345936, 'Exact Embeddings of JT Gravity in Strings and M-theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2345951, 'Exact Embeddings of JT Gravity in Strings and M-theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346034, 'Observation of Light Guiding by Artificial Gauge Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346060, 'Improved Fourier restriction estimates in higher dimensions', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346081, 'Calculation of the Cherenkov fields in the cross-section of a short relativistic bunch', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346157, 'Neutrino propagation in an electron background with an inhomogeneous magnetic field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346210, 'Learning Rigidity in Dynamic Scenes with a Moving Camera for 3D Motion Field Estimation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346217, 'Renormalization of Einstein-Maxwell theory at one-loop', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346230, 'Hodge Decomposition of the wall shear stress vector fields characterizing biological flows', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346248, 'SKIFFS: Superconducting Kinetic Inductance Field-Frequency Sensors for Sensitive Magnetometry in Moderate Background Magnetic Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346291, 'Estimating Failure in Brittle Materials using Graph Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346354, 'A new sum-product estimate in prime fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346434, 'Unsupervised Domain Adaptive Re-Identification: Theory and Practice', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346490, 'Levy Differential Operators and Gauge Invariant Equations for Dirac and Higgs Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346501, 'SDSS IV MaNGA: Characterizing Non-Axisymmetric Motions in Galaxy Velocity Fields Using the Radon Transform', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346524, 'Magnetic exchange and susceptibilities in fcc iron: A supercell dynamical mean-field theory study', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346558, 'The Vlasov-Navier-Stokes equations as a mean field limit', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346567, 'A Behavioural Theory for Interactions in Collective-Adaptive Systems', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346570, 'On growth of the set $A(A+1)$ in arbitrary finite fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346603, 'Contributions to Four-Position Theory with Relative Rotations', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346617, 'The Theory of Bonds II: Closed 6R Linkages with Maximal Genus', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346624, 'The Theory of Bonds: A New Method for the Analysis of Linkages', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346629, 'The observed galaxy bispectrum from single-field inflation in the squeezed limit', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346652, 'A Brief History of Algebra with a Focus on the Distributive Law and Semiring Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346682, 'The Theory of Inflation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346692, 'Gapped Boundary Theory of the Twisted Gauge Theory Model of Three-Dimensional Topological Orders', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346704, 'The Proca Field in Curved Spacetimes and its Zero Mass Limit', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346808, 'Krasovskii-Subbotin approach to mean field type differential games', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346823, 'Free field structure of the model with a spin-$\\frac{3}{2}$ Rarita-Schwinger field directly coupled to a spin-$\\frac{1}{2}$ field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346837, 'Percolation for level-sets of Gaussian free fields on metric graphs', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346856, 'Chern class and Riemann-Roch theorem for cohomology theory without homotopy invariance', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346881, 'Quantum and Classical Lyapunov Exponents in Atom-Field Interaction Systems', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346903, 'Mean Field Equilibria for Resource Competition in Spatial Settings', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346916, 'Anisotropic functional Fourier deconvolution with long-memory dependent errors: a minimax study', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2346973, 'A Simple Model for Non-Abelian T-duality and Double Field Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347084, 'Technical design and commissioning of the sensor net for fine meshed measuring of magnetic field at KATRIN Spectrometer', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347121, 'Learning Rigidity in Dynamic Scenes with a Moving Camera for 3D Motion Field Estimation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347137, 'Learning Rigidity in Dynamic Scenes with a Moving Camera for 3D Motion Field Estimation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347156, 'Learning Rigidity in Dynamic Scenes with a Moving Camera for 3D Motion Field Estimation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347195, 'On Some Topological Properties of Fourier Transforms of Regular Holonomic D-Modules', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347214, 'Equivariant Morita-Takeuchi Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347215, 'Intrinsic structural and electronic properties of the Buffer Layer on Silicon Carbide unraveled by Density Functional Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347270, 'Serre-Tate theory for Calabi-Yau varieties', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347280, 'Differential Weil Descent and Differentially Large Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347286, 'Curvature correction to the field emission current', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347287, 'Quench dynamics of the Ising field theory in a magnetic field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347309, 'Quench dynamics of the Ising field theory in a magnetic field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347321, 'Baryons under Strong Magnetic Fields or in Theories with Space-dependent $\\theta$-term', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347331, 'Implementing nonlinear Compton scattering beyond the local constant field approximation', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347362, 'Fractal AI: A fragile theory of intelligence', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347403, 'Direct 3D Tomographic Reconstruction and Phase-Retrieval of Far-Field Coherent Diffraction Patterns', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347415, 'Strong field QED in lepton colliders and electron/laser interactions', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347422, 'Strong field QED in lepton colliders and electron/laser interactions', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347424, 'Topological and Geometric Universal Thermodynamics in Conformal Field Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347430, 'Experimental signatures of the quantum nature of radiation reaction in the field of an ultra-intense laser', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347441, 'Incompatible deformation field and Riemann curvature tensor', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347442, 'The Electromagnetic Field and Radiation Reaction Force for Point Charged Particle with Magnetic Moment', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347454, 'Visibility of Shafarevich-Tate group of abelian varieties over number field extensions', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347487, 'Thermodynamic laws for populations and quantum coherence: A self-contained introduction to the resource theory approach to thermodynamics', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347499, 'Dense Light Field Reconstruction From Sparse Sampling Using Residual Network', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347504, 'A Survey on the Theory of Bonds', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347506, 'Gradient flow approach to local mean-field spin systems', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347534, 'Recovering P(X) from a canonical complex field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347535, 'Energy transfer from space-time into matter and a bouncing inflation from Covariant Canonical Gauge theory of Gravity', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347552, 'Embedding, simulation and consistency of $\\cal PT$ -symmetric quantum Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347571, 'Geometric Langlands Twists of N = 4 Gauge Theory from Derived Algebraic Geometry', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347583, 'Back Reaction of 4D Conformal Fields on Static Black-Hole Geometry', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347587, 'Hopfield Network based Control and Diagnostics System for Accelerators', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347590, 'Gradient flow approach to local mean-field spin systems', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347594, 'Gradient flow approach to local mean-field spin systems', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347618, 'RF amplification property of the MgO-based magnetic tunnel junction using field-induced ferromagnetic resonance', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347621, 'Spintronic Oscillator Based on Magnetic Field Feedback', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347645, 'A resource theory of entanglement with a unique multipartite maximally entangled state', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347648, 'Pluripotential Theory and Convex Bodies: Large Deviation Principle', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347652, 'The Decoupling of $\\bar\\Omega$ in String Theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347663, 'Probing shear-induced rearrangements in Fourier Space. II. Differential Dynamic Microscopy', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347694, 'Partial Fourier--Mukai transform for integrable systems with applications to Hitchin fibration', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347704, 'A theory of single-shot error correction for adversarial noise', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347757, 'A $\\mathrm{U}(2) \\times \\mathrm{U}(3)$ gauge theory extension of the standard model', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347759, 'Who needs category theory?', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347777, 'Combinatorial aspects of the Legendre and Fourier transforms in perturbative quantum field theory', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347794, 'Hopf Galois structures on separable field extensions of odd prime power degree', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347795, 'Initial conditions for nuclear collisions: theory overview', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347836, 'Superconductor in a weak static gravitational field', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347863, 'Strongly Dependent Ordered Abelian Groups and Henselian Fields', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[])),
(2347886, 'On the variability of the solar mean magnetic field: contributions from various magnetic features on the surface of the Sun', User(native_id=12345, email='foo@baz.com', forename='', surname='', suffix='', identifier=None, affiliation='', endorsements=[]))
]
|
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Qubit reset to computational zero.
"""
from .instruction import Instruction
from .instructionset import InstructionSet
from .quantumcircuit import QuantumCircuit
from .quantumregister import QuantumRegister
class Reset(Instruction):
"""Qubit reset."""
def __init__(self, qubit, circ=None):
"""Create new reset instruction."""
super().__init__("reset", [], [qubit], [], circ)
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.reset(self.qargs[0]))
def reset(self, quantum_register):
    """Reset the qubit(s) in quantum_register to the |0> state."""
    if isinstance(quantum_register, QuantumRegister):
        instructions = InstructionSet()
        for index in range(quantum_register.size):
            instructions.add(self.reset((quantum_register, index)))
        return instructions
    return self._attach(Reset(quantum_register, self))
QuantumCircuit.reset = reset
|
#! /usr/bin/python
try:
import xmlrpclib as xrc
except ImportError:
import xmlrpc.client as xrc
#s = xrc.dumps(("str", 1, True, {"k1": 1, "k2": 2}), "testfunc")
s = xrc.dumps(("<str&#~!>", 1, True), "testfunc")
print(s)
print(xrc.dumps(({},), "testfunc"))
print(xrc.dumps(({"1": "ab", "2": "cd"},), "testfunc"))
svr = xrc.ServerProxy("http://127.0.0.1:2345/rpc")
print(svr.SayHello("12345<>&6"))
print(svr.SayHello2("12345<>&6"))
print(svr.RetStrs("AbCdEf"))
print(svr.RetIntStr("AbCdEf"))
print(svr.RetMapSI("AbCdEf"))
print("RetMapSIF: ", svr.RetMapSIF("AbCdEf"))
print(svr.RetMapSS("AbCdEf"))
print(svr.RetStruct("AbCdEf"))
print(svr.ttt("ttt AbCdEf"))
print(svr.mmm("mmm AbCdEf"))
print(svr.mmm("mmm AbCdEf", 2))
print(svr.mmm("mmm AbCdEf", 12, 3, 4))
print(svr.ddd("ddd AbCdEf", False))
print(svr.ddd("ddd AbCdEf"))
print(svr.rrr("ddd AbCdEf"))
print(svr.rrr("ddd AbCdEf", False))
|
"""empty message
Revision ID: 0058_add_letters_flag
Revises: 0057_change_email_template
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = "0058_add_letters_flag"
down_revision = "0057_change_email_template"
import sqlalchemy as sa
from alembic import op
def upgrade():
op.add_column(
"services",
sa.Column("can_send_letters", sa.Boolean(), nullable=False, server_default=sa.false()),
)
op.add_column(
"services_history",
sa.Column("can_send_letters", sa.Boolean(), nullable=False, server_default=sa.false()),
)
def downgrade():
op.drop_column("services_history", "can_send_letters")
op.drop_column("services", "can_send_letters")
|
from django.db import models
import django
django.setup()
# Create your models here.
class Carouseldata(models.Model):
heading = models.CharField(max_length=30)
text_field = models.CharField(max_length=250)
image = models.FileField(upload_to='uploads/')
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test the fileview interface."""
from grr.gui import runtests_test
from grr.lib import access_control
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class FileViewTestBase(test_lib.GRRSeleniumTest):
pass
class TestFileView(FileViewTestBase):
"""Test the fileview interface."""
def setUp(self):
super(TestFileView, self).setUp()
# Prepare our fixture.
with self.ACLChecksDisabled():
self.CreateFileVersions()
self.GrantClientApproval("C.0000000000000001")
@staticmethod
def CreateFileVersions():
"""Add a new version for a file."""
with test_lib.FakeTime(1333788833):
token = access_control.ACLToken(username="test")
# This file already exists in the fixture, and we overwrite it with a new
# version at 2012-04-07 08:53:53.
fd = aff4.FACTORY.Create(
"aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt",
"AFF4MemoryStream", mode="w", token=token)
fd.Write("Hello World")
fd.Close()
# Create another version of this file at 2012-04-09 16:27:13.
with test_lib.FakeTime(1333988833):
fd = aff4.FACTORY.Create(
"aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt",
"AFF4MemoryStream", mode="w", token=token)
fd.Write("Goodbye World")
fd.Close()
def testFileView(self):
"""Test the fileview interface."""
# This is ugly :( Django gets confused when you import in the wrong order
# though and fileview imports the Django http module so we have to delay
# import until the Django server is properly set up.
# pylint: disable=g-import-not-at-top
from grr.gui.plugins import fileview
# pylint: enable=g-import-not-at-top
# Set up multiple version for an attribute on the client for tests.
with self.ACLChecksDisabled():
for fake_time, hostname in [(1333788833, "HostnameV1"),
(1333888833, "HostnameV2"),
(1333988833, "HostnameV3")]:
with test_lib.FakeTime(fake_time):
client = aff4.FACTORY.Open(u"C.0000000000000001", mode="rw",
token=self.token)
client.Set(client.Schema.HOSTNAME(hostname))
client.Close()
self.Open("/")
self.Type("client_query", "0001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# Go to Browse VFS
self.Click("css=a:contains('Browse Virtual Filesystem')")
# Test the historical view for AFF4 elements.
self.Click("css=*[attribute=HOSTNAME] > ins")
self.WaitUntil(self.AllTextsPresent,
["HostnameV1", "HostnameV2", "HostnameV3"])
self.Click("css=*[attribute=HOSTNAME] > ins")
self.WaitUntilNot(self.IsTextPresent, "HostnameV1")
self.WaitUntilNot(self.IsTextPresent, "HostnameV2")
self.Click("css=#_fs ins.jstree-icon")
self.Click("css=#_fs-os ins.jstree-icon")
self.Click("css=#_fs-os-c ins.jstree-icon")
# Test file versioning.
self.WaitUntil(self.IsElementPresent, "css=#_fs-os-c-Downloads")
self.Click("link=Downloads")
# Verify that we have the latest version in the table by default
self.assertTrue(
"2012-04-09 16:27:13" in self.GetText("css=tr:contains(\"a.txt\")"))
# Click on the row.
self.Click("css=tr:contains(\"a.txt\")")
self.WaitUntilContains("a.txt @ 2012-04-09", self.GetText,
"css=div#main_rightBottomPane h3")
# Check the data in this file.
self.Click("css=#TextView")
self.WaitUntilContains("Goodbye World", self.GetText,
"css=div#text_viewer_data_content")
downloaded_files = []
def FakeDownload(unused_self, request, _):
aff4_path = request.REQ.get("aff4_path")
age = rdfvalue.RDFDatetime(request.REQ.get("age")) or aff4.NEWEST_TIME
downloaded_files.append((aff4_path, age))
return fileview.http.HttpResponse(
content="<script>window.close()</script>")
with utils.Stubber(fileview.DownloadView, "Download", FakeDownload):
# Try to download the file.
self.Click("css=#Download")
self.WaitUntil(self.IsTextPresent, "As downloaded on 2012-04-09 16:27:13")
self.Click("css=button:contains(\"Download\")")
# Click on the version selector.
self.Click("css=tr:contains(\"a.txt\") img.version-selector")
self.WaitUntilContains("Versions of", self.GetText,
"css=.version-selector-dialog h4")
# Select the previous version.
self.Click("css=td:contains(\"2012-04-07\")")
# Now we should have a different time.
self.WaitUntil(self.IsTextPresent, "As downloaded on 2012-04-07 08:53:53")
self.Click("css=button:contains(\"Download\")")
self.WaitUntil(self.IsElementPresent, "css=#TextView")
self.WaitUntil(lambda: len(downloaded_files) == 2)
# Both files should be the same...
self.assertEqual(downloaded_files[0][0],
u"aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt")
self.assertEqual(downloaded_files[1][0],
u"aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt")
# But from different times.
self.assertEqual(downloaded_files[0][1], 1333988833000000)
self.assertEqual(downloaded_files[1][1], 1333788833000000)
self.Click("css=#TextView")
# Make sure the file content has changed. This version has "Hello World" in
# it.
self.WaitUntilContains("Hello World", self.GetText,
"css=div#text_viewer_data_content")
# Some more unicode testing.
self.Click(u"css=tr:contains(\"中.txt\")")
self.Click("css=#Download")
self.WaitUntil(self.IsTextPresent, u"fs/os/c/Downloads/中国新闻网新闻中.txt")
# Test the hex viewer.
self.Click("css=#_fs-os-proc ins.jstree-icon")
self.Click("css=#_fs-os-proc-10 a")
self.Click("css=span[type=subject]:contains(\"cmdline\")")
target_aff4_path = "aff4:/C.0000000000000001/fs/os/proc/10/cmdline"
self.Click("css=[state-aff4_path='%s'] > li > #HexView" % target_aff4_path)
for i, value in enumerate(
"6c 73 00 68 65 6c 6c 6f 20 77 6f 72 6c 64 27 00 2d 6c".split(" ")):
self.WaitUntilEqual(value, self.GetText,
"css=#hex_area tr:first td:nth(%d)" % i)
for i, value in enumerate(
"l s . h e l l o w o r l d ' . - l".split(" ")):
self.WaitUntilEqual(value, self.GetText,
"css=#data_area tr:first td:nth(%d)" % i)
self.Click("css=a[renderer=\"AFF4Stats\"]")
# Navigate to the bin C.0000000000000001 directory
self.Click("link=bin C.0000000000000001")
# Filter the table for bash (should match both bash and rbash)
self.WaitUntil(self.IsElementPresent, "css=td:contains('bash')")
self.Click("css=th:contains('Name') img")
self.Type("css=.sort-dialog input[type=text]", "bash", end_with_enter=True)
self.WaitUntilEqual("rbash", self.GetText, "css=tr:nth(2) span")
self.assertEqual(
2, self.GetCssCount("css=#main_rightTopPane tbody > tr"))
self.assertEqual("bash", self.GetText("css=tr:nth(1) span"))
self.assertEqual("rbash", self.GetText("css=tr:nth(2) span"))
    # Check that the previous search text is still available in the form.
self.Click("css=th:contains('Name') img")
self.assertEqual("bash", self.GetValue("css=.sort-dialog input"))
    # If we anchor cat at the start, it should only filter one.
self.Type("css=.sort-dialog input[type=text]", "^cat", end_with_enter=True)
self.WaitUntilEqual("cat", self.GetText, "css=tr:nth(1) span")
self.assertEqual(
1, self.GetCssCount("css=#main_rightTopPane tbody > tr"))
self.Click("css=tr:nth(1)")
self.WaitUntilContains(
"aff4:/C.0000000000000001/fs/os/c/bin C.0000000000000001/cat",
self.GetText, "css=.tab-content h3")
self.WaitUntil(self.IsTextPresent, "1026267") # st_inode.
# Lets download it.
self.Click("Download")
self.Click("css=button:contains(\"Get a new Version\")")
self.Click("path_0")
self.WaitUntilEqual("fs", self.GetText, "css=tr td span:contains(fs)")
self.Click("Stats")
self.WaitUntilContains(
"aff4:/C.0000000000000001", self.GetText, "css=.tab-content h3")
# Grab the root directory again - should produce an Interrogate flow.
self.Click("css=button[id^=refresh]")
# Go to the flow management screen.
self.Click("css=a:contains('Manage launched flows')")
# For the client update, 2 flows have to be issued: UpdateVFSFile and
# Interrogate. UpdateVFSFile triggers VFSGRRClient.Update() method which
# triggers Interrogate.
self.WaitUntilEqual("Interrogate", self.GetText,
"//table/tbody/tr[1]/td[3]")
self.WaitUntilEqual("UpdateVFSFile", self.GetText,
"//table/tbody/tr[2]/td[3]")
self.Click("//table/tbody/tr[2]/td[3]")
self.WaitUntilEqual(
"aff4:/C.0000000000000001", self.GetText,
"css=table > tbody td.proto_key:contains(\"Vfs file urn\") "
"~ td.proto_value")
# Check that UpdateVFSFile is called for the cat file.
    # During the test this file is VFSMemoryFile, so its Update method does
# nothing, therefore UpdateVFSFile won't issue any other flows.
self.WaitUntilEqual("UpdateVFSFile", self.GetText,
"//table/tbody/tr[3]/td[3]")
self.Click("//table/tbody/tr[3]/td[3]")
self.WaitUntilContains(
"cat", self.GetText,
"css=table > tbody td.proto_key:contains(\"Vfs file urn\") "
"~ td.proto_value")
def testExportToolHintIsDisplayed(self):
self.Open("/#c=C.0000000000000001&main=VirtualFileSystemView")
self.Click("css=li[path='/fs'] > a")
self.Click("css=li[path='/fs/os'] > a")
self.Click("css=li[path='/fs/os/c'] > a")
self.Click("css=li[path='/fs/os/c/Downloads'] > a")
# Click on the row and on the Download tab.
self.Click("css=tr:contains(\"a.txt\")")
self.Click("css=#Download")
# Check that export tool download hint is displayed.
self.WaitUntil(
self.IsTextPresent, "/usr/bin/grr_export "
"--username test --reason 'Running tests' file --path "
"aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt --output .")
def testUpdateButton(self):
self.Open("/")
self.Type("client_query", "0001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# Go to Browse VFS
self.Click("css=a:contains('Browse Virtual Filesystem')")
self.Click("css=#_fs ins.jstree-icon")
self.Click("css=#_fs-os ins.jstree-icon")
self.Click("link=c")
# Ensure that refresh button is enabled
self.WaitUntilNot(self.IsElementPresent,
"css=button[id^=refresh][disabled]")
# Grab the root directory again - should produce an Interrogate flow.
self.Click("css=button[id^=refresh]")
# Check that the button got disabled
self.WaitUntil(self.IsElementPresent,
"css=button[id^=refresh][disabled]")
# Get the flows that should have been started and finish them.
with self.ACLChecksDisabled():
client_id = rdfvalue.ClientURN("C.0000000000000001")
fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(fd.ListChildren())
client_mock = action_mocks.ActionMock()
for flow_urn in flows:
for _ in test_lib.TestFlowHelper(
flow_urn, client_mock, client_id=client_id, token=self.token,
check_flow_errors=False):
pass
# Ensure that refresh button is enabled again.
#
# TODO(user): ideally, we should also check that something got
# updated, not only that button got enabled back.
self.WaitUntilNot(self.IsElementPresent,
"css=button[id^=refresh][disabled]")
def testRecursiveListDirectory(self):
"""Tests that Recursive Refresh button triggers correct flow."""
self.Open("/")
self.Type("client_query", "0001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# Go to Browse VFS
self.Click("css=a:contains('Browse Virtual Filesystem')")
self.Click("css=#_fs ins.jstree-icon")
self.Click("css=#_fs-os ins.jstree-icon")
self.Click("link=c")
# Perform recursive refresh
self.Click("css=button[id^=recursive_refresh]")
self.WaitUntil(self.IsTextPresent, "Recursive Refresh")
self.WaitUntil(self.IsTextPresent, "Max depth")
self.Type("css=input[id=v_-max_depth]", "423")
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Refresh started successfully!")
self.Click("css=button[name=Cancel]")
# Go to "Manage Flows" tab and check that RecursiveListDirectory flow has
# been created.
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=td:contains('RecursiveListDirectory')")
self.WaitUntil(self.IsElementPresent,
"css=.tab-content td.proto_value:contains('/c')")
self.WaitUntil(self.IsElementPresent,
"css=.tab-content td.proto_value:contains(423)")
def testFileViewHasResultsTabForRDFValueCollection(self):
collection_urn = "aff4:/C.0000000000000001/analysis/SomeFlow/results"
with self.ACLChecksDisabled():
with aff4.FACTORY.Create(
collection_urn, "RDFValueCollection", token=self.token) as fd:
fd.Add(rdfvalue.StatEntry(aff4path="aff4:/some/unique/path"))
self.GrantClientApproval("C.0000000000000001")
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Browse Virtual Filesystem')")
self.Click("css=li[path='/analysis'] > a")
self.Click("css=li[path='/analysis/SomeFlow'] > a")
self.Click("css=tr:contains('results')")
# The Results tab should appear and there should be no HexView and TextView
# and Download tabs.
self.WaitUntil(self.IsElementPresent, "css=#Results")
self.WaitUntilNot(self.IsElementPresent, "css=#DownloadView")
self.WaitUntilNot(self.IsElementPresent, "css=#FileTextViewer")
self.WaitUntilNot(self.IsElementPresent, "css=#FileHexViewer")
# Click on the Results tab and check that the StatEntry we added before is
# there.
self.Click("css=#Results")
self.WaitUntil(self.IsTextPresent, "aff4:/some/unique/path")
def testFileViewDoesNotHaveExportTabWhenCollectionHasNoFiles(self):
collection_urn = "aff4:/C.0000000000000001/analysis/SomeFlow/results"
with self.ACLChecksDisabled():
with aff4.FACTORY.Create(
collection_urn, "RDFValueCollection", token=self.token) as fd:
fd.Add(rdfvalue.NetworkConnection(pid=42))
self.GrantClientApproval("C.0000000000000001")
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Browse Virtual Filesystem')")
self.Click("css=li[path='/analysis'] > a")
self.Click("css=li[path='/analysis/SomeFlow'] > a")
self.Click("css=tr:contains('results')")
# The Results tab should appear, but the "Export" tab should be
# disabled since we only display export hint when we have collections of
# StatEntries or FileFinderResults.
self.WaitUntil(self.IsElementPresent, "css=#Export.disabled")
def CheckExportTabIsPresent(self):
self.Open("/#c=C.0000000000000001")
self.Click("css=a:contains('Browse Virtual Filesystem')")
self.Click("css=li[path='/analysis'] > a")
self.Click("css=li[path='/analysis/SomeFlow'] > a")
self.Click("css=tr:contains('results')")
# 'Export' tab should be there, since we're dealing with StatEntries.
self.Click("css=#Export")
self.WaitUntil(self.IsTextPresent,
"--username test --reason 'Running tests' collection_files "
"--path aff4:/C.0000000000000001/analysis/SomeFlow/results")
def testFileViewHasExportTabWhenCollectionHasStatEntries(self):
collection_urn = "aff4:/C.0000000000000001/analysis/SomeFlow/results"
with self.ACLChecksDisabled():
with aff4.FACTORY.Create(
collection_urn, "RDFValueCollection", token=self.token) as fd:
fd.Add(rdfvalue.StatEntry(aff4path="aff4:/some/unique/path"))
self.GrantClientApproval("C.0000000000000001")
self.CheckExportTabIsPresent()
def testFileViewHasExportTabWhenCollectionHasFileFinderResults(self):
collection_urn = "aff4:/C.0000000000000001/analysis/SomeFlow/results"
with self.ACLChecksDisabled():
with aff4.FACTORY.Create(
collection_urn, "RDFValueCollection", token=self.token) as fd:
fd.Add(rdfvalue.FileFinderResult(
stat_entry=rdfvalue.StatEntry(aff4path="aff4:/some/unique/path")))
self.GrantClientApproval("C.0000000000000001")
self.CheckExportTabIsPresent()
def testDoubleClickGoesInsideDirectory(self):
"""Tests that double click in FileTable goes inside the directory."""
self.Open("/")
self.Type("client_query", "0001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1 and go to 'Browse Virtual Filesystem'
self.Click("css=td:contains('0001')")
self.Click("css=a:contains('Browse Virtual Filesystem')")
# Now click on "/fs" inside the table. Tree shouldn't get updated,
# so click on "/registry".
self.Click("css=td:contains('/fs')")
self.Click("css=td:contains('/registry')")
# Now double click on "/fs".
self.DoubleClick("css=td:contains('/fs')")
# Now we should be inside the folder, and the tree should open.
self.WaitUntil(self.IsElementPresent,
"css=#_fs-os ins.jstree-icon")
# Check that breadcrumbs got updated.
self.WaitUntil(self.IsElementPresent,
"css=#main_rightTopPane .breadcrumb li:contains('fs')")
class TestHostInformation(FileViewTestBase):
"""Test the host information interface."""
def setUp(self):
super(TestHostInformation, self).setUp()
self.client_id = "C.0000000000000001"
with self.ACLChecksDisabled():
self.GrantClientApproval(self.client_id)
with aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token) as fd:
fd.Set(fd.Schema.USER, rdfvalue.Users())
def testClickingOnPlusOpensHistoricalAttributes(self):
"""Test the fileview interface."""
self.Open("/#c=" + self.client_id)
self.WaitUntil(self.IsTextPresent, "VFSGRRClient")
# We removed all the users, so no 'Steve O'Bryan' should be visible.
self.WaitUntilNot(self.IsTextPresent, "Steve O'Bryan")
# We click on '+' in USER cell and should see historical values of the
# USER attribute. "Steve O'Bryan" was full name of a user that we've
# deleted.
self.Click("css=td.attribute_opener[attribute=USER]")
self.WaitUntil(self.IsTextPresent, "Steve O'Bryan")
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
import numpy as np
import pandas as pd
from libreco.data import random_split, DatasetPure
from libreco.algorithms import SVDpp # pure data, algorithm SVD++
# remove unnecessary tensorflow logging
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["KMP_WARNINGS"] = "FALSE"
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
if __name__ == "__main__":
data = pd.read_csv("sample_data/sample_movielens_rating.dat", sep="::",
names=["user", "item", "label", "time"])
    # split the whole dataset into three parts for training, evaluation and testing
train_data, eval_data, test_data = random_split(
data, multi_ratios=[0.8, 0.1, 0.1])
train_data, data_info = DatasetPure.build_trainset(train_data)
eval_data = DatasetPure.build_testset(eval_data)
test_data = DatasetPure.build_testset(test_data)
print(data_info) # n_users: 5894, n_items: 3253, data sparsity: 0.4172 %
svdpp = SVDpp(task="rating", data_info=data_info, embed_size=16,
n_epochs=3, lr=0.001, reg=None, batch_size=256)
# monitor metrics on eval_data during training
svdpp.fit(train_data, verbose=2, eval_data=eval_data,
metrics=["rmse", "mae", "r2"])
# do final evaluation on test data
svdpp.evaluate(test_data, metrics=["rmse", "mae"])
# predict preference of user 1 to item 2333
print("prediction: ", svdpp.predict(user=1, item=2333))
# recommend 7 items for user 1
print("recommendation: ", svdpp.recommend_user(user=1, n_rec=7))
|
from django.conf import settings
from django.http import HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils import translation
from django.views.generic.edit import CreateView
from django.views.generic.detail import BaseDetailView
from django.contrib.auth.decorators import login_required
from djgeojson.views import GeoJSONLayerView
from mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList, MapEntityFormat,
MapEntityDetail, MapEntityMapImage, MapEntityDocument, MapEntityCreate, MapEntityUpdate, MapEntityDelete,
LastModifiedMixin, JSONResponseMixin, DocumentConvert)
from mapentity.serializers import plain_text
from mapentity.helpers import alphabet_enumeration
from paperclip.models import Attachment
from geotrek.core.views import CreateFromTopologyMixin
from geotrek.core.models import AltimetryMixin
from geotrek.common.views import FormsetMixin
from geotrek.zoning.models import District, City, RestrictedArea
from .models import Trek, POI, WebLink
from .filters import TrekFilterSet, POIFilterSet
from .forms import TrekForm, TrekRelationshipFormSet, POIForm, WebLinkCreateFormPopup
from .serializers import TrekGPXSerializer
class FlattenPicturesMixin(object):
def get_template_names(self):
""" Due to bug in Django, providing get_queryset() method hides
template_names lookup.
https://code.djangoproject.com/ticket/17484
"""
opts = self.get_model()._meta
extra = ["%s/%s%s.html" % (opts.app_label, opts.object_name.lower(), self.template_name_suffix)]
return extra + super(FlattenPicturesMixin, self).get_template_names()
def get_queryset(self):
""" Override queryset to avoid attachment lookup while serializing.
It will fetch attachments, and force ``pictures`` attribute of instances.
"""
app_label = self.get_model()._meta.app_label
model_name = self.get_model()._meta.object_name.lower()
attachments = Attachment.objects.filter(content_type__app_label=app_label,
content_type__model=model_name)
pictures = {}
for attachment in attachments:
if attachment.is_image:
obj_id = attachment.object_id
pictures.setdefault(obj_id, []).append(attachment)
for obj in super(FlattenPicturesMixin, self).get_queryset():
obj.pictures = pictures.get(obj.id, [])
yield obj
class TrekLayer(MapEntityLayer):
properties = ['name', 'published']
queryset = Trek.objects.existing()
class TrekList(FlattenPicturesMixin, MapEntityList):
queryset = Trek.objects.existing()
filterform = TrekFilterSet
columns = ['id', 'name', 'duration', 'difficulty', 'departure', 'thumbnail']
class TrekJsonList(MapEntityJsonList, TrekList):
pass
class TrekJsonDetail(LastModifiedMixin, JSONResponseMixin, BaseDetailView):
queryset = Trek.objects.existing()
columns = ['name', 'slug', 'departure', 'arrival', 'duration', 'duration_pretty', 'description',
'description_teaser'] + AltimetryMixin.COLUMNS + ['published', 'published_status',
'networks', 'advice', 'ambiance', 'difficulty',
'information_desks', 'information_desk', # singular: retro-compat
'themes', 'usages', 'access', 'route', 'public_transport', 'advised_parking',
'web_links', 'is_park_centered', 'disabled_infrastructure',
'parking_location', 'thumbnail', 'pictures',
'cities', 'districts', 'relationships', 'map_image_url',
'elevation_area_url', 'points_reference']
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrekJsonDetail, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
ctx = {}
for fname in self.columns:
ctx[fname] = getattr(self.object, 'serializable_%s' % fname,
getattr(self.object, fname))
trek = self.get_object()
ctx['altimetric_profile'] = reverse('trekking:trek_profile', args=(trek.pk,))
ctx['poi_layer'] = reverse('trekking:trek_poi_geojson', args=(trek.pk,))
ctx['information_desk_layer'] = reverse('trekking:trek_information_desk_geojson', args=(trek.pk,))
ctx['filelist_url'] = reverse('get_attachments',
kwargs={'app_label': 'trekking',
'module_name': 'trek',
'pk': trek.pk})
ctx['gpx'] = reverse('trekking:trek_gpx_detail', args=(trek.pk,))
ctx['kml'] = reverse('trekking:trek_kml_detail', args=(trek.pk,))
ctx['printable'] = reverse('trekking:trek_printable', args=(trek.pk,))
return ctx
class TrekFormatList(MapEntityFormat, TrekList):
columns = set(TrekList.columns + TrekJsonDetail.columns + ['related', 'pois']) - set(['relationships', 'thumbnail', 'map_image_url', 'slug'])
class TrekGPXDetail(LastModifiedMixin, BaseDetailView):
queryset = Trek.objects.existing()
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrekGPXDetail, self).dispatch(*args, **kwargs)
def render_to_response(self, context):
gpx_serializer = TrekGPXSerializer()
response = HttpResponse(mimetype='application/gpx+xml')
response['Content-Disposition'] = 'attachment; filename=trek-%s.gpx' % self.get_object().pk
gpx_serializer.serialize([self.get_object()], stream=response, geom_field='geom')
return response
class TrekKMLDetail(LastModifiedMixin, BaseDetailView):
queryset = Trek.objects.existing()
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrekKMLDetail, self).dispatch(*args, **kwargs)
def render_to_response(self, context):
trek = self.get_object()
response = HttpResponse(trek.kml(),
content_type='application/vnd.google-earth.kml+xml')
return response
class TrekPOIGeoJSON(LastModifiedMixin, GeoJSONLayerView):
model = Trek # for LastModifiedMixin
srid = settings.API_SRID
pk_url_kwarg = 'pk'
properties = {'pk':'pk', 'name':'name', 'description':'description',
'max_elevation':'elevation', 'serializable_thumbnail':'thumbnail',
'serializable_type':'type', 'serializable_pictures':'pictures'}
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrekPOIGeoJSON, self).dispatch(*args, **kwargs)
def get_queryset(self):
try:
trek_pk = self.kwargs.get(self.pk_url_kwarg)
trek = Trek.objects.get(pk=trek_pk)
except Trek.DoesNotExist:
raise Http404
# All POIs of this trek
return trek.pois.select_related('type')
class TrekInformationDeskGeoJSON(LastModifiedMixin, GeoJSONLayerView):
model = Trek
srid = settings.API_SRID
pk_url_kwarg = 'pk'
properties = ['id', 'name', 'description', 'photo_url', 'phone',
'email', 'website', 'street', 'postal_code', 'municipality',
'latitude', 'longitude']
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrekInformationDeskGeoJSON, self).dispatch(*args, **kwargs)
def get_queryset(self):
try:
trek_pk = self.kwargs.get(self.pk_url_kwarg)
trek = Trek.objects.get(pk=trek_pk)
except Trek.DoesNotExist:
raise Http404
return trek.information_desks.all()
class TrekDetail(MapEntityDetail):
queryset = Trek.objects.existing()
@property
def icon_sizes(self):
return {
'POI': settings.TREK_ICON_SIZE_POI,
'parking': settings.TREK_ICON_SIZE_PARKING,
'information_desk': settings.TREK_ICON_SIZE_INFORMATION_DESK
}
def dispatch(self, *args, **kwargs):
lang = self.request.GET.get('lang')
if lang:
translation.activate(lang)
self.request.LANGUAGE_CODE = lang
return super(TrekDetail, self).dispatch(*args, **kwargs)
class TrekMapImage(MapEntityMapImage):
model = Trek
class TrekDocument(MapEntityDocument):
model = Trek
class TrekDocumentPublic(TrekDocument):
template_name_suffix = "_public"
def get_context_data(self, **kwargs):
context = super(TrekDocumentPublic, self).get_context_data(**kwargs)
trek = self.get_object()
context['object'] = trek
context['trek'] = trek
context['mapimage_ratio'] = settings.TREK_EXPORT_MAP_IMAGE_SIZE
context['headerimage_ratio'] = settings.TREK_EXPORT_HEADER_IMAGE_SIZE
information_desks = list(trek.information_desks.all())
if settings.TREK_EXPORT_INFORMATION_DESK_LIST_LIMIT > 0:
information_desks = information_desks[:settings.TREK_EXPORT_INFORMATION_DESK_LIST_LIMIT]
context['information_desks'] = information_desks
pois = list(trek.pois)
if settings.TREK_EXPORT_POI_LIST_LIMIT > 0:
pois = pois[:settings.TREK_EXPORT_POI_LIST_LIMIT]
context['pois'] = pois
# Replace HTML text with plain text
for attr in ['description', 'description_teaser', 'ambiance', 'advice', 'access',
'public_transport', 'advised_parking', 'disabled_infrastructure']:
setattr(trek, attr, plain_text(getattr(trek, attr)))
for poi in context['pois']:
setattr(poi, 'description', plain_text(getattr(poi, 'description')))
#
# POIs enumeration, like shown on the map
# https://github.com/makinacorpus/Geotrek/issues/871
enumeration = {}
letters = alphabet_enumeration(len(trek.pois))
for i, p in enumerate(trek.pois):
enumeration[p.pk] = letters[i]
context['enumeration'] = enumeration
return context
def render_to_response(self, context, **response_kwargs):
trek = self.get_object()
# Use attachment that overrides document print, if any.
try:
overriden = trek.get_attachment_print()
response = HttpResponse(mimetype='application/vnd.oasis.opendocument.text')
with open(overriden, 'rb') as f:
response.write(f.read())
return response
except ObjectDoesNotExist:
pass
# Prepare altimetric graph
trek.prepare_elevation_chart(self.request.build_absolute_uri('/'))
return super(TrekDocumentPublic, self).render_to_response(context, **response_kwargs)
class TrekPrint(DocumentConvert):
queryset = Trek.objects.existing()
def source_url(self):
return self.get_object().get_document_public_url()
class TrekRelationshipFormsetMixin(FormsetMixin):
context_name = 'relationship_formset'
formset_class = TrekRelationshipFormSet
class TrekCreate(TrekRelationshipFormsetMixin, CreateFromTopologyMixin, MapEntityCreate):
model = Trek
form_class = TrekForm
class TrekUpdate(TrekRelationshipFormsetMixin, MapEntityUpdate):
queryset = Trek.objects.existing()
form_class = TrekForm
class TrekDelete(MapEntityDelete):
model = Trek
class POILayer(MapEntityLayer):
queryset = POI.objects.existing()
properties = ['name']
class POIList(FlattenPicturesMixin, MapEntityList):
queryset = POI.objects.existing()
filterform = POIFilterSet
columns = ['id', 'name', 'type', 'thumbnail']
class POIJsonList(MapEntityJsonList, POIList):
pass
class POIFormatList(MapEntityFormat, POIList):
columns = set(POIList.columns + ['description', 'treks', 'districts', 'cities', 'areas'])
def get_queryset(self):
qs = super(POIFormatList, self).get_queryset()
denormalized = {}
        # Since land layers should have fewer records, start with them.
land_layers = [('districts', District),
('cities', City),
('areas', RestrictedArea)]
for attrname, land_layer in land_layers:
denormalized[attrname] = {}
for d in land_layer.objects.all():
overlapping = POI.objects.existing().filter(geom__within=d.geom)
for pid in overlapping.values_list('id', flat=True):
denormalized[attrname].setdefault(pid, []).append(d)
# Same for treks
denormalized['treks'] = {}
for d in Trek.objects.existing():
            for pid in d.pois.values_list('id', flat=True):
denormalized['treks'].setdefault(pid, []).append(d)
for poi in qs:
# Put denormalized in specific attribute used in serializers
for attrname in denormalized.keys():
overlapping = denormalized[attrname].get(poi.id, [])
setattr(poi, '%s_csv_display' % attrname, overlapping)
yield poi
class POIDetail(MapEntityDetail):
queryset = POI.objects.existing()
class POIDocument(MapEntityDocument):
model = POI
class POICreate(MapEntityCreate):
model = POI
form_class = POIForm
class POIUpdate(MapEntityUpdate):
queryset = POI.objects.existing()
form_class = POIForm
class POIDelete(MapEntityDelete):
model = POI
class WebLinkCreatePopup(CreateView):
model = WebLink
form_class = WebLinkCreateFormPopup
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(WebLinkCreatePopup, self).dispatch(*args, **kwargs)
def form_valid(self, form):
self.object = form.save()
return HttpResponse("""
<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script>
""" % (escape(form.instance._get_pk_val()), escape(form.instance)))
|
"""
This file contains the code for the task that gets details of an IPO.
"""
import json
from core.constants import GREET_MESSAGE, REDIS_HASHES, DATA_STR, V1_DATA_STR, PAYMENTS_LINK, INFO_MESSAGE, CREATORS_LINK_1, CREATORS_LINK_2
from redis_conf import RedisConf
from scrapers.mybot import MyBot
def fetch_ipo_details():
# command description used in the "help" command
commands = {
'/start': 'Get used to the bot\n',
'/help': 'Lookup available commands \n',
'/list': 'List all IPOs\n',
'/list_v1': 'List IPOs which do not have an RHP doc\n',
'/rhp': 'Use this command along with the company name. For example, /rhp zomato. You will receive the RHP '
'documents related to that IPO. \n',
'/donate': 'Donate 100 rupees if you like this service, it aids in paying for cloud services.\n',
'/contribute': 'Contribute to this project!\n',
'/contact': 'Contact information for feedback and queries.\n',
'/info': 'About the creators.'
}
# start the bot
bot = MyBot.create_bot()
redis_client = RedisConf.create_connection_to_redis_server(True)
@bot.message_handler(commands=['start'])
def send_welcome(message):
print('✅ Received command from {}'.format(message.chat.id))
        # Check whether this is a new or an existing user; user data is stored in Redis
if RedisConf.check_if_exists(redis_client, str(message.chat.id), REDIS_HASHES['users']) == 1:
bot.send_message(message.chat.id, GREET_MESSAGE)
bot.send_message(message.chat.id, "This is your first time using this bot!")
bot.send_message(message.chat.id, '🖊 If you would like to see all IPOs, run /list')
RedisConf.store_in_redis(redis_client, str(message.chat.id), str(message.chat.id), REDIS_HASHES['users'])
command_help(message)
else:
print('{} is an existing user!'.format(message.chat.id))
bot.send_message(message.chat.id, '✋✋ Welcome Back! \n')
bot.send_message(message.chat.id, "To view commands, run " + '/help')
bot.send_message(message.chat.id, '🖊 If you would like to see current and upcoming IPOs, run /list')
# bot.send_message(message.chat.id, IPOScraper.ipo_scraper())
# this gives you the data.
# Help
@bot.message_handler(commands=['help'])
def command_help(m):
print('✅ Received command from {}'.format(m.chat.id))
cid = m.chat.id
bot.send_message(cid, '📚 Welcome to bot help! You will find everything you need to get started over here! ')
help_text = "🖊 The following commands are available: \n\n"
for key in commands: # generate help text out of the commands dictionary defined at the top
help_text += key + ": "
help_text += commands[key] + "\n"
bot.send_message(cid, help_text) # send the generated help page
# DOC Sharing handler
    # Should return a link to the red herring prospectus or the zip file itself (approx. 7 MB)
    # doc_request takes a message like "RHP Zomato" instead of a /docs-style command
def doc_request(message):
request = message.text.split()
        if len(request) < 2 or request[0].lower().lstrip("/") != "rhp":
            return False
        else:
            return True
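    # Illustrative note (assumption about typical usage, not from the original
    # code): with the check above, messages such as "rhp zomato" or "/rhp Zomato"
    # reach send_docs below, while a bare "rhp" or an unrelated one-word message
    # is filtered out.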
    # if the message typed by the user is "RHP <<company name>>" then send_docs runs
@bot.message_handler(func=doc_request)
def send_docs(message):
        # connect to the redis hash
print('✅ Received command for RHP from {}'.format(message.chat.id))
response, data = RedisConf.read_from_redis(r_client=redis_client, hash_name=REDIS_HASHES['ipo_details_v2'])
if response == 1:
print('❌ Cannot fetch RHP details from redis')
return
if not data:
print('❌ Cannot fetch RHP details from redis')
return
        request = message.text.split()[1].lower()
        # logic for getting links to the red-herring prospectus; should report
        # that no document is available if one cannot be found
        found = False
        for item in data:
            company_name_list = [word.lower() for word in item['Issuer Company'].split()]
            if request in company_name_list or request == item['Issuer Company'].lower():
found = True
try:
val = item['Red Herring Prospectus']
bot.send_message(message.chat.id, val[2:-2])
except Exception as e:
bot.send_message(message.chat.id, '❌ Could not find RHP details for this company.')
print(e)
else:
continue
if not found:
print('❌ Could not find company.')
# if RedisConf.check_if_exists(redis_client, request, 'IPO_DETAILS_V2') == 0:
# print('❌ Could not find company. ')
# for key in data:
# if key['Issuer Company'] == request:
# bot.send_message(message.chat.id, key['Red Herring Prospectus'][1])
#
# else:
# bot.send_message(message.chat.id, '❌ Could not find RH Prospectus for {}'.format(request))
# else:
# bot.send_message(message.chat.id, '❌ Please enter a valid company name (Full as stated in \list): ')
# if we can send doc then we use bot.send_document else just a link
# Subscriptions to IPO
def sub_ipo(message):
request = message.text.split()
        if len(request) < 2 or request[0].lower().lstrip("/") != "docs":
            return False
        else:
            return True
    # if the message typed by the user is "Docs <<company name>>" then add_ipo runs
@bot.message_handler(func=sub_ipo)
def add_ipo(message):
request = message.text.split()[1]
# logic
# dummy message for testing
        bot.send_message(message.chat.id,
                         f"You have subscribed to {request}.\nYou will now receive notifications when events "
                         "take place.")
    # if we can't do subscriptions then at least we need to show the timeline for that IPO; I'll look into it
@bot.message_handler(commands=['Subscriptions'])
def command_show_subscriptions(m):
cid = m.chat.id
# for cid key in redis display all subscriptions
bot.send_message(cid, "Your subscriptions are:")
@bot.message_handler(commands=['notify'])
def notify(message):
print('✅ Received command from {}'.format(message.chat.id))
message_id = message.chat.id
if RedisConf.check_if_exists(redis_client, str(message_id), REDIS_HASHES['notifications']) == 0:
bot.send_message(message_id, '❗ You have already opted for notifications! You will get an update whenever '
'there is one. ')
elif RedisConf.check_if_exists(redis_client, str(message_id), REDIS_HASHES['users']) == 0:
RedisConf.store_in_redis(redis_client, str(message_id), str(message_id), REDIS_HASHES['notifications'])
bot.send_message(message_id,
'Congratulations! 👏 You will now be notified whenever a new IPO is available!')
@bot.message_handler(commands=['list'])
def ipo_list(message):
print('✅ Received command from {}'.format(message.chat.id))
response, data = RedisConf.read_from_redis(r_client=redis_client, hash_name=REDIS_HASHES['ipo_details_v2'])
if response == 1:
print('❌ Cannot fetch details from redis')
return
if not data:
print('❌ Cannot fetch details from redis')
return
for i in range(len(data)):
item = data[i]
data_str = DATA_STR.format(
item['Issuer Company'],
item['Open'],
item['Close'],
item['Lot Size'],
item['Issue Price'],
item['Cost of 1 lot'],
# item['Red Herring Prospectus']
)
bot.send_message(message.chat.id, data_str)
@bot.message_handler(commands=['contribute'])
def contribute(message):
print('✅ Received command from {}'.format(message.chat.id))
bot.send_message(message.chat.id, 'If you would like to contribute to this project, please visit this link: '
'https://github.com/aaditya2200/IPO-proj')
bot.send_message(message.chat.id, 'If there is anything we can change, let us know by sending an email. You '
'can find contact info on GitHub. 📧📨')
@bot.message_handler(commands=['donate'])
def donate(message):
        bot.send_message(message.chat.id, '💰 You can donate an amount of 100 rupees at the link below. If you would like to donate '
                                          'a different amount, please contact us. See /contact for more.')
bot.send_message(message.chat.id, PAYMENTS_LINK)
@bot.message_handler(commands=['contact'])
def contact(message):
bot.send_message(message.chat.id, 'mailto:oneipo941@gmail.com')
@bot.message_handler(commands=['list_v1'])
def list_all(message):
print('✅ Received command from {}'.format(message.chat.id))
response, data = RedisConf.read_from_redis(r_client=redis_client, hash_name=REDIS_HASHES['current_ipo_details'])
if response == 1:
print('❌ Cannot fetch details from redis')
return
if not data:
print('❌ Cannot fetch details from redis')
return
for i in range(len(data)):
item = data[i]
data_str = V1_DATA_STR.format(
item['Issuer Company'],
item['Exchange'],
item['Open'],
item['Close'],
item['Lot Size'],
item['Issue Price (Rs)'],
item['Issue Price (Rs. Cr.)']
)
bot.send_message(message.chat.id, data_str)
@bot.message_handler(commands=['info'])
def info(message):
bot.send_message(message.chat.id, INFO_MESSAGE)
bot.send_message(message.chat.id, CREATORS_LINK_1)
bot.send_message(message.chat.id, CREATORS_LINK_2)
print('👂 Listening for messages')
bot.polling()
print('\n✅ Successfully completed the task.')
return
|
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import lxml.etree as _xml
import qiime2.sdk as _sdk
import q2galaxy.core.util as _util
import q2galaxy.core.templaters as _templaters
import q2galaxy.core.environment as _environment
import q2galaxy.core.usage as _usage
from q2galaxy.api.usage import GalaxyRSTInstructionsUsage
__all__ = ['template_action_iter', 'template_plugin_iter',
'template_builtins_iter', 'template_all_iter', 'template_action',
'template_plugin', 'template_builtins', 'template_all',
'GalaxyRSTInstructionsUsage', 'template_tool_conf']
def _template_dir_iter(directory):
if not os.path.exists(directory):
os.mkdir(directory)
yield {'status': 'created', 'type': 'directory', 'path': directory}
def _template_tool_iter(tool, path):
is_existing = os.path.exists(path)
_util.write_tool(tool, path)
if not is_existing:
yield {'status': 'created', 'type': 'file', 'path': path}
else:
yield {'status': 'updated', 'type': 'file', 'path': path}
def template_action_iter(plugin, action, directory):
meta = _environment.find_conda_meta()
filename = _templaters.make_tool_id(plugin.id, action.id) + '.xml'
filepath = os.path.join(directory, filename)
test_dir = os.path.join(directory, 'test-data', '')
tool = _templaters.make_tool(meta, plugin, action)
yield from _template_tool_iter(tool, filepath)
yield from _template_dir_iter(test_dir)
yield from _usage.collect_test_data(action, test_dir)
def template_plugin_iter(plugin, directory):
suite_name = f'suite_qiime2_{plugin.id.replace("_", "-")}'
suite_dir = os.path.join(directory, suite_name, '')
if plugin.actions:
yield from _template_dir_iter(suite_dir)
for action in plugin.actions.values():
yield from template_action_iter(plugin, action, suite_dir)
def template_builtins_iter(directory):
meta = _environment.find_conda_meta()
suite_name = 'suite_qiime2_tools'
suite_dir = os.path.join(directory, suite_name, '')
yield from _template_dir_iter(suite_dir)
for tool_id, tool_maker in _templaters.BUILTIN_MAKERS.items():
path = os.path.join(suite_dir, tool_id + '.xml')
tool = tool_maker(meta, tool_id)
yield from _template_tool_iter(tool, path)
def template_all_iter(directory):
pm = _sdk.PluginManager()
for plugin in pm.plugins.values():
yield from template_plugin_iter(plugin, directory)
yield from template_builtins_iter(directory)
def template_action(plugin, action, directory):
for _ in template_action_iter(plugin, action, directory):
pass
def template_plugin(plugin, directory):
for _ in template_plugin_iter(plugin, directory):
pass
def template_builtins(directory):
for _ in template_builtins_iter(directory):
pass
def template_all(directory):
for _ in template_all_iter(directory):
pass
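# Hedged usage sketch (added for illustration, not part of the public API
# above): drives template_all_iter() to report progress while templating every
# installed plugin into a hypothetical './tools' directory.
def _print_templating_progress(directory='./tools'):
    for record in template_all_iter(directory):
        # each record is a status dict; use .get() since extra record shapes may appear
        print(record.get('status'), record.get('type'), record.get('path'))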
def template_tool_conf(directory, out_path):
toolbox = _util.XMLNode('toolbox')
section = _util.XMLNode('section', id='getext', name='Get Data')
section.append(_util.XMLNode('tool', file='data_source/upload.xml'))
toolbox.append(section)
section = _util.XMLNode('section', id='qiime2_tools', name='QIIME 2 Tools')
suite_name = 'suite_qiime2_tools'
suite_dir = os.path.join(directory, suite_name)
for tool_id in _templaters.BUILTIN_MAKERS:
path = os.path.join(suite_dir, tool_id + '.xml')
section.append(_util.XMLNode('tool', file=path))
toolbox.append(section)
pm = _sdk.PluginManager()
for plugin in sorted(pm.plugins.values(), key=lambda x: x.id):
plugin_name = plugin.id.replace('_', '-')
suite_name = f'suite_qiime2_{plugin_name}'
section = _util.XMLNode('section', id=suite_name,
name=f'QIIME 2 {plugin_name}')
for action in sorted(plugin.actions.values(), key=lambda x: x.id):
filename = _templaters.make_tool_id(plugin.id, action.id) + '.xml'
path = os.path.join(directory, suite_name, filename)
section.append(_util.XMLNode('tool', file=path))
toolbox.append(section)
with open(out_path, 'wb') as fh:
_xml.indent(toolbox, ' ' * 4)
fh.write(_xml.tostring(toolbox, pretty_print=True, encoding='utf-8',
xml_declaration=True))
|
#!/usr/bin/env python3
# =============================================================================
# License: WTFPL
# =============================================================================
"""Getting to grips with the pandas DataFrame.
Run this example script using this command:
python example.py
Read more here: https://pandas.pydata.org/pandas-docs/stable/reference/frame.html
"""
from csvdm import DataManager
# =============================================================================
DATA_DIR = "data"
DATA_FILE = "testdata.csv" # Assuming 'data/testdata.csv' exist
# -----------------------------------------------------------------------------
# Example csv entry: 999999999999,A1,CC,Operator,Station 1,9999
# =============================================================================
if __name__ == '__main__':
# =========================================================================
db = DataManager(DATA_FILE, DATA_DIR, 'uid')
# =========================================================================
print("Testing the DataManager on a defects database", end='\n\n')
print(db.__class__)
print(db.__doc__)
# =========================================================================
print(f'Starting dataset: {db.row_count()} rows.', end='\n\n')
# db.info()
db.dump()
# =========================================================================
uid = 5
# db.delete_row(uid)
col_name = 'uid'
value = uid
db.delete_where(value, col_name) # It never happened
col_name = 'cause'
value = "Machine"
db.delete_where(value, col_name) # Maintenance is on it
# =========================================================================
print(f'\nAfter delete method called on uid={uid} and {col_name}={value}: {db.row_count()} rows left.', end='\n\n')
db.dump()
# =========================================================================
print(f'\nEntry #5 and all Machine defects scrubbed. All clean. :)', end='\n\n')
db.save()
# Hourly reporting
print(f"CC count\t| {db.count('CC', 'defect_type')}")
print(f"MS count\t| {db.count('MS', 'defect_type')}")
print(f"Nuclear Meltdown count\t| {db.count('Nuclear Meltdown', 'defect_type')}")
print(f"Machine count\t| {db.count('Machine', 'cause')}")
print(f"Operator count\t| {db.count('Operator', 'cause')}")
print(f"Gremlins count\t| {db.count('Gremlins', 'cause')}")
# test_defect = [
# ['99999999999', 'A1', 'CC', 'Operator', 'Station 1'],
# ['11111111111', 'A2', 'CC', 'Operator', 'The Pit']
# ]
# db.insert(test_defect)
# print(db.contains('99999999999', 'panel_id'))
|
import pytest
from pytest_bdd import given, scenario, then, when
@pytest.mark.skip(reason="Currently not implemented")
@scenario('../docker_build.feature', 'Dockerfile is not found')
def test_dockerfile_is_not_found():
pass
@given('a path that does not contain a Dockerfile')
def a_path_that_does_not_contain_a_dockerfile():
"""a path that does not contain a Dockerfile."""
@when('smart containers is asked to build it')
def smart_containers_is_asked_to_build_it():
"""smart containers is asked to build it."""
@then('it should return an error')
def it_should_return_an_error():
"""it should return an error."""
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os
import pytest
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.data.pandas import read_csv
from zoo.orca.data import SparkXShards
from zoo.orca.learn.pytorch import Estimator
from zoo.orca.learn.metrics import Accuracy
from zoo.orca.learn.trigger import EveryEpoch
from zoo.orca.learn.optimizers import SGD
from zoo.orca.learn.optimizers.schedule import Default
from zoo.orca import OrcaContext
import tempfile
resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
class TestEstimatorForDataFrame(TestCase):
def setUp(self):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
self.sc = init_orca_context(cores=4)
def to_array_(v):
return v.toArray().tolist()
def flatten_(v):
result = []
for elem in v:
result.extend(elem.toArray().tolist())
return result
self.spark = SparkSession(self.sc)
self.spark.udf.register("to_array", to_array_, ArrayType(DoubleType()))
self.spark.udf.register("flatten", flatten_, ArrayType(DoubleType()))
def tearDown(self):
""" teardown any state that was previously setup with a setup_method
call.
"""
stop_orca_context()
def test_bigdl_pytorch_estimator_dataframe_predict(self):
def loss_func(input, target):
return nn.CrossEntropyLoss().forward(input, target.flatten().long())
class IdentityNet(nn.Module):
def __init__(self):
super().__init__()
# need this line to avoid optimizer raise empty variable list
self.fc1 = nn.Linear(5, 5)
def forward(self, input_):
return input_
model = IdentityNet()
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 5,
[int(np.random.randint(0, 2,
size=()))])).toDF(["feature", "label"])
with tempfile.TemporaryDirectory() as temp_dir_name:
estimator = Estimator.from_torch(model=model, loss=loss_func,
optimizer=SGD(learningrate_schedule=Default()),
model_dir=temp_dir_name)
result = estimator.predict(df, feature_cols=["feature"])
expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
assert result.selectExpr(expr).first()["error"] == 0
def test_bigdl_pytorch_estimator_dataframe_fit_evaluate(self):
class SimpleModel(nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.fc = nn.Linear(5, 5)
def forward(self, x):
x = self.fc(x)
return F.log_softmax(x, dim=1)
model = SimpleModel()
def loss_func(input, target):
return nn.CrossEntropyLoss().forward(input, target.flatten().long())
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 5,
[int(np.random.randint(0, 2,
size=()))])).toDF(["feature", "label"])
with tempfile.TemporaryDirectory() as temp_dir_name:
estimator = Estimator.from_torch(model=model, loss=loss_func, metrics=[Accuracy()],
optimizer=SGD(learningrate_schedule=Default()),
model_dir=temp_dir_name)
estimator.fit(data=df, epochs=4, batch_size=2, validation_data=df,
checkpoint_trigger=EveryEpoch(),
feature_cols=["feature"], label_cols=["label"])
eval_result = estimator.evaluate(df, batch_size=2,
feature_cols=["feature"], label_cols=["label"])
assert isinstance(eval_result, dict)
if __name__ == "__main__":
pytest.main([__file__])
|
import cv2
from cv2 import aruco as aruco
import numpy as np
import os
def aruco_3D_to_dict(object_points):
object_ndarray = np.loadtxt(object_points, delimiter=",")
return {int(array[0]): array[1:].tolist() for array in object_ndarray}
def load_coefficients(path):
'''Loads camera matrix and distortion coefficients.'''
# FILE_STORAGE_READ
cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
    # note we also have to specify the type to retrieve, otherwise we only get a
    # FileNode object back instead of a matrix
camera_matrix = cv_file.getNode('K').mat()
dist_coefs = cv_file.getNode('D').mat()
rot_vecs = cv_file.getNode('R').mat()
tran_vecs = cv_file.getNode('T').mat()
cv_file.release()
return camera_matrix, dist_coefs, rot_vecs, tran_vecs
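# Hedged usage sketch (added for illustration, not called anywhere): shows how
# the helpers above are expected to be combined; 'calibration.yml' and
# 'object_points.csv' are hypothetical file names in the formats read above.
def _demo_load_calibration():
    # read intrinsics/distortion from an OpenCV FileStorage file
    camera_matrix, dist_coefs, _, _ = load_coefficients("calibration.yml")
    # read marker-id -> 3D coordinates mapping from a CSV of object points
    object_points = aruco_3D_to_dict("object_points.csv")
    print(camera_matrix.shape, len(object_points))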
def plot_aruco_markers(img, bboxs, ids):
if (ids is not None):
for bbox, id in zip(bboxs, ids):
cv2.putText(img, str(id), (int(bbox[0][0][0]), int(bbox[0][0][1])), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 255), 1)
cv2.imshow("Image", img)
cv2.waitKey(0)
def find_aruco_markers(img, marker_size=5, total_markers=50, draw=True):
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
key = getattr(aruco, f"DICT_{marker_size}X{marker_size}_{total_markers}")
aruco_dict = aruco.Dictionary_get(key)
aruco_params = aruco.DetectorParameters_create()
bboxs, ids, rejected = aruco.detectMarkers(gray_img, aruco_dict, parameters=aruco_params)
if draw and len(bboxs) != 0:
aruco.drawDetectedMarkers(img, bboxs)
return bboxs, ids
def main():
cap = cv2.VideoCapture(0)
while (True):
success, img = cap.read()
#img = cv2.rotate(img, cv2.ROTATE_180) #cv2.ROTATE_90_COUNTERCLOCKWISE
#img = cv2.flip(img, 0)
bboxs, ids = find_aruco_markers(img)
if (ids is not None):
for bbox, id in zip(bboxs, ids):
                cv2.putText(img, str(id), (int(bbox[0][0][0]), int(bbox[0][0][1])), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 255), 1)
cv2.imshow("Image", img)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
from pygwin._pg import pg as _pg
from pygwin.surface import surface as _surface
from PIL import Image as _im
import tempfile as _tf
import randstr as _rs
import pickle as _p
import bz2 as _bz2
import os as _os
def load(path):
if path.endswith('.gif'):
im = _im.open(path)
with _tf.TemporaryDirectory() as td:
surfs = []
for i in range(im.n_frames):
im.seek(i)
p = _os.path.join(td,f'{i}.png')
im.save(p)
s = _pg.image.load(p)
_os.remove(p)
sg = _surface(s.get_size())
sg.blit(s,(0,0))
surfs.append(sg)
return surfs
else:
        im = _im.open(path)
image = _pg.image.fromstring(im.tobytes(),im.size,im.mode)
surf = _surface(im.size)
surf.blit(image,(0,0))
return surf
def save(surface, dest):
_pg.image.save_extended(surface._grp(), dest)
def toBytes(surface):
return _bz2.compress(_p.dumps([_pg.image.tostring(surface._grp(),"RGBA"),list(surface.size)]))
def fromBytes(bytes):
string = _p.loads(_bz2.decompress(bytes))
surf = _pg.image.fromstring(string[0],tuple(string[1]),"RGBA")
surface = _surface(tuple(string[1]))
surface.blit(surf,(0,0))
return surface
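# Hedged usage sketch (not part of the original module): round-trips a surface
# through toBytes()/fromBytes(), assuming the _surface class can be built from
# a size tuple as done in load() above.
if __name__ == '__main__':
    demo = _surface((32, 32))          # blank 32x32 surface
    packed = toBytes(demo)             # pickle + bz2-compress the RGBA bytes and size
    restored = fromBytes(packed)       # rebuild an equivalent surface
    print(restored.size == demo.size)  # expected: True, assuming .size compares as a tuple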
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Check density inversion QC test
"""
import numpy as np
from cotede.qc import ProfileQC
from cotede.qctests import DensityInversion, densitystep
from ..data import DummyData
from .compare import compare_feature_input_types, compare_input_types
try:
import gsw
GSW_AVAILABLE = True
except ImportError:
GSW_AVAILABLE = False
def test_densitystep():
if not GSW_AVAILABLE:
return
p = [1.0, 100, 200, 300, 500, 5000, np.nan]
t = [27.44, 14.55, 11.96, 11.02, 7.65, 2.12, 2.12]
s = [35.71, 35.50, 35.13, 35.02, 34.72, 35.03, 35.03]
output = [np.nan, 3.3484632, 0.2433187, 0.0911988, 0.317172, 0.9046589, np.nan]
drho = densitystep(s, t, p)
assert isinstance(drho, np.ndarray)
assert type(drho) == np.ndarray
assert np.allclose(drho, output, equal_nan=True)
def test_feature_input_types():
if not GSW_AVAILABLE:
return
p = [1.0, 100, 200, 300, 500, 5000, np.nan]
t = [27.44, 14.55, 11.96, 11.02, 7.65, 2.12, 2.12]
SA = [35.71, 35.50, 35.13, 35.02, 34.72, 35.03, 35.03]
compare_feature_input_types(densitystep, SA=SA, t=t, p=p)
def test_standard_dataset():
"""Test DensityInversion procedure with a standard dataset
"""
if not GSW_AVAILABLE:
return
profile = DummyData()
features = {
"densitystep": [
np.nan,
0.0091339,
0.0077907,
0.0175282,
0.1450310,
0.5896058,
0.5023247,
0.7156530,
0.2924434,
0.3559480,
0.6476343,
-0.4131068,
0.2489996,
np.nan,
np.nan,
]
}
flags = {"density_inversion": [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 0, 0]}
cfg = {"threshold": -0.03}
y = DensityInversion(profile, cfg)
for f in features:
assert np.allclose(y.features[f], features[f], equal_nan=True)
for f in flags:
assert np.allclose(y.flags[f], flags[f], equal_nan=True)
def test_densityinversion_from_profileqc():
"""Validate if ProfileQC can run DensityInversion
It requires GSW to estimate density if the density itself is not provided.
"""
cfg = {
"TEMP": {"density_inversion": {"threshold": -0.03}},
"PSAL": {"density_inversion": {"threshold": -0.03}},
}
profile = DummyData()
pqc = ProfileQC(profile, cfg=cfg)
for v in ("TEMP", "PSAL"):
assert "density_inversion" in pqc.flags[v]
if not GSW_AVAILABLE:
assert (pqc.flags[v]["density_inversion"] == 0).all()
# def test_input_types():
# cfg = {"threshold": 4}
# compare_input_types(Spike, cfg)
|
from dataiku.exporter import Exporter
import re
import json
import logging
from splunklib.binding import connect
import splunklib.client as client
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO,
format='splunk plugin %(levelname)s - %(message)s')
class SplunkIndexExporter(Exporter):
DEFAULT_SPLUNK_PORT = "8089"
ISO_8601_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%Q"
EPOCH_TIME_FORMAT = "%s.%Q"
def __init__(self, config, plugin_config):
"""
:param config: the dict of the configuration of the object
:param plugin_config: contains the plugin settings
"""
logger.info("SplunkIndexExporter:Init")
self.config = config
self.plugin_config = plugin_config
self.splunk_socket = None
self.dataset_schema = None
self.row_index = 0
try:
self.splunk_instance = config.get('splunk_login')['splunk_instance']
self.parse_url()
self.splunk_username = config.get('splunk_login')['splunk_username']
self.splunk_password = config.get('splunk_login')['splunk_password']
except Exception as err:
raise Exception("The Splunk instance URL or login details are not filled in. ({})".format(err))
self.splunk_app = config.get('splunk_app')
self.index_name = config.get('index_name').lower()
self.search_string = ""
self.splunk_sourcetype = config.get('splunk_sourcetype')
self.source_host = config.get("source_host", "dss")
self.overwrite_existing_index = config.get('overwrite_existing_index', False)
args = {
"host": self.splunk_host,
"port": self.splunk_port,
"username": self.splunk_username,
"password": self.splunk_password
}
if not self.splunk_app == "":
args["app"] = self.splunk_app
self.client = connect(**args)
logger.info("SplunkIndexExporter:Connected to Splunk")
self.authorization_token = self.client.token
def parse_url(self):
regex = '(?:http.*://)?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
groups = re.search(regex, self.splunk_instance)
self.splunk_port = groups.group('port')
if self.splunk_port == "" or self.splunk_port is None:
self.splunk_port = self.DEFAULT_SPLUNK_PORT
self.splunk_host = groups.group('host')
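    # Illustrative examples (assumptions, not from the original code): with the
    # regex above, "https://splunk.example.com:8089" yields host
    # "splunk.example.com" and port "8089", while a bare "splunk.example.com"
    # has no port group and falls back to DEFAULT_SPLUNK_PORT.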
def open(self, schema):
self.dataset_schema = schema
service = client.connect(
host=self.splunk_host,
port=self.splunk_port,
username=self.splunk_username,
password=self.splunk_password
)
try:
if self.overwrite_existing_index:
service.indexes.delete(self.index_name)
except Exception as Err:
logger.info('deleting error={}'.format(Err))
try:
myindex = service.indexes[self.index_name]
except Exception as Err:
logging.info("Creating indexe following {}".format(Err))
myindex = service.indexes.create(self.index_name)
self.splunk_socket = myindex.attach(sourcetype=self.splunk_sourcetype, host=self.source_host)
def open_to_file(self, schema, destination_file_path):
"""
Start exporting. Only called for exporters with behavior OUTPUT_TO_FILE
:param schema: the column names and types of the data that will be streamed
in the write_row() calls
:param destination_file_path: the path where the exported data should be put
"""
raise Exception("Unimplemented")
def write_row(self, row):
self._send_row(row, self.splunk_socket)
def _send_row(self, row, splunk_socket):
event = {}
for value, schema in zip(row, self.dataset_schema["columns"]):
column_name = schema["name"]
event[column_name] = value
event.pop("_raw", None)
if self.splunk_sourcetype == "_json":
event_string = json.dumps(event) + '\r\n'
else:
event_string = self._generate_event_string(event) + '\r\n'
splunk_socket.send(event_string.encode())
def _generate_event_string(self, event):
elements = []
for element in event:
elements.append(element + "=" + str(json.dumps(event[element])))
return " ".join(elements)
def close(self):
"""
Perform any necessary cleanup
"""
self.splunk_socket.close()
|
from tkinter import *
root = Tk() #Tk() opens up a blank window, and we've set it equal to root
topFrame = Frame(root) #It's better to split your window layout into parts, which here, are called frames
topFrame.pack() #pack literally means to stuff 'things' into the window, here we stuffed in one frame
bottomFrame = Frame(root)
bottomFrame.pack()
#What's a GUI without having something to click on?
B1 = Button(topFrame, text = "button 1", fg="red")
# Button() creates, well, a button, which can be given many parameters, for example the text, the colour of the text, the foreground/background colour, etc.
# Here, we're creating buttons and storing them.
B2 = Button(topFrame, text = "button 2", fg="blue")
B3 = Button(topFrame, text = "button 3", fg="green")
B4 = Button(topFrame, text = "button 4", fg="purple")
B1.pack(side=LEFT) #As discussed above, we've created a button and now need to 'stuff' it into the window. pack() is, again, the way to go
B2.pack(side=LEFT) #pack() too has its own parameters. 'side' is pretty self-explanatory. There are better ways to define locations for objects; Google is your friend.
B3.pack(side=LEFT)
B4.pack(side=BOTTOM)
#We've created some buttons, great. It'd be even better to assign some functions to them, so let's define some:
def eventB1(event):
print('You Pressed B1')
def eventB2(event):
print('You Pressed B2')
def eventB3(event):
print('You Pressed B3')
def eventB4(event):
print('You Pressed B4')
B1.bind("<Button-1>", eventB1)
# bind() attaches an event to a widget: the bound function is called whenever the specified event fires on that widget
# <Button-1> is your left mouse button, <Button-2> and <Button-3> are middle and right mouse buttons respectively
# Here, clicking on the button with your left mouse button will call the function; other mouse buttons or keys (unless also bound) won't trigger it
B2.bind("<Button-1>", eventB2)
B3.bind("<Button-1>", eventB3)
B4.bind("<Button-1>", eventB4)
root.mainloop() # mainloop() keeps the window open and processing events. Go ahead and try running the code without this line.
|
from requests_html import HTMLSession
import sbol2
import rdflib
class col_methods:
"""A class used to carry out a switch case statement for different\
properties in SBOL files
"""
def __init__(self, prop_nm, prop_val, sbol_doc, role_dict, org_dict):
"""The switch statement to call different methods based on prop_nm
Args:
prop_nm (str): the name of the property
prop_val (str): the value of the property
sbol_doc (sbol document): sbol document containing the properties
being passed in
role_dict (dictionary): maps sequence ontology terms to human
readable names
org_dict (dictionary): maps ncbi txids to human readable names
"""
        # instance variables for the dataframe switch statements
self.prop_nm = prop_nm
self.prop_val = prop_val
self.sbol_doc = sbol_doc
self.role_dict = role_dict
self.org_dict = org_dict
function_call_dict = {'Role': 'role', 'Types': 'types',
'Sequence': 'sequence',
'Source Organism': 'organism',
'Target Organism': 'organism'}
if self.prop_nm in function_call_dict:
self.prop_nm = function_call_dict[self.prop_nm]
# if the column name matches the function name, call the function
try:
getattr(self, self.prop_nm)()
# if the column name does not match the function name, call 'no_change'
except AttributeError:
getattr(self, 'no_change')()
def no_change(self):
"""Else case for the switch statement"""
pass
def role(self):
"""Uses prop_val as the key in a dictionary to get the new value.
It is a way of converting an ontology term to a human readable one
"""
role_val = str(self.prop_val)
if role_val in self.role_dict:
self.prop_val = self.role_dict[role_val]
def types(self):
"""Split types uri to only be the last bit after the final hash
Raises:
ValueError: If self.prop_val does not contain a #
"""
self.prop_val = str(self.prop_val)
if '#' not in self.prop_val:
raise ValueError
else:
self.prop_val = self.prop_val.split('#')[-1]
def sequence(self):
"""Gets the sequence from the document based on the sequence uri
Raises:
TypeError: If the prop_val from initialisation is not a uri
or string
ValueError: If the prop_val from initialisation is not a uri
in the sbol document provided at initialisation
"""
if type(self.prop_val) not in [rdflib.term.URIRef, str]:
raise TypeError
else:
try:
temp = self.sbol_doc.getSequence(self.prop_val)
self.prop_val = temp.elements
except sbol2.sbolerror.SBOLError:
# if uri not found in document
raise ValueError
def organism(self):
""" Converts a uri containing a txid into a human readable name
either by using the ontology provided or by pulling the name from
the ncbi database. If the name is pulled from the database it is added
to the ontology for the rest of the program run (the assumption is
that a rare organism may be used multiple times)
Raises:
TypeError: if self.prop_val is not a string or uri
ValueError: if self.prop_val doesn't contain
'https://identifiers.org/taxonomy:'
TypeError: if self.org_dict is not a dictionary
"""
if type(self.prop_val) not in [rdflib.term.URIRef, str]:
raise TypeError
elif 'https://identifiers.org/taxonomy:' not in self.prop_val:
raise ValueError
if type(self.org_dict) is not dict:
raise TypeError
txid = str(self.prop_val).split(':')[-1]
if txid in self.org_dict:
self.prop_val = self.org_dict[txid]
else:
session = HTMLSession()
r = session.get(f'https://identifiers.org/taxonomy:{txid}')
v = r.html.find('strong', first=True)
self.prop_val = v.text
self.org_dict[txid] = self.prop_val
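# Illustrative usage only; the file name, ontology keys and dictionaries below are
# hypothetical -- in practice they come from the calling pipeline:
#
#   doc = sbol2.Document()
#   doc.read("example.xml")
#   role_dict = {"http://identifiers.org/so/SO:0000167": "promoter"}
#   org_dict = {"562": "Escherichia coli"}
#   cell = col_methods("Role", "http://identifiers.org/so/SO:0000167",
#                      doc, role_dict, org_dict)
#   print(cell.prop_val)  # -> "promoter"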
|
def merge_sort(items):
    if len(items) <= 1:
        return items
    # split
    middle_index = len(items) // 2
    left_split = items[:middle_index]
    right_split = items[middle_index:]
    # sort each half recursively, then re-merge the sorted halves
    return merge(merge_sort(left_split), merge_sort(right_split))
# re-merge
def merge(left, right):
    result = []
    while left and right:
        if left[0] < right[0]:
            result.append(left[0])
            left.pop(0)
        else:
            result.append(right[0])
            right.pop(0)
    # one side may still hold elements; they are already sorted, so append them as-is
    result += left
    result += right
    return result
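# Quick illustrative sanity check of the two functions above:
if __name__ == "__main__":
    print(merge_sort([38, 27, 43, 3, 9, 82, 10]))  # -> [3, 9, 10, 27, 38, 43, 82]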
|
import datetime
import pytz
from django.contrib.auth.models import Group, Permission
from django.core.exceptions import ValidationError
from django.db.models.deletion import ProtectedError
from django.test import SimpleTestCase, TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from changelogs.models import Project, User, Version
from changelogs.validators import validate_project_url
class IndexViewTests(TestCase):
def test_successful(self):
response = self.client.get(reverse("changelogs:index"))
self.assertContains(response, "Collector for changelogs")
class AboutViewTests(TestCase):
def test_successful(self):
response = self.client.get(reverse("changelogs:about"))
self.assertContains(
response, "Changelogger is a service to store all your changelogs"
)
class ProfileViewTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob",
email="jacob@mail.com",
password="top_secret",
first_name="Jacob",
last_name="Smith",
)
def test_anonymous(self):
response = self.client.get(reverse("changelogs:profile"))
self.assertRedirects(response, "/login/?next=/profile/")
def test_successful(self):
self.client.login(username="jacob", password="top_secret")
response = self.client.get(reverse("changelogs:profile"))
self.assertContains(response, "name: Jacob Smith")
self.assertContains(response, "e-mail: jacob@mail.com")
self.assertContains(response, f"API token: {self.user.auth_token}")
class SubscriptionsViewTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_successful(self):
another_user = User.objects.create_user(
username="john", email="john@mail.com", password="my_password"
)
Project.objects.create(
title="django",
is_public=True,
url="https://github.com/django/django",
owner=self.user,
)
Project.objects.create(
title="requests", url="https://github.com/psf/requests", owner=another_user,
)
self.client.login(username="jacob", password="top_secret")
response = self.client.get(reverse("changelogs:subscriptions"))
self.assertContains(response, "Manage subscriptions")
self.assertContains(response, "django")
self.assertNotContains(response, "requests")
def test_anonymous(self):
response = self.client.get(reverse("changelogs:subscriptions"))
self.assertRedirects(response, "/login/?next=/subscriptions/")
class ApiDocumentationViewTests(TestCase):
def test_successful(self):
response = self.client.get(reverse("changelogs:api_documentation"))
self.assertContains(response, "Automate Changelogger via a simple API.")
class ProjectsViewTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_no_projects(self):
response = self.client.get(reverse("changelogs:projects"))
self.assertContains(response, "No projects are available =(")
self.assertQuerysetEqual(response.context["projects_list"], [])
def test_anonymous(self):
Project.objects.create(
title="django",
is_public=True,
url="https://github.com/django/django",
owner=self.user,
)
Project.objects.create(
title="requests",
is_public=True,
url="https://github.com/psf/requests",
owner=self.user,
)
Project.objects.create(
title="flask", url="https://github.com/pallets/flask", owner=self.user
)
response = self.client.get(reverse("changelogs:projects"))
self.assertContains(response, "django")
self.assertContains(response, "https://github.com/django/django")
self.assertContains(response, "requests")
self.assertContains(response, "https://github.com/psf/requests")
self.assertNotContains(response, "flask")
self.assertNotContains(response, "https://github.com/pallets/flask")
self.assertQuerysetEqual(
response.context["projects_list"],
[
"<Project: django (https://github.com/django/django)>",
"<Project: requests (https://github.com/psf/requests)>",
],
)
def test_authorized_user(self):
project_django = Project.objects.create(
title="django", url="https://github.com/django/django", owner=self.user
)
project_django.subscribers.add(self.user)
Project.objects.create(
title="requests", url="https://github.com/psf/requests", owner=self.user
)
Project.objects.create(
title="flask",
is_public=True,
url="https://github.com/pallets/flask",
owner=self.user,
)
self.client.login(username="jacob", password="top_secret")
response = self.client.get(reverse("changelogs:projects"))
self.assertContains(response, "django")
self.assertContains(response, "https://github.com/django/django")
self.assertNotContains(response, "flask")
self.assertNotContains(response, "https://github.com/pallets/flask")
self.assertNotContains(response, "requests")
self.assertNotContains(response, "https://github.com/psf/requests")
class VersionDetailViewTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_successful(self):
project_django = Project.objects.create(
title="django", url="https://github.com/django/django", owner=self.user
)
version_django_1 = Version.objects.create(
title="1.0.0",
date_time=datetime.datetime.now(tz=pytz.utc),
project=project_django,
body=(
"* change one",
"* change two",
"",
),
)
self.client.login(username="jacob", password="top_secret")
response = self.client.get(
reverse(
"changelogs:version_detail",
args=(project_django.id, version_django_1.id),
)
)
self.assertContains(response, "django-1.0.0")
self.assertContains(response, "change one")
self.assertContains(
response,
"""
<img
alt="image"
src="https://github.com/django/django/uploads/c76c7e2525ac077aea6334e1f87c88b1/image.png"
/>
""",
html=True,
)
def test_wrong_version(self):
response = self.client.get(reverse("changelogs:version_detail", args=(1, 1)))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_anonymous_wrong_permissions(self):
project_django = Project.objects.create(
title="django",
url="https://github.com/django/django",
owner=self.user,
is_public=False,
)
versions_django_1 = Version.objects.create(
title="1.0.0",
date_time=datetime.datetime.now(tz=pytz.utc),
project=project_django,
body="* change one* change two",
)
response = self.client.get(
reverse(
"changelogs:version_detail",
args=(project_django.id, versions_django_1.id,),
)
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class ProjectDetailViewTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_logged_in_successful(self):
project_django = Project.objects.create(
title="django",
url="https://github.com/django/django",
owner=self.user,
is_public=False,
)
Version.objects.create(
title="1.0.0",
date_time=datetime.datetime.now(tz=pytz.utc),
project=project_django,
body="* change one* change two",
)
self.client.login(username="jacob", password="top_secret")
response = self.client.get(
reverse("changelogs:project_detail", args=(project_django.id,),)
)
self.assertContains(response, "django")
def test_logged_in_wrong_permissions(self):
another_user = User.objects.create_user(
username="john", email="john@mail.com", password="my_password"
)
project_django = Project.objects.create(
title="django",
url="https://github.com/django/django",
owner=another_user,
is_public=False,
)
Version.objects.create(
title="1.0.0",
date_time=datetime.datetime.now(tz=pytz.utc),
project=project_django,
body="* change one* change two",
)
self.client.login(username="jacob", password="top_secret")
response = self.client.get(
reverse("changelogs:project_detail", args=(project_django.id,),)
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_anonymous_successful(self):
project_django = Project.objects.create(
title="django",
url="https://github.com/django/django",
owner=self.user,
is_public=True,
)
Version.objects.create(
title="1.0.0",
date_time=datetime.datetime.now(tz=pytz.utc),
project=project_django,
body="* change one* change two",
)
response = self.client.get(
reverse("changelogs:project_detail", args=(project_django.id,),)
)
self.assertContains(response, "django")
def test_anonymous_wrong_permissions(self):
project_django = Project.objects.create(
title="django",
url="https://github.com/django/django",
owner=self.user,
is_public=False,
)
response = self.client.get(
reverse("changelogs:project_detail", args=(project_django.id,),)
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class AddVersionViewTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_get_successful(self):
self.client.login(username="jacob", password="top_secret")
project_django = Project.objects.create(
title="Django", url="https://github.com/django/django", owner=self.user
)
response = self.client.get(
reverse("changelogs:add_version", args=(project_django.id,),)
)
self.assertContains(response, "Title")
self.assertContains(response, "Body")
self.assertContains(response, "Add version to Django")
def test_get_anonymous(self):
response = self.client.get(reverse("changelogs:add_version", args=(1000,),))
self.assertRedirects(response, "/login/?next=/projects/1000/versions/add")
def test_get_wrong_project_id(self):
self.client.login(username="jacob", password="top_secret")
response = self.client.get(reverse("changelogs:add_version", args=(1000,),))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class ProjectModelTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_repository_owner_property(self):
sentry_project = Project.objects.create(
title="Sentry", url="https://github.com/getsentry/sentry", owner=self.user
)
self.assertEqual(sentry_project.repository_owner, "getsentry")
def test_repository_name_property(self):
sentry_project = Project.objects.create(
title="Sentry", url="https://github.com/getsentry/sentry", owner=self.user
)
self.assertEqual(sentry_project.repository_name, "sentry")
def test_is_github_property_true(self):
sentry_project = Project.objects.create(
title="Sentry", url="https://github.com/getsentry/sentry", owner=self.user
)
self.assertTrue(sentry_project.is_github_project)
def test_is_github_property_false(self):
gitlab_project = Project.objects.create(
title="GitLab", url="https://gitlab.com/gitlab-org/gitlab", owner=self.user
)
self.assertFalse(gitlab_project.is_github_project)
def test_str_representation(self):
sentry_project = Project.objects.create(
title="Sentry", url="https://github.com/getsentry/sentry", owner=self.user
)
self.assertEqual(
str(sentry_project), "Sentry (https://github.com/getsentry/sentry)"
)
def test_project_owner_delete(self):
Project.objects.create(
title="Sentry", url="https://github.com/getsentry/sentry", owner=self.user
)
with self.assertRaises(ProtectedError):
self.user.delete()
def test_is_subscribed_by_user(self):
user = User.objects.create_user(
username="sherlock", email="sherlock@mail.com", password="top_secret"
)
another_user = User.objects.create_user(
username="john", email="john@mail.com", password="my_password"
)
project = Project.objects.create(
title="Project", url="https://github.com/me/project", owner=user
)
project.subscribers.add(user)
self.assertTrue(project.is_subscribed_by_user(user))
self.assertFalse(project.is_subscribed_by_user(another_user))
def test_accessible_by_user_filter(self):
user = User.objects.create_user(
username="sherlock", email="sherlock@mail.com", password="top_secret"
)
another_user = User.objects.create_user(
username="john", email="john@mail.com", password="my_password"
)
project_1 = Project.objects.create(
title="Project1", url="https://github.com/me/project1", owner=user
)
project_1.team.add(user)
project_1.team.add(another_user)
project_2 = Project.objects.create(
title="Project2", url="https://github.com/me/project2", owner=another_user
)
project_3 = Project.objects.create(
title="Project3",
url="https://github.com/me/project3",
owner=user,
is_public=True,
)
project_4 = Project.objects.create(
title="Project4",
url="https://github.com/me/project4",
owner=another_user,
is_public=True,
)
projects = Project.objects.accessible_by_user(user).all()
self.assertEqual(len(projects), 3)
self.assertTrue(project_1 in projects)
self.assertTrue(project_2 not in projects)
self.assertTrue(project_3 in projects)
self.assertTrue(project_4 in projects)
class VersionModelTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_str_representation(self):
sentry_project = Project.objects.create(
title="Sentry", url="https://github.com/getsentry/sentry", owner=self.user
)
version_sentry_1 = Version.objects.create(
title="1.0.0",
date_time=datetime.datetime.now(tz=pytz.utc),
project=sentry_project,
body="* change one* change two",
)
self.assertEqual(str(version_sentry_1), "1.0.0 (Sentry)")
class RestApiTests(APITestCase):
def setUp(self):
self.group = Group.objects.create(name="Users")
self.group.permissions.add(Permission.objects.get(name="Can view project"))
self.group.permissions.add(Permission.objects.get(name="Can view version"))
self.group.permissions.add(Permission.objects.get(name="Can add version"))
self.group.permissions.add(Permission.objects.get(name="Can change version"))
self.group.permissions.add(Permission.objects.get(name="Can delete version"))
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
self.user.groups.add(self.group)
def test_create_new_version_anonymous(self):
response = self.client.post(
"/api/versions/",
{
"title": "0.1.0",
"date_time": datetime.datetime.now(tz=pytz.utc),
"body": "small fixes",
"project": 1,
},
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_new_version_wrong_permissions(self):
self.group.permissions.remove(Permission.objects.get(name="Can add version"))
self.client.force_authenticate(user=self.user, token=self.user.auth_token)
response = self.client.post(
"/api/versions/",
{
"title": "0.1.0",
"date_time": datetime.datetime.now(tz=pytz.utc),
"body": "small fixes",
"project": 1,
},
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_new_version_successful(self):
sentry_project = Project.objects.create(
title="Sentry", url="https://github.com/getsentry/sentry", owner=self.user
)
self.assertEqual(Version.objects.count(), 0)
self.client.force_authenticate(user=self.user, token=self.user.auth_token)
response = self.client.post(
"/api/versions/",
{
"title": "0.1.0",
"date_time": datetime.datetime.now(tz=pytz.utc),
"body": "small fixes",
"project": sentry_project.id,
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Version.objects.count(), 1)
version = Version.objects.first()
self.assertEqual(version.project_id, sentry_project.id)
self.assertEqual(version.body, "small fixes")
def test_view_version_successful(self):
project_django = Project.objects.create(
title="django", url="https://github.com/django/django", owner=self.user
)
project_django.subscribers.add(self.user)
version_django_1 = Version.objects.create(
title="1.0.0",
date_time=datetime.datetime(2019, 12, 4, tzinfo=pytz.utc),
project=project_django,
body="* change one* change two",
)
self.client.force_authenticate(user=self.user, token=self.user.auth_token)
response = self.client.get(f"/api/versions/{version_django_1.id}/")
self.assertEqual(
response.json(),
{
"body": "* change one* change two",
"date_time": "2019-12-04T00:00:00Z",
"id": 1,
"project": 1,
"title": "1.0.0",
},
)
def test_view_project_successful(self):
project_django = Project.objects.create(
title="django", url="https://github.com/django/django", owner=self.user
)
self.client.force_authenticate(user=self.user, token=self.user.auth_token)
response = self.client.get(f"/api/projects/{project_django.id}/")
self.assertEqual(
response.json(),
{"id": 1, "title": "django", "url": "https://github.com/django/django"},
)
class AddProjectViewTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username="jacob", email="jacob@mail.com", password="top_secret"
)
def test_get_successful(self):
self.client.login(username="jacob", password="top_secret")
response = self.client.get(reverse("changelogs:add_project"))
self.assertContains(response, "Title")
self.assertContains(response, "URL")
self.assertContains(response, "Is public")
self.assertContains(response, "Add project")
def test_post_successful(self):
self.assertEqual(Project.objects.count(), 0)
self.client.login(username="jacob", password="top_secret")
response = self.client.post(
reverse("changelogs:add_project"),
{"title": "Django", "url": "https://github.com/django/django"},
)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
project = Project.objects.first()
self.assertEqual(project.title, "Django")
self.assertEqual(project.url, "https://github.com/django/django")
self.assertFalse(project.is_public)
self.assertEqual(project.owner, self.user)
def test_anonymous(self):
response = self.client.get(reverse("changelogs:add_project"))
self.assertRedirects(response, "/login/?next=/projects/add")
class ErrorTemplatesTests(SimpleTestCase):
def test_404(self):
response = self.client.get("/404/")
self.assertContains(
response, "Page not found =(", status_code=status.HTTP_404_NOT_FOUND
)
class ValidatorsTests(SimpleTestCase):
def test_validate_project_url(self):
validate_project_url("https://github.com/pallets/flask")
with self.assertRaises(ValidationError) as error_context:
validate_project_url("https://github.com/pallets/flask/")
self.assertEqual(
error_context.exception.message,
"Projects's URL has slash at the end, it's not required",
)
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
from matplotlib.dates import DateFormatter
path = "C:\\Users\\pierre\\Downloads\\files\\Download\\samsunghealth_pierre.major_202008021218\\com.samsung.shealth.sleep.202008021218.csv"
df = pd.read_csv(path, index_col=False)
print(df.columns)
# df.columns = df.columns.str.replace('com.samsung.health.heart_rate.','')
df=df.drop(columns = ["extra_data","original_efficiency","original_bed_time","datauuid","original_wake_up_time","pkg_name","deviceuuid"])
df["datetime"]=pd.to_datetime(df["start_time"]/1000, unit = 's')
df["original_starttime"]=pd.to_datetime(df["start_time"]/1000, unit = 's')
# df["datetime"]=df["datetime"].apply(lambda x: x-dt.timedelta(hours=x.hour+1) if x.hour < 2 else x)
df["end_datetime"]=pd.to_datetime(df["end_time"]/1000, unit = 's')
df["diff"]=(df["end_time"] - df["start_time"])/3600000
df_zero=df.loc[df["diff"]<1]
print(df_zero.info())
df_j=df.loc[ df["datetime"] > dt.datetime.strptime("2020-06-12 11:59:24",'%Y-%m-%d %H:%M:%S')]
df_j=df_j.loc[ df_j["datetime"] < dt.datetime.strptime("2020-06-14 12:59:24",'%Y-%m-%d %H:%M:%S')]
# print(df_j.describe())
# print(df_j.head())
df.index = df["datetime"]
# print(df.head())
dfm =df.resample('D').count()
print(dfm.describe())
fig, ax = plt.subplots()
# dfm.reset_index().plot(x='datetime', y='diff')
dfm.plot(x='datetime', y='diff')
print(dfm.info())
exit()
# print(df["diff"].describe())
# df.plot.scatter(x="datetime",y="diff")
date_form = DateFormatter("%y-%m-%d")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
plt.show()
# df["sleep_time"]=df["com.samsung.health.sleep.end_time"]-df["com.samsung.health.sleep.start_time"]
df_j=df.iloc[ df["datetime"] > dt.datetime.strptime("2020-06-12 11:59:24",'%Y-%m-%d %H:%M:%S')]
df_j=df_j.iloc[ df_j["datetime"] < dt.datetime.strptime("2020-06-14 12:59:24",'%Y-%m-%d %H:%M:%S')]
print(df_j.head())
print(df_j.info())
|
import random
from myelin.core import Policy
class RandomPolicy(Policy):
"""
A uniformly random policy.
# Arguments
action_space: a callable that returns a list of available
actions from a given state.
"""
def __init__(self, action_space):
self.action_space = action_space
def get_action(self, state):
actions = self.action_space(state)
if not actions:
raise ValueError('Must have at least one available action.')
return random.choice(actions)
def get_action_prob(self, state, action):
return 1.0 / len(self.action_space(state))
def get_action_probs(self, state):
return 1.0 / len(self.action_space(state))
class FixedPolicy(Policy):
def __init__(self, action_space):
self.action_space = action_space
self.vf = {}
def get_action(self, state):
actions = self.action_space(state)
if not actions:
raise ValueError('Must have at least one available action.')
if state in self.vf:
return self.vf[state]
return random.choice(actions)
def get_action_prob(self, state, action):
return 1.0 / len(self.action_space(state))
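# Minimal illustrative usage; the four-move action space below is made up and is not
# part of the myelin library itself:
if __name__ == "__main__":
    def four_moves(state):
        # every state offers the same four moves in this toy example
        return ["up", "down", "left", "right"]

    policy = RandomPolicy(four_moves)
    print(policy.get_action(state=(0, 0)))       # a random move
    print(policy.get_action_prob((0, 0), "up"))  # 0.25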
|
'''
Created on Jun 4, 2016
@author: Debanjan.Mahata
'''
import requests
import json
import sys
import config
import yagmail
import csv
from time import sleep
from config import DEVICE_PLANT_MAPPING
from config import PLANT_DEVICE_MAPPING
from config import DEVICE_STALE_DATA_MINS
from config import CAM_STALE_DATA_MINS
from config import max_ph, min_ph
from config import max_ec, min_ec
from config import max_tfw, min_tfw
from config import max_tbw, min_tbw
from config import proxy
from config import DEVICE_CONN_NO_TRIES
from config import DEVICE_CONN_WAIT_TIME
from config import GROWTH_URL
from config import proxies
from dateutil import parser
from datetime import datetime
from utility import time_diff
from utility import to_datetime
from utility import variance
from datetime import timedelta
from config import report_emails
from config import admin_email, admin_passwd
class Plant:
"""Class representing individual plants monitored by the system"""
def __init__(self, plant_id):
#id allocated to the plant
self.plant_id = plant_id
#id of the photon device monitoring the plant
self.plant_device_id = None
#name of the photon device monitoring the plant
self.plant_device_name = None
#pH value of the solution containing the plant
self.ph = None
        #Electrical Conductivity value of the solution containing the plant
self.ec = None
#Time between watering set for the plant
self.tbw = None
#Time for watering set for the plant
self.tfw = None
#Current height of the plant as captured by the camera
self.current_height = None
#Current growth of the plant as captured by the camera
self.current_growth = None
#Growth of the plant as captured on the previous day
self.last_day_growth = None
#Height of the plant as captured on the previous day
self.last_day_height = None
#Hourly growth difference
self.hourly_growth_diff = None
#Hourly height difference
self.hourly_height_diff = None
#Daily growth difference
self.daily_growth_diff = None
#Daily height difference
self.daily_height_diff = None
#Time when the factors of the system were manipulated
self.last_time_manipulated = None
#Time when the reading of the measurements were taken
self.read_time = None
#Time when the plant was last heard
self.last_time_heard = None
#Time when the camera shot was taken for measuring height and growth
self.cam_shot_time = None
#Indicates whether the reading currently recorded is stale due to
#loss in connection
self.stale_read_flag = False
#Indicates whether the reading currently recorded is stale due to loss
#in connection with the camera
self.stale_cam_flag = False
#Flag indicating whether the pH value recorded is abnormal
self.abnormal_ph_val_flag = False
#Flag indicating whether the EC value recorded is abnormal
self.abnormal_ec_val_flag = False
#Flag indicating whether the time between watering is abnormal
self.abnormal_tbw_flag = False
#Flag indicating whether the time for watering is abnormal
self.abnormal_tfw_flag = False
def get_plant_id(self):
"""Gets the plant id"""
return self.plant_id
    def get_plant_device_id(self):
        """Gets the id of the photon device to which the plant is connected"""
        return self.plant_device_id
    def get_plant_device_name(self):
        """Gets the name of the device to which the plant is connected"""
        return self.plant_device_name
def get_ph(self):
"""Gets the pH value of the solution for the plant"""
return self.ph
def set_ph(self, ph):
"""Sets the pH value of the solution for the plant"""
self.ph = ph
def get_ec(self):
"""Gets the EC value of the solution for the plant"""
return self.ec
def set_ec(self, ec):
"""Sets the EC value of the solution for the plant"""
self.ec = ec
def get_tbw(self):
"""Gets the time between water value for the plant"""
return self.tbw
def set_tbw(self, tbw):
"""Sets the time between water value for the plant"""
self.tbw = tbw
def get_tfw(self):
"""Gets the time for water value for the plant"""
return self.tfw
def set_tfw(self, tfw):
"""Sets the time for water value for the plant"""
self.tfw = tfw
def get_current_height(self):
"""Gets the current height for the plant"""
return self.current_height
def set_current_height(self, curr_height):
"""Sets the current height for the plant"""
self.current_height = curr_height
def get_current_growth(self):
"""Gets the current growth for the plant"""
return self.current_growth
def set_current_growth(self, curr_growth):
"""Sets the current growth for the plant"""
self.current_growth = curr_growth
def get_last_day_growth(self):
"""Gets the Growth of the plant as captured on the previous day """
return self.last_day_growth
def set_last_day_growth(self, last_day_growth):
"""Sets the Growth of the plant as captured on the previous day """
self.last_day_growth = last_day_growth
def get_last_day_height(self):
"""Gets the Height of the plant as captured on the previous day"""
return self.last_day_height
def set_last_day_height(self, last_day_height):
"""Sets the Height of the plant as captured on the previous day"""
self.last_day_height = last_day_height
def get_daily_growth_diff(self):
"""Gets the Daily growth difference"""
return self.daily_growth_diff
def set_daily_growth_diff(self, daily_growth_diff):
"""Sets the Daily growth difference"""
self.daily_growth_diff = daily_growth_diff
def get_daily_height_diff(self):
"""Gets the Daily height difference"""
return self.daily_height_diff
def set_daily_height_diff(self, daily_height_diff):
"""Sets the Daily height difference"""
self.daily_height_diff = daily_height_diff
def get_hourly_height_diff(self):
"""Gets the Hourly height difference"""
return self.hourly_height_diff
def set_hourly_height_diff(self, diff):
"""Sets the Hourly height difference"""
self.hourly_height_diff = diff
def get_hourly_growth_diff(self):
"""Gets the Hourly growth difference"""
return self.hourly_growth_diff
def set_hourly_growth_diff(self, diff):
"""Sets the Hourly growth difference"""
self.hourly_growth_diff = diff
def get_last_time_manipulated(self):
"""Gets the Time when the factors of the system were manipulated"""
return self.last_time_manipulated
def set_last_time_manipulated(self, date_time):
"""Sets the Time when the factors of the system were manipulated"""
self.last_time_manipulated = date_time
def get_read_time(self):
"""Gets the Time when the reading of the measurements were taken"""
return self.read_time
def set_read_time(self, last_time_read):
"""Sets the Time when the reading of the measurements were taken"""
self.read_time = last_time_read
def get_cam_shot_time(self):
"""Gets the Time when the camera shot was taken for measuring height
and growth"""
return self.cam_shot_time
def set_cam_shot_time(self, date_time):
"""Sets the Time when the camera shot was taken for measuring height
and growth"""
self.cam_shot_time = date_time
def get_last_time_heard(self):
"""Gets the Time when the plant was last heard"""
return self.last_time_heard
def set_last_time_heard(self, last_time_heard):
"""Sets the Time when the plant was last heard"""
self.last_time_heard = last_time_heard
def get_stale_read_flag(self):
"""Gets the flag indicating whether the reading currently recorded is
stale due to loss in connection"""
return self.stale_read_flag
def set_stale_read_flag(self, flag):
"""Sets the flag indicating whether the reading currently recorded is
stale due to loss in connection"""
self.stale_read_flag = flag
def get_stale_cam_flag(self):
"""Gets the flag indicating whether the reading currently recorded
is stale due to loss in connectivity with the camera"""
return self.stale_cam_flag
def set_stale_cam_flag(self, flag):
"""Sets the flag indicating whether the reading currently recorded
is stale due to loss in connectivity with the camera"""
self.stale_cam_flag = flag
def get_abnormal_ph_val_flag(self):
"""Gets the Flag indicating whether the pH value is abnormal"""
return self.abnormal_ph_val_flag
def set_abnormal_ph_val_flag(self, flag):
"""Sets the Flag indicating whether the pH value is abnormal"""
self.abnormal_ph_val_flag = flag
def get_abnormal_ec_val_flag(self):
"""Gets the Flag indicating whether the EC value is abnormal"""
return self.abnormal_ec_val_flag
def set_abnormal_ec_val_flag(self, flag):
"""Sets the Flag indicating whether the EC value is abnormal"""
self.abnormal_ec_val_flag = flag
def get_abnormal_tbw_flag(self):
"""Gets the Flag indicating whether the time between watering is abnormal"""
return self.abnormal_tbw_flag
def set_abnormal_tbw_flag(self, flag):
"""Sets the Flag indicating whether the time between watering is abnormal"""
self.abnormal_tbw_flag = flag
def set_abnormal_tfw_flag(self, flag):
"""Sets the Flag indicating whether the time for watering is abnormal"""
self.abnormal_tfw_flag = flag
def get_abnormal_tfw_flag(self):
"""Gets the Flag indicating whether the time for watering is abnormal"""
return self.abnormal_tfw_flag
class MonitorPlantData:
"""Class representing and performing the regular monitoring of the plants
in the system"""
def __init__(self, devices, plants):
#list of photon device objects connected to the system
self.devices = devices
#List of plant objects in the system
self.plants = plants
#device plant mapping from the configuration file
self.DEVICE_PLANT_MAPPING = DEVICE_PLANT_MAPPING
#plant device mapping from the configuration file
self.PLANT_DEVICE_MAPPING = PLANT_DEVICE_MAPPING
#list of photon devices connected to the cloud
self.connected_devices = []
#list of photon device disconnected from the cloud
self.disconnected_devices = []
#list of plants connected to the cloud
self.connected_plants = []
#list of plants disconnected to the cloud
self.disconnected_plants = []
def plant_connectivity_check(self):
"""Performs the connectivity check of the photon devices and the plants
connected to them at a given instance. Groups the connected and
disconnected plants and devices into separate lists"""
for device in self.DEVICE_PLANT_MAPPING:
if self.check_device_connection(device):
self.connected_devices.append(device)
for plant in self.DEVICE_PLANT_MAPPING[device]:
self.connected_plants.append(plant)
else:
self.disconnected_devices.append(device)
for plant in self.DEVICE_PLANT_MAPPING[device]:
self.disconnected_plants.append(plant)
def hourly_monitor(self):
"""Performs the hourly monitoring and readings of the plants"""
#performs connectivity check
self.plant_connectivity_check()
#monitors and reads the plant data connected at that instant
for plant in self.connected_plants:
self.read_plant_data(plant)
#Returns back to the disconnected plants in order to check for the
#current connectivity. Tries for a set number of attempts and then
#breaks, notifying the failure to connect and asking for checking the
#connectivity.
for plant in self.disconnected_plants:
status = self.check_device_connection(self.PLANT_DEVICE_MAPPING[plant]["device_name"])
no_tries = 0
while status == False:
sleep(DEVICE_CONN_WAIT_TIME)
status = self.check_device_connection(self.PLANT_DEVICE_MAPPING[plant]["device_name"])
no_tries += 1
if no_tries > DEVICE_CONN_NO_TRIES:
print "The readings for "+plant+" could not be recorded. Please check the connectivity"
break
if status == True:
self.disconnected_plants.remove(plant)
self.read_plant_data(plant)
def status_check(self):
"""A utility method for checking and printing the status of the
photon devices and plants in the system connected to the cloud.
Used for report generation"""
print("Report Generated at: ", datetime.now())
print("\n")
print("Connected Devices:", self.connected_devices)
print("Disconnected Devices:", self.disconnected_devices)
print("\n")
print("Connected Plants:", self.connected_plants)
print("Disconnected Plants:", self.disconnected_plants)
print("\n\n")
print("Latest values for connected plants:")
print("\n")
connected_plants = self.get_connected_plants()
for plant in connected_plants:
print("Plant Id: ", plant.get_plant_id())
print("pH value: ", plant.get_ph())
print("EC value: ", plant.get_ec())
print("Time for watering: ", plant.get_tfw())
print("Time between watering: ", plant.get_tbw())
print("Growth: ", plant.get_current_growth())
print("Height: ", plant.get_current_height())
print("Latest Cam Shot Time (EST): ", plant.get_cam_shot_time())
print("Last time the plant was heard: ", plant.get_last_time_heard())
print("Is the latest data recorded from device stale? ", plant.get_stale_read_flag())
print("Is the latest cam shot data recorded stale? ", plant.get_stale_cam_flag())
print("Is the latest ph Value recorded abnormal? ", plant.get_abnormal_ph_val_flag())
print("Is the latest EC value recorded abnormal? ", plant.get_abnormal_ec_val_flag())
print("Is the latest time between watering recorded abnormal? ", plant.get_abnormal_tbw_flag())
print("Is the latest time for watering recorded abnormal? ", plant.get_abnormal_tfw_flag())
print("\n------------------------\n")
print("Latest values for disconnected plants:")
disconnected_plants = self.get_disconnected_plants()
for plant in disconnected_plants:
print("Plant Id: ", plant.get_plant_id())
print("pH value: ", plant.get_ph())
print("EC value: ", plant.get_ec())
print("Time for watering: ", plant.get_tfw())
print("Time between watering: ", plant.get_tbw())
print("Growth: ", plant.get_current_growth())
print("Height: ", plant.get_current_height())
print("Latest Cam Shot Time (EST): ", plant.get_cam_shot_time())
print("Last time the plant was heard: ", plant.get_last_time_heard())
print("Is the latest data recorded from device stale? ", plant.get_stale_read_flag())
print("Is the latest cam shot data recorded stale? ", plant.get_stale_cam_flag())
print("Is the latest ph Value recorded abnormal? ", plant.get_abnormal_ph_val_flag())
print("Is the latest EC value recorded abnormal? ", plant.get_abnormal_ec_val_flag())
print("Is the latest time between watering recorded abnormal? ", plant.get_abnormal_tbw_flag())
print("Is the latest time for watering recorded abnormal? ", plant.get_abnormal_tfw_flag())
print("\n------------------------\n")
print("\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
def create_summary_report(self, coll):
"""Method for generating a summarized report of the plants from the
mongodb collections used for storing the plant data."""
print("\nSummary of pH, EC and Growth values for the past 24 hours.\n")
plant_keys = PLANT_DEVICE_MAPPING.keys()
plant_dict = {}
times = []
for plant_id in plant_keys:
print("Plant Id:", plant_id)
current_time = datetime.now()
past24hr = current_time - timedelta(hours=24)
last_observations = coll.find({"plant_id" : plant_id,
"read_time" : {"$gte" : past24hr ,
"$lt" : current_time}})
ph_values = []
ec_values = []
growth_values = []
height_values = []
for entries in last_observations:
#print("Ph Value of the plant", entries["phVal"])
times.append(entries["read_time"])
try:
ph_values.append((float(str(entries["phVal"])), entries["read_time"]))
growth_values.append((float(str(entries["growth"])), entries["read_time"]))
height_values.append((float(str(entries["height"])), entries["read_time"]))
ec_val = float(str(entries["nutrient_conductivity"]))
ec_values.append((ec_val, entries["read_time"]))
except ValueError:
pass
plant_dict[plant_id] = {"phVals": ph_values, "ecVals": ec_values,
"growthVals" : growth_values,
"heightVals": height_values}
#print(plant_dict)
self.get_plant_summary(plant_dict, plant_id, times, coll)
def get_plant_summary(self, plant_dict, plant_id, times, data_collection):
"""Method for getting the plant summary information from the plant
data stored in the mongodb collections"""
ph_vals = [entries[0] for entries in plant_dict[plant_id]["phVals"]]
if ph_vals == []:
print("No pH values were recorded in the given time period")
else:
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for ph between "+str(min_time)+" and "+str(max_time))
ph_variance = variance(ph_vals)
min_ph = min(ph_vals)
max_ph = max(ph_vals)
erratic_ph_vals = [entries[0] for entries in plant_dict[plant_id]["phVals"]
if entries[0] > config.max_ph or entries[0] < config.min_ph]
print("The minimum ph value recorded: "+ str(min_ph))
print("The maximum ph value recorded: "+ str(max_ph))
print("Variance in ph values recorded: "+ str(ph_variance))
print("Unique ph values: "+ str(list(set(ph_vals))))
print("Erratic ph values: "+ str(erratic_ph_vals))
print("\nph values and their respective timings:\n")
for entries in plant_dict[plant_id]["phVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
ec_vals = [entries[0] for entries in plant_dict[plant_id]["ecVals"]]
if ec_vals == []:
print("No EC values were recorded in the given period of time")
else:
ec_variance = variance(ec_vals)
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for ec between "+str(min_time)+" and "+str(max_time))
min_ec = min(ec_vals)
max_ec = max(ec_vals)
erratic_ec_vals = [entries[0] for entries in plant_dict[plant_id]["ecVals"]
if entries[0] > config.max_ec or entries[0] < config.min_ec]
print("The minimum ec value recorded: "+ str(min_ec))
print("The maximum ec value recorded: "+ str(max_ec))
print("Variance in ec values recorded: "+ str(ec_variance))
print("Unique ec values: "+ str(list(set(ec_vals))))
print("Erratic ec values: "+ str(erratic_ec_vals))
print("\nec values and their respective timings:\n")
for entries in plant_dict[plant_id]["ecVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
growth_vals = [entries[0] for entries in plant_dict[plant_id]["growthVals"]]
if growth_vals == []:
print("No growth values were recorded in the given period of time")
else:
growth_variance = variance(growth_vals)
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for growth between "+str(min_time)+" and "+str(max_time))
min_growth = min(growth_vals)
max_growth = max(growth_vals)
print("The minimum growth value recorded: "+ str(min_growth))
print("The maximum growth value recorded: "+ str(max_growth))
print("Variance in growth values recorded: "+ str(growth_variance))
print("Unique growth values: "+ str(list(set(growth_vals))))
print("\ngrowth values and their respective timings:\n")
for entries in plant_dict[plant_id]["growthVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
height_vals = [entries[0] for entries in plant_dict[plant_id]["heightVals"]]
if height_vals == []:
print("No height values were recorded in the given period of time")
else:
height_variance = variance(height_vals)
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for height between "+str(min_time)+" and "+str(max_time))
min_height = min(height_vals)
max_height = max(height_vals)
print("The minimum height value recorded: "+ str(min_height))
print("The maximum height value recorded: "+ str(max_height))
print("Variance in height values recorded: "+ str(height_variance))
print("Unique height values: "+ str(list(set(height_vals))))
print("\nheight values and their respective timings:\n")
for entries in plant_dict[plant_id]["heightVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
print("\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
def read_plant_data(self, plant):
"""Method used for reading all the data points related to a plant
connected to the system"""
plant_obj = self.plants[plant]
plant_device_name = self.PLANT_DEVICE_MAPPING[plant]["device_name"]
plant_device = self.devices[plant_device_name]
#get the time when the plant was last heard
plant_last_heard = to_datetime(self.devices[plant_device_name].last_heard)
plant_obj.set_last_time_heard(plant_last_heard)
#get the current read time
curr_read_time = datetime.now()
plant_obj.set_read_time(curr_read_time)
#get the stale device flag
STALE_PLANT_DATA_FLAG = self.is_data_read_stale(curr_read_time,
plant_last_heard)
plant_obj.set_stale_read_flag(STALE_PLANT_DATA_FLAG)
#get camera data for the plant
camera_data_obj = PlantGrowth(plant)
if proxy == False:
pass
else:
camera_data_obj.set_proxy()
plant_growth_tuple = camera_data_obj.get_growth_data()
#set the camera data for the plant instance
if plant_growth_tuple == None:
growth = None
STALE_CAM_DATA = True
shot_time = None
height = None
last_plant_height = plant_obj.get_current_height()
plant_obj.set_current_height(last_plant_height)
plant_obj.set_hourly_height_diff(0.0)
last_plant_growth = plant_obj.get_current_growth()
plant_obj.set_current_growth(last_plant_growth)
plant_obj.set_hourly_growth_diff(0.0)
plant_obj.set_stale_cam_flag(STALE_CAM_DATA)
plant_obj.set_cam_shot_time(plant_obj.get_cam_shot_time())
else:
growth = plant_growth_tuple[0]
STALE_CAM_DATA = plant_growth_tuple[1]
shot_time = plant_growth_tuple[2]
height = plant_growth_tuple[3]
            last_plant_height = plant_obj.get_current_height()
            # on the very first reading there is no previous value to diff against
            hourly_height_diff = height - last_plant_height if last_plant_height is not None else 0.0
            plant_obj.set_current_height(height)
            plant_obj.set_hourly_height_diff(hourly_height_diff)
            last_plant_growth = plant_obj.get_current_growth()
            hourly_growth_diff = growth - last_plant_growth if last_plant_growth is not None else 0.0
            plant_obj.set_current_growth(growth)
            plant_obj.set_hourly_growth_diff(hourly_growth_diff)
plant_obj.set_stale_cam_flag(STALE_CAM_DATA)
plant_obj.set_cam_shot_time(shot_time)
#get the ph readings of the plant
plant_ph_obj = PlantPh(plant, plant_device)
ph_reading = plant_ph_obj.get_ph_reading()
#set the current ph readings of the plant
if ph_reading == None:
plant_obj.set_ph(plant_obj.get_ph())
plant_obj.set_abnormal_ph_val_flag(True)
else:
plant_obj.set_ph(ph_reading[0])
plant_obj.set_abnormal_ph_val_flag(ph_reading[1])
#get the ec readings of the plant
plant_ec_obj = PlantEc(plant, plant_device)
ec_reading = plant_ec_obj.get_ec_reading()
#set the current ph readings of the plant
if ec_reading == None:
plant_obj.set_ec(plant_obj.get_ec())
plant_obj.set_abnormal_ec_val_flag(True)
else:
plant_obj.set_ec(ec_reading[0])
plant_obj.set_abnormal_ec_val_flag(ec_reading[1])
#get the watering readings of the plant
plant_watering_obj = PlantWatering(plant, plant_device)
watering_reading = plant_watering_obj.get_watering_reading()
#set the current ph readings of the plant
if watering_reading == None:
plant_obj.set_tbw(plant_obj.get_tbw())
plant_obj.set_tfw(plant_obj.get_tfw())
plant_obj.set_abnormal_tbw_flag(True)
plant_obj.set_abnormal_tfw_flag(True)
else:
plant_obj.set_tbw(watering_reading[0])
plant_obj.set_tfw(watering_reading[2])
plant_obj.set_abnormal_tbw_flag(watering_reading[1])
plant_obj.set_abnormal_tfw_flag(watering_reading[3])
def check_device_connection(self, device_name):
"""Checks the current connectivity of the given photon device identified
by its name"""
return self.devices[device_name].connected
def get_plants(self):
"""Gets the list of plant objects"""
return self.plants.values()
def get_connected_devices(self):
"""Gets the list of connected photon devices"""
return self.connected_devices
def get_disconnected_devices(self):
"""Gets the list of disconnected photon devices"""
return self.disconnected_devices
def get_connected_plants(self):
"""Gets the list of connected plants"""
return [self.plants[entries] for entries in self.connected_plants]
def get_disconnected_plants(self):
"""Gets the list of disconnected plants"""
return [self.plants[entries] for entries in self.disconnected_plants]
def is_data_read_stale(self, curr_time, last_heard):
"""Method for calculating if the time at which the plant data is read
is too old. This may be due to previously stored values in the photon
device which got disconnected from the cloud and did not update the
recent data"""
        delta = curr_time - last_heard
        # total_seconds() also counts full days, unlike the .seconds attribute
        time_diff_mins = delta.total_seconds() / 60.0
STALE_DATA = False
if time_diff_mins >= DEVICE_STALE_DATA_MINS:
STALE_DATA = True
return STALE_DATA
class PlantGrowth:
"""Class representing the growth data captured by the camera"""
def __init__(self, plant_id):
#plant id for which the growth data needs to be captured
self.plant_id = plant_id
#URL for the camera server
self.GROWTH_URL = GROWTH_URL
#set the proxy flag
self.proxy = False
def set_proxy(self):
"""Sets the proxy flag"""
self.proxy = True
def get_growth_data(self):
"""gets the current growth value of the given plant id"""
payload = {}
payload["plant"] = self.plant_id
base_url = self.GROWTH_URL
if self.proxy:
response = requests.get(base_url, params=payload, proxies=proxies)
else:
response = requests.get(base_url, params=payload)
try:
json_resp = json.loads(response.text)
STALE_CAM_DATA = False
growth = json_resp[0]["camera_output"]
height = json_resp[0]["height"]
shot_time = parser.parse(json_resp[0]["shot_time"]).replace(tzinfo=None)
            # use a different local name so the imported time_diff() helper is not shadowed
            diff = time_diff(shot_time, datetime.now())
            time_diff_mins = diff[0]
            if time_diff_mins >= CAM_STALE_DATA_MINS:
                STALE_CAM_DATA = True
            return (growth, STALE_CAM_DATA, shot_time, height)
except:
return None
class PlantPh:
"""Class representing the pH readings for a plant"""
def __init__(self, plant_id, device):
#Id of the plant for which the pH reading is being recorded
self.plant_id = plant_id
#Device from which the reading is recorded
self.device = device
#Flag indicating whether the pH reading is abnormal
self.abnormal_ph_flag = False
def get_ph_reading(self):
"""Method for reading the pH value of the solution for a particular
plant"""
if self.plant_id[1] == "0":
try:
ph_plant0 = float(self.device.P0_phVal)
if ph_plant0 > max_ph or ph_plant0 < min_ph:
self.abnormal_ph_flag = True
return (ph_plant0, self.abnormal_ph_flag)
            except TypeError:
                print("Not registered variable")
                return None
            except AttributeError:
                print("Not registered variable")
                return None
            except IOError:
                print("Photon not connected")
                return None
            except:
                print("Error from Spark Cloud")
                return None
if self.plant_id[1] == "1":
try:
ph_plant1 = float(self.device.P1_phVal)
if ph_plant1 > max_ph or ph_plant1 < min_ph:
self.abnormal_ph_flag = True
return (ph_plant1, self.abnormal_ph_flag)
            except TypeError:
                print("Not registered variable")
                return None
            except AttributeError:
                print("Not registered variable")
                return None
            except IOError:
                print("Photon not connected")
                return None
            except:
                print("Error from Spark Cloud")
                return None
class PlantWatering:
"""Class representing the watering frequency readings for a plant"""
def __init__(self, plant_id, device):
#Id of the plant for which the watering frequency reading is being recorded
self.plant_id = plant_id
#Device from which the reading is recorded
self.device = device
#Flag indicating whether the time for watering reading is abnormal
self.abnormal_tfw_flag = False
#Flag indicating whether the time between watering reading is abnormal
self.abnormal_tbw_flag = False
def get_watering_reading(self):
"""Method for reading the watering frequency values for a particular plant"""
if self.plant_id[1] == "0":
try:
tbw_plant0 = int(self.device.P0_TBW)
tfw_plant0 = int(self.device.P0_TFW)
if tbw_plant0 > max_tbw or tbw_plant0 < min_tbw:
self.abnormal_tbw_flag = True
if tfw_plant0 > max_tfw or tfw_plant0 < min_tfw:
self.abnormal_tfw_flag = True
return (tbw_plant0, self.abnormal_tbw_flag, tfw_plant0, self.abnormal_tfw_flag)
            except TypeError:
                print("Not registered variable")
                return None
            except AttributeError:
                print("Not registered variable")
                return None
            except IOError:
                print("Photon not connected")
                return None
            except:
                print("Error from Spark Cloud")
                return None
if self.plant_id[1] == "1":
try:
tbw_plant1 = int(self.device.P1_TBW)
tfw_plant1 = int(self.device.P1_TFW)
if tbw_plant1 > max_tbw or tbw_plant1 < min_tbw:
self.abnormal_tbw_flag = True
if tfw_plant1 > max_tfw or tfw_plant1 < min_tfw:
self.abnormal_tfw_flag = True
return (tbw_plant1, self.abnormal_tbw_flag, tfw_plant1, self.abnormal_tfw_flag)
            except TypeError:
                print("Not registered variable")
                return None
            except AttributeError:
                print("Not registered variable")
                return None
            except IOError:
                print("Photon not connected")
                return None
            except:
                print("Error from Spark Cloud")
                return None
class PlantEc:
"""Class representing the Electrical Conductivity readings for a plant"""
def __init__(self, plant_id, device):
#Id of the plant for which the Electrical Conductivity reading is being recorded
self.plant_id = plant_id
#Device from which the reading is recorded
self.device = device
#Flag indicating whether the EC reading is abnormal
self.abnormal_ec_flag = False
def get_ec_reading(self):
"""Method for reading the Electrical Conductivity values for a particular plant"""
if self.plant_id[1] == "0":
try:
ec_plant0 = float(self.device.P0_nutrientC)
if ec_plant0 > max_ec or ec_plant0 < min_ec:
self.abnormal_ec_flag = True
return (ec_plant0, self.abnormal_ec_flag)
            except TypeError:
                print("Not registered variable")
                return None
            except AttributeError:
                print("Not registered variable")
                return None
            except IOError:
                print("Photon not connected")
                return None
            except:
                print("Error from Spark Cloud")
                return None
if self.plant_id[1] == "1":
try:
ec_plant1 = float(self.device.P1_nutrientC)
if ec_plant1 > max_ec or ec_plant1 < min_ec:
self.abnormal_ec_flag = True
return (ec_plant1, self.abnormal_ec_flag)
            except TypeError:
                print("Not registered variable")
                return None
            except AttributeError:
                print("Not registered variable")
                return None
            except IOError:
                print("Photon not connected")
                return None
            except:
                print("Error from Spark Cloud")
                return None
class Report:
"""Class representing the daily reports generated"""
def __init__(self, plant_monitor_obj, data_collection, emails):
#Instance of the plant monitor object containing the recent plant data
self.plant_monitor_obj = plant_monitor_obj
#mongodb collection storing the daily plant data
self.data_collection = data_collection
#list of email ids to which the automated reports needs to be sent
#the list is obtained from the configuration files
self.emails = emails
        #file for storing the report for the day
self.report_file = open("report.txt", "w")
#csv file containing the daily readings
self.csv_file = open("daily_plant_readings.csv", "w")
def generate_report(self):
"""Method for generating the daily report"""
sys.stdout = self.report_file
#self.plant_monitor_obj.status_check()
self.plant_monitor_obj.create_summary_report(self.data_collection)
def generate_csv(self):
"""Method for generating csv files for last 24 hours readings"""
current_time = datetime.now()
past24hr = current_time - timedelta(hours=24)
last_observations = self.data_collection.find({"read_time" :
{"$gte" : past24hr ,
"$lt" : current_time}})
plant_data_list = []
for entries in last_observations:
plant_data_list.append(entries)
keys = plant_data_list[0].keys()
with self.csv_file as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(plant_data_list)
def send_emails(self):
yagmail.register(admin_email, admin_passwd)
yag = yagmail.SMTP(admin_email)
to = self.emails
subject = '24 hour report for hydroponic environment'
        body = 'Please find the past 24 hour report generated from the automated \
hydroponic environment along with the data readings recorded in a csv file. \
Report File -> report.txt \
Data Readings -> daily_plant_readings.csv \
Please note this is an automatically \
generated email. For more information about the project and its \
source code please check the GitHub repository: https://github.com/infyponics/infyponics'
report_doc = 'report.txt'
csv_readings = 'daily_plant_readings.csv'
yag.send(to = to, subject = subject, contents = [body, report_doc,
csv_readings])
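# Usage sketch (illustrative only): how the classes above are typically wired
# together. The names `photon_device`, `plant_monitor`, `plant_data`, and
# `report_emails` are placeholders for objects defined elsewhere in this
# module's configuration; they are assumptions, not part of the original code.
#
#     ec_sensor = PlantEc("P0", photon_device)
#     reading = ec_sensor.get_ec_reading()   # -> (value, abnormal_flag) or None
#
#     report = Report(plant_monitor, plant_data, report_emails)
#     report.generate_report()               # writes report.txt
#     report.generate_csv()                  # writes daily_plant_readings.csv
#     report.send_emails()                   # mails both files via yagmail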
|
#!/usr/bin/python3
import camoco.RefGenDist as RefGenDist
from collections import defaultdict
import matplotlib.pylab as plt
from .Camoco import Camoco
from .Locus import Gene, Locus
from .Chrom import Chrom
from .Genome import Genome
from .Tools import memoize, rawFile
from .Exceptions import CamocoZeroWindowError
import itertools
import collections
import random
import pandas as pd
import numpy as np
import scipy as sp
import math
import gzip
import re
from functools import lru_cache
class RefGen(Camoco):
def __init__(self, name):
# initialize camoco instance
super().__init__(name, type="RefGen")
self._create_tables()
self._build_indices()
@property
def genome(self):
"""
        Returns a Genome object containing the chromosomes present in this RefGen.
"""
return Genome(
self.type + self.name,
chroms=[
Chrom(*x)
for x in self.db.cursor().execute(
"""
SELECT id,length FROM chromosomes
"""
)
],
)
def Gene(self, chrom, start, end, name, window=0, sub_loci=None, **kwargs):
"""
Returns a gene object including kwargs
"""
attrs = dict(
self.db.cursor()
.execute(
"""
SELECT key,val FROM gene_attrs WHERE id = ?
""",
(name,),
)
.fetchall()
)
return Gene(chrom, start, end, name, window, sub_loci, **kwargs).update(attrs)
@memoize
def num_genes(self):
"""
Returns the number of genes in the dataset
"""
return self.db.cursor().execute(""" SELECT COUNT(*) FROM genes""").fetchone()[0]
def random_gene(self, **kwargs):
"""
Returns a random gene within the reference genome.
Also allows passing of keyword arguments to Locus
constructor method allowing for flexible generation.
See Locus.__init__ for more details.
Parameters
----------
**kwargs : key,value pairs
Extra parameters passed onto the locus init method.
Returns
-------
A Gene object (camoco.Locus based)
"""
return self.Gene(
*self.db.cursor()
.execute(
"""
SELECT chromosome,start,end,id from genes WHERE rowid = ?
""",
(random.randint(1, self.num_genes()),),
)
.fetchone(),
**kwargs
)
def intersection(self, gene_list):
"""
Return the subset of genes that are in the refgen.
Parameters
----------
gene_list : list-like of co.Locus
a list of Gene (Locus) objects
Returns
-------
a list like object containing genes that
are in the refgen.
"""
return [x for x in gene_list if x in self]
def random_genes(self, n, **kwargs):
"""
Return random genes from the RefGen, without replacement.
Parameters
----------
n : int
**kwargs : key,value pairs
Extra parameters passed onto the locus init method
Returns
-------
An iterable containing n (unique) random genes
"""
        # rowids start at 1, so sample from 1..num_genes inclusive; sqlite3 cannot
        # run SELECT statements through executemany, so fetch each chosen rowid
        # individually.
        rand_nums = np.random.choice(np.arange(1, self.num_genes() + 1), n, replace=False)
        cur = self.db.cursor()
        gene_info = [
            cur.execute(
                "SELECT chromosome,start,end,id from genes WHERE rowid = ?",
                (int(rownum),),
            ).fetchone()
            for rownum in rand_nums
        ]
        return set(
            [
                Gene(chr, start, end=end, id=id, **kwargs)
                for (chr, start, end, id) in gene_info
            ]
        )
def iter_chromosomes(self):
""" returns chrom object iterator """
return (
Chrom(*x)
for x in self.db.cursor().execute(
"""
SELECT id,length FROM chromosomes
"""
)
)
def iter_genes(self):
"""
Iterates over genes in RefGen.
Returns
-------
A generator containing genes
"""
for x in self.db.cursor().execute(
"""
SELECT chromosome,start,end,id FROM genes
"""
):
yield self.Gene(*x)
@lru_cache(maxsize=131072)
def from_id(self, gene_id, **kwargs):
"""
Returns a gene object from a string
Parameters
----------
gene_id : str
ID of the gene you want to pull out
Returns
-------
A single gene Object
"""
cur = self.db.cursor()
if gene_id not in self:
result = cur.execute(
"SELECT id FROM aliases WHERE alias = ?", [gene_id]
).fetchone()
if not result:
raise ValueError("{} not in {}".format(gene_id, self.name))
gene_id = result[0]
gene_id = gene_id.upper()
info = cur.execute(
"SELECT chromosome,start,end,id FROM genes WHERE id = ?", [gene_id]
).fetchone()
return self.Gene(*info, **kwargs)
def from_ids(self, gene_ids, check_shape=False, **kwargs):
"""
Returns a list of gene object from an iterable of id strings
OR from a single gene id string.
Parameters
----------
        gene_ids : str OR iterable of str
ID(s) of the genes you want to pull out
check_shape : bool (default: False)
Check if you get back the same number of ids you
pass in. If false (default), just give back what
            you find, ignoring erroneous ids.
**kwargs : keyword arguments
Pass additional keyword arguments which will be
assigned to genes. (e.g. window=1)
Returns
-------
A list of locus objects if you pass in an iterable,
otherwise a single gene
"""
if isinstance(gene_ids, str): # pragma: no cover
import warnings
warnings.warn(
"Passing singe values into RefGen.from_ids is deprecated. Use RefGen.from_id() "
"or slicing syntax instead."
)
return self.from_id(gene_ids, **kwargs)
genes = []
for id in gene_ids:
try:
genes.append(self.from_id(id, **kwargs))
except ValueError as e:
if check_shape == False:
continue
else:
raise e
return genes
    # NOTE: Don't LRU cache this, it gets cached in from_id
def __getitem__(self, item):
"""
A convenience method to extract loci from the reference genome.
"""
if isinstance(item, str):
return self.from_id(item)
# Allow for iterables of items to be passed in
else:
return self.from_ids(item)
def chromosome(self, id):
"""
returns a chromosome object
"""
try:
return Chrom(
*self.db.cursor()
.execute("""SELECT id,length FROM chromosomes WHERE id = ?""", (id,))
.fetchone()
)
except Exception as e:
self.log("No chromosome where id = {}. Error: {}", id, e)
raise ValueError(e)
def encompassing_genes(self, loci, chain=True):
"""
Returns the gene encompassing the locus. In other words
if a locus (e.g. a SNP) is inside of a gene, i.e. the
start of the gene is upstream and the end of the gene
is downstream of the locus boundaries, this method will
return it. Not to be confused with candidate genes, which
will return genes upstream and downstream surrounding a locus.
Parameters
----------
loci : an iterable of loci
The method will return encompassing genes for each
            locus in the iterable. If a single locus is passed,
            a single result will be returned.
        chain : bool (default: True)
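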
Specifies whether or not to chain results. If true
a single list will be returned, if False, a result for
each locus passed in will be returned.
"""
if isinstance(loci, Locus):
return [
self.Gene(*x)
for x in self.db.cursor().execute(
"""
SELECT chromosome,start,end,id FROM genes
WHERE chromosome = ?
AND start <= ? AND end >= ?
""",
(loci.chrom, loci.start, loci.end),
)
]
else:
iterator = iter(loci)
genes = [self.encompassing_genes(locus, chain=chain) for locus in iterator]
if chain:
genes = list(itertools.chain(*genes))
return genes
def genes_within(self, loci, chain=True):
"""
Returns the genes that START within a locus
start/end boundry.
Looks like: (y=yes,returned; n=no,not returned)
nnn nnnnnnn yyyyyy yyyyy yyyyyy yyyyyyy
start end
-----x****************************x-------------
"""
if isinstance(loci, Locus):
return [
self.Gene(*x)
for x in self.db.cursor().execute(
"""
SELECT chromosome,start,end,id FROM genes
WHERE chromosome = ?
AND start >= ? AND start <= ?
""",
(loci.chrom, loci.start, loci.end),
)
]
else:
iterator = iter(loci)
genes = [self.genes_within(locus, chain=chain) for locus in iterator]
if chain:
genes = list(itertools.chain(*genes))
return genes
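    # Usage sketch (illustrative only): `encompassing_genes` returns genes whose
    # bounds contain a locus, while `genes_within` returns genes that START inside
    # the locus span. `ref`, `snp`, and the Locus constructor arguments are
    # assumed placeholders (assuming `import camoco as co` at the call site).
    #
    #     ref = co.RefGen("SomeRefGen")              # a previously built RefGen
    #     snp = Locus("1", 150000, 150001)           # a 1 bp locus (e.g. a SNP)
    #     ref.encompassing_genes(snp)                # genes overlapping the SNP
    #     ref.genes_within(Locus("1", 100000, 900000))  # genes starting in the span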
def upstream_genes(self, locus, gene_limit=1000, window_size=None):
"""
Find genes that START upstream of a locus.
Genes are ordered so that the nearest genes are
at the beginning of the list.
Return Genes that overlap with the upstream window,
This includes partially overlapping genes, but NOT
genes that are returned by the genes_within method.
Looks like: (y=yes,returned; n=no,not returned)
nnn yyyyyyy yyyyyy yyyyy yyyyyy nnnn nnnn nnnnnnnn
nnnn
start end
-----------------------------x****************x--
^_________________________| Window (upstream)
"""
if locus.window == 0 and window_size is None: # C
raise CamocoZeroWindowError("Asking for upstream genes for {}", locus.id)
if window_size is not None:
upstream = locus.start - window_size
else:
upstream = locus.upstream
return [
self.Gene(*x)
for x in self.db.cursor().execute(
"""
SELECT chromosome,start,end,id FROM genes
INDEXED BY gene_start_end
WHERE chromosome = ?
                AND start >= ?  -- Gene must start at or after the upstream window boundary
                AND start < ?   -- Gene must start BEFORE the locus start
ORDER BY start DESC
LIMIT ?
""",
(locus.chrom, upstream, locus.start, gene_limit),
)
]
def downstream_genes(self, locus, gene_limit=1000, window_size=None):
"""
Returns genes downstream of a locus. Genes are ordered
so that the nearest genes are at the beginning of the list.
Return Genes that overlap with the downstream window,
This includes partially overlapping genes, but NOT
genes that are returned by the genes_within method.
Looks like: (y=yes,returned; n=no,not returned)
nnn nnnnnnn nnnnnn nnnn yyyy yyyyyy yyyy yyyyyy nnnnn
start end
---x****************x--------------------------------
|_______________________^ Window (downstream)
"""
if locus.window == 0 and window_size is None:
raise CamocoZeroWindowError(
"Asking for upstream genes for {} with no window size", locus.id
)
if window_size is not None:
downstream = locus.end + window_size
else:
downstream = locus.downstream
return [
self.Gene(*x)
for x in self.db.cursor().execute(
"""
SELECT chromosome,start,end,id FROM genes
INDEXED BY gene_start_end
WHERE chromosome = ?
AND start > ?
AND start <= ?
ORDER BY start ASC
LIMIT ?
""",
(locus.chrom, locus.end, downstream, gene_limit),
)
]
def flanking_genes(self, loci, flank_limit=2, chain=True, window_size=None):
"""
Returns genes upstream and downstream from a locus
        ** does NOT include genes within locus **
"""
if isinstance(loci, Locus):
# If we cant iterate, we have a single locus
locus = loci
if locus.window == 0 and window_size is None:
raise CamocoZeroWindowError(
"Asking for upstream genes for {} and no window size.", locus.id
)
upstream_gene_limit = int(flank_limit)
downstream_gene_limit = int(flank_limit)
up_genes = self.upstream_genes(
locus, gene_limit=upstream_gene_limit, window_size=window_size
)
down_genes = self.downstream_genes(
locus, gene_limit=downstream_gene_limit, window_size=window_size
)
if chain:
return list(itertools.chain(up_genes, down_genes))
return (up_genes, down_genes)
else:
iterator = iter(loci)
genes = [
self.flanking_genes(
locus, flank_limit=flank_limit, window_size=window_size
)
for locus in iterator
]
if chain:
genes = list(itertools.chain(*genes))
return genes
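    # Illustrative sketch of the window-based lookups above (names and the assumed
    # Locus constructor are placeholders): `upstream_genes`/`downstream_genes` walk
    # outward from a locus that carries a window, and `flanking_genes` combines both
    # sides while excluding genes inside the locus itself.
    #
    #     snp = Locus("2", 500000, 500001, window=50000)   # assumed Locus signature
    #     up = ref.upstream_genes(snp, gene_limit=5)
    #     down = ref.downstream_genes(snp, gene_limit=5)
    #     flank = ref.flanking_genes(snp, flank_limit=2)   # up to 2 genes per side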
def candidate_genes(
self,
loci,
flank_limit=2,
chain=True,
window_size=None,
include_parent_locus=False,
include_parent_attrs=False,
include_num_intervening=False,
include_rank_intervening=False,
include_num_siblings=False,
include_SNP_distance=False,
attrs=None,
return_table=False,
):
"""
Locus to Gene mapping.
Return Genes between locus start and stop, plus additional
flanking genes (up to flank_limit)
Parameters
----------
loci : camoco.Locus (also handles an iterable containing Loci)
a camoco locus or iterable of loci
flank_limit : int (default : 2)
The total number of flanking genes **on each side**
considered a candidate surrounding a locus
chain : bool (default : true)
Calls itertools chain on results before returning
window_size : int (default: None)
Optional parameter used to extend or shorten a locus
            window from which to choose candidates. If None,
the function will resort to what is available in the
window attribute of the Locus.
include_parent_locus : bool (default: False)
Optional parameter which will update candidate genes
'attr' attribute with the id of the parent locus
which contains it.
include_parent_attrs : iterable (default: False)
Optional parameter to include attributes from the parent
locus. Parent locus attrs specified here will be included.
If effective loci is > 1, the maximum value will be
included. E.g. - including the SNP effect size with
candidate genes.
        include_num_intervening : bool (default: False)
            Optional argument which adds an attribute to each
            candidate gene containing the number of candidate
            genes between it and the parent locus (genes that
            contain the locus are assigned -1).
include_rank_intervening : bool (default: False)
Optional argument which adds the rank of each
            candidate as a function of distance from the parent
Locus. i.e. The closest gene is ranked 1 and the furthest
gene is ranked n.
include_num_siblings : bool (default: False)
Optional argument which adds an attribute to each
candidate gene containing the number of total
            candidates (siblings) identified at the locus.
include_SNP_distance : bool (default:False)
            Include the distance between the candidate gene and
the parent SNP
attrs : dict (default: None)
An optional dictionary which will be updated to each
candidate genes attr value.
return_table : bool(default: False)
If True, return a Pandas table (DataFrame)
Returns
-------
a list of candidate genes (or list of lists if chain is False)
"""
if isinstance(loci, Locus):
# If not an iterator, its a single locus
locus = loci
genes_within = self.genes_within(locus)
up_genes, down_genes = self.flanking_genes(
locus, flank_limit=flank_limit, chain=False, window_size=window_size
)
# This always returns candidates together, if
# you want specific up,within and down genes
# use the specific methods
genes = sorted(itertools.chain(up_genes, genes_within, down_genes))
# include the number of effective loci
if include_rank_intervening == True:
ranks = sp.stats.rankdata(
[abs(x.center_distance(locus)) for x in genes]
)
# Iterate through candidate genes and propagate the
# parental info
for i, gene in enumerate(genes):
# gene.update({'num_effective_loci':len(locus.sub_loci)})
# include parent locus id if thats specified
if include_parent_locus == True:
gene.update({"parent_locus": locus.id})
if include_rank_intervening == True:
gene.update({"intervening_rank": ranks[i]})
# update all the parent_attrs
if include_parent_attrs and len(include_parent_attrs) > 0:
if "all" in include_parent_attrs:
include_parent_attrs = locus.attr.keys()
for attr in include_parent_attrs:
attr_name = "parent_{}".format(attr)
gene.update({attr_name: locus[attr]})
if include_num_intervening == True:
num_down = 0
num_up = 0
# Sort the genes by their distance from the locus
genes_with_distances = [
(gene, abs(gene.center_distance(locus))) for gene in genes
]
genes_with_distances = sorted(genes_with_distances, key=lambda x: x[1])
for gene, distance in genes_with_distances:
if locus.within(gene):
gene.update({"num_intervening": -1})
elif gene.center >= locus.center:
gene.update({"num_intervening": num_down})
num_down += 1
elif gene.center <= locus.center:
gene.update({"num_intervening": num_up})
num_up += 1
if include_num_siblings == True:
for gene in genes:
gene.update({"num_siblings": len(genes)})
if include_SNP_distance == True:
for gene in genes:
distance = abs(gene.center_distance(locus))
gene.update({"SNP_distance": distance})
if attrs is not None:
for gene in genes:
gene.update(attrs)
if return_table == True:
genes = pd.DataFrame([x.as_dict() for x in genes])
return genes
else:
iterator = iter(sorted(loci))
genes = [
# This is becoming a pain
self.candidate_genes(
locus,
flank_limit=flank_limit,
chain=chain,
window_size=window_size,
include_parent_locus=include_parent_locus,
include_parent_attrs=include_parent_attrs,
include_num_intervening=include_num_intervening,
include_rank_intervening=include_rank_intervening,
include_num_siblings=include_num_siblings,
include_SNP_distance=include_SNP_distance,
return_table=return_table,
attrs=attrs,
)
for locus in iterator
]
if chain: # C
if return_table:
genes = pd.concat(genes)
else:
genes = list(set(itertools.chain(*genes)))
return genes
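    # Illustrative sketch of the locus-to-gene mapping above (all names are
    # placeholders, and `gwas_loci` is assumed to be an existing iterable of Locus
    # objects with their windows already set):
    #
    #     candidates = ref.candidate_genes(
    #         gwas_loci,
    #         flank_limit=2,             # at most 2 flanking genes per side per locus
    #         include_parent_locus=True, # tag each gene with the locus that produced it
    #         return_table=True,         # get a pandas DataFrame instead of Gene objects
    #     )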
def bootstrap_candidate_genes(
self,
loci,
flank_limit=2,
chain=True,
window_size=None,
include_parent_locus=False,
):
"""
Returns candidate genes which are random, but conserves
total number of overall genes.
Parameters
----------
loci : camoco.Locus (also handles an iterable containing Loci)
a camoco locus or iterable of loci
flank_limit : int (default : 2)
The total number of flanking genes **on each side**
considered a candidate surrounding a locus
chain : bool (default : true)
Calls itertools chain on results before returning,
include_parent_locus : bool (default: False)
Optional parameter which will update candidate genes
'attr' attribute with the id of the parent locus
which contains it.
Returns
-------
a list of candidate genes (or list of lists if chain is False)
"""
if isinstance(loci, Locus):
# We now have a single locus
locus = loci
# grab the actual candidate genes
num_candidates = len(
self.candidate_genes(
locus, flank_limit=flank_limit, chain=True, window_size=window_size
)
)
if num_candidates == 0:
return []
            # Grab a random gene from the genome
random_gene = self.random_gene()
# Snag the same number of candidates
random_candidates = self.upstream_genes(
random_gene, gene_limit=num_candidates, window_size=10e100
)
if len(random_candidates) != num_candidates: # C
# somehow we hit the end of a chromosome
# or something, just recurse
random_candidates = self.bootstrap_candidate_genes(
locus, flank_limit=flank_limit, chain=True
)
if include_parent_locus == True: # C
for gene in random_candidates:
gene.update({"parent_locus": random_gene.id})
return random_candidates
else:
# Sort the loci so we can collapse down
locus_list = sorted(loci)
seen = set()
bootstraps = list()
target = self.candidate_genes(
locus_list,
flank_limit=flank_limit,
chain=False,
window_size=window_size,
)
target_accumulator = 0
candidate_accumulator = 0
# self.log('target: {}, loci: {}',len(target),len(locus_list))
for i, (locus, targ) in enumerate(zip(locus_list, target)):
# compare downstream of last locus to current locus
candidates = self.bootstrap_candidate_genes(
locus,
flank_limit=flank_limit,
chain=True,
window_size=window_size,
include_parent_locus=include_parent_locus,
)
# If genes randomly overlap, resample
while len(seen.intersection(candidates)) > 0: # C
candidates = self.bootstrap_candidate_genes(
locus,
flank_limit=flank_limit,
window_size=window_size,
chain=True,
include_parent_locus=include_parent_locus,
)
# Add all new bootstrapped genes to the seen list
seen |= set(candidates)
bootstraps.append(candidates)
if chain: # C
bootstraps = list(seen)
# self.log("Found {} bootstraps",len(bootstraps))
return bootstraps
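    # Sketch of a null-model comparison (an assumed workflow, not from the original
    # source): bootstrapped candidates preserve the *number* of genes per locus while
    # randomizing their genomic position, which makes them suitable for permutation
    # tests against the real candidate set.
    #
    #     observed = ref.candidate_genes(gwas_loci, flank_limit=2)
    #     null_sets = [ref.bootstrap_candidate_genes(gwas_loci, flank_limit=2)
    #                  for _ in range(1000)]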
def pairwise_distance(self, gene_list=None): # pragma: no cover
"""
returns a vector containing the pairwise distances between genes
        in gene_list in vector form. See scipy.spatial.distance.squareform
        for matrix conversion.
"""
if gene_list is None: # C
gene_list = list(self.iter_genes())
query = """
SELECT genes.id, chrom.rowid, start, end FROM genes
LEFT JOIN chromosomes chrom ON genes.chromosome = chrom.id
WHERE genes.id in ("{}")
ORDER BY genes.id
""".format(
'","'.join([g.id for g in gene_list])
)
# extract chromosome row ids and gene start positions for each gene
positions = pd.DataFrame(
# Grab the chromosomes rowid because its numeric
self.db.cursor().execute(query).fetchall(),
columns=["gene", "chrom", "start", "end"],
).sort_values(by="gene")
# chromosome needs to be floats
positions.chrom = positions.chrom.astype("float")
# Do a couple of checks
        assert len(positions) == len(gene_list), "Some genes in dataset not in RefGen"
assert all(
positions.gene == [g.id for g in gene_list]
), "Genes are not in the correct order!"
distances = RefGenDist.gene_distances(
positions.chrom.values, positions.start.values, positions.end.values
)
return distances
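    # Sketch: the condensed distance vector returned above can be expanded into a
    # square matrix with scipy (`genes` is a placeholder list of Gene objects):
    #
    #     from scipy.spatial.distance import squareform
    #     dists = ref.pairwise_distance(gene_list=genes)   # condensed vector
    #     matrix = squareform(dists)                       # len(genes) x len(genes)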
def summary(self): # C
print(
"\n".join(
["Reference Genome: {}", "{} genes", "Genome:", "{}"]
).format(
self.name, self.num_genes(), self.genome
)
)
def plot_loci(self, loci, filename, flank_limit=2): # pragma: no cover
"""
Plots the loci, windows and candidate genes
Parameters
----------
loci : iterable of co.Loci
The loci to print
filename : str
The output filename
"""
plt.clf()
# Each chromosome gets a plot
chroms = set([x.chrom for x in loci])
# Create a figure with a subplot for each chromosome
f, axes = plt.subplots(len(chroms), figsize=(10, 4 * len(chroms)))
if len(chroms) == 1:
axes = [axes]
# Split loci by chromosome
chromloci = defaultdict(list)
for locus in sorted(loci):
chromloci[locus.chrom].append(locus)
# iterate over Loci
seen_chroms = set([loci[0].chrom])
voffset = 1 # Vertical Offset
        hoffset = 0  # Horizontal Offset
current_chrom = 0
for i, locus in enumerate(loci):
            # Reset the temp variables if necessary
if locus.chrom not in seen_chroms:
seen_chroms.add(locus.chrom)
current_chrom += 1
voffset = 1
hoffset = 0
            # Grab the axes for the current chromosome
cax = axes[current_chrom]
cax.set_ylabel("Chrom: " + locus.chrom)
cax.set_xlabel("Loci")
cax.get_yaxis().set_ticks([])
# cax.get_xaxis().set_ticks([])
# shortcut for current axis
# cax.hold(True)
# place marker for start window
cax.scatter(hoffset, voffset, marker=">")
# place marker for start snp
cax.scatter(hoffset + locus.window, voffset, marker=".", color="blue")
# Place a marker for middle
cax.scatter(
hoffset + locus.window + len(locus) / 2,
voffset,
marker="|",
color="blue",
)
# place marker for stop snp
cax.scatter(
hoffset + locus.window + len(locus), voffset, marker=".", color="blue"
)
            # place marker for the end of the downstream window
cax.scatter(
hoffset + locus.window + len(locus) + locus.window, voffset, marker="<"
)
# place markers for sub snps
for subsnp in locus.sub_loci:
cax.scatter(
hoffset + subsnp.start - locus.start + locus.window,
voffset,
marker=".",
color="blue",
)
# place a block for interlocal distance
cax.barh(
bottom=voffset,
width=50,
height=1,
left=hoffset + locus.window + len(locus) + locus.window,
color="red",
)
# grab the candidate genes
for gene in self.candidate_genes(locus, flank_limit=flank_limit):
cax.barh(
bottom=voffset,
width=len(gene),
height=1,
left=gene.start - locus.start + locus.window,
color="red",
)
voffset += 5
plt.savefig(filename)
return f
def __repr__(self):
return "Reference Genome: - {}".format(
self.name
)
def __len__(self):
return self.num_genes()
def __contains__(self, obj):
""" flexible on what you pass into the 'in' function """
if isinstance(obj, Locus): # C
# you can pass in a gene object (this expression
# should ALWAYS be true if you
# created gene object from this RefGen)
if (
self.db.cursor()
.execute(
"""SELECT COUNT(*) FROM genes WHERE id = ?""", (obj.id.upper(),)
)
.fetchone()[0]
== 1
):
return True
else:
return False
elif isinstance(obj, str):
# Can be a string object
if (
self.db.cursor()
.execute(
"""
SELECT COUNT(*) FROM genes WHERE id = ?""",
(str(obj).upper(),),
)
.fetchone()[0]
== 1
):
return True
else:
return False
else: # C
raise TypeError("Cannot test for containment for {}".format(obj))
def _build_indices(self):
self.log("Building Indices")
cur = self.db.cursor()
# cur.execute('DROP INDEX IF EXISTS gene_start_end;')
# cur.execute('DROP INDEX IF EXISTS gene_end_start;')
# cur.execute('DROP INDEX IF EXISTS gene_start;')
# cur.execute('DROP INDEX IF EXISTS gene_end;')
# cur.execute('DROP INDEX IF EXISTS geneid')
# cur.execute('DROP INDEX IF EXISTS geneattr')
cur.execute(
"""
CREATE INDEX IF NOT EXISTS gene_start_end ON genes (chromosome,start DESC, end ASC, id);
CREATE INDEX IF NOT EXISTS gene_end_start ON genes (chromosome,end DESC,start DESC,id);
CREATE INDEX IF NOT EXISTS gene_start ON genes (chromosome,start);
CREATE INDEX IF NOT EXISTS gene_end ON genes (chromosome,end);
CREATE INDEX IF NOT EXISTS geneid ON genes (id);
CREATE INDEX IF NOT EXISTS geneattr ON gene_attrs (id);
            CREATE INDEX IF NOT EXISTS func_id ON func(id);
            CREATE INDEX IF NOT EXISTS ortho_func_id ON ortho_func(id);
"""
)
def add_gene(self, gene, refgen=None):
if isinstance(gene, Locus): # C
self.db.cursor().execute(
"""
INSERT OR REPLACE INTO genes VALUES (?,?,?,?)
""",
(gene.name, gene.chrom, gene.start, gene.end),
)
self.db.cursor().executemany(
"""
INSERT OR REPLACE INTO gene_attrs VALUES (?,?,?)
""",
[(gene.id, key, val) for key, val in gene.attr.items()],
)
if refgen:
aliases = refgen.aliases(gene.id)
if aliases:
self.db.cursor().executemany(
"""
INSERT OR IGNORE INTO aliases VALUES (?,?)""",
                        [(al, gene.id) for al in aliases],
)
else:
# support adding lists of genes
genes = list(gene)
self.log("Adding {} Genes info to database".format(len(genes)))
cur = self.db.cursor()
cur.execute("BEGIN TRANSACTION")
cur.executemany(
"INSERT OR REPLACE INTO genes VALUES (?,?,?,?)",
((gene.name, gene.chrom, gene.start, gene.end) for gene in genes),
)
self.log("Adding Gene attr info to database")
cur.executemany(
"INSERT OR REPLACE INTO gene_attrs VALUES (?,?,?)",
(
(gene.id, key, val)
for gene in genes
for key, val in gene.attr.items()
),
)
if refgen:
al_map = refgen.aliases([gene.id for gene in genes])
als = []
for id, al_list in al_map.items():
for al in al_list:
als.append([al, id])
cur.executemany("INSERT OR REPLACE INTO aliases VALUES (?,?)", als)
cur.execute("END TRANSACTION")
def add_chromosome(self, chrom):
""" adds a chromosome object to the class """
self.db.cursor().execute(
"""
INSERT OR REPLACE INTO chromosomes VALUES (?,?)
""",
(chrom.id, chrom.length),
)
def add_aliases(self, alias_file, id_col=0, alias_col=1, headers=True):
"""
Add alias map to the RefGen
Parameters
----------
alias_file : string (path)
The path to the alias file
id_col : int (default: 0)
The column containing the gene identifier
alias_col : int (default: 1)
The columns containing the alias
        headers : bool (default: True)
A switch stating if there is a header row to ignore or not
"""
with rawFile(alias_file) as IN: # C
if headers:
garb = IN.readline()
aliases = []
self.log("Importing aliases from: {}", alias_file)
for line in IN:
row = re.split(",|\t", line)
if row[id_col].strip() in self:
aliases.append((row[alias_col], row[id_col].strip()))
cur = self.db.cursor()
self.log("Saving them in the alias table.")
cur.execute("BEGIN TRANSACTION")
cur.executemany("INSERT OR REPLACE INTO aliases VALUES (?,?)", aliases)
cur.execute("END TRANSACTION")
def num_aliases(self): # C
"""
Returns the number of aliases currently in the database
"""
return self.db.cursor().execute("SELECT COUNT(*) FROM aliases").fetchone()[0]
def aliases(self, gene_id): # C
if isinstance(gene_id, str):
return [
alias[0]
for alias in self.db.cursor().execute(
"""
SELECT alias FROM aliases
WHERE id = ?
""",
(gene_id.upper(),),
)
]
else:
cur = self.db.cursor()
al_list = cur.executemany(
"""
SELECT alias,id FROM aliases WHERE id = ?
""",
[(id,) for id in gene_id],
)
als = dict()
for al, id in al_list: # C
if id in als:
als[id].append(al)
else:
als[id] = [al]
return als
def remove_aliases(self): # C
self.db.cursor().execute("DELETE FROM aliases;")
def has_annotations(self): # C
cur = self.db.cursor()
cur.execute("SELECT count(*) FROM func;")
return int(cur.fetchone()[0]) > 0
def get_annotations(self, item): # C
# Build the query from all the genes provided
if isinstance(item, (set, list)):
ls = "{}".format("','".join([str(x) for x in item]))
single = False
else:
ls = item
single = True
query = "SELECT * FROM func WHERE id IN ('{}');".format(ls)
# Run the query and turn the result into a list of tuples
cur = self.db.cursor()
cur.execute(query)
annotes = cur.fetchall()
# If a list of genes was passed in, return a dictionary of lists
if not single:
res = {}
for id, desc in annotes:
if id in res:
res[id].append(desc)
else:
res[id] = [desc]
# Otherwise just return the list annotations
else:
res = []
for id, desc in annotes:
res.append(desc)
return res
def export_annotations(self, filename=None, sep="\t"): # C
"""
Make a table of all functional annotations.
"""
# Find the default filename
if filename == None:
filename = self.name + "_func.tsv"
# Pull them all from sqlite
cur = self.db.cursor()
cur.execute("SELECT * FROM func;")
        # Use pandas to save it
df = pd.DataFrame(cur.fetchall(), columns=["gene", "desc"]).set_index("gene")
df.to_csv(filename, sep=sep)
def add_annotations(self, filename, sep="\t", gene_col=0, skip_cols=None): # C
"""
Imports Annotation relationships from a csv file. By default will
assume gene names are first column
Parameters
----------
filename : str
The file containing the annotations
sep : str (default: \\t)
The delimiter for the columns in the annotation file
gene_col : int (default: 0)
The index of the column containing the gene IDs
skip_cols : default:None
Optional names of columns to drop before adding
annotations
Returns
-------
None if successful
"""
# import from file, assume right now that in correct order
tbl = pd.read_table(filename, sep=sep, dtype=object)
idx_name = tbl.columns[gene_col]
tbl[idx_name] = tbl[idx_name].str.upper()
        # Set the index to be the specified gene column
tbl.set_index(idx_name, inplace=True)
# Drop columns if we need to
if skip_cols is not None:
# removing certain columns
tbl.drop(tbl.columns[skip_cols], axis=1, inplace=True)
        # Get rid of any genes not in the reference genome
cur = self.db.cursor()
cur.execute("SELECT id FROM genes;")
rm = set(tbl.index.values) - set([id[0] for id in cur.fetchall()])
if rm:
tbl.drop(rm, axis=0, inplace=True)
del rm, cur
# One Annotation per row, drop the nulls and duplicates
tbl = tbl.reset_index()
tbl = pd.melt(tbl, id_vars=idx_name, var_name="col", value_name="desc")
tbl.drop("col", axis=1, inplace=True)
tbl.dropna(axis=0, inplace=True)
tbl.drop_duplicates(inplace=True)
# Run the transaction to throw them in there
cur = self.db.cursor()
try:
cur.execute("BEGIN TRANSACTION")
cur.executemany(
"INSERT INTO func VALUES (?,?)", tbl.itertuples(index=False)
)
cur.execute("END TRANSACTION")
except Exception as e:
self.log("import failed: {}", e)
cur.execute("ROLLBACK")
# Make sure the indices are built
self._build_indices()
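    # Sketch of a functional annotation import (the file layout is an assumption
    # that matches the defaults above: tab-separated, gene ids in the first column,
    # one or more description columns that get melted into one row per annotation):
    #
    #     gene    desc
    #     GENE_000001    putative kinase
    #
    #     ref.add_annotations("func_annotations.tsv", sep="\t", gene_col=0)
    #     ref.get_annotations("GENE_000001")   # -> ["putative kinase", ...]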
def remove_annotations(self): # C
self.db.cursor().execute("DELETE FROM func;")
@classmethod
def create(cls, name, description, type):
self = super().create(name, description, type=type)
self.db.cursor().execute(
"""
DROP TABLE IF EXISTS chromosomes;
DROP TABLE IF EXISTS genes;
DROP TABLE IF EXISTS gene_attrs;
DROP TABLE IF EXISTS aliases;
DROP TABLE IF EXISTS func;
DROP TABLE IF EXISTS ortho_func;
"""
)
self._create_tables()
self._build_indices()
return self
@classmethod # C
def from_DataFrame(
cls,
df,
name,
description,
chrom_col="chrom",
start_col="start",
stop_col="stop",
id_col="ID",
):
"""
        Imports a RefGen object from a pandas DataFrame.
"""
self = cls.create(name, description, type="RefGen")
genes = list()
for i, row in df.iterrows():
genes.append(
Gene(
row[chrom_col],
int(row[start_col]),
int(row[stop_col]),
id=row[id_col],
).update(dict(row.items()))
)
self.add_gene(genes)
self._build_indices()
return self
@classmethod
def from_gff(
cls,
filename,
name,
description,
chrom_feature="chromosome",
gene_feature="gene",
ID_attr="ID",
attr_split="=",
):
"""
Imports RefGen object from a gff (General Feature Format) file.
See more about the format here:
http://www.ensembl.org/info/website/upload/gff.html
Parameters
----------
filename : str
The path to the GFF file.
name : str
            The name of the RefGen object to be stored in the core
camoco database.
description : str
A short description of the RefGen for future reference
chrom_feature : str (default: chromosome)
            The name of the feature (in column 3) that designates
            a chromosome.
gene_feature : str (default: gene)
            The name of the feature (in column 3) that designates a
gene. These features will be the main object that the RefGen
encompasses.
ID_attr : str (default: ID)
The key in the attribute column which designates the ID or
name of the feature.
attr_split : str (default: '=')
The delimiter for keys and values in the attribute column
"""
self = cls.create(name, description, type="RefGen")
genes = list()
chroms = dict()
if filename.endswith(".gz"):
IN = gzip.open(filename, "rt")
else:
IN = open(filename, "r")
for line in IN:
# skip comment lines
if line.startswith("#"):
continue
(
chrom,
source,
feature,
start,
end,
score,
strand,
frame,
attributes,
) = line.strip().split("\t")
attributes = dict(
[
(field.strip().split(attr_split))
for field in attributes.strip(";").split(";")
]
)
if feature == chrom_feature:
self.log("Found a chromosome: {}", attributes["ID"].strip('"'))
self.add_chromosome(Chrom(attributes["ID"].strip('"'), end))
if feature == gene_feature:
genes.append(
Gene(
chrom,
int(start),
int(end),
attributes[ID_attr].upper().strip('"'),
strand=strand,
**attributes
).update(attributes)
)
# Keep track of seen chromosomes
if chrom not in chroms:
                chroms[chrom] = int(end)
            else:
                if int(end) > chroms[chrom]:
                    chroms[chrom] = int(end)
for id, end in chroms.items():
self.add_chromosome(Chrom(id.strip('"'), end))
IN.close()
self.add_gene(genes)
self._build_indices()
return self
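    # Sketch of building a RefGen from a GFF file (the filename, dataset name, and
    # description below are placeholders; gzip-compressed input is handled
    # automatically by the method above):
    #
    #     ref = RefGen.from_gff(
    #         "genes.gff.gz",
    #         name="MyRefGen",
    #         description="filtered gene set",
    #     )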
def copy(self, name, description): # C
"""
Creates a copy of a refgen with a new name and description.
Parameters
----------
name : str
Name of the copy refgen
description : str
Short description of the reference genome
Returns
-------
co.RefGen object containing the same genes and
        chromosomes as the original.
"""
copy = self.create(name, description, "RefGen")
        # Should have the same chromosomes
for chrom in self.iter_chromosomes():
copy.add_chromosome(chrom)
# Should have the same gene list
copy.add_gene(self.iter_genes(), refgen=self)
copy._build_indices()
return copy
@classmethod
def filtered_refgen(cls, name, description, refgen, gene_list):
"""
Copies from a previous instance of refgen, making sure
each gene is within gene list
"""
self = cls.create(name, description, "RefGen")
# Should have the same chromosomes
for chrom in refgen.iter_chromosomes():
self.add_chromosome(chrom)
# Add the genes from genelist
self.add_gene(gene_list, refgen=refgen)
self._build_indices()
return self
def _create_tables(self):
cur = self.db.cursor()
cur.execute(
"""
/*
Create a table that has chromosome lengths
*/
CREATE TABLE IF NOT EXISTS chromosomes (
id TEXT NOT NULL UNIQUE,
length INTEGER NOT NULL
);
/*
        Create a table that contains gene start and
end positions (with chromosome)
*/
CREATE TABLE IF NOT EXISTS genes (
id TEXT NOT NULL UNIQUE,
chromosome TEXT NOT NULL,
start INTEGER,
end INTEGER
);
/*
Create a table that contains gene attribute
mappings
*/
CREATE TABLE IF NOT EXISTS gene_attrs (
id TEXT NOT NULL,
key TEXT,
val TEXT
);
CREATE TABLE IF NOT EXISTS aliases (
alias TEXT UNIQUE,
id TEXT
);
CREATE TABLE IF NOT EXISTS func (
id TEXT,
desc TEXT,
UNIQUE(id,desc) ON CONFLICT IGNORE
);
CREATE TABLE IF NOT EXISTS ortho_func (
id TEXT,
desc TEXT,
UNIQUE(id,desc) ON CONFLICT IGNORE
);"""
)
|
import tkinter as tk
import os
import util
from frames.add_quantity_frame import AddQuantityFrame
from frames.add_unit_frame import AddUnitFrame
from frames.converter_frame import ConverterFrame
from frames.main_frame import MainFrame
from util import get_all_quantities
from constants.files import *
class Application(tk.Tk):
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 600
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.__init_base_params()
self.__create_db()
self.__load_data()
self.selected_quantity = tk.StringVar()
container = tk.Frame(self)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (ConverterFrame, AddQuantityFrame, AddUnitFrame, MainFrame):
page_name = F.__name__
frame = F(container, self)
self.frames[page_name] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(MainFrame.__name__)
def show_frame(self, page_name):
self.__load_data()
frame = self.frames[page_name]
frame.render()
frame.tkraise()
def __init_base_params(self):
self.title("Конвертер величин")
position_right = int(self.winfo_screenwidth() / 2 - self.WINDOW_WIDTH / 2)
position_down = int(self.winfo_screenheight() / 2 - self.WINDOW_HEIGHT / 2)
self.geometry("{}x{}".format(self.WINDOW_WIDTH, self.WINDOW_HEIGHT))
self.geometry("+{}+{}".format(position_right, position_down))
self.resizable(False, False)
def __create_db(self):
if os.path.isfile(QUANTITIES_FILE_PATH):
return
util.set_default_quantities()
def __load_data(self):
self.phys_quantities = {}
all_quantities = get_all_quantities()
for quantity in all_quantities:
self.phys_quantities[quantity.name] = quantity.units
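# Entry-point sketch: this assumes the application is launched from this module;
# the original project may start it from a separate main script instead, in which
# case this guard is simply never triggered on import.
if __name__ == "__main__":
    app = Application()
    app.mainloop()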
|
import pytest
import logging
from esprima import parseScript
from photoshop import PhotoshopConnection
from photoshop.protocol import Pixmap
from .mock import (
script_output_server, subscribe_server, error_status_server, jpeg_server,
pixmap_server, filestream_server, PASSWORD
)
class CallbackHandler(object):
def __init__(self):
self.count = 0
def __call__(self, conn, data):
assert data == b'{}'
self.count += 1
if self.count >= 3:
return True
return False
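# Note on the callback contract exercised below (inferred from the handler above;
# the exact semantics of PhotoshopConnection.subscribe are an assumption): the
# handler is invoked once per event payload, and returning True signals that the
# blocking subscribe() call should stop, so CallbackHandler unblocks after three
# 'imageChanged' events.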
def test_subscribe(subscribe_server):
with PhotoshopConnection(
PASSWORD, port=subscribe_server[1], validator=parseScript
) as conn:
conn.subscribe('imageChanged', CallbackHandler(), block=True)
def test_subscribe_error(error_status_server):
with PhotoshopConnection(
PASSWORD, port=error_status_server[1], validator=parseScript
) as conn:
conn.subscribe('imageChanged', CallbackHandler(), block=True)
def test_get_document_thumbnail(jpeg_server):
with PhotoshopConnection(
PASSWORD, port=jpeg_server[1], validator=parseScript
) as conn:
jpeg_binary = conn.get_document_thumbnail()
assert isinstance(jpeg_binary, bytes)
def test_get_layer_thumbnail(pixmap_server):
with PhotoshopConnection(
PASSWORD, port=pixmap_server[1], validator=parseScript
) as conn:
pixmap = conn.get_layer_thumbnail()
assert isinstance(pixmap, Pixmap)
def test_get_layer_shape(script_output_server):
with PhotoshopConnection(
PASSWORD, port=script_output_server[1], validator=parseScript
) as conn:
shape_info = conn.get_layer_shape()
assert isinstance(shape_info, dict)
def test_get_document_info(script_output_server):
with PhotoshopConnection(
PASSWORD, port=script_output_server[1], validator=parseScript
) as conn:
document_info = conn.get_document_info()
assert isinstance(document_info, dict)
def test_get_document_stream(filestream_server):
with PhotoshopConnection(
PASSWORD, port=filestream_server[1], validator=parseScript
) as conn:
document_info = conn.get_document_stream()
assert isinstance(document_info, dict)
|
from hallo.events import EventMessage, EventMode
from hallo.server import Server
from hallo.test.server_mock import ServerMock
def test_devoice_not_irc(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = "NOT_IRC"
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "only available for irc" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0_privmsg(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, None, user1, "devoice"))
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "in a private message" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice other_channel")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "other_channel is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
serv1.get_channel_by_address("test_chan2", "test_chan2")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].user == user1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user2)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_voice = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_voice = True
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user_not_here(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user2 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_voice = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_voice = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
    chan2_user2 = chan2.get_membership_by_user(user2)
    chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3", "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
    chan2_user2 = chan2.get_membership_by_user(user2)
    chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
    chan2_user2 = chan2.get_membership_by_user(user2)
    chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = False
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
    chan2_user2 = chan2.get_membership_by_user(user2)
    chan2_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "i'm not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
    chan2_user2 = chan2.get_membership_by_user(user2)
    chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3", "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
    chan2_user2 = chan2.get_membership_by_user(user2)
    chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
    chan2_user2 = chan2.get_membership_by_user(user2)
    chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
|
import boto3
from jinja2 import Environment, FileSystemLoader
import re
from .APIException import Exception_400
import os
def generate_conf(request):
data = extract_data(request)
template_string = data['data']
key_prefix = data['parameters']['key_prefix']
aws_region = os.environ['AWS_REGION']
template_keys = get_template_keys_from_string(template_string)
cmdb_param_names = generate_cmdb_param_names(template_keys, key_prefix)
secret_dict = {}
for template_key, cmdb_param_name in cmdb_param_names.items():
secret_dict[template_key] = get_ssm_param(cmdb_param_name, aws_region)
jinja_env = Environment(
loader=FileSystemLoader('.'),
trim_blocks=True,
lstrip_blocks=True
)
# register the custom Jinja filter
jinja_env.filters['split'] = split_filter
template_without_secret = jinja_env.from_string(template_string)
template_with_secrets = generate_template_with_secrets(template_without_secret, secret_dict)
return template_with_secrets
def extract_data(request):
data = request.data.decode("utf-8")
key_prefix = request.args.get('key_prefix')
if data == '':
raise Exception_400("request body is missing", "request body is missing", None)
return {"data": data, "parameters": {"key_prefix": key_prefix}}
def get_template_keys_from_string(template_string):
"""
Retrieve all the keys from the template string which are identified by '{{ <key_name> }}'.
:param template_string: the template in a string
:type template_string: string
:return: the list of the template keys corresponding to secret parameters
:rtype: list[string]
"""
keys = re.findall(r"{{([a-z0-9_ ]+)", template_string)
trimmed_keys = [key.strip() for key in keys]
return trimmed_keys
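# Hedged usage sketch (not part of the original module): the template text and
# key names below are invented examples, shown only to illustrate what the
# regex-based extractor returns.
def _example_get_template_keys():
    example_template = "host={{ redshift_host }}\nuser={{ redshift_user }}"
    assert get_template_keys_from_string(example_template) == ["redshift_host", "redshift_user"]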
def generate_cmdb_param_names(template_keys, key_prefix):
"""
    Generate the secret parameter names based on the template keys and the optional key prefix. In the Parameter Store,
    the parameter names follow the pattern: /<client>/<env>/<template_key>. Example: /ripl/prod/redshift/host
:param template_keys: all the secret parameters to retrieve based on the template
:type template_keys: list[string]
:param key_prefix: a prefix to add before template keys (can be None)
:type key_prefix: string
:return: a dictionary containing the template keys with the corresponding secret parameter name as value
Example: {"redshift_user": "/ripl/prod/redshift/user"}
:rtype: dict
"""
res = {}
if key_prefix is None:
key_prefix_string = "/"
else:
key_prefix_string = "/{}/".format(key_prefix)
for template_key in template_keys:
# as we can't use '-' in the template variable name, we use '__'. However in the Parameter Store, the '-'
# can be used. Therefore we have to do the conversion
template_key_with_dash = template_key.replace('__', '-')
# '_' in the template corresponds to '/' in the AWS Parameter Store
path = template_key_with_dash.replace('_', '/')
res[template_key] = "{}{}".format(key_prefix_string, path)
return res
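# Hedged usage sketch (illustrative values only, mirroring the docstring's
# /<client>/<env>/... example): shows the '__' -> '-' and '_' -> '/' conversions.
def _example_generate_cmdb_param_names():
    names = generate_cmdb_param_names(["redshift_user", "api__key_token"], "ripl/prod")
    assert names == {
        "redshift_user": "/ripl/prod/redshift/user",
        "api__key_token": "/ripl/prod/api-key/token",
    }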
def get_ssm_param(cmdb_param_name, region_name="eu-west-1"):
"""
Get the secret parameter from the AWS Parameter Store thanks to its name. The permissions to access the Parameter
Store are:
* "Assume Role" for remote server
* "Environment variables" for dev mode and running tests locally (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY)
:param cmdb_param_name: the name of the secret parameter name to retrieve from the Parameter Store
:type cmdb_param_name: string
:param region_name: the region name of the Parameter Store we want to use
:type region_name: string
:return: the secret parameter
:rtype: string
"""
ssm = boto3.client('ssm', region_name=region_name)
ssm_response = ssm.get_parameter(
Name=cmdb_param_name,
WithDecryption=True
)
ssm_parameter = ssm_response.get("Parameter").get("Value")
return ssm_parameter
def generate_template_with_secrets(template_without_secret, secret_dict):
"""
Generate the template with the secrets.
:param template_without_secret: the template without the secret
:type template_without_secret: jinja2.Template
:param secret_dict: the secret
:type secret_dict: dict
:return: the template with the secrets
:rtype: string
"""
return template_without_secret.render(secret_dict)
def split_filter(string_to_split, delimiter):
"""
Create a custom Jinja filter to use it in the Jinja2 template. Same functionality as the split function in the
Python language
:param string_to_split: the string to split
:type string_to_split: string
:param delimiter: the delimiter to split the string into
:type delimiter: string
:return: the string split as a list
:rtype: list[string]
"""
if string_to_split is not None:
return string_to_split.split(delimiter)
else:
return None
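# Hedged usage sketch (invented template text): registers the custom 'split'
# filter on a bare Environment and renders a one-line template with it.
def _example_split_filter():
    env = Environment()
    env.filters["split"] = split_filter
    assert env.from_string("{{ hosts | split(',') | length }}").render(hosts="a,b,c") == "3"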
|
import pytest
import pandas
from nereid.src.treatment_facility.constructors import build_treatment_facility_nodes
from nereid.core.io import parse_configuration_logic
@pytest.mark.parametrize(
"ctxt_key, has_met_data",
[("default", True), ("default_api_no_tf_joins_valid", False)],
)
@pytest.mark.parametrize(
"model, checkfor",
[
("PermPoolFacility", "retention_volume_cuft"),
("RetAndTmntFacility", "retention_volume_cuft"),
("BioInfFacility", "retention_volume_cuft"),
("FlowAndRetFacility", "retention_volume_cuft"),
("RetentionFacility", "retention_volume_cuft"),
("TmntFacility", "treatment_volume_cuft"),
("CisternFacility", "design_storm_depth_inches"), # TODO
("DryWellFacility", "retention_volume_cuft"),
("LowFlowFacility", "design_storm_depth_inches"), # TODO
("FlowFacility", "design_storm_depth_inches"), # TODO
("NTFacility", "design_storm_depth_inches"),
],
)
def test_build_treatment_facility_nodes(
contexts, valid_treatment_facility_dicts, ctxt_key, has_met_data, model, checkfor
):
context = contexts[ctxt_key]
tmnt_facilities = pandas.DataFrame([valid_treatment_facility_dicts[model]])
df, messages = parse_configuration_logic(
df=pandas.DataFrame(tmnt_facilities),
config_section="api_recognize",
config_object="treatment_facility",
context=context,
)
node = build_treatment_facility_nodes(df)[0]
check_val = node.get(checkfor)
assert isinstance(check_val, float)
if has_met_data:
assert node.get("rain_gauge") is not None
else:
assert node.get("rain_gauge") is None
@pytest.mark.parametrize(
"ctxt_key, has_met_data",
[("default", True), ("default_api_no_tf_joins_valid", False)],
)
def test_build_treatment_facility_nodes_from_long_list(
contexts, valid_treatment_facilities, ctxt_key, has_met_data
):
context = contexts[ctxt_key]
tmnt_facilities = pandas.DataFrame(valid_treatment_facilities)
df, messages = parse_configuration_logic(
df=tmnt_facilities,
config_section="api_recognize",
config_object="treatment_facility",
context=context,
)
nodes = build_treatment_facility_nodes(df)
for n in nodes:
if has_met_data:
assert n.get("rain_gauge") is not None
else:
assert n.get("rain_gauge") is None
|
'''
Function:
    New Year greeting card generator
Author:
    Car
WeChat official account:
    Car的皮皮
'''
import os
import io
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import *
from PyQt5 import QtWidgets, QtGui
from PIL import Image, ImageDraw, ImageFont
'''New Year greeting card generator'''
class NewYearCardGenerator(QtWidgets.QWidget):
tool_name = '新年贺卡生成器'
def __init__(self, parent=None, title='新年贺卡生成器 —— Car的皮皮', **kwargs):
super(NewYearCardGenerator, self).__init__()
rootdir = os.path.split(os.path.abspath(__file__))[0]
self.setFixedSize(600, 500)
self.setWindowTitle(title)
self.setWindowIcon(QIcon(os.path.join(rootdir, 'resources/icon/icon.png')))
self.grid = QGridLayout()
        # Some global variables
self.card_image = None
self.font_size = 35
        # Define widgets
# --Label
self.content_label = QLabel('内容路径:')
self.bg_label = QLabel('背景路径:')
self.font_label = QLabel('字体路径:')
self.fontcolor_label = QLabel('字体颜色:')
self.show_label = QLabel()
self.show_label.setScaledContents(True)
self.show_label.setMaximumSize(600, 300)
        # --Line edits
self.content_edit = QLineEdit()
self.content_edit.setText(os.path.join(rootdir, 'resources/contents/1.card'))
self.bg_edit = QLineEdit()
self.bg_edit.setText(os.path.join(rootdir, 'resources/bgimages/1.png'))
self.font_edit = QLineEdit()
self.font_edit.setText(os.path.join(rootdir, 'resources/fonts/font.TTF'))
        # --Buttons
self.choose_content_button = QPushButton('选择路径')
self.choose_bg_button = QPushButton('选择路径')
self.choose_font_button = QPushButton('选择路径')
self.generate_button = QPushButton('生成贺卡')
self.save_button = QPushButton('保存贺卡')
        # --Combo box
self.font_color_combobox = QComboBox()
for color in ['red', 'white', 'black', 'blue', 'yellow', 'green']:
self.font_color_combobox.addItem(color)
        # Layout
self.grid.addWidget(self.show_label, 0, 0, 5, 5)
self.grid.addWidget(self.content_label, 5, 0, 1, 1)
self.grid.addWidget(self.content_edit, 5, 1, 1, 3)
self.grid.addWidget(self.choose_content_button, 5, 4, 1, 1)
self.grid.addWidget(self.bg_label, 6, 0, 1, 1)
self.grid.addWidget(self.bg_edit, 6, 1, 1, 3)
self.grid.addWidget(self.choose_bg_button, 6, 4, 1, 1)
self.grid.addWidget(self.font_label, 7, 0, 1, 1)
self.grid.addWidget(self.font_edit, 7, 1, 1, 3)
self.grid.addWidget(self.choose_font_button, 7, 4, 1, 1)
self.grid.addWidget(self.fontcolor_label, 8, 0, 1, 1)
self.grid.addWidget(self.font_color_combobox, 8, 1, 1, 1)
self.grid.addWidget(self.generate_button, 8, 3, 1, 1)
self.grid.addWidget(self.save_button, 8, 4, 1, 1)
self.setLayout(self.grid)
        # Bind events
self.choose_content_button.clicked.connect(self.openContentFilepath)
self.choose_bg_button.clicked.connect(self.openBGFilepath)
self.choose_font_button.clicked.connect(self.openFontFilepath)
self.generate_button.clicked.connect(self.generate)
self.save_button.clicked.connect(self.save)
self.generate()
    '''Generate the greeting card'''
def generate(self):
        # Check that the file paths exist
content_path = self.content_edit.text()
bg_path = self.bg_edit.text()
font_path = self.font_edit.text()
font_color = self.font_color_combobox.currentText()
if (
(not self.checkFilepath(content_path))
or (not self.checkFilepath(bg_path))
or (not self.checkFilepath(font_path))
):
self.card_image = None
return False
        # Write the card text
contents = open(content_path, encoding='utf-8').read().split('\n')
font_card = ImageFont.truetype(font_path, self.font_size)
image = Image.open(bg_path).convert('RGB')
draw = ImageDraw.Draw(image)
draw.text((180, 30), contents[0], font=font_card, fill=font_color)
        idx = -1  # fallback so the closing line still renders if the body is empty
        for idx, content in enumerate(contents[1:-1]):
draw.text(
(220, 40 + (idx + 1) * 40), content, font=font_card, fill=font_color
)
draw.text(
(180, 40 + (idx + 2) * 40 + 10),
contents[-1],
font=font_card,
fill=font_color,
)
        # Display the result
fp = io.BytesIO()
image.save(fp, 'BMP')
qtimg = QtGui.QImage()
qtimg.loadFromData(fp.getvalue(), 'BMP')
qtimg_pixmap = QtGui.QPixmap.fromImage(qtimg)
self.show_label.setPixmap(qtimg_pixmap)
self.card_image = image
    '''Open the card content file'''
def openContentFilepath(self):
filepath = QFileDialog.getOpenFileName(self, "请选取贺卡内容文件", '.')
self.content_edit.setText(filepath[0])
    '''Open the card background image file'''
def openBGFilepath(self):
filepath = QFileDialog.getOpenFileName(self, "请选取贺卡背景图片", '.')
self.bg_edit.setText(filepath[0])
    '''Open the font file'''
def openFontFilepath(self):
filepath = QFileDialog.getOpenFileName(self, "请选取字体文件", '.')
self.font_edit.setText(filepath[0])
    '''Save the greeting card'''
def save(self):
filename = QFileDialog.getSaveFileName(self, '保存', './card.jpg', '所有文件(*)')
if filename[0] != '' and self.card_image:
self.card_image.save(filename[0])
QDialog().show()
    '''Check whether the file exists'''
def checkFilepath(self, filepath):
if not filepath:
return False
return os.path.isfile(filepath)
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#
# This file was originally part of PySP and Pyomo, available: https://github.com/Pyomo/pysp
# Copied with modification from pysp/tests/unit/testdata/reference_test_scenario_tree_model.py
import os
import tempfile
from mpisppy.utils.pysp_model.tree_structure_model import \
CreateAbstractScenarioTreeModel
with tempfile.NamedTemporaryFile(mode="w", suffix=".dat", delete=False) as f:
f.write("""
set Stages :=
t1
t2
;
set Nodes :=
root
n1
n2
n3
;
param NodeStage :=
root t1
n1 t2
n2 t2
n3 t2
;
set Children[root] :=
n1
n2
n3
;
param ConditionalProbability :=
root 1.0
n1 0.33333333
n2 0.33333334
n3 0.33333333
;
set Scenarios :=
s1
s2
s3
;
param ScenarioLeafNode :=
s1 n1
s2 n2
s3 n3
;
set StageVariables[t1] :=
x
;
param StageCost :=
t1 cost[1]
t2 cost[2]
;
""")
model = CreateAbstractScenarioTreeModel().create_instance(f.name)
os.remove(f.name)
|
from __future__ import absolute_import
from __future__ import print_function
import os, tqdm
from ephys.spiketrains import get_spiketrain
from ephys import core
from ephys import events
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
from six.moves import zip
def do_raster(raster_data, times, ticks, ntrials, ax=None, spike_linewidth=1.5,
spike_color='k', tick_linewidth=1.5, tick_color='r'):
'''
Generalized raster plotting function
Parameters
------
raster_data : list of lists of floats
List of lists. Each sublist corresponds to one row of events
        Each element of a sublist is an event time
times : list of floats
The beginning and end times to plot
ticks : list of floats
Will add a vertical tick across the whole plot for each time in list
ax : Matplotlib axes handle, optional
Axes on which to produce raster. Default gca.
spike_linewidth : float, optional
width in points of ticks for spikes
spike_color : str
color of ticks for spikes
tick_linewidth : float
width in points of ticks for events
tick_color : str
color of ticks for events
Returns
------
raster_plot :
Handle to the raster plot
'''
raster_data = np.array(raster_data)
if ax is None:
ax = plt.gca()
ax.set_xlim(times)
ax.set_ylim((-0.5, ntrials-0.5))
ax.set_yticks(range(0, ntrials))
ax.eventplot(raster_data, linewidths=spike_linewidth, colors=spike_color)
for pltticks in ticks:
ax.axvline(pltticks, color=tick_color)
return ax
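# Hedged usage sketch (synthetic spike times, not experimental data): three
# equal-length rows of event times, one stimulus-onset tick at t = 0.25 s.
def _example_do_raster():
    fake_raster = [[0.10, 0.40, 0.90], [0.20, 0.50, 1.00], [0.05, 0.30, 0.70]]
    fig, example_ax = plt.subplots()
    do_raster(fake_raster, times=[0.0, 1.5], ticks=[0.25],
              ntrials=len(fake_raster), ax=example_ax)
    return fig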
def plot_raster_cell_stim(spikes, trials, clusterID,
stim, period, rec, fs, ax=None, stim_ref='stim', **kwargs):
'''
Plots a spike raster for a single cell and stimulus
Parameters
------
spikes : pandas dataframe
spike dataframe from core
trials : pandas dataframe
trials dataframe from events
clusterID : int
ID number of the cluster you wish to make the raster for
stim : str
Name of the stimulus you wish to plot cluster's activity for
period : list of floats
Time window for the raster:
[Seconds_pre_stimulus_onset, Seconds_post_stimulus_end]
rec : int
Recording ID
fs : float
Sampling rate
plot_params : dict
Drawing parameters:
'spike_linewidth' - linewidth of ticks for spikes
'tick_linewidth' - linewidth of ticks for event markers
'spike_color' - color of spike ticks
'tick_color' - color of event ticks
ax : Matplotlib axes handle, optional
Axes on which to produce the raster. Default is to use gca
kwargs :
keyword arguments are passed to the do_raster method
'''
stim_trials = trials[trials['stimulus'] == stim]
ntrials = len(stim_trials)
stim_starts = stim_trials['time_samples'].values
stim_ends = stim_trials['stimulus_end'].values
stim_end_seconds = np.unique((stim_ends - stim_starts) / fs)[0]
if stim_ref == 'stim':
window = [period[0], stim_end_seconds + period[1]]
elif stim_ref == 'abs':
window = [period[0], period[1]]
raster_data = []
for trial, start in enumerate(stim_starts):
sptrain = get_spiketrain(rec, start, clusterID, spikes, window, fs)
raster_data.append(sptrain)
ax = do_raster(raster_data, window, [0, stim_end_seconds], ntrials, ax, **kwargs)
return ax
def plot_all_rasters(block_path):
''' Plots all the rasters from all units for all stimuli
Places them in a blockpath/rasters folder
'''
rasters_folder = os.path.join(block_path, 'rasters/')
spikes = core.load_spikes(block_path)
trials = events.oe_load_trials(block_path)
fs = core.load_fs(block_path)
stims = np.unique(trials['stimulus'].values)
clusters = core.load_clusters(block_path)
os.makedirs(rasters_folder, exist_ok=True)
for cluster in tqdm.tqdm(clusters["cluster"]):
os.makedirs(os.path.join(rasters_folder, '{}/'.format(cluster)), exist_ok=True)
for stim in tqdm.tqdm(stims):
fig = plt.figure()
ax = plot_raster_cell_stim(spikes, trials, cluster, stim, [-2, 2], 0, fs)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Trial Number')
ax.set_title('Unit: {} Stimulus: {}'.format(cluster, stim))
plt.savefig(os.path.join(rasters_folder, '{}/unit-{}_stim-{}.pdf'.format(cluster, cluster, stim)))
plt.close(fig)
def plot_raster_cell_stim_emily(spikes, trials, clusterID,
stim, period, rec, fs, ax=None, **kwargs):
'''
Plots a spike raster for a single cell and stimulus
Parameters
------
spikes : pandas dataframe
spike dataframe from core
trials : pandas dataframe
trials dataframe from events
clusterID : int
ID number of the cluster you wish to make the raster for
stim : str
Name of the stimulus you wish to plot cluster's activity for
period : list of floats
Time window for the raster:
[Seconds_pre_stimulus_onset, Seconds_post_stimulus_end]
rec : int
Recording ID
fs : float
Sampling rate
plot_params : dict
Drawing parameters:
'spike_linewidth' - linewidth of ticks for spikes
'tick_linewidth' - linewidth of ticks for event markers
'spike_color' - color of spike ticks
'tick_color' - color of event ticks
ax : Matplotlib axes handle, optional
Axes on which to produce the raster. Default is to use gca
kwargs :
keyword arguments are passed to the do_raster method
'''
stim_trials = trials[trials['stimulus'] == stim]
stim_recs = stim_trials['recording'].values
ntrials = len(stim_trials)
stim_starts = stim_trials['time_samples'].values
stim_ends = stim_trials['stimulus_end'].values
stim_end_seconds = np.unique((stim_ends - stim_starts) / fs)[0]
window = [period[0], stim_end_seconds + period[1]]
raster_data = []
for trial, stpl in enumerate(zip(stim_starts, stim_recs)):
start = stpl[0]
srec = stpl[1]
sptrain = get_spiketrain(srec, start, clusterID, spikes, window, fs)
raster_data.append(sptrain)
    do_raster(raster_data, window, [0, stim_end_seconds], ntrials, ax, **kwargs)
def plot_raster_stim_trial(spikes, trials, clusters,
stim, trial, period, rec, fs, plot_params=None, ax=None):
'''
Plots a spike raster for all cells for a single trial of a single stimulus
'''
nclus = len(clusters)
cluIDs = clusters['cluster'].values
stim_trials = trials[trials['stimulus']==stim]
this_trial = stim_trials.iloc[trial]
stim_start = this_trial['time_samples']
stim_end = this_trial['stimulus_end']
stim_end_seconds = np.unique((stim_end - stim_start)/fs)[0]
window = [period[0], stim_end_seconds+period[1]]
raster_data = []
for clu_num, clu in enumerate(cluIDs):
sptrain = get_spiketrain(rec, stim_start, clu, spikes, window, fs)
raster_data.append(sptrain)
    if plot_params is None:
        do_raster(raster_data, window, [0, stim_end_seconds], nclus, ax)
    else:
        do_raster(raster_data, window, [0, stim_end_seconds], nclus, ax,
                  spike_linewidth=plot_params['spike_linewidth'],
                  spike_color=plot_params['spike_color'],
                  tick_linewidth=plot_params['tick_linewidth'],
                  tick_color=plot_params['tick_color'])
def gaussian_psth_func(times, spike_data, sigma):
'''
Generates a gaussian psth from spike data
Parameters
------
times : numpy array
times to generate psth for
spike_data : list of floats
times of each spike
sigma : float
standard deviation of the gaussian
Return
------
output : numpy array
gaussian peristimulus time histogram
'''
output = np.zeros(len(times))
for spike_time in spike_data:
output = output + np.exp(-1.0 * np.square(times - spike_time) / (2 * sigma ** 2))
return output
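# Hedged usage sketch (tiny synthetic spike train): the unnormalised kernel
# centred on each spike contributes 1.0 exactly at the spike time.
def _example_gaussian_psth_func():
    example_times = np.linspace(0.0, 1.0, 101)
    psth = gaussian_psth_func(example_times, spike_data=[0.25, 0.5], sigma=0.05)
    assert psth.shape == example_times.shape
    assert psth[50] >= 1.0  # index 50 is t = 0.5, where one spike sits
    return psth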
def calc_avg_gaussian_psth(spikes, trials, clusterID, stim, period, rec, fs, sigma=0.05, alpha=0.95):
'''
Calculates a gaussian smoothed average psth over all trials of stim for a given cluster.
Parameters
------
spikes : dataframe
spike data
trials : dataframe
trial data
clusterID : int
cluster ID to compute
stim : str
Name of stimulus to calculate psth for
period : list of floats
Time window for the raster:
[Seconds_pre_stimulus_onset, Seconds_post_stimulus_end]
rec : int
recording id
fs : float
sampling rate
sigma : float
        standard deviation of the gaussian
alpha : float
confidence level
Returns
------
avg_psth : numpy array
the average gaussian psth
std_psth :
standard deviation of the psth
conf_ints :
confidence intervals
times :
times for the signals
'''
stim_trials = trials[trials['stimulus'] == stim]
ntrials = len(stim_trials)
stim_starts = stim_trials['time_samples'].values
stim_ends = stim_trials['stimulus_end'].values
stim_end_seconds = np.unique((stim_ends - stim_starts) / fs)[0]
window = [period[0], stim_end_seconds + period[1]]
    npts = int(np.floor(1.0 * (window[1] - window[0]) * fs))
times = np.linspace(window[0], window[1], npts)
psths = np.zeros((ntrials, npts))
for trial, start in enumerate(stim_starts):
sptrain = get_spiketrain(rec, start, clusterID, spikes, window, fs)
psths[trial, :] = gaussian_psth_func(times, sptrain, sigma)
avg_psth = np.mean(psths, 0)
std_psth = np.std(psths, 0)
conf_ints = stats.t.interval(alpha, df=ntrials - 1, loc=avg_psth, scale=std_psth / np.sqrt(ntrials))
return (avg_psth, std_psth, conf_ints, times)
def plot_unit_raster(spikes, trials, clusterID, raster_window, rec, fs, subplot_xy, figsize, fontsize=20, **kwargs):
'''
Plots a raster of all trials of all stimuli from a given unit
Parameters
------
spikes : pandas dataframe
spike dataframe from core
trials : pandas dataframe
trials dataframe from events
clusterID : int
ID number of the cluster you wish to make the raster for
raster_window :
TODO
rec : int
Recording ID
fs : float
Sampling rate
subplot_xy : (int, int)
x and y indices of the working subplot
figsize : integer tuple
The figure plot size in tuple of integers, (width, height) in inches
fontsize : int
font size of the plot labels
kwargs :
keyword arguments are passed to the do_raster method
Return
------
figure :
matplotlib Figure
'''
stims = trials['stimulus'].unique()
f, pltaxes = plt.subplots(subplot_xy[0], subplot_xy[1], sharey=True, figsize=figsize)
for ind, stim in enumerate(stims):
ax = pltaxes.flatten()[ind]
plot_raster_cell_stim(spikes, trials, clusterID, stim,
raster_window, rec, fs, ax=ax, **kwargs)
ax.set_title('Unit: {} Stim: {}'.format(str(clusterID), stim))
ax.set_xlabel('Time (seconds)')
ax.set_ylabel('Repetition')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
return f
def plot_unit_raster_emily(spikes, trials, clusterID, raster_window, rec, fs, subplot_xy, figsize, fontsize=20,
**kwargs):
'''
Plots a raster of all trials of all stimuli from a given unit
Parameters
------
spikes : pandas dataframe
spike dataframe from core
trials : pandas dataframe
trials dataframe from events
clusterID : int
ID number of the trial you wish to make the raster for
raster_window : list of floats
Time window for the raster
rec : int
Recording ID
fs : float
Sampling rate
figsize : integer tuple
The figure plot size in tuple of integers, (width, height) in inches
fontsize : int
font size of the plot labels
kwargs :
keyword arguments are passed to the do_raster method
Returns
-------
figure : matplotlib.figure.Figure
the final figure with the plotted rasters
'''
stims = np.unique(trials['stimulus'].values)
# stims = stims[~np.isnan(stims)]
f, pltaxes = plt.subplots(subplot_xy[0], subplot_xy[1], sharey=True, figsize=figsize)
for ind, stim in enumerate(stims):
if str(stim) == 'nan':
continue
print(stim)
stimrecs = trials[trials['stimulus'] == stim]['recording']
ax = pltaxes.flatten()[ind]
plot_raster_cell_stim_emily(spikes, trials, clusterID, stim,
raster_window, rec, fs, ax=ax, **kwargs)
ax.set_title('Unit: {} Stim: {}'.format(str(clusterID), stim))
# ax.set_xlabel('Time (seconds)')
ax.set_ylabel('Repetition')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
return f
def plot_trial_raster_emily(spikes, trials, clusters, trialID, stim, period, rec, fs, ax=None, **kwargs):
'''
Plots TODO
Parameters
------
spikes : pandas dataframe
spike dataframe from core
trials : pandas dataframe
trials dataframe from events
clusters :
TODO
trialID : int
ID number of the trial you wish to make the raster for
stim : str
Name of the stimulus to plot raster for
period : list of floats
Time window for the raster:
[Seconds_pre_stimulus_onset, Seconds_post_stimulus_end]
fs : float
Sampling rate
ax : Matplotlib axes handle, optional
Axes on which to produce raster. Default gca.
kwargs :
keyword arguments are passed to the do_raster method
'''
stim_trials = trials[trials['stimulus'] == stim]
stim_recs = stim_trials['recording'].values
ntrials = len(stim_trials)
stim_starts = stim_trials['time_samples'].values
stim_ends = stim_trials['stimulus_end'].values
stim_start = stim_starts[trialID]
stim_end = stim_ends[trialID]
stim_end_seconds = (stim_end - stim_start) / fs
srec = stim_recs[trialID]
clusterIDs = clusters['cluster'].values
window = [period[0], stim_end_seconds + period[1]]
raster_data = []
for cluster in clusterIDs:
sptrain = get_spiketrain(srec, stim_start, cluster, spikes, window, fs)
raster_data.append(sptrain)
    do_raster(raster_data, window, [0, stim_end_seconds], len(clusterIDs), ax, **kwargs)
def plot_avg_gaussian_psth_cell_stim(spikes, trials, clusterID, stim, raster_window, rec, fs, ax=None):
return 0
def plot_unit_gaussian_psth(spikes, trials, clusterID, raster_window, rec, fs, subplot_xy, figsize):
'''
Plots average psth gaussian smoothed
Parameters
------
trials : pandas dataframe
trials dataframe from events
subplot_xy : tuple
location of the subplot to modify
figsize : integer tuple
The figure plot size in tuple of integers, (width, height) in inches
'''
stims = trials['stimulus'].unique()
f, pltaxes = plt.subplots(subplot_xy[0], subplot_xy[1], sharey=True, figsize=figsize)
|
#==============================================================================
# SM 2/2016
# Reads CSV frame data generated from the excel files using Pandas
# and generates the association matrix for Attacker and Comer data.
# Input : sheet_ID <frame ID>
# Output : R_DIR > ADJ_A/C_***.csv
#==============================================================================
import numpy as np
import pandas as pd
import os
import SNA_compute_ADJ_matrix as AM
def read(sheet_ID, PARSE='Attacker'):
C = np.array(pd.read_csv('../datasets/color-coding/color_coding.csv', usecols=['color ID', 'code']).fillna(0)).T
INFO = pd.read_csv(sheet_ID, usecols=[0,1])
if int(INFO.ix[0,0]) < 10:
FRAME_ID = '%s_F0%r' %(INFO.ix[0,1].split(" ")[0], int(INFO.ix[0,0]))
else:
FRAME_ID = '%s_F%r' %(INFO.ix[0,1].split(" ")[0], int(INFO.ix[0,0]))
#---------------------------------------------------------------------
if PARSE=='Attacker':
#---------------------------------------------------------------------
data_columns = [4, 7, 9, 10]
DATA = pd.read_csv(sheet_ID, usecols=data_columns).fillna(0)
DATA = DATA[DATA.ix[:,1] != 1]
DATA = DATA[DATA.ix[:,2] != 0]
DATA.ix[:,2] = DATA.ix[:,2].str.strip().fillna(0)
DATA.ix[:,3] = DATA.ix[:,3].str.strip().fillna(0)
D = np.array(DATA).T
ID_A = D[2]
CF_R = D[3]
for h, i in enumerate(C[0]):
np.put(ID_A, np.where(ID_A == i), C[1][h])
for k in range(0, len(ID_A)):
if isinstance(ID_A[k], basestring) == True:
ID_A[k] = -1
for j, i in enumerate(CF_R):
if i == 'R':
CF_R[j] = 10.0
elif i == 'G':
CF_R[j] = 14.0
else:
CF_R[j] = 0.0
R = []
for i, j in enumerate(D[0]):
if j != 0:
R.append(i)
C = np.hstack((np.diff(R),len(D[2]) - np.sum(np.diff(R))))
A = np.unique(D[0])
A = A[A > 0]
B = np.repeat(A, C)
REC_DATA = np.vstack((B, ID_A, CF_R))
# XXX: Consider max 22 trials in all frames [Removing F10]
SREC_DATA = REC_DATA.T
SREC_DATA = SREC_DATA[SREC_DATA[:, 0] < 23]
REC_DATA = SREC_DATA.T
DATAFRAME = pd.DataFrame({'D0: Trial':REC_DATA[0],'D1: Attackers':REC_DATA[1],'D2: Retreat':REC_DATA[2]})
if not os.path.exists('../output/csv/sequence'):
os.makedirs('../output/csv/sequence')
print "Saving parsed attacker data to %s.csv" %(FRAME_ID.split(" ", 1)[0])
DATAFRAME.to_csv('../output/csv/sequence/A_%s.csv'%(FRAME_ID.split(" ", 1)[0].split("_")[1]), sep = ',')
return None
|
'''@package encoders
contains the encoders for encoder-decoder classifiers'''
from . import ed_encoder, ed_encoder_factory
|
#
# Copyright (c) 2017, Stephanie Wehner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Stephanie Wehner, QuTech.
# 4. Neither the name of the QuTech organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SimulaQron.virtNode.crudeSimulator import simpleEngine
def testSimpleEngine():
print("RUNNING TESTS: Simple Engine\n")
tracingTest()
gateTest()
measureTest()
def tracingTest():
print("Testing the partial trace...")
se = simpleEngine(10)
se2 = simpleEngine(10)
se.add_fresh_qubit()
se.add_fresh_qubit()
se.add_fresh_qubit()
se2.add_fresh_qubit()
se2.add_fresh_qubit()
se2.add_fresh_qubit()
se.apply_X(0)
se.apply_X(2)
se2.apply_X(0)
se2.apply_X(1)
se.remove_qubit(1)
se2.remove_qubit(2)
if se.qubitReg != se2.qubitReg:
print("ERROR: Partial trace failed\n")
print("ok\n")
def gateTest():
print("Testing the gates...")
se = simpleEngine(10)
se.add_fresh_qubit()
savedQubit = se.qubitReg
se.apply_H(0)
se.apply_Z(0)
se.apply_H(0)
se.apply_X(0)
if savedQubit != se.qubitReg:
print("ERROR: Gate test failed\n")
print("ok\n")
def measureTest():
print("Testing a measurement...")
se = simpleEngine()
se.add_fresh_qubit()
outcome = se.measure_qubit(0)
if outcome != 0:
print("ERROR: Measurement test failed\n")
se.add_fresh_qubit()
se.apply_X(0)
outcome = se.measure_qubit(0)
if outcome != 1:
print("ERROR: Measurement test failed\n")
print("ok\n")
def main():
testSimpleEngine()
main()
|
from .rrsm import try_mkdir, reasonable_random_structure_maker
from .super_cell import compute_super_cell_needed_for_rcut, super_cell, super_cell_if_needed
from .polymorphD3 import PolymorphD3
from .job_control import vasp_job_maker, outcar_to_traj
from .kgrid import get_kpts_from_kpd, kgrid_from_cell_volume, safe_kgrid_from_cell_volume
from .contour_exploration import contour_exploration
from .utils import (reorder_image_list_for_balanced_atom_counts, get_image_list, get_traj_file_list)
|
# Generated by Django 3.2.12 on 2022-05-13 02:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wormil', '0002_alter_specimen_description'),
]
operations = [
migrations.AddField(
model_name='fossil',
name='organism_id',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='fossil',
name='taxonomic_problem',
field=models.BooleanField(blank=True, null=True),
),
]
|
#coding:utf-8
#
# id: bugs.core_2893
# title: Expression in a subquery may be treated as invariant and produce incorrect results
# description:
# Confirmed wrong resultset on 2.1.2.18118.
# Added sample from core-3031.
#
# tracker_id: CORE-2893
# min_versions: ['2.5.7']
# versions: 2.5.7
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.7
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table test_z (c varchar(10));
commit;
insert into test_z values (1);
insert into test_z values (1);
insert into test_z values (2);
insert into test_z values (3);
commit;
-- From CORE-3031:
create view v_test (f1, f2, f3) as
select '1.1', '1.2', '1.3' from rdb$database
union all
select '2.1', '2.2', '2.3' from rdb$database
union all
select '3.1', '3.2', '3.3' from rdb$database
;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set count on;
set list on;
select 'Test-1' as msg, t.*
from (
select (select case when R.RDB$Relation_ID = 0 then 0 else 1 end from RDB$Database) TypeID
from RDB$Relations R
where R.RDB$Relation_ID < 2
) t;
select 'Test-2' as msg, z.c
from test_z z
where
(
select z.c || '' from rdb$database
) = '1'
;
commit;
-- From CORE-3031:
select 'Test-3' as msg, t.*
from (
select
t.f1 || '; ' || t.f2 || '; ' || t.f3 as f123_concat,
(
select
'' || t.f3
from rdb$database
) as f3_concat
from v_test t
) t;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
MSG Test-1
TYPEID 0
MSG Test-1
TYPEID 1
Records affected: 2
MSG Test-2
C 1
MSG Test-2
C 1
Records affected: 2
MSG Test-3
F123_CONCAT 1.1; 1.2; 1.3
F3_CONCAT 1.3
MSG Test-3
F123_CONCAT 2.1; 2.2; 2.3
F3_CONCAT 2.3
MSG Test-3
F123_CONCAT 3.1; 3.2; 3.3
F3_CONCAT 3.3
Records affected: 3
"""
@pytest.mark.version('>=2.5.7')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
#!/usr/bin/env python
import os
import re
from setuptools import find_packages, setup
def get_version(*file_paths):
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
version = get_version("django_toolshed", "__init__.py")
readme = open("README.md").read()
setup(
name="django-toolshed",
version=version,
description="""Your project description goes here""",
long_description=readme,
author="Dani Hodovic",
author_email="you@example.com",
url="https://github.com/danihodovic/django-toolshed",
packages=find_packages(),
include_package_data=True,
install_requires=[],
license="MIT",
keywords="django,app",
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django :: 2.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
],
)
|
#!/usr/bin/python3
# buttcockles 1.0 - small scraper for findsubdomains.com
import requests
import argparse
import socket
from lxml import html
def geddem(args):
eh = "https://findsubdomains.com/subdomains-of/"
dom = args.domain
print("\033[1m[!] Looking up %s" % dom)
p = requests.get(eh+dom.strip())
print("\033[1m[!] Parsing shit...")
blergh = html.fromstring(p.content)
domains = blergh.find_class('row row-mobile')
as_blocks = blergh.xpath('//td[@data-field="AS"]/text()')
as_set = set(as_blocks)
as_list = list(as_set)
as_list.sort()
print("\033[91m[+] Found "+str(len(domains))+" domains")
print("\033[91m[+] Found "+str(len(as_list))+" AS")
if args.o:
ofile=open(args.o,"w")
# as
if args.a == True:
for ass in as_list:
if args.o:
ofile.write(ass+"\n")
else:
print("\033[94m"+ass)
rlist = []
for goodie in domains:
domain = goodie[0][1].text_content()
ip = goodie[1][1].text_content()
region = goodie[2][1].text_content()
AS = goodie[3][1].text_content()
org = goodie[4][1].text_content()
# full output
if args.f == True:
if args.o:
ofile.write("Domain: %s\nIP: %s\nRegion: %s\nAS: %s\nOrg: %s\n\n" % (domain, ip, region, AS, org))
else:
print("\033[92mDomain: %s\nIP: %s\nRegion: %s\nAS: %s\nOrg: %s\n" % (domain, ip, region, AS, org))
# domains only
if args.d == True:
rlist.append(domain.strip())
# ip
if args.i == True:
if not ip:
try:
rlist.append(socket.gethostbyname(domain))
except:
pass
else:
rlist.append(ip)
r_set = set(rlist)
rlistf = list(r_set)
rlistf.sort()
for res_elem in rlistf:
if args.o:
ofile.write(res_elem+"\n")
else:
print("\033[92m"+res_elem)
if args.o:
print("\033[93mWrote results to: "+args.o)
if __name__ == "__main__":
argps = argparse.ArgumentParser(prog="Buttcockles 1.0")
argps.add_argument("domain")
argps.add_argument("-f",action='store_true',help="full output")
argps.add_argument("-i",action='store_true',help="output ip addresses")
argps.add_argument("-d",action='store_true',help="output domains")
argps.add_argument("-a",action='store_true',help="output AS")
argps.add_argument("-o",help="output stuff to file")
args = argps.parse_args()
geddem(args)
|
from gwcconfig.GeoWebCacheServer import GeoWebCacheServer
try:
from config import *
except ImportError:
print("Failed to load settings")
def main():
server = GeoWebCacheServer(GWC_REST_API_URL,GWC_REST_API_USERNAME,GWC_REST_API_PASSWORD)
test_layer = server.get_layer(LAYER_NAME)
test_layer.fetch()
test_layer.expireCache = 604900
for gsu in test_layer.gridSubsets:
        print(gsu.gridSetName, gsu.extent_coords)
    print(test_layer.message())
server.update(test_layer)
if __name__ == '__main__':
main()
|
from unittest.mock import MagicMock, Mock, patch
import paho.mqtt.client as paho
import pytest
from tavern._plugins.mqtt.client import MQTTClient, _handle_tls_args
from tavern._plugins.mqtt.request import MQTTRequest
from tavern.util import exceptions
def test_host_required():
"""Always needs a host, but it's the only required key"""
with pytest.raises(exceptions.MissingKeysError):
MQTTClient()
args = {"connect": {"host": "localhost"}}
MQTTClient(**args)
class TestClient(object):
@pytest.fixture(name="fake_client")
def fix_fake_client(self):
args = {"connect": {"host": "localhost"}}
return MQTTClient(**args)
def test_no_message(self, fake_client):
"""No message in queue returns None"""
assert fake_client.message_received(0) is None
def test_message_queued(self, fake_client):
"""Returns message in queue"""
message = "abc123"
fake_client._userdata["queue"].put(message)
assert fake_client.message_received(0) == message
def test_context_connection_failure(self, fake_client):
"""Unable to connect on __enter__ raises MQTTError"""
fake_client._connect_timeout = 0.3
with patch.object(fake_client._client, "loop_start"):
with pytest.raises(exceptions.MQTTError):
with fake_client:
pass
def test_context_connection_success(self, fake_client):
"""returns self on success"""
with patch.object(fake_client._client, "loop_start"), patch.object(
fake_client._client, "connect_async"
):
fake_client._client._state = paho.mqtt_cs_connected
with fake_client as x:
assert fake_client == x
def test_assert_message_published(self, fake_client):
"""If it couldn't immediately publish the message, error out"""
class FakeMessage:
is_published = False
rc = 1
with patch.object(fake_client._client, "subscribe"), patch.object(
fake_client._client, "publish", return_value=FakeMessage()
):
with pytest.raises(exceptions.MQTTError):
fake_client.publish("abc", "123")
def test_assert_message_published_unknown_err(self, fake_client):
"""Same, but with an unknown error code"""
class FakeMessage:
is_published = False
rc = 2342423
with patch.object(fake_client._client, "subscribe"), patch.object(
fake_client._client, "publish", return_value=FakeMessage()
):
with pytest.raises(exceptions.MQTTError):
fake_client.publish("abc", "123")
class TestTLS(object):
def test_missing_cert_gives_error(self):
"""Missing TLS cert gives an error"""
args = {"certfile": "/lcliueurhug/ropko3kork32"}
with pytest.raises(exceptions.MQTTTLSError):
_handle_tls_args(args)
def test_disabled_tls(self):
"""Even if there are other invalid options, disable tls and early exit
without checking other args
"""
args = {"certfile": "/lcliueurhug/ropko3kork32", "enable": False}
parsed_args = _handle_tls_args(args)
assert not parsed_args
def test_invalid_tls_ver(self):
"""Bad tls versions raise exception
"""
args = {"tls_version": "custom_tls"}
with pytest.raises(exceptions.MQTTTLSError):
_handle_tls_args(args)
@pytest.fixture(name="req")
def fix_example_request():
spec = {"topic": "{request_topic:s}", "payload": "abc123"}
return spec.copy()
class TestRequests:
def test_unknown_fields(self, req, includes):
"""Unkown args should raise an error
"""
req["fodokfowe"] = "Hello"
with pytest.raises(exceptions.UnexpectedKeysError):
MQTTRequest(Mock(), req, includes)
def test_missing_format(self, req, includes):
"""All format variables should be present
"""
del includes["variables"]["request_topic"]
with pytest.raises(exceptions.MissingFormatError):
MQTTRequest(Mock(), req, includes)
def test_correct_format(self, req, includes):
"""All format variables should be present
"""
MQTTRequest(Mock(), req, includes)
class TestSubscription(object):
@staticmethod
    def get_mock_client_with(subscribe_action):
        mock_paho = Mock(spec=paho.Client, subscribe=subscribe_action)
mock_client = Mock(
spec=MQTTClient,
_client=mock_paho,
_subscribed={},
_subscribe_lock=MagicMock(),
)
return mock_client
def test_handles_subscriptions(self):
def subscribe_success(topic, *args, **kwargs):
return (0, 123)
mock_client = TestSubscription.get_mock_client_with(subscribe_success)
MQTTClient.subscribe(mock_client, "abc")
assert mock_client._subscribed[123].topic == "abc"
        assert mock_client._subscribed[123].subscribed is False
def test_no_subscribe_on_err(self):
def subscribe_err(topic, *args, **kwargs):
return (1, 123)
mock_client = TestSubscription.get_mock_client_with(subscribe_err)
MQTTClient.subscribe(mock_client, "abc")
assert mock_client._subscribed == {}
def test_no_subscribe_on_unrecognised_suback(self):
def subscribe_success(topic, *args, **kwargs):
return (0, 123)
mock_client = TestSubscription.get_mock_client_with(subscribe_success)
MQTTClient._on_subscribe(mock_client, "abc", {}, 123, 0)
assert mock_client._subscribed == {}
|
# -*- coding: utf-8-*-
import os
import logging
import pipes
import tempfile
import subprocess
import psutil
import signal
from abc import ABCMeta, abstractmethod
import yaml
import lib.diagnose
import lib.appPath
class AbstractVoiceEngine(object):
"""
Generic parent class for voice engine class
"""
__metaclass__ = ABCMeta
@classmethod
def get_config(cls):
return {}
@classmethod
def get_instance(cls):
config = cls.get_config()
instance = cls(**config)
return instance
@classmethod
@abstractmethod
def is_available(cls):
return lib.diagnose.check_executable('play')
def __init__(self, **kwargs):
self._logger = logging.getLogger(__name__)
        config_path = os.path.join(lib.appPath.CONFIG_PATH, 'log.yml')
if os.path.exists(config_path):
with open(config_path, 'r') as f:
profile = yaml.safe_load(f)
if 'level' in profile:
self._logger.setLevel(eval("logging."+profile['level']))
@abstractmethod
def say(self, phrase):
self._logger.info("Saying '%s' with dummy speaker", phrase)
pass
@abstractmethod
def transcribe(self, fp):
pass
def play(self, filename,tag=None):
'''
        tag: label for the speaker process that plays this audio
'''
cmd = ['play', str(filename)]
self._logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
with tempfile.TemporaryFile() as f:
self._play_process = subprocess.Popen(cmd,stdout=f,stderr=f,preexec_fn=os.setsid)
self._logger.debug("play pid: '%d'", self._play_process.pid)
pid_name = self.__class__.__name__+"_"+tag+"_play.pid" if tag is not None else self.__class__.__name__+"_play.pid"
pid_file = os.path.join(lib.appPath.DATA_PATH,pid_name)
with open(pid_file, 'w') as pid_fp:
pid_fp.write(str(self._play_process.pid))
pid_fp.close()
self._play_process.wait()
            # remove the pid file once playback has finished
if os.path.exists(pid_file):
os.remove(pid_file)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
def kill_play_procsss(self,tag=None):
pid_name = self.__class__.__name__+"_"+tag+"_play.pid" if tag is not None else self.__class__.__name__+"_play.pid"
pid_file = os.path.join(lib.appPath.DATA_PATH,pid_name)
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("pgkill play pid: %d",pid)
os.killpg(pid,signal.SIGKILL)
def suspend_play_process(self,tag=None):
res = None
pid_name = self.__class__.__name__+"_"+tag+"_play.pid" if tag is not None else self.__class__.__name__+"_play.pid"
pid_file = os.path.join(lib.appPath.DATA_PATH,pid_name)
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("suspend play pid: %d",pid)
res = psutil.Process(pid).suspend()
return res
def resume_play_process(self,tag=None):
pid_name = self.__class__.__name__+"_"+tag+"_play.pid" if tag is not None else self.__class__.__name__+"_play.pid"
pid_file = os.path.join(lib.appPath.DATA_PATH,pid_name)
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("resume play pid: %d",pid)
res = psutil.Process(pid).resume()
return res
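# Hedged sketch (added for illustration; "DummyVoiceEngine" is a hypothetical
# name and not part of the original project): a concrete engine only needs to
# implement the abstract hooks declared above.
class DummyVoiceEngine(AbstractVoiceEngine):

    @classmethod
    def is_available(cls):
        # reuse the base check for the 'play' executable
        return super(DummyVoiceEngine, cls).is_available()

    def say(self, phrase):
        # a real engine would synthesize audio here; this one only logs
        self._logger.info("Saying '%s' with dummy speaker", phrase)

    def transcribe(self, fp):
        # a real engine would return recognized text for the audio file object
        return []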
|
"""
Support for RainMachine devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/rainmachine/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_BINARY_SENSORS, CONF_IP_ADDRESS, CONF_PASSWORD,
CONF_PORT, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SSL,
CONF_MONITORED_CONDITIONS, CONF_SWITCHES)
from homeassistant.helpers import (
aiohttp_client, config_validation as cv, discovery)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
REQUIREMENTS = ['regenmaschine==1.0.2']
_LOGGER = logging.getLogger(__name__)
DATA_RAINMACHINE = 'data_rainmachine'
DOMAIN = 'rainmachine'
NOTIFICATION_ID = 'rainmachine_notification'
NOTIFICATION_TITLE = 'RainMachine Component Setup'
PROGRAM_UPDATE_TOPIC = '{0}_program_update'.format(DOMAIN)
SENSOR_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
ZONE_UPDATE_TOPIC = '{0}_zone_update'.format(DOMAIN)
CONF_PROGRAM_ID = 'program_id'
CONF_ZONE_ID = 'zone_id'
CONF_ZONE_RUN_TIME = 'zone_run_time'
DEFAULT_ATTRIBUTION = 'Data provided by Green Electronics LLC'
DEFAULT_ICON = 'mdi:water'
DEFAULT_PORT = 8080
DEFAULT_SCAN_INTERVAL = timedelta(seconds=60)
DEFAULT_SSL = True
DEFAULT_ZONE_RUN = 60 * 10
TYPE_FREEZE = 'freeze'
TYPE_FREEZE_PROTECTION = 'freeze_protection'
TYPE_FREEZE_TEMP = 'freeze_protect_temp'
TYPE_HOT_DAYS = 'extra_water_on_hot_days'
TYPE_HOURLY = 'hourly'
TYPE_MONTH = 'month'
TYPE_RAINDELAY = 'raindelay'
TYPE_RAINSENSOR = 'rainsensor'
TYPE_WEEKDAY = 'weekday'
BINARY_SENSORS = {
TYPE_FREEZE: ('Freeze Restrictions', 'mdi:cancel'),
TYPE_FREEZE_PROTECTION: ('Freeze Protection', 'mdi:weather-snowy'),
TYPE_HOT_DAYS: ('Extra Water on Hot Days', 'mdi:thermometer-lines'),
TYPE_HOURLY: ('Hourly Restrictions', 'mdi:cancel'),
TYPE_MONTH: ('Month Restrictions', 'mdi:cancel'),
TYPE_RAINDELAY: ('Rain Delay Restrictions', 'mdi:cancel'),
TYPE_RAINSENSOR: ('Rain Sensor Restrictions', 'mdi:cancel'),
TYPE_WEEKDAY: ('Weekday Restrictions', 'mdi:cancel'),
}
SENSORS = {
TYPE_FREEZE_TEMP: ('Freeze Protect Temperature', 'mdi:thermometer', '°C'),
}
BINARY_SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_START_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN):
cv.positive_int,
})
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SWITCH_SCHEMA = vol.Schema({vol.Optional(CONF_ZONE_RUN_TIME): cv.positive_int})
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN:
vol.Schema({
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL):
cv.time_period,
vol.Optional(CONF_BINARY_SENSORS, default={}):
BINARY_SENSOR_SCHEMA,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(CONF_SWITCHES, default={}): SWITCH_SCHEMA,
})
},
extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the RainMachine component."""
from regenmaschine import Client
from regenmaschine.errors import RequestError
conf = config[DOMAIN]
ip_address = conf[CONF_IP_ADDRESS]
password = conf[CONF_PASSWORD]
port = conf[CONF_PORT]
ssl = conf[CONF_SSL]
try:
websession = aiohttp_client.async_get_clientsession(hass)
client = Client(ip_address, websession, port=port, ssl=ssl)
await client.authenticate(password)
rainmachine = RainMachine(client)
await rainmachine.async_update()
hass.data[DATA_RAINMACHINE] = rainmachine
except RequestError as err:
_LOGGER.error('An error occurred: %s', str(err))
hass.components.persistent_notification.create(
'Error: {0}<br />'
'You will need to restart hass after fixing.'
''.format(err),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
return False
for component, schema in [
('binary_sensor', conf[CONF_BINARY_SENSORS]),
('sensor', conf[CONF_SENSORS]),
('switch', conf[CONF_SWITCHES]),
]:
hass.async_add_job(
discovery.async_load_platform(hass, component, DOMAIN, schema,
config))
async def refresh_sensors(event_time):
"""Refresh RainMachine sensor data."""
_LOGGER.debug('Updating RainMachine sensor data')
await rainmachine.async_update()
async_dispatcher_send(hass, SENSOR_UPDATE_TOPIC)
async_track_time_interval(hass, refresh_sensors, conf[CONF_SCAN_INTERVAL])
async def start_program(service):
"""Start a particular program."""
await rainmachine.client.programs.start(service.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
async def start_zone(service):
"""Start a particular zone for a certain amount of time."""
await rainmachine.client.zones.start(service.data[CONF_ZONE_ID],
service.data[CONF_ZONE_RUN_TIME])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
async def stop_all(service):
"""Stop all watering."""
await rainmachine.client.watering.stop_all()
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
async def stop_program(service):
"""Stop a program."""
await rainmachine.client.programs.stop(service.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
async def stop_zone(service):
"""Stop a zone."""
await rainmachine.client.zones.stop(service.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
for service, method, schema in [
('start_program', start_program, SERVICE_START_PROGRAM_SCHEMA),
('start_zone', start_zone, SERVICE_START_ZONE_SCHEMA),
('stop_all', stop_all, {}),
('stop_program', stop_program, SERVICE_STOP_PROGRAM_SCHEMA),
('stop_zone', stop_zone, SERVICE_STOP_ZONE_SCHEMA)
]:
hass.services.async_register(DOMAIN, service, method, schema=schema)
return True
class RainMachine(object):
"""Define a generic RainMachine object."""
def __init__(self, client):
"""Initialize."""
self.client = client
self.device_mac = self.client.mac
self.restrictions = {}
async def async_update(self):
"""Update sensor/binary sensor data."""
self.restrictions.update({
'current': await self.client.restrictions.current(),
'global': await self.client.restrictions.universal()
})
class RainMachineEntity(Entity):
"""Define a generic RainMachine entity."""
def __init__(self, rainmachine):
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._name = None
self.rainmachine = rainmachine
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attrs
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
|
from math import ceil, sqrt
# note: treats 1 as composite (returns False)
def isPrime(x):
if x > 6 and (x % 6 == 1 or x % 6 == 5):
limit = ceil(sqrt(x))
for i in range(2, (limit+1)):
if x % i == 0:
return False
return True
return (x == 2 or x == 3 or x == 5)
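# Hedged usage sketch (added for illustration, not part of the original file):
# a few spot checks that run only when this module is executed directly.
if __name__ == "__main__":
    assert isPrime(2) and isPrime(3) and isPrime(97)
    assert not isPrime(1) and not isPrime(25) and not isPrime(91)
    print("isPrime spot checks passed")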
|
"""
"""
# Full imports
import math
# Partial imports
from pytups import TupList, SuperDict
import numbers
def read_excel(path: str, param_tables_names: list = None) -> dict:
"""
Read an entire excel file.
:param path: path of the excel file
:param param_tables_names: names of the parameter tables
:return: a dict with a list of dict (records format) for each table.
"""
    if param_tables_names is None:
        param_tables_names = []
    is_xl_type(path)
try:
import openpyxl
except (ModuleNotFoundError, ImportError) as e:
raise Exception("You must install openpyxl package to use this method")
try:
import pandas as pd
except (ModuleNotFoundError, ImportError):
raise Exception("You must install pandas package to use this method")
data = pd.read_excel(path, sheet_name=None)
data_tables = {
name: TupList(content.to_dict(orient="records")).vapply(
lambda v: SuperDict(v).vapply(lambda vv: format_value(vv))
)
for name, content in data.items()
if name not in param_tables_names
}
parameters_tables = {
t: SuperDict(read_param_table(path, t)).vapply(lambda v: format_value(v))
for t in param_tables_names
}
return {**data_tables, **parameters_tables}
def read_param_table(path: str, table: str) -> dict:
"""
Read a list of parameters and their values from excel as a dict.
:param path: the excel file path
:param table: the table name
:return: a dict {param1: val1}
"""
content = read_excel_table(path, table, header=None)
return {d[0]: d[1] for d in content}
def read_excel_table(path: str, table: str, **kwargs):
"""
Read a table from excel
:param path: the excel file path
:param table: the table name
:return: a list of dict
"""
try:
import pandas as pd
except (ModuleNotFoundError, ImportError):
raise Exception("You must install pandas package to use this method")
data = pd.read_excel(path, sheet_name=table, **kwargs)
return data.to_dict(orient="records")
def format_value(value):
""""""
if isinstance(value, str):
if value in ["TRUE", "True"]:
return True
if value in ["FALSE", "False"]:
return False
return value
if isinstance(value, bool):
return value
if not isinstance(value, numbers.Number):
return str(value)
if math.isnan(value):
return None
return value
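# Illustrative examples of format_value (added as comments; derived from the
# code above rather than from the original module's documentation):
#   format_value("True")       -> True
#   format_value("some text")  -> "some text"
#   format_value(float("nan")) -> None
#   format_value(3.5)          -> 3.5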
def is_xl_type(path: str):
"""
Check if a path is an excel file. Raises an error if the file is not an excel file
:param path: path of the file
"""
    if not path.endswith((".xlsx", ".xls", ".xlsm")):
raise ConnectionError(
"Excel_file argument should be a string with an excel extension"
)
|
"""
Provides class and functions to interact with FOBOS aperture deployment.
.. include:: ../include/links.rst
"""
import time
from itertools import chain, combinations
from IPython import embed
import numpy
from scipy.optimize import linear_sum_assignment
from matplotlib import pyplot, patches
from sklearn.neighbors import KDTree
from astropy import units
from astropy.coordinates import SkyCoord, SkyOffsetFrame
from . import data_file
from .collisions import remove_collisions
from .data.bitmask import BitMask
class FOBOSModeBitMask(BitMask):
def __init__(self):
mask_bits = dict(MOS='Single-fiber, multi-object spectroscopy mode',
IFU='Multiplexed IFU mode',
MONO='Monolithic IFU mode')
super().__init__(list(mask_bits.keys()), descr=list(mask_bits.values()))
def validate(self, mode):
_mode = numpy.atleast_1d(mode)
valid_modes = [0] + (numpy.array(list(self.bits.values()))+1).tolist()
if any([m not in valid_modes for m in _mode]):
            raise ValueError('Invalid mode. Must be 0 (off), 1 (single-fiber), 2 (multi-IFU), '
'or 3 (monolithic IFU).')
class FOBOSApertureBitMask(BitMask):
def __init__(self):
mask_bits = dict(SCIENCE='Science aperture',
SKY='Designated sky fiber',
GUIDE='Tracking and focus-offset imaging bundle',
CALIB='Flux-calibration bundle')
super().__init__(list(mask_bits.keys()), descr=list(mask_bits.values()))
class FOBOSApertures:
module_src = data_file('deploy/fobos_modules.db')
"""
File providing the centers of all FOBOS starbug modules. This is currently
never used.
"""
starbug_src = data_file('deploy/fobos_starbugs.db')
"""
Main file with the distribution, types, etc of the FOBOS apertures.
"""
version = '0.1'
"""
Version of the aperture deployment design.
"""
mode_bm = FOBOSModeBitMask()
"""
Mode selection bit mask
"""
ap_bm = FOBOSApertureBitMask()
"""
Aperture selection bit mask
"""
fov = 20./60.
"""
Diameter of the field-of-view in decimal degrees.
"""
def __init__(self, mode=1, baseline=True, config=1):
"""
Args:
mode (:obj:`int`, array-like, optional):
The mode assignment for each spectrograph. Must be 1 for the
single-fiber apertures, 2 for multi-IFU mode, or 3 for
monolithic IFU mode. If a single integer, all spectrographs are
put in the same mode; otherwise, must provide the mode for each
of the 3 spectrographs separately.
baseline (:obj:`bool`, optional):
Flag to only return the apertures in the CoDR baseline
configuration.
config (:obj:`int`, optional):
Configuration selection for mapping aperture modules to each
spectrograph. Must be either: (1) modules mapped to each
spectrograph are spread over the entire focal plane; or (2)
modules mapped to each spectrograph fill coherent focal-plane
zones.
"""
# TODO: Check version?
_data = numpy.genfromtxt(str(self.starbug_src), dtype=str)
self.bid = _data[:,0].astype(int)
self.mid = _data[:,1].astype(int)
self.nap = self.bid.size
self.id = numpy.arange(self.nap, dtype=int)
self.id_name = numpy.array([f'{m}-{b}' for m, b in zip(self.mid, self.bid)])
self.spc = None
self.on = None
self.sky = None
self._spc1 = _data[:,2].astype(int)
self._on1 = _data[:,3].astype(int)
self._sky1 = _data[:,4].astype(int)
self._spc2 = _data[:,5].astype(int)
self._on2 = _data[:,6].astype(int)
self._sky2 = _data[:,7].astype(int)
self.coo = _data[:,8:10].astype(float)
self.in_baseline = _data[:,11].astype(int).astype(bool)
self.payload = _data[:,10].astype(int)
# Initialize the aperture types based only on the payload type.
self.type = numpy.zeros(self.nap, dtype=self.ap_bm.minimum_dtype(asuint=True))
indx = numpy.isin(self.payload, [0,1,2])
self.type[indx] = self.ap_bm.turn_on(self.type[indx], 'SCIENCE')
indx = self.payload == 3
self.type[indx] = self.ap_bm.turn_on(self.type[indx], 'GUIDE')
indx = self.payload == 4
self.type[indx] = self.ap_bm.turn_on(self.type[indx], 'CALIB')
self.mode = None
self.baseline = None
self.config = None
self.active = None
self.configure(mode=mode, baseline=baseline, config=config)
@staticmethod
def parse_mode(mode):
"""
Parse and validate the provided FOBOS spectrograph mode.
Args:
mode (:obj:`int`, array-like):
The mode assignment for each spectrograph. Must be 0 to turn
off all apertures, 1 to select single-fiber mode, 2 for
multi-IFU mode, or 3 for monolithic IFU mode. If a single
integer, all spectrographs are put in the same mode; otherwise,
must provide the mode for each of the 3 spectrographs
separately.
Returns:
`numpy.ndarray`_: A 3-element array with the integer mode identifier
for each of the three spectrographs.
Raises:
ValueError:
Raised if the mode number is not understood, or if the mode is
not provided as a single value or one for each spectrograph.
"""
# Check the mode input
_mode = numpy.atleast_1d(mode).astype(int)
if _mode.size == 1:
_mode = numpy.repeat(_mode, 3)
if _mode.size != 3:
raise ValueError(f'Mode not understood: {mode}. Must enter single mode for all '
f'spectrographs, or the mode for each of the three spectrographs.')
# Check the modes are valid
FOBOSApertures.mode_bm.validate(_mode)
return _mode
def configure(self, mode=None, baseline=None, config=None):
"""
Set the active apertures based on the provided mode.
        Configuration sets internal attributes that can be accessed after this call.
Args:
mode (:obj:`int`, array-like):
The mode assignment for each spectrograph. Must be 1 for the
single-fiber apertures, 2 for multi-IFU mode, or 3 for
monolithic IFU mode. If a single integer, all spectrographs are
put in the same mode; otherwise, must provide the mode for each
of the 3 spectrographs separately.
baseline (:obj:`bool`, optional):
Flag to only return the apertures in the CoDR baseline
configuration.
config (:obj:`int`, optional):
Configuration selection for mapping aperture modules to each
spectrograph. Must be either: (1) modules mapped to each
spectrograph are spread over the entire focal plane; or (2)
modules mapped to each spectrograph fill coherent focal-plane
zones.
"""
if mode is None and self.mode is None:
# Assume this is the first configuration
self.mode = self.parse_mode(1)
elif mode is not None:
# Parse and check the mode
self.mode = self.parse_mode(mode)
if baseline is None and self.baseline is None:
# Assume this is the first configuration
self.baseline = True
elif baseline is not None:
self.baseline = baseline
if config is None and self.config is None:
# Assume this is the first configuration
self.config = 1
elif config is not None:
self.config = config
# Set the configuration
if self.config == 1:
self.spc = self._spc1
self.on = self._on1
self.sky = self._sky1
elif self.config == 2:
self.spc = self._spc2
self.on = self._on2
self.sky = self._sky2
else:
raise ValueError(f'Spectrograph configuration {config} unknown. Must be 1 or 2.')
# Reset all science and sky apertures
indx = numpy.isin(self.payload, [0,1,2])
self.type[indx] = self.ap_bm.turn_on(self.type[indx], 'SCIENCE')
self.type = self.ap_bm.turn_off(self.type, 'SKY')
# Loop through each spectrograph to find the active apertures and set
# their type.
self.active = numpy.zeros(self.nap, dtype=bool)
for i in range(3):
if self.mode[i] == 0:
continue
_mode = self.mode_bm.keys()[self.mode[i]-1]
self.active |= (self.spc == i+1) & self.mode_bm.flagged(self.on, _mode)
# Set sky fibers (if any)
indx = self.mode_bm.flagged(self.sky, _mode)
if numpy.any(indx):
self.type[indx] = self.ap_bm.turn_off(self.type[indx], 'SCIENCE')
self.type[indx] = self.ap_bm.turn_on(self.type[indx], 'SKY')
# Restrict based on baseline
if self.baseline:
self.active &= self.in_baseline
def select(self, payload):
r"""
Construct a boolean array that selects specific payload types.
Type must be:
- ``'science'`` for science apertures,
- ``'sky'`` for designated sky apertures,
- ``'guide'`` for imaging guide bundles, or
- ``'calib'`` for flux-calibration bundles.
Note that in MOS mode (mode=1), the only sky fibers are the ones
designated for the always-ready IFU in spectrograph 1.
Args:
payload (:obj:`str`):
The payload selection.
Returns:
`numpy.ndarray`_: A boolean array used to select the viable
apertures of the requested type.
"""
valid = list(self.ap_bm.bits.keys())
if payload.upper() not in valid:
raise ValueError(f"Unknown payload type: {payload.upper()}. Options are: "
f"{', '.join(valid)}")
return self.active & self.ap_bm.flagged(self.type, payload.upper())
def show(self, include_patrol=False, by_spec=False, legend=True):
"""
Make a plot of the currently active apertures.
Args:
include_patrol (:obj:`bool`, optional):
Use a light shaded region to show the patrol region of every
aperture.
by_spec (:obj:`bool`, optional):
Instead of coloring the aperture locations by their payload
type, color them by their spectrograph mapping.
legend (:obj:`bool`, optional):
Include the plot legend
"""
w,h = pyplot.figaspect(1)
fig = pyplot.figure(figsize=(1.5*w,1.5*h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_xlim([-14, 14])
ax.set_ylim([-14, 14])
ax.minorticks_on()
ax.grid(True, which='major', color='0.9', zorder=0, linestyle='-')
ax.tick_params(which='major', direction='in', length=8, top=True, right=True)
ax.tick_params(which='minor', direction='in', length=4, top=True, right=True)
# Single fibers
indx = self.active & (self.payload == 0)
if by_spec:
_indx = indx & (self.spc == 1)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
marker='.', s=40, lw=0, color='C0', zorder=5, label='Single Fiber (Spec 1)')
_indx = indx & (self.spc == 2)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
marker='.', s=40, lw=0, color='C2', zorder=5, label='Single Fiber (Spec 2)')
_indx = indx & (self.spc == 3)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
marker='.', s=40, lw=0, color='C4', zorder=5, label='Single Fiber (Spec 3)')
else:
ax.scatter(self.coo[indx,0], self.coo[indx,1],
marker='.', s=40, lw=0, color='C1', zorder=5, label='Single Fiber')
# IFUs
indx = self.active & (self.payload == 1)
if by_spec:
_indx = indx & (self.spc == 1)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
marker='s', s=20, lw=0.5, color='C0',zorder=5, label='IFU (Spec 1)')
_indx = indx & (self.spc == 2)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
marker='s', s=20, lw=0.5, color='C2',zorder=5, label='IFU (Spec 2)')
_indx = indx & (self.spc == 3)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
marker='s', s=20, lw=0.5, color='C4',zorder=5, label='IFU (Spec 3)')
else:
ax.scatter(self.coo[indx,0], self.coo[indx,1],
marker='s', s=20, lw=0.5, color='C2',zorder=5, label='IFU')
# Guide bundles
indx = self.active & (self.payload == 3)
ax.scatter(self.coo[indx,0], self.coo[indx,1],
marker='o', s=20, lw=0.5, color='C3', zorder=5, label='Guide Bundle')
# Flux-calibration bundles
indx = self.active & (self.payload == 4)
if by_spec:
_indx = indx & (self.spc == 1)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
marker='x', s=20, lw=1, color='C0', zorder=5, label='Calib. Bundle (Spec 1)')
_indx = indx & (self.spc == 2)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
                       marker='x', s=20, lw=1, color='C2', zorder=5, label='Calib. Bundle (Spec 2)')
_indx = indx & (self.spc == 3)
ax.scatter(self.coo[_indx,0], self.coo[_indx,1],
                       marker='x', s=20, lw=1, color='C4', zorder=5, label='Calib. Bundle (Spec 3)')
else:
ax.scatter(self.coo[indx,0], self.coo[indx,1],
marker='x', s=20, lw=1, color='C4', zorder=5, label='Calib. Bundle')
# Include the patrol region (this in combination with the legend can
# make the plot very slow)
if include_patrol:
indx = self.active & numpy.isin(self.payload, [0,1,3,4])
for x, y in self.coo[indx]:
ax.add_patch(patches.Circle((x,y), radius=138/60., facecolor='k',
edgecolor='none', zorder=3, alpha=0.05))
# FOBOS field-of-view
ax.add_patch(patches.Circle((0.,0.), radius=10., facecolor='none', edgecolor='C3',
zorder=4, label='FOBOS FOV'))
if legend:
ax.legend()
ax.set_xlabel(r'$\xi$ [arcmin]')
ax.set_ylabel(r'$\eta$ [arcmin]')
pyplot.show()
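# Hedged usage sketch (added for illustration, not part of the original module):
#
#   ap = FOBOSApertures(mode=2, baseline=True, config=1)   # all spectrographs in multi-IFU mode
#   science = ap.select('science')                         # boolean mask of active science apertures
#   ap.show(by_spec=True)                                  # plot apertures colored by spectrograph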
|
#!/usr/bin/env python3
"""Graficzny interfejs użytkownika"""
import pygame
import constants as c
# Ignore false positive pygame errors
# pylint: disable=no-member
class Gui:
"""Obsługa graficznego interfejsu użytkownika."""
def __init__(self, screen):
self.screen = screen
def draw_screen(self, action, rules, rules2=None, rules3=None):
"""Rysowanie ekranu z tytułem i komunikatem."""
self.draw_background(c.PROMPT_BACKGROUND_COLOR)
self.draw_text(self.screen, 100, 100, "Gomoku", 84, c.PROMPT_TEXT_COLOR)
self.draw_text(self.screen, 100, 250, rules, 26, c.PROMPT_TEXT_COLOR)
self.draw_text(self.screen, 100, 300, rules2, 26, c.PROMPT_TEXT_COLOR)
self.draw_text(self.screen, 100, 350, rules3, 26, c.PROMPT_TEXT_COLOR)
self.draw_text(self.screen, 100, 700, action, 26, c.PROMPT_TEXT_COLOR)
def draw_background(self, color):
"""Rysowanie tła."""
self.screen.fill(color)
def draw_grid(self):
"""Rysuje pionowe i poziome linie."""
for i in range(c.GRID_X_BEGIN, c.GRID_X_END, c.GRID_TILESIZE):
pygame.draw.line(self.screen, c.GRID_COLOR,
(i, c.GRID_Y_BEGIN), (i, c.GRID_Y_END), 2)
for i in range(c.GRID_Y_BEGIN, c.GRID_Y_END, c.GRID_TILESIZE):
pygame.draw.line(self.screen, c.GRID_COLOR,
(c.GRID_X_BEGIN, i), (c.GRID_X_END, i), 2)
def show_actual_player(self):
"""Pokazywanie aktualnego gracza w rogu ekranu podczas rozgrywki."""
rect1 = pygame.draw.rect(self.screen, c.GAME_BACKGROUND_COLOR,
((50, 25), (125, 40)))
pygame.display.update(rect1)
if self.next_player == c.HUMAN:
rect2 = self.draw_text(self.screen, 50, 25, "Human", 28,
c.HUMAN_STONES_COLOR)
elif self.next_player == c.COMPUTER:
rect2 = self.draw_text(self.screen, 50, 25, "Computer", 28,
c.COMPUTER_STONES_COLOR)
pygame.display.update(rect2)
def show_end_state_of_game(self):
"""Pokazywanie paska z informacją o zakończeniu gry."""
rect1 = pygame.draw.rect(self.screen, c.PROMPT_BACKGROUND_COLOR,
((0, 0), (800, 75)))
pygame.display.update(rect1)
if self.winner == c.HUMAN:
rect2 = self.draw_text(self.screen, 50, 15, "Human won", 36,
c.GRID_COLOR)
elif self.winner == c.COMPUTER:
rect2 = self.draw_text(self.screen, 50, 15, "Computer won", 36,
c.GRID_COLOR)
elif self.winner == c.PLAYER_DRAW:
rect2 = self.draw_text(self.screen, 50, 15, "Draw", 36,
c.GRID_COLOR)
pygame.display.update(rect2)
def draw_text(self, surface, x_position, y_position, text, size, color,
font_family=c.FONT_ICEBERG):
"""Rysowanie tekstu na ekranie."""
font = pygame.font.Font(font_family, size)
rendered_text = font.render(text, True, color)
rect = rendered_text.get_rect()
rect.topleft = (x_position, y_position)
surface.blit(rendered_text, rect)
return rect
def draw_welcome_screen(self):
"""Pokazywanie ekranu powitalnego po uruchomieniu aplikacji."""
rules = "The winner is the first player whose form an unbroken line"
rules2 = "of exactly 5 stones horizontally, vertically or diagonally"
action = "Click anywhere to start"
self.draw_screen(action, rules, rules2)
def draw_gameover_screen(self):
"""Pokazywanie ekranu informującego o zakończeniu gry."""
rules = None
if self.winner == c.HUMAN:
rules = "Human won"
elif self.winner == c.COMPUTER:
rules = "Computer won"
elif self.winner == c.PLAYER_DRAW:
rules = "Draw. There is no winner"
else:
rules = "You are still playing"
rules2 = f'Human {str(self.human_wins)} : {str(self.computer_wins)} Computer'
action = "Click anywhere to start next game"
self.draw_screen(action, rules, rules2)
if __name__ == "__main__":
print("You should run gomoku.py file")
|
my_foods = ['pizza', 'falafel', 'carrot cake']
friend_foods = my_foods[:]
print(my_foods)
print(friend_foods)
my_foods.append('cannoli')
friend_foods.append('ice cream')
print(my_foods)
print(friend_foods)
for food in my_foods:
print(food)
for food in friend_foods:
print(food)
friend_foods = my_foods
print(my_foods)
print(friend_foods)
my_foods.append('cannoli')
friend_foods.append('ice cream')
print(my_foods)
print(friend_foods)
|
import datetime
import os
from enum import Enum
from io import BytesIO, RawIOBase
from typing import IO, Any, Dict, List, Optional, Union, cast
import arrow
import requests
from typing_extensions import Protocol
try:
from mcap.mcap0.records import Schema as McapSchema
from mcap.mcap0.stream_reader import StreamReader as McapStreamReader
except ModuleNotFoundError:
McapSchema = None
McapStreamReader = None
try:
from mcap_ros1.decoder import Decoder as Ros1Decoder
except ModuleNotFoundError:
Ros1Decoder = None
try:
from mcap_protobuf.decoder import Decoder as ProtobufDecoder
except ModuleNotFoundError:
ProtobufDecoder = None
def decoder_for_schema(schema: Any):
if not McapSchema:
return None
if isinstance(schema, McapSchema) and schema.encoding == "ros1msg":
if not Ros1Decoder:
raise Exception(
"Mcap ROS1 library not found. Please install the mcap-ros1-support library."
)
return Ros1Decoder
if isinstance(schema, McapSchema) and schema.encoding == "protobuf":
if not ProtobufDecoder:
raise Exception(
"Mcap protobuf library not found. Please install the mcap-protobuf-support library."
)
return ProtobufDecoder
def camelize(snake_name: Optional[str]) -> Optional[str]:
"""
Convert a valid snake_case field name to camelCase for the API
"""
if not snake_name:
return snake_name
parts = snake_name.split("_")
return "".join([parts[0]] + [w.title() for w in parts[1:]])
class FoxgloveException(Exception):
pass
class ProgressCallback(Protocol):
def __call__(self, progress: int) -> None:
pass
class SizeProgressCallback(Protocol):
def __call__(self, size: int, progress: int) -> None:
pass
class OutputFormat(Enum):
bag = "bag1"
mcap0 = "mcap0"
class ProgressBufferReader(IO[Any]):
def __init__(
self,
buf: Union[bytes, IO[Any]],
callback: Optional[SizeProgressCallback] = None,
):
self.__callback = callback
self.__progress = 0
if isinstance(buf, bytes):
self.__length = len(buf)
self.__buf = BytesIO(buf)
else:
self.__length = os.fstat(buf.fileno()).st_size
self.__buf = buf
def __len__(self):
return self.__length
def read(self, n: int = -1) -> bytes:
chunk = self.__buf.read(n) or bytes()
self.__progress += int(len(chunk))
if self.__callback:
self.__callback(size=self.__length or 0, progress=self.__progress)
return chunk
def tell(self) -> int:
return self.__progress
def json_or_raise(response: requests.Response):
"""
Returns parsed JSON response, or raises if API returned an error.
For client errors (4xx), the server message is included.
"""
try:
json = response.json()
except ValueError:
raise requests.exceptions.HTTPError(
"500 Server Error: Unexpected format", response=response
)
if 400 <= response.status_code < 500:
response.reason = json.get("error", response.reason)
response.raise_for_status()
return json
class Client:
def __init__(self, token: str):
self.__token = token
self.__headers = {
"Content-type": "application/json",
"Authorization": "Bearer " + self.__token,
}
def __url__(self, path: str):
return f"https://api.foxglove.dev{path}"
def create_event(
self,
device_id: str,
time: datetime.datetime,
duration: int,
metadata: Optional[Dict[str, str]] = {},
):
"""
Creates a new event.
        device_id: The unique id of the device associated with this event.
time: The time at which the event occurred.
duration: The duration of the event. Zero for an instantaneous event.
metadata: Optional metadata attached to the event.
"""
response = requests.post(
self.__url__("/beta/device-events"),
headers=self.__headers,
json={
"deviceId": device_id,
"durationNanos": str(duration),
"metadata": metadata,
"timestamp": time.astimezone().isoformat(),
},
)
event = json_or_raise(response)
return {
"id": event["id"],
"device_id": event["deviceId"],
"timestamp_nanos": event["timestampNanos"],
"duration_nanos": event["durationNanos"],
"metadata": event["metadata"],
"created_at": arrow.get(event["createdAt"]).datetime,
"updated_at": arrow.get(event["updatedAt"]).datetime,
}
def delete_event(
self,
event_id: str,
):
"""
Deletes an event.
event_id: The id of the event to delete.
"""
request = requests.delete(
self.__url__(f"/beta/device-events/{event_id}"),
headers=self.__headers,
)
request.raise_for_status()
def get_events(
self,
device_id: Optional[str] = None,
device_name: Optional[str] = None,
sort_by: Optional[str] = None,
sort_order: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
key: Optional[str] = None,
value: Optional[str] = None,
):
"""
Retrieves events.
device_id: Id of the device associated with the events.
device_name: Name of the device associated with events.
Either device_id or device_name is required.
sort_by: Optionally sort records by this field name (e.g. "device_id").
sort_order: Optionally specify the sort order, either "asc" or "desc".
        limit: Optionally limit the number of records returned.
offset: Optionally offset the results by this many records.
start: Optionally exclude records before this time.
end: Optionally exclude records after this time.
        key: Optionally only return records whose metadata contains this key.
        value: Optionally only return records whose metadata key equals this value.
"""
if not device_id and not device_name:
raise FoxgloveException("One of device_id or device_name is required.")
params = {
"deviceId": device_id,
"deviceName": device_name,
"sortBy": camelize(sort_by),
"sortOrder": sort_order,
"limit": limit,
"offset": offset,
"start": start.astimezone().isoformat() if start else None,
"end": end.astimezone().isoformat() if end else None,
"key": key,
"value": value,
}
response = requests.get(
self.__url__("/beta/device-events"),
headers=self.__headers,
params={k: v for k, v in params.items() if v},
)
json = json_or_raise(response)
return [
{
"id": e["id"],
"device_id": e["deviceId"],
"duration": int(e["durationNanos"]),
"metadata": e["metadata"],
# datetime doesn't support nanoseconds so we have to divide by 1e9 first.
"timestamp": arrow.get(int(e["timestampNanos"]) / 1e9).datetime,
"timestamp_nanos": int(e["timestampNanos"]),
"created_at": arrow.get(e["createdAt"]).datetime,
"updated_at": arrow.get(e["updatedAt"]).datetime,
}
for e in json
]
def get_messages(
self,
device_id: str,
start: datetime.datetime,
end: datetime.datetime,
topics: List[str] = [],
):
"""
Returns a list of tuples of (topic, raw mcap record, decoded message).
This will throw an exception if an appropriate message decoder can't be found.
device_id: The id of the device that originated the desired data.
start: The earliest time from which to retrieve data.
end: The latest time from which to retrieve data.
topics: An optional list of topics to retrieve.
All topics will be retrieved if this is omitted.
"""
if not McapSchema or not McapStreamReader:
raise Exception("Mcap library not found. Please install the mcap library.")
data = self.download_data(
device_id=device_id, start=start, end=end, topics=topics
)
reader = McapStreamReader(cast(RawIOBase, BytesIO(data)))
decoder = None
for r in reader.records:
decoder = decoder_for_schema(r)
if decoder:
break
if not decoder:
raise Exception("Unknown mcap file encoding encountered.")
return [
m
for m in decoder(McapStreamReader(cast(RawIOBase, BytesIO(data)))).messages
]
def download_data(
self,
device_id: str,
start: datetime.datetime,
end: datetime.datetime,
topics: List[str] = [],
output_format: OutputFormat = OutputFormat.mcap0,
callback: Optional[ProgressCallback] = None,
) -> bytes:
"""
Returns raw data bytes.
device_id: The id of the device that originated the desired data.
start: The earliest time from which to retrieve data.
end: The latest time from which to retrieve data.
topics: An optional list of topics to retrieve.
All topics will be retrieved if this is omitted.
output_format: The output format of the data, either .bag or .mcap, defaulting to .mcap.
"""
params = {
"deviceId": device_id,
"end": end.astimezone().isoformat(),
"outputFormat": output_format.value,
"start": start.astimezone().isoformat(),
"topics": topics,
}
link_response = requests.post(
self.__url__("/v1/data/stream"),
headers=self.__headers,
json={k: v for k, v in params.items() if v},
)
json = json_or_raise(link_response)
link = json["link"]
response = requests.get(link, stream=True)
data = bytes()
for chunk in response.iter_content(chunk_size=32 * 1024):
data += chunk
if callback:
callback(progress=len(data))
return data
def get_coverage(
self,
start: datetime.datetime,
end: datetime.datetime,
device_id: Optional[str] = None,
tolerance: Optional[int] = None,
):
"""
List coverage ranges for data.
:param start: The earliest time after which to retrieve data.
:param end: The latest time before which to retrieve data.
:param device_id: Optional device id to limit data by.
:param tolerance: Minimum interval (in seconds) that ranges must be separated by
to be considered discrete.
"""
params = {
"deviceId": device_id,
"tolerance": tolerance,
"start": start.astimezone().isoformat(),
"end": end.astimezone().isoformat(),
}
response = requests.get(
self.__url__("/v1/data/coverage"),
headers=self.__headers,
params={k: v for k, v in params.items() if v},
)
json = json_or_raise(response)
return [
{
"device_id": c["deviceId"],
"start": arrow.get(c["start"]).datetime,
"end": arrow.get(c["end"]).datetime,
}
for c in json
]
def get_device(self, device_id: str):
"""
Gets a single device by id.
:param device_id: The id of the device to retrieve.
"""
response = requests.get(
self.__url__(f"/v1/devices/{device_id}"),
headers=self.__headers,
)
device = json_or_raise(response)
return {
"id": device["id"],
"name": device["name"],
"serial_number": device["serialNumber"],
"created_at": arrow.get(device["createdAt"]).datetime,
"updated_at": arrow.get(device["updatedAt"]).datetime,
}
def get_devices(self):
"""
Returns a list of all devices.
"""
response = requests.get(
self.__url__("/v1/devices"),
headers=self.__headers,
)
json = json_or_raise(response)
return [
{
"id": d["id"],
"name": d["name"],
"serial_number": d["serialNumber"],
"created_at": arrow.get(d["createdAt"]).datetime,
"updated_at": arrow.get(d["updatedAt"]).datetime,
}
for d in json
]
def create_device(
self,
name: str,
serial_number: str,
):
"""
Creates a new device.
        name: The name of the device.
        serial_number: The unique serial number of the device.
"""
request = requests.post(
self.__url__("/v1/devices"),
headers=self.__headers,
json={
"name": name,
"serialNumber": serial_number,
},
)
request.raise_for_status()
device = request.json()
return {
"id": device["id"],
"name": device["name"],
"serial_number": device["serialNumber"],
}
def delete_import(self, device_id: str, import_id: str):
"""
Deletes an existing import.
:param device_id: The id of the device associated with the import.
:param import_id: The id of the import to delete.
"""
request = requests.delete(
self.__url__(f"/v1/data/imports/{import_id}"),
params={"deviceId": device_id},
headers=self.__headers,
)
request.raise_for_status()
def get_imports(
self,
device_id: Optional[str] = None,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
data_start: Optional[datetime.datetime] = None,
data_end: Optional[datetime.datetime] = None,
include_deleted: bool = False,
filename: Optional[str] = None,
):
"""
Fetches imports.
:param device_id: The id of the device associated with the import.
:param start: Optionally filter by import start time.
:param end: Optionally filter by import end time.
:param data_start: Optionally filter by data start time.
:param data_end: Optionally filter by data end time.
:param include_deleted: Include deleted imports.
:param filename: Optionally filter by matching filename.
"""
all_params = {
"deviceId": device_id,
"start": start.astimezone().isoformat() if start else None,
"end": end.astimezone().isoformat() if end else None,
"dataStart": data_start.astimezone().isoformat() if data_start else None,
"dataEnd": data_end.astimezone().isoformat() if data_end else None,
"includeDeleted": include_deleted,
"filename": filename,
}
response = requests.get(
self.__url__("/v1/data/imports"),
params={k: v for k, v in all_params.items() if v},
headers=self.__headers,
)
json = json_or_raise(response)
return [
{
"import_id": i["importId"],
"device_id": i["deviceId"],
"import_time": arrow.get(i["importTime"]).datetime,
"start": arrow.get(i["start"]).datetime,
"end": arrow.get(i["end"]).datetime,
"metadata": i["metadata"],
"input_type": i["inputType"],
"output_type": i["outputType"],
"filename": i["filename"],
"input_size": i["inputSize"],
"total_output_size": i["totalOutputSize"],
}
for i in json
]
def get_topics(
self,
device_id: str,
start: datetime.datetime,
end: datetime.datetime,
):
response = requests.get(
self.__url__("/v1/data/topics"),
headers=self.__headers,
params={
"deviceId": device_id,
"start": start.astimezone().isoformat(),
"end": end.astimezone().isoformat(),
"includeSchemas": "false",
},
)
json = json_or_raise(response)
return [
{
"topic": t["topic"],
"version": t["version"],
"encoding": t["encoding"],
"schema_encoding": t["schemaEncoding"],
"schema_name": t["schemaName"],
}
for t in json
]
def upload_data(
self,
device_id: str,
filename: str,
data: Union[bytes, IO[Any]],
callback: Optional[SizeProgressCallback] = None,
):
"""
Uploads data in bytes.
device_id: Device id of the device from which this data originated.
filename: A filename to associate with the data. The data format will be
inferred from the file extension.
data: The raw data in .bag or .mcap format.
callback: An optional callback to report progress on the upload.
"""
link_response = requests.post(
self.__url__("/v1/data/upload"),
headers=self.__headers,
json={
"deviceId": device_id,
"filename": filename,
},
)
json = json_or_raise(link_response)
link = json["link"]
buffer = ProgressBufferReader(data, callback=callback)
upload_request = requests.put(
link,
data=buffer,
headers={"Content-Type": "application/octet-stream"},
)
return {
"link": link,
"text": upload_request.text,
"code": upload_request.status_code,
}
__all__ = ["Client", "FoxgloveException", "OutputFormat"]
|
from typing import (Callable,
Type)
from cfractions import Fraction
from shewchuk import Expansion
from ground.core.hints import (Multipoint,
Point)
def centroid(multipoint: Multipoint,
point_cls: Type[Point],
inverse: Callable[[int], Fraction] = Fraction(1).__truediv__
) -> Point:
result_x = result_y = Expansion()
for point in multipoint.points:
result_x += point.x
result_y += point.y
inverted_points_count = inverse(len(multipoint.points))
return point_cls(result_x * inverted_points_count,
result_y * inverted_points_count)
|
from django.http import HttpResponse
#from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.forms.models import model_to_dict
from django.db.models import Count, Q
from models import CollectItem, CollectTag
import json
import time
# TBD: rewrite to django-rest-framework.org later
def dthandler(obj):
    """Fallback serializer for json.dumps: render datetime objects as ISO 8601 strings."""
    return obj.isoformat() if hasattr(obj, 'isoformat') else str(obj)
def collect_item_create(request):
""" Create item """
#TBD: fix this dirty hack! Also, cannot use forms since we have a list value and data not in proper format
formdata = json.loads(request.body)
item = CollectItem(user=request.user,
url=formdata['url'],
title=formdata['title'],
description=formdata['description'])
item.save()
for t in formdata['tags']:
CollectTag(user=request.user, item=item, tag=t).save()
    #serializers.serialize('json', [item, ]) # no, I don't want to expose all fields
formdata['id'] = item.id # Add ID field to identify a new item on frontend
return HttpResponse(json.dumps(formdata), content_type="application/json")
def collect_item_delete(request, id):
""" Delete item """
CollectItem.objects.filter(user=request.user, id=id).delete()
return HttpResponse('', content_type="application/json")
def collect_item_update(request, id):
""" Update item """
try:
item = CollectItem.objects.filter(user=request.user).get(id=id)
except CollectItem.DoesNotExist:
return HttpResponse("{'error': 'The Item that is being updated does not exist!'}", status=400, content_type="application/json")
formdata = request.read() # no request.PUT in django, manual processing
formdata = json.loads(formdata)
item.url, item.title, item.description = formdata['url'], formdata['title'], formdata['description']
item.save()
CollectTag.objects.filter(item=item).delete()
for t in formdata['tags']:
tag = CollectTag(user=request.user, item=item, tag=t)
tag.save()
formdata['id'] = item.id
return HttpResponse(json.dumps(formdata), content_type="application/json")
def collect_items_get(request):
""" Get items """
if 'tag' in request.GET: # Form a Queryset
tags = CollectTag.objects.select_related().filter(tag__exact=request.GET['tag'], user__exact=request.user)
collection = [t.item for t in tags]
    elif 'search' in request.GET: # does not work for sqlite
search = request.GET['search']
collection = CollectItem.objects.filter(user=request.user)\
.filter(Q(url__search=search) | Q(title__search=search) | Q(description__search=search))\
.order_by('-timestamp')
else:
collection = CollectItem.objects.filter(user=request.user)\
.order_by('-timestamp')
if 'offset' in request.GET:
offset = int(request.GET['offset'])
collection = collection[offset:offset + 50]
else:
collection = collection[:50]
items = []
for i in collection:
items.append(model_to_dict(i)) # , fields=[], exclude=[]) )
        items[-1]['timestamp'] = time.mktime(i.timestamp.timetuple())
tags = CollectTag.objects.filter(item=i).all()
items[-1]['tags'] = [t.tag for t in tags]
#TBD: cache control
return HttpResponse(json.dumps(items, default=dthandler), content_type="application/json")
@login_required
def collect_item(request, id):
""" Binds update and delete to the same URL -- requests with item ID"""
if request.method == 'DELETE':
return collect_item_delete(request, id)
elif request.method == 'PUT':
return collect_item_update(request, id)
return HttpResponse("{'error': 'Wrong request'}", status=400, content_type="application/json")
@login_required
def collect_items(request):
""" Binds list and crate requests to the same URL"""
if request.method == 'GET':
return collect_items_get(request)
elif request.method == 'POST':
return collect_item_create(request)
else:
return HttpResponse("{'error': 'Wrong request'}", status=400, content_type="application/json")
# =========================================================================
@login_required
def collect_tags(request):
""" Retrieve a user's tags """
tags = CollectTag.objects.filter(user__exact=request.user)\
.values('tag').annotate(Count('tag')).order_by('-tag__count')
response = [{'tag': t['tag'], 'count': t['tag__count']}\
for t in tags]
return HttpResponse(json.dumps(response), content_type="application/json")
@login_required
def autocomplete(request):
""" Autocomplete for tagging, returns tags matching input """
try:
term = request.GET['term']
except KeyError:
return HttpResponse(json.dumps({}), status=400, content_type="application/json")
#tags = CollectTag.objects.values('tag').distinct('tag').filter(tag__icontains=term, user__exact=request.user) # for advanced DB
    tags = CollectTag.objects.values('tag').filter(tag__icontains=term, user__exact=request.user) # for sqlite
response = [t['tag'] for t in tags]
#TBD: turn on distinct for MySQL, remove manual
    response = list(set(response)) # for sqlite
return HttpResponse(json.dumps(response), content_type="application/json")
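# Hedged wiring sketch (added for illustration; the URL patterns and the module
# path "views" are assumptions, not taken from the original project):
#
#   urlpatterns = [
#       url(r'^items/$', views.collect_items),
#       url(r'^items/(?P<id>\d+)/$', views.collect_item),
#       url(r'^tags/$', views.collect_tags),
#       url(r'^autocomplete/$', views.autocomplete),
#   ]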
|
#!/usr/bin/env python3
import os
import subprocess
import json
# Text styling class
class Text:
HEADER = '\033[1;34m'
SUCCESS = '\033[1;32m'
    FAIL = '\033[1;31m'
ENDC = '\033[0m'
# Shell commands class
class Commands:
INSTALL_PY_DEPS = 'sudo apt-get install -y python3 python3-distutils python3-pip python3-setuptools python3-venv'
CLONE_PIIRBLASTER = 'git clone https://github.com/Electronya/PiirBlaster.git'
CREATE_PIIRBLASTER_SVC = 'sudo cp ./PiirBlaster/scripts/services/piirblaster.service /etc/systemd/system'
ENABLE_PIIRBLASTER_SVC = 'sudo systemctl enable piirblaster.service'
START_PIIRBLASTER_SVC = 'sudo systemctl start piirblaster.service'
    CREATE_VIRTUAL_ENV = 'python3 -m venv venv'
INSTALL_DEPENDENCIES = 'venv/bin/pip install -r requirements.txt'
DWNLD_PIGPIO = 'wget https://github.com/joan2937/pigpio/archive/master.zip'
UNZIP_PIGPIO = 'unzip master.zip'
BUILD_PIGPIO = 'make'
INSTALL_PIGPIO = 'sudo make install'
CREATE_PIGPIO_SVC = 'sudo cp ./PiirBlaster/scripts/services/pigpiod.service /etc/systemd/system'
ENABLE_PIGPIO_SVC = 'sudo systemctl enable pigpiod.service'
START_PIGPIO_SVC = 'sudo systemctl start pigpiod.service'
# Execute shell command
def execCommand(command):
process = subprocess.run(command.split(' '))
return process.returncode
# Download PIGPIO
def downloadPIGPIO():
print(f"{Text.HEADER}*** DOWNLOADING PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.DWNLD_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO DOWNLOAD FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO DOWNLOAD DONE{Text.ENDC}")
return True
# Unzip PIGPIO
def unzipPIGPIO():
print(f"{Text.HEADER}*** UNZIPPNG PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.UNZIP_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO UNZIP FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO UNZIP DONE{Text.ENDC}")
return True
# Building PIGPIO
def buildPIGPIO():
print(f"{Text.HEADER}*** BUILDING PIGPIO ***{Text.ENDC}")
os.chdir('pigpio-master')
cmdResult = execCommand(Commands.BUILD_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO BUILD FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO BUILD DONE{Text.ENDC}")
return True
# Install PIGPIO
def installPIGPIO():
print(f"{Text.HEADER}*** INSTALLING PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_PIGPIO)
    if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO INSTALL FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO INSTALL DONE{Text.ENDC}")
return True
# Creating PIGPIO service
def createPigpioSvc():
print(f"{Text.HEADER}*** CREATING PIGPIO SERVICE ***{Text.ENDC}")
os.chdir('..')
cmdResult = execCommand(Commands.CREATE_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Enabling PIGPIO service
def enablePigpioSvc():
print(f"{Text.HEADER}*** ENABLING PIGPIO SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.ENABLE_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}ENABLING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}ENABLING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Starting PIGPIO service
def startPigpioSvc():
print(f"{Text.HEADER}*** STARTING PIGPIO SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.START_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}STARTING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}STARTING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Setup PIGPIO service
def setupPigpioSvc():
# TODO: Check if sevice is already installed & Split in multiple functions
print(f"{Text.HEADER}*** SETTING UP PIGPIO SERVICE ***{Text.ENDC}")
if (downloadPIGPIO() and unzipPIGPIO() and buildPIGPIO() and installPIGPIO() and
createPigpioSvc() and enablePigpioSvc() and startPigpioSvc()):
print(f"{Text.SUCCESS}SETTING UP PIGPIO SERVICE DONE{Text.ENDC}")
return True
print(f"{Text.FAIL}SETTING UP PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
# Install Python dependencies
def installPythonDeps():
print(f"{Text.HEADER}*** INSTALLING PYTHON DEPENDENCIES ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_PY_DEPS)
if cmdResult != 0:
print(f"{Text.FAIL}INSTALLING PYTHON DEPS FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}INTALLING PYTHON DEPS DONE{Text.ENDC}")
return True
# Clone PiirBlaster repo
def clonePiirBlaster():
print(f"{Text.HEADER}*** CLONING PiirBlaster REPO ***{Text.ENDC}")
cmdResult = execCommand(Commands.CLONE_PIIRBLASTER)
if cmdResult != 0:
print(f"{Text.FAIL}CLONING PiirBlaster FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CLONING PiirBlaster DONE{Text.ENDC}")
return True
# Ask for MQTT configuration
def getMqttConfig():
print(f"{Text.HEADER}*** GATTERING MQTT CONFIGURATION INFO ***{Text.ENDC}")
os.chdir('PiirBlaster')
with open('config/components/mqtt.json', 'r+') as mqttConfigFile:
mqttConfig = json.loads(mqttConfigFile.read())
mqttConfig['broker']['hostname'] = input('Please enter the hostname/ip of the broker: ')
mqttConfig['broker']['port'] = int(input(f"Please enter the broker port [{mqttConfig['broker']['port']}]: ") or mqttConfig['broker']['port'])
mqttConfig['user']['name'] = input('Please enter the service username: ')
mqttConfig['user']['password'] = input('Please enter the service password: ')
newContent = json.dumps(mqttConfig, sort_keys=True, indent=2)
        # rewind and truncate so the new config replaces the old content instead of appending to it
        mqttConfigFile.seek(0)
        mqttConfigFile.write(newContent)
        mqttConfigFile.truncate()
os.chdir('..')
# Creating virtual environment
def createVirtualEnv():
print(f"{Text.HEADER}*** CREATING VIRTUAL ENVIRONMENT ***{Text.ENDC}")
os.chdir('PiirBlaster')
    cmdResult = execCommand(Commands.CREATE_VIRTUAL_ENV)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING VIRTUAL ENVIRONEMENT FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING VIRTUAL ENVIRONMENT DONE{Text.ENDC}")
return True
# Install dependencies
def installDependencies():
print(f"{Text.HEADER}*** INSTALLING PiirBlaster DEPENDENCIES ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_DEPENDENCIES)
if cmdResult != 0:
print(f"{Text.FAIL}INSTALLING PiirBlaster DEPENDENCIES FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}INSTALLING PiirBlaster DEPENDENCIES DONE{Text.ENDC}")
os.chdir('..')
return True
# Create PiirBlaster service
def createPiirBlasterSvc():
print(f"{Text.HEADER}*** CREATING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.CREATE_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Enabling PiirBlaster Service
def enablePiirBlasterSvc():
print(f"{Text.HEADER}*** ENABLING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.ENABLE_PIIRBLASTER_SVC)
if cmdResult != 0:
        print(f"{Text.FAIL}ENABLING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}ENABLING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Start PiirBlaster Service
def startPiirBlasterSvc():
print(f"{Text.HEADER}*** STARTING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.START_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}STARTING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}STARTING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Setup PiirBlaster service
def setupPiirBlasterSvc():
    # TODO: Check if service is already installed
getMqttConfig()
print(f"{Text.HEADER}*** SETTING UP PiirBlaster SERVICE ***{Text.ENDC}")
if (createVirtualEnv() and installDependencies() and createPiirBlasterSvc() and
enablePiirBlasterSvc() and startPiirBlasterSvc()):
print(f"{Text.SUCCESS}SETTING UP PiirBlaster SERVICE DONE{Text.ENDC}")
return True
print(f"{Text.FAIL}SETTING UP PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
# print(f"{Text.HEADER}*** SERVICE CONFIGURATION ***{Text.ENDC}")
# Ask for the hostname the service will use for advertising
# hostname = input(f"Please enter the hostname that the service will use for advertising:")
# Install PiirBlaster
def install():
if (installPythonDeps() and clonePiirBlaster() and setupPigpioSvc() and setupPiirBlasterSvc()):
        print(f"{Text.SUCCESS}INSTALLING PiirBlaster SERVICE DONE{Text.ENDC}")
exit()
    print(f"{Text.FAIL}INSTALLING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
if __name__ == '__main__':
install()
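# Added note (not part of the original script): every setup step above follows
# the same "print header -> execCommand -> print success/failure" pattern. A
# hypothetical helper along these lines could factor that out; it assumes the
# same Text and Commands objects defined earlier in this script.
#
# def runStep(label, command):
#     print(f"{Text.HEADER}*** {label} ***{Text.ENDC}")
#     if execCommand(command) != 0:
#         print(f"{Text.FAIL}{label} FAILED!!!{Text.ENDC}")
#         return False
#     print(f"{Text.SUCCESS}{label} DONE{Text.ENDC}")
#     return True
#
# Example: runStep("STARTING PIGPIO SERVICE", Commands.START_PIGPIO_SVC)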
|
from spinta import commands
from spinta.components import Context
from spinta.datasets.backends.sqldump.components import SqlDump
@commands.wait.register(Context, SqlDump)
def wait(context: Context, backend: SqlDump, *, fail: bool = False) -> bool:
# SqlDump is not a real backend.
return True
|
import pytest
from hat.event.server import common
import hat.event.server.backends.sqlite
pytestmark = pytest.mark.asyncio
@pytest.fixture
def db_path(tmp_path):
return tmp_path / 'sqlite.db'
async def test_create(db_path):
conf = {'module': 'hat.event.server.backends.sqlite',
'db_path': str(db_path),
'query_pool_size': 1}
backend = await hat.event.server.backends.sqlite.create(conf)
assert isinstance(backend, common.Backend)
assert backend.is_open
await backend.async_close()
assert backend.is_closed
async def test_get_last_event_id():
# TODO
pass
async def test_register():
# TODO
pass
async def test_query():
# TODO
pass
|
import codecs
import os
import tempfile
import pytest
from pji.service.dispatch import Dispatch, DispatchTemplate
from .base import DISPATCH_TEMPLATE, TASK_TEMPLATE_SUCCESS_1, TASK_TEMPLATE_SUCCESS_2, TASK_TEMPLATE_FAILURE_1
from ..section.section.base import COMPLEX_TEXT
# noinspection DuplicatedCode
@pytest.mark.unittest
class TestServiceDispatchDispatch:
def test_template_simple(self):
dt = DISPATCH_TEMPLATE
assert set(dt.tasks.items.keys()) == {'task2_${NAME}', 'task3_${NAME}', 'task1_${NAME}'}
assert dt.global_.environ == {
'K': 'tc', 'V': '233', 'VF': 'gtc',
'PATH': '/root/bin:${PATH}',
}
assert repr(dt) == "<DispatchTemplate tasks: ('task1_${NAME}', 'task2_${NAME}', 'task3_${NAME}')>"
def test_template_call(self):
d = DISPATCH_TEMPLATE(scriptdir='.')
assert isinstance(d, Dispatch)
assert d.global_.environ == {
'K': 'tc', 'V': '233', 'VF': 'gtc',
'PATH': '/root/bin:' + os.environ['PATH'],
}
assert set(d.tasks.items.keys()) == {'task3_xtcx', 'task1_xtcx', 'task2_xtcx'}
assert repr(d) == "<Dispatch tasks: ('task1_xtcx', 'task2_xtcx', 'task3_xtcx')>"
def test_template_loads(self):
assert DispatchTemplate.loads(DISPATCH_TEMPLATE) == DISPATCH_TEMPLATE
assert DispatchTemplate.loads({
'global': dict(
environ=dict(V='233', K='tc', VF='gtc', PATH='/root/bin:${PATH}'),
use_sys_env=['PATH'],
),
'tasks': [
TASK_TEMPLATE_SUCCESS_1,
TASK_TEMPLATE_SUCCESS_2,
TASK_TEMPLATE_FAILURE_1,
],
}).global_.environ == {
'K': 'tc', 'V': '233', 'VF': 'gtc',
'PATH': '/root/bin:${PATH}',
}
with pytest.raises(TypeError):
DispatchTemplate.loads(123)
def test_dispatch_call(self):
with tempfile.TemporaryDirectory() as scriptdir:
with codecs.open(os.path.join(scriptdir, 'README.md'), 'w') as of:
of.write(COMPLEX_TEXT)
d = DISPATCH_TEMPLATE(scriptdir=scriptdir)
_success, _results = d('task2_xtcx')
assert _success
_name_1, (_section_1_success, _section_1_results, _section_1_info) = _results[0]
assert _name_1 == 'name_233'
assert _section_1_success
assert len(_section_1_results) == 4
assert _section_1_results[0].ok
assert _section_1_results[1].ok
assert _section_1_results[2].ok
assert _section_1_results[3].ok
assert _section_1_info == {'static': 'this is v : 233', 'value': 233,
'local': 'I have a dream that one day, down in Alabama, with its '
'vicious racists, \nwith its governor having his lips '
'dripping with the words of "interposition" and "nullification"\n'
' -- one day right there in Alabama little black boys and black '
'girls will be able to join \n hands with little white boys and '
'white girls as sisters and brothers.',
'tag': 'I have a dream that one day, down in Alabama, with its vicious '
'racists, \nwith its governor having his lips dripping with the '
'words of "interposition" and "nullification"\n -- one day right '
'there in Alabama little black boys and black girls will be able to '
'join \n hands with little white boys and white girls as sisters '
'and brothers.',
'base64': 'SSBoYXZlIGEgZHJlYW0gdGhhdCBvbmUgZGF5LCBkb3duIGluIEFsYWJhbWEsIHd'
'pdGggaXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhh'
'dmluZyBoaXMgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50Z'
'XJwb3NpdGlvbiIgYW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaW'
'dodCB0aGVyZSBpbiBBbGFiYW1hIGxpdHRsZSBibGFjayBib3lzIGFuZCBi\nbGF'
'jayBnaXJscyB3aWxsIGJlIGFibGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dG'
'xlIHdoaXRl\nIGJveXMgYW5kIHdoaXRlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIGJ'
'yb3RoZXJzLg==\n'}
_name_2, (_section_2_success, _section_2_results, _section_2_info) = _results[1]
assert _name_2 == 'name_2_gtc233'
assert _section_2_success
assert len(_section_2_results) == 3
assert _section_2_results[0].ok
assert _section_2_results[1].ok
assert _section_2_results[2].ok
assert _section_2_info == {'static': 'this is vt : gtc233',
'tag_1': 'I have a dream that one day, down in Alabama, with its vicious '
'racists, \nwith its governor having his lips dripping with the '
'words of "interposition" and "nullification"\n -- one day right '
'there in Alabama little black boys and black girls will be able '
'to join \n hands with little white boys and white girls as sisters '
'and brothers.',
'tag_2': 'SSBoYXZlIGEgZHJlYW0gdGhhdCBvbmUgZGF5LCBkb3duIGluIEFsYWJhbWEsIHdpdGgg'
'aXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhhdmluZyBoaX'
'MgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50ZXJwb3NpdGlvbiIg'
'YW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaWdodCB0aGVyZSBpbiBBbG'
'FiYW1hIGxpdHRsZSBibGFjayBib3lzIGFuZCBi\nbGFjayBnaXJscyB3aWxsIGJlIGFi'
'bGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dGxlIHdoaXRl\nIGJveXMgYW5kIHdoaX'
'RlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIGJyb3RoZXJzLg==\n',
'tag_3t': 'sys\n',
'tag_4t': 'SSBoYXZlIGEgZHJlYW0gdGhhdCBvbmUgZGF5LCBkb3duIGluIEFsYWJhbWEsIHdpdGg'
'gaXRzIHZp\nY2lvdXMgcmFjaXN0cywgCndpdGggaXRzIGdvdmVybm9yIGhhdmluZyBo'
'aXMgbGlwcyBkcmlwcGlu\nZyB3aXRoIHRoZSB3b3JkcyBvZiAiaW50ZXJwb3NpdGlvb'
'iIgYW5kICJudWxsaWZpY2F0aW9uIgog\nLS0gb25lIGRheSByaWdodCB0aGVyZSBpbi'
'BBbGFiYW1hIGxpdHRsZSBibGFjayBib3lzIGFuZCBi\nbGFjayBnaXJscyB3aWxsIGJ'
'lIGFibGUgdG8gam9pbiAKIGhhbmRzIHdpdGggbGl0dGxlIHdoaXRl\nIGJveXMgYW5k'
'IHdoaXRlIGdpcmxzIGFzIHNpc3RlcnMgYW5kIGJyb3RoZXJzLg==\n',
'tag_5t': 'U1NCb1lYWmxJR0VnWkhKbFlXMGdkR2hoZENCdmJtVWdaR0Y1TENCa2IzZHVJR2x1SUV'
'Gc1lXSmhi\nV0VzSUhkcGRHZ2dhWFJ6SUhacApZMmx2ZFhNZ2NtRmphWE4wY3l3Z0Nu'
'ZHBkR2dnYVhSeklHZHZk\nbVZ5Ym05eUlHaGhkbWx1WnlCb2FYTWdiR2x3Y3lCa2Ntb'
'HdjR2x1Clp5QjNhWFJvSUhSb1pTQjNi\nM0prY3lCdlppQWlhVzUwWlhKd2IzTnBkR2'
'x2YmlJZ1lXNWtJQ0p1ZFd4c2FXWnBZMkYwYVc5dUln\nb2cKTFMwZ2IyNWxJR1JoZVN'
'CeWFXZG9kQ0IwYUdWeVpTQnBiaUJCYkdGaVlXMWhJR3hwZEhSc1pT\nQmliR0ZqYXlC'
'aWIzbHpJR0Z1WkNCaQpiR0ZqYXlCbmFYSnNjeUIzYVd4c0lHSmxJR0ZpYkdVZ2RH\nO'
'GdhbTlwYmlBS0lHaGhibVJ6SUhkcGRHZ2diR2wwZEd4bElIZG9hWFJsCklHSnZlWE1n'
'WVc1a0lI\nZG9hWFJsSUdkcGNteHpJR0Z6SUhOcGMzUmxjbk1nWVc1a0lHSnliM1JvW'
'lhKekxnPT0K\n'}
|
#!/usr/bin/env python
# ===============================================================================
# dMRIharmonization (2018) pipeline is written by-
#
# TASHRIF BILLAH
# Brigham and Women's Hospital/Harvard Medical School
# tbillah@bwh.harvard.edu, tashrifbillah@gmail.com
#
# ===============================================================================
# See details at https://github.com/pnlbwh/dMRIharmonization
# Submit issues at https://github.com/pnlbwh/dMRIharmonization/issues
# View LICENSE at https://github.com/pnlbwh/dMRIharmonization/blob/master/LICENSE
# ===============================================================================
from plumbum.cmd import antsApplyTransforms
from plumbum import FG
import multiprocessing
import numpy as np
from test_util import *
import argparse
from conversion import read_imgs, read_imgs_masks
from harm_plot import harm_plot, generate_csv
ROOTDIR = abspath(pjoin(LIBDIR, '..'))
mniTmp = pjoin(ROOTDIR, 'IITAtlas', 'IITmean_FA.nii.gz')
diffusionMeasures = ['MD', 'FA', 'GFA']
def antsReg(img, mask, mov, outPrefix, n_thread=1):
if mask:
p = Popen((' ').join(['antsRegistrationSyNQuick.sh',
'-d', '3',
'-f', img,
'-x', mask,
'-m', mov,
'-n', str(n_thread),
'-o', outPrefix]), shell=True)
p.wait()
else:
p = Popen((' ').join(['antsRegistrationSyNQuick.sh',
'-d', '3',
'-f', img,
'-m', mov,
'-n', str(n_thread),
'-o', outPrefix]), shell=True)
p.wait()
def register_subject(imgPath, warp2mni, trans2mni, templatePath, siteName):
print(f'Warping {imgPath} diffusion measures to standard space')
directory = dirname(imgPath)
outPrefix = pjoin(templatePath, imgPath.split(
'.nii')[0]) # should have _FA at the end
prefix = psplit(outPrefix)[-1].replace('_FA', '')
dmTmp = pjoin(templatePath, f'Mean_{siteName}_FA.nii.gz')
maskTmp = pjoin(templatePath, f'{siteName}_Mask.nii.gz')
warp2tmp = outPrefix + '1Warp.nii.gz'
trans2tmp = outPrefix + '0GenericAffine.mat'
    # signal reconstruction might change with zero padding size, median filtering kernel size, and harmonized mask,
    # so if debugging is repeated, redo the registration
antsReg(dmTmp, maskTmp, imgPath, outPrefix)
for dm in diffusionMeasures:
output = pjoin(templatePath, prefix + f'_InMNI_{dm}.nii.gz')
moving = pjoin(directory, prefix + f'_{dm}.nii.gz')
# warp diffusion measure to template space first, then to MNI space
antsApplyTransforms[
'-d', '3',
'-i', moving,
'-o', output,
'-r', mniTmp,
'-t', warp2mni, trans2mni, warp2tmp, trans2tmp
] & FG
return pjoin(templatePath, prefix + f'_InMNI_FA.nii.gz')
def sub2tmp2mni(templatePath, siteName, faImgs, N_proc):
# obtain the transform
moving = pjoin(templatePath, f'Mean_{siteName}_FA.nii.gz')
outPrefix = pjoin(templatePath, f'TemplateToMNI_{siteName}')
warp2mni = outPrefix + '1Warp.nii.gz'
trans2mni = outPrefix + '0GenericAffine.mat'
    # the template is created once; it is expected that the user wants to keep the same template during debugging,
    # so if debugging is repeated, skip the registration
if not isfile(warp2mni):
antsReg(mniTmp, None, moving, outPrefix, 8)
pool = multiprocessing.Pool(N_proc)
res = []
for imgPath in faImgs:
res.append(
pool.apply_async(
func=register_subject,
args=(
imgPath,
warp2mni,
trans2mni,
templatePath,
siteName,
)))
mniFAimgs = [r.get() for r in res]
pool.close()
pool.join()
return mniFAimgs
def analyzeStat(faImgs):
    '''
    :param faImgs: list of (FA or MD or GFA) images that are already in MNI space
    :return: list of mean values, one per image, computed over the FA skeleton
    '''
skel = load(pjoin(ROOTDIR, 'IITAtlas', 'IITmean_FA_skeleton.nii.gz'))
skel_mask = (skel.get_data() > 0) * 1.
meanAttr = []
for faImg in faImgs:
data = load(faImg).get_data()
temp = data * skel_mask
meanAttr.append(temp[temp > 0].mean())
return meanAttr
def main():
parser = argparse.ArgumentParser(
description='''Warps diffusion measures (FA, MD, GFA) to template space
and then to MNI space. Finally, calculates mean FA over IITmean_FA_skeleton.nii.gz''')
parser.add_argument(
'-i',
'--input',
type=str,
required=True,
help='a .txt/.csv file that you used in/obtained from harmonization.py having two columns for (img,mask) pair. '
'See documentation for more details')
parser.add_argument(
'-s',
'--site',
type=str,
required=True,
help='site name for locating template FA and mask in template directory')
parser.add_argument(
'-t',
'--template',
type=str,
required=True,
help='template directory where Mean_{site}_FA.nii.gz and {site}_Mask.nii.gz is located')
parser.add_argument('--ncpu', help='number of cpus to use', default='4')
args = parser.parse_args()
imgList = abspath(args.input)
siteName = args.site
templatePath = abspath(args.template)
N_proc = int(args.ncpu)
# read FA image list
try:
imgs, _ = read_imgs_masks(imgList)
print('(Img,Mask) list is provided. FA images are assumed to be directoryOfImg/dti/ImgPrefix_FA.nii.gz, make sure they are there\n')
faImgs = []
for imgPath in imgs:
directory = dirname(imgPath)
prefix = basename(imgPath).split('.nii')[0]
faImg = pjoin(directory, 'dti', prefix + '_FA.nii.gz')
if not isfile(faImg):
raise FileNotFoundError(
f'{faImg} not found. Did you run \"--create --debug\" and \"--process --debug\" before?')
faImgs.append(faImg)
except BaseException:
faImgs = read_imgs(imgList)
print('FA image list is provided.')
# register and obtain *_InMNI_FA.nii.gz
mniFAimgs = sub2tmp2mni(templatePath, siteName, faImgs, N_proc)
# target harmonized
if imgList.endswith('.modified.harmonized'):
header = siteName + '_after'
# reference
elif imgList.endswith('.modified'):
header = siteName
# target unprocessed
else:
header = siteName + '_before'
# FIXME user FA image list will use the header {siteName+'_before'}, which is not correct all the time
# as shown in the above block:
# reference should use {siteName} while harmonized target should use {siteName+'_after'}
# impact of this discrepancy is minor since we deprecated use of FA image
# list
outPrefix = pjoin(templatePath, header)
print('\n\nComputing statistics\n\n')
print(f'{siteName} site: ')
site_means = analyzeStat(mniFAimgs)
generate_csv(faImgs, site_means, outPrefix)
# save statistics for future
statFile = pjoin(templatePath, 'meanFAstat.csv')
with open(statFile, 'a') as f:
f.write(
datetime.now().strftime('%m/%d/%y %H:%M') +
',mean meanFA,std meanFA\n')
f.write(f'{header},{np.mean(site_means)},{np.std(site_means)}\n')
# print an empty line so future results, if appended, are visually
# separate
f.write('\n')
# print statistics on console
print('\n\nPrinting statistics\n\n')
with open(statFile) as f:
print(f.read())
# generate demonstrative plots
ebar = harm_plot([site_means], [header], outPrefix)
print(
f'\nDetailed statistics, summary results, and demonstrative plots are saved in:\n\n{outPrefix}_stat.csv'
f'\n{statFile}\n{ebar}\n')
if __name__ == '__main__':
main()
|
import math
import random
from contextlib import suppress
from decimal import Decimal
from functools import wraps
from django.apps import apps
from django.contrib.auth.models import AbstractUser, UserManager
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.base import ContentFile
from django.core.validators import MinLengthValidator
from django.db import models
from django.db.models import BooleanField, Case, Count, F, OuterRef, Q, Sum, When
from django.db.models.functions import Coalesce
from django.shortcuts import reverse
from django.template import defaultfilters
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext, gettext_lazy as _, ngettext
from uuslug import uuslug
from dictionary.conf import settings
from dictionary.models.category import Category
from dictionary.models.entry import Entry
from dictionary.models.m2m import DownvotedEntries, UpvotedEntries
from dictionary.models.managers.author import AccountTerminationQueueManager, AuthorManagerAccessible, InNoviceList
from dictionary.utils import get_generic_superuser, parse_date_or_none, time_threshold
from dictionary.utils.db import SubQueryCount
from dictionary.utils.decorators import cached_context
from dictionary.utils.serializers import ArchiveSerializer
from dictionary.utils.validators import validate_username_partial
def usercache(initial_func=None, *, timeout=86400):
"""
Caches model method, uses model instance in cache key.
Basically a wrapper around cached_context.
"""
def inner(method):
@wraps(method)
def wrapped(self, *args, **kwargs):
return cached_context(prefix="usercache_" + method.__name__, vary_on_user=True, timeout=timeout)(
lambda user=None: method(self, *args, **kwargs)
)(user=self)
return wrapped
if initial_func:
return inner(initial_func)
return inner
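# Illustrative usage (added note, not part of the original module): methods
# decorated with @usercache are cached per Author instance, e.g.
#
#     @usercache(timeout=3600)
#     def expensive_summary(self):
#         return self.entry_set(manager="objects_published").count()
#
# Because cached_context is called with vary_on_user=True and user=self, the
# cache key includes the user's pk, so different authors never share a value.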
class AuthorNickValidator(UnicodeUsernameValidator):
regex = r"^[a-z0-9]+(\ [a-z0-9]+)*$"
message = _("unlike what you sent, an appropriate nickname would only consist of letters, numbers and spaces.")
class Author(AbstractUser):
class Gender(models.TextChoices):
MAN = "MN", _("male")
WOMAN = "WM", _("female")
OTHER = "OT", _("other")
UNKNOWN = "NO", _("forget it")
class Status(models.TextChoices):
PENDING = "PN", _("in novice list")
ON_HOLD = "OH", _("waiting for first ten entries")
APPROVED = "AP", _("authorship approved")
class MessagePref(models.TextChoices):
DISABLED = "DS", _("nobody")
ALL_USERS = "AU", _("authors and novices")
AUTHOR_ONLY = "AO", _("authors")
FOLLOWING_ONLY = "FO", _("people who i follow")
class Theme(models.TextChoices):
LIGHT = "light", _("Light")
DARK = "dark", _("Dark")
class EntryCount(models.IntegerChoices):
TEN = 10, "10"
THIRTY = 30, "30"
FIFTY = 50, "50"
HUNDRED = 100, "100"
class TopicCount(models.IntegerChoices):
THIRTY = 30, "30"
FIFTY = 50, "50"
SEVENTY_FIVE = 75, "75"
HUNDRED = 100, "100"
# Base auth related fields, notice: username field will be used for nicknames
username = models.CharField(
_("nickname"),
max_length=35,
unique=True,
help_text=_(
"the nickname that will represent you in the site."
" can be 3-35 characters long, can include only letters, numbers and spaces"
),
validators=[
validate_username_partial,
AuthorNickValidator(),
MinLengthValidator(3, _("this nickname is too tiny")),
],
error_messages={"unique": _("this nickname is already taken")},
)
slug = models.SlugField(max_length=35, unique=True, editable=False)
email = models.EmailField(_("e-mail"), unique=True)
is_active = models.BooleanField(default=False, verbose_name=_("active"))
# Base auth field settings
USERNAME_FIELD = "email"
# A list of the field names that will be prompted for when creating a user via the createsuperuser command.
REQUIRED_FIELDS = ["username", "is_active"]
# Novice application related fields
is_novice = models.BooleanField(db_index=True, default=True, verbose_name=_("Novice status"))
application_status = models.CharField(
max_length=2, choices=Status.choices, default=Status.ON_HOLD, verbose_name=_("Application status")
)
application_date = models.DateTimeField(null=True, blank=True, default=None, verbose_name=_("Application date"))
last_activity = models.DateTimeField(null=True, blank=True, default=None, verbose_name=_("Last activity as novice"))
queue_priority = models.PositiveSmallIntegerField(
default=0,
verbose_name=_("Queue priority"),
help_text=_("Novices with high priority are more likely to appear on the top of the novice list."),
)
# Accessibility details
suspended_until = models.DateTimeField(null=True, blank=True, default=None, verbose_name=_("Suspended until"))
is_frozen = models.BooleanField(default=False, verbose_name=_("Frozen status"))
is_private = models.BooleanField(default=False, verbose_name=_("Anonymous status"))
# User-user relations
following = models.ManyToManyField("self", blank=True, symmetrical=False, related_name="+")
blocked = models.ManyToManyField("self", blank=True, symmetrical=False, related_name="blocked_by")
# User-entry relations
favorite_entries = models.ManyToManyField(
"Entry", through="EntryFavorites", related_name="favorited_by", blank=True
)
upvoted_entries = models.ManyToManyField("Entry", through="UpvotedEntries", related_name="upvoted_by", blank=True)
downvoted_entries = models.ManyToManyField(
"Entry", through="DownvotedEntries", related_name="downvoted_by", blank=True
)
# User-category relations
following_categories = models.ManyToManyField("Category", blank=True)
allow_uncategorized = models.BooleanField(default=True)
# User-topic relations
following_topics = models.ManyToManyField("Topic", through="TopicFollowing", related_name="followers", blank=True)
# Personal info
birth_date = models.DateField(blank=True, null=True, verbose_name=_("Birth date"))
gender = models.CharField(max_length=2, choices=Gender.choices, default=Gender.UNKNOWN, verbose_name=_("Gender"))
# Preferences
entries_per_page = models.IntegerField(choices=EntryCount.choices, default=EntryCount.TEN)
topics_per_page = models.IntegerField(choices=TopicCount.choices, default=TopicCount.FIFTY)
message_preference = models.CharField(max_length=2, choices=MessagePref.choices, default=MessagePref.ALL_USERS)
pinned_entry = models.OneToOneField("Entry", blank=True, null=True, on_delete=models.SET_NULL, related_name="+")
allow_receipts = models.BooleanField(default=True)
allow_site_announcements = models.BooleanField(default=True)
theme = models.CharField(choices=Theme.choices, default=Theme.LIGHT, max_length=10)
# Other
karma = models.DecimalField(default=Decimal(0), max_digits=7, decimal_places=2, verbose_name=_("Karma points"))
badges = models.ManyToManyField("Badge", blank=True, verbose_name=_("Badges"))
announcement_read = models.DateTimeField(auto_now_add=True)
# https://docs.djangoproject.com/en/3.0/topics/db/managers/#django.db.models.Model._default_manager
objects = UserManager()
objects_accessible = AuthorManagerAccessible()
in_novice_list = InNoviceList()
class Meta:
permissions = (
("can_activate_user", _("Can access to the novice list")),
("suspend_user", _("Can suspend users")),
("can_clear_cache", _("Can clear cache")),
("can_comment", _("Can comment on entries")),
("can_suggest_categories", _("Can suggest categories for topics")),
)
verbose_name = _("author")
verbose_name_plural = _("authors")
def __str__(self):
return str(self.username)
def save(self, *args, **kwargs):
created = self.pk is None # If True, the user is created (not updated).
if created:
self.slug = uuslug(self.username, instance=self)
super().save(*args, **kwargs)
if created:
self.following_categories.add(*Category.objects.filter(is_default=True))
def delete(self, *args, **kwargs):
# Archive conversations of target users.
targeted_conversations = self.targeted_conversations.select_related("holder", "target").prefetch_related(
"messages"
)
for conversation in targeted_conversations:
conversation.archive()
return super().delete(*args, **kwargs)
def get_absolute_url(self):
return reverse("user-profile", kwargs={"slug": self.slug})
def get_following_topics_with_receipt(self):
"""Get user's following topics with read receipts."""
new_entries = (
Entry.objects.filter(topic=OuterRef("pk"), date_created__gte=OuterRef("topicfollowing__read_at"))
.exclude(Q(author=self) | Q(author__in=self.blocked.all()))
.only("id")
)
return self.following_topics.annotate(
count=SubQueryCount(new_entries),
last_read_at=F("topicfollowing__read_at"),
is_read=Case(When(Q(count__gt=0), then=False), default=True, output_field=BooleanField()),
)
def get_entry_count_by_threshold(self, **timedelta_kwargs):
return (
self.entry_set(manager="objects_published")
.filter(date_created__gte=time_threshold(**timedelta_kwargs))
.count()
)
@usercache
def get_best_entries(self):
return tuple(self.entry_set(manager="objects_published").filter(vote_rate__gt=0).order_by("-vote_rate")[:50])
def has_exceeded_vote_limit(self, against=None):
"""Check vote limits. This is done before the vote is registered."""
# Notice: couldn't filter on unions, so both models are explicitly written.
        h24 = {"date_created__gte": time_threshold(hours=24)}  # Filter objects that have been created in the last 24 hours.
upvoted = UpvotedEntries.objects.filter(author=self)
downvoted = DownvotedEntries.objects.filter(author=self)
daily_vote_count = upvoted.filter(**h24).count() + downvoted.filter(**h24).count()
if daily_vote_count >= settings.DAILY_VOTE_LIMIT:
return True, gettext("you have used up all the vote claims you have today. try again later.")
if against:
upvoted_against = upvoted.filter(entry__author=against).count()
downvoted_against = downvoted.filter(entry__author=against).count()
total_votes_against = upvoted_against + downvoted_against
if total_votes_against >= settings.TOTAL_VOTE_LIMIT_PER_USER:
return True, gettext("sorry, you have been haunting this person for a long time.")
daily_upvoted_against = upvoted.filter(entry__author=against, **h24).count()
daily_downvoted_against = downvoted.filter(entry__author=against, **h24).count()
daily_votes_against = daily_upvoted_against + daily_downvoted_against
if daily_votes_against >= settings.DAILY_VOTE_LIMIT_PER_USER:
return True, gettext("this person has taken enough of your votes today, maybe try other users?")
return False, None
def can_send_message(self, recipient=None):
if self == recipient:
return False
if self.username == settings.GENERIC_SUPERUSER_USERNAME:
return True
if (
(recipient.is_frozen or recipient.is_private or (not recipient.is_active))
or (recipient.message_preference == Author.MessagePref.DISABLED)
or (self.is_novice and recipient.message_preference == Author.MessagePref.AUTHOR_ONLY)
or (
recipient.message_preference == Author.MessagePref.FOLLOWING_ONLY
and not recipient.following.filter(pk=self.pk).exists()
)
or (recipient.blocked.filter(pk=self.pk).exists() or self.blocked.filter(pk=recipient.pk).exists())
):
return False
return True
@property
    def entry_publishable_status(self):
        """Return None if the user can publish new entries, otherwise an appropriate error message."""
if not self.is_accessible:
return gettext("you lack the required permissions.")
latest_entry_date = self.last_entry_date
if latest_entry_date is None:
return None
interval = settings.NOVICE_ENTRY_INTERVAL if self.is_novice else settings.AUTHOR_ENTRY_INTERVAL
delta = timezone.now() - latest_entry_date
if delta <= timezone.timedelta(seconds=interval):
remaining = interval - delta.seconds
return (
ngettext(
"you are sending entries too frequently. try again in a second.",
"you are sending entries too frequently. try again in %(remaining)d seconds.",
remaining,
)
% {"remaining": remaining}
)
return None
@property
def generation(self):
if settings.DISABLE_GENERATIONS:
return None
gen_start_date = parse_date_or_none(settings.FIRST_GENERATION_DATE)
if gen_start_date is None:
raise ValueError("Invalid configuration for 'FIRST_GENERATION_DATE'. Please provide a valid date.")
delta = self.date_joined - gen_start_date
return math.ceil((delta.days / settings.GENERATION_GAP_DAYS) or 1)
@cached_property
def karma_flair(self):
karma = round(self.karma)
if karma <= settings.KARMA_BOUNDARY_LOWER:
return f"{settings.UNDERWHELMING_KARMA_EXPRESSION} ({karma})"
if karma >= settings.KARMA_BOUNDARY_UPPER:
return f"{settings.OVERWHELMING_KARMA_EXPRESSION} ({karma})"
for key in settings.KARMA_EXPRESSIONS:
if karma in key:
return f"{settings.KARMA_EXPRESSIONS[key]} ({karma})"
return None
@property
def is_karma_eligible(self):
"""Eligible users will be able to influence other users' karma points by voting."""
return not (self.is_novice or self.is_suspended or self.karma <= settings.KARMA_BOUNDARY_LOWER)
@cached_property
@usercache
def entry_count(self):
return self.entry_set(manager="objects_published").count()
@cached_property
@usercache
def entry_count_month(self):
return self.get_entry_count_by_threshold(days=30)
@cached_property
@usercache
def entry_count_week(self):
return self.get_entry_count_by_threshold(days=7)
@cached_property
@usercache
def entry_count_day(self):
return self.get_entry_count_by_threshold(days=1)
@cached_property
@usercache
def last_entry_date(self):
with suppress(ObjectDoesNotExist):
return self.entry_set(manager="objects_published").latest("date_created").date_created
return None
def invalidate_entry_counts(self):
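        # Added note: these keys mirror the naming scheme produced by
        # cached_context inside the usercache decorator
        # ("usercache_<method>_context__<lambda>_usr<pk>"); if that scheme
        # changes, these manual invalidations must be updated as well.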
names = ("entry_count", "entry_count_month", "entry_count_week", "entry_count_day", "last_entry_date")
for name in names:
key = f"usercache_{name}_context__<lambda>_usr{self.pk}"
cache.delete(key)
@property
def followers(self):
return Author.objects.filter(following=self)
@cached_property
def entry_nice(self):
"""A random entry selected from the best entries of the user."""
best_entries = self.get_best_entries()
if not best_entries:
return None
return random.choice(best_entries)
@property
def email_confirmed(self):
return not self.userverification_set.filter(expiration_date__gte=time_threshold(hours=24)).exists()
@property
def is_suspended(self):
return self.suspended_until is not None and self.suspended_until > timezone.now()
@property
def is_accessible(self):
return not (self.is_hidden or self.is_suspended)
@property
def is_hidden(self):
return self.is_frozen or (not self.is_active) or self.is_private
@cached_property
def unread_message_count(self):
return self.conversations.aggregate(
count=Count("messages", filter=Q(messages__recipient=self, messages__read_at__isnull=True))
)["count"]
@cached_property
@usercache(timeout=60)
def unread_topic_count(self):
"""
        Find counts for unread topics and announcements (displayed in the header when applicable).
        This query is too expensive to run on every request, so the result is cached for
        <timeout> seconds. On the following-topics list page the cache is invalidated on each
        request, so that page is always fresh. Actions which might change this data also
        invalidate the cache, e.g. reading an unread topic or announcement.
"""
unread_announcements = (
(
apps.get_model("dictionary.Announcement")
.objects.filter(notify=True, date_created__lte=timezone.now(), date_created__gte=self.announcement_read)
.count()
)
if self.allow_site_announcements
else 0
)
unread_topics = self.get_following_topics_with_receipt().aggregate(sum=Coalesce(Sum("count"), 0))["sum"]
return {
"sum": unread_announcements + unread_topics,
"announcements": unread_announcements,
"topics": unread_topics,
}
def invalidate_unread_topic_count(self):
"""Invalidates cache of unread_topic_count, set by cached_context."""
return cache.delete(f"usercache_unread_topic_count_context__<lambda>_usr{self.pk}")
@cached_property
def novice_queue(self):
if self.last_activity < time_threshold(hours=24):
# NoviceActivityMiddleware ensures that logged in novices will always
# pass this check. It is not possible to see the queue of users
# with no activity in last one day.
return None
def interqueue(user):
active_siblings = Author.in_novice_list.annotate_activity(
Author.in_novice_list.exclude(pk=user.pk).filter(queue_priority=user.queue_priority)
).filter(is_active_today=True)
if active_siblings.exists():
return active_siblings.filter(application_date__lt=user.application_date).count() + 1
return 1
equal_and_superior = Author.in_novice_list.exclude(pk=self.pk).filter(queue_priority__gte=self.queue_priority)
if equal_and_superior.exists():
superior = equal_and_superior.filter(queue_priority__gt=self.queue_priority)
if superior_count := superior.count():
return superior_count + interqueue(self)
return interqueue(self)
return 1
class Memento(models.Model):
body = models.TextField(blank=True)
holder = models.ForeignKey(Author, on_delete=models.CASCADE)
patient = models.ForeignKey(Author, on_delete=models.CASCADE, related_name="+")
class Meta:
constraints = [models.UniqueConstraint(fields=["holder", "patient"], name="unique_memento")]
def __str__(self):
return f"{self.__class__.__name__}#{self.id}, from {self.holder} about {self.patient}"
class UserVerification(models.Model):
author = models.ForeignKey(Author, on_delete=models.CASCADE)
verification_token = models.CharField(max_length=128)
new_email = models.EmailField(blank=True) # new e-mail if it is subject to change
expiration_date = models.DateTimeField()
def save(self, *args, **kwargs):
UserVerification.objects.filter(author=self.author).delete()
super().save(*args, **kwargs)
class AccountTerminationQueue(models.Model):
class State(models.TextChoices):
NO_TRACE = "NT", _("delete account completely")
LEGACY = "LE", _("delete account with legacy")
FROZEN = "FZ", _("freeze account")
author = models.OneToOneField(Author, on_delete=models.CASCADE)
state = models.CharField(max_length=2, choices=State.choices, default=State.FROZEN, verbose_name=_("last words?"))
termination_date = models.DateTimeField(null=True, editable=False)
date_created = models.DateTimeField(auto_now_add=True)
objects = AccountTerminationQueueManager()
def __str__(self):
return f"{self.author}, status={self.state} to be terminated after: {self.termination_date or 'N/A'}"
def save(self, *args, **kwargs):
created = self.pk is None
if created:
self.author.is_frozen = True
self.author.save()
if self.state != self.State.FROZEN:
self.termination_date = timezone.now() + timezone.timedelta(hours=120)
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.author.is_frozen = False
self.author.save()
super().delete(*args, **kwargs)
class Badge(models.Model):
name = models.CharField(max_length=36, verbose_name=_("Name"))
description = models.TextField(blank=True, verbose_name=_("Description"))
url = models.URLField(
blank=True,
verbose_name=_("Link"),
help_text=_(
"The link to follow when users click the badge. If no link is provided, related topic will be used."
),
)
def __str__(self):
return str(self.name)
class Meta:
verbose_name = _("badge")
verbose_name_plural = _("badges")
def user_directory_backup(instance, _filename):
date_str = defaultfilters.date(timezone.localtime(timezone.now()), "Y-m-d")
return f"backup/{instance.author.pk}/backup-{date_str}.json"
class BackUp(models.Model):
author = models.ForeignKey("Author", on_delete=models.CASCADE)
file = models.FileField(upload_to=user_directory_backup)
is_ready = models.BooleanField(default=False)
date_created = models.DateTimeField(auto_now_add=True)
def process(self):
if self.is_ready:
return
serializer = ArchiveSerializer()
entries = self.author.entry_set(manager="objects_published").select_related("topic")
conversations = self.author.conversationarchive_set.all()
entries_text = serializer.serialize(entries, fields=("topic__title", "content", "date_created", "date_edited"))
conversations_text = (
"[%s]"
% "".join('{"target": "%s", "messages": %s},' % (item.target, item.messages) for item in conversations)[:-1]
) # Formatting already-serialized data ([:-1] removes the trailing comma).
content = '{"entries": %s, "conversations": %s}' % (entries_text, conversations_text)
self.is_ready = True
self.file.save("backup", ContentFile(content.encode("utf-8")), save=True)
settings.get_model("Message").objects.compose(
get_generic_superuser(),
self.author,
gettext(
"your backup is now ready. you may download your backup"
" file using the link provided in the backup tab of settings."
),
)
def process_async(self):
from dictionary.tasks import process_backup # noqa
process_backup.delay(self.pk)
def delete(self, **kwargs):
super().delete(**kwargs)
self.file.delete(save=False)
|
"""
Author: Yao Feng
Copyright (c) 2020, Yao Feng
All rights reserved.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage.io import imread
from pytorch3d.structures import Meshes
from pytorch3d.io import load_obj
from pytorch3d.renderer.mesh import rasterize_meshes
import util
class Pytorch3dRasterizer(nn.Module):
"""
    This class implements methods for rasterizing a batch of heterogeneous
Meshes.
Notice:
x,y,z are in image space
"""
def __init__(self, image_size=224):
"""
Args:
raster_settings: the parameters for rasterization. This should be a
named tuple.
All these initial settings can be overridden by passing keyword
arguments to the forward function.
"""
super().__init__()
raster_settings = {
'image_size': image_size,
'blur_radius': 0.0,
'faces_per_pixel': 1,
'bin_size': None,
'max_faces_per_bin': None,
'perspective_correct': False,
}
raster_settings = util.dict2obj(raster_settings)
self.raster_settings = raster_settings
def forward(self, vertices, faces, attributes=None):
"""
Args:
meshes_world: a Meshes object representing a batch of meshes with
coordinates in world space.
Returns:
Fragments: Rasterization outputs as a named tuple.
"""
        fixed_vertices = vertices.clone()
        fixed_vertices[..., :2] = -fixed_vertices[..., :2]
        meshes_screen = Meshes(verts=fixed_vertices.float(), faces=faces.long())
raster_settings = self.raster_settings
pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
meshes_screen,
image_size=raster_settings.image_size,
blur_radius=raster_settings.blur_radius,
faces_per_pixel=raster_settings.faces_per_pixel,
bin_size=raster_settings.bin_size,
max_faces_per_bin=raster_settings.max_faces_per_bin,
perspective_correct=raster_settings.perspective_correct,
)
vismask = (pix_to_face > -1).float()
D = attributes.shape[-1]
attributes = attributes.clone()
attributes = attributes.view(attributes.shape[0] * attributes.shape[1], 3, attributes.shape[-1])
N, H, W, K, _ = bary_coords.shape
mask = pix_to_face == -1 # []
pix_to_face = pix_to_face.clone()
pix_to_face[mask] = 0
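        # Added comment: gather the three per-vertex attribute vectors of the
        # face covering each pixel, then blend them with the barycentric
        # coordinates to obtain per-pixel interpolated attributes.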
idx = pix_to_face.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, D)
pixel_face_vals = attributes.gather(0, idx).view(N, H, W, K, 3, D)
pixel_vals = (bary_coords[..., None] * pixel_face_vals).sum(dim=-2)
pixel_vals[mask] = 0 # Replace masked values in output.
pixel_vals = pixel_vals[:, :, :, 0].permute(0, 3, 1, 2)
pixel_vals = torch.cat([pixel_vals, vismask[:, :, :, 0][:, None, :, :]], dim=1)
# import ipdb; ipdb.set_trace()
return pixel_vals
class Renderer(nn.Module):
def __init__(self, image_size, obj_filename, uv_size=256):
super(Renderer, self).__init__()
self.image_size = image_size
self.uv_size = uv_size
verts, faces, aux = load_obj(obj_filename)
uvcoords = aux.verts_uvs[None, ...] # (N, V, 2)
uvfaces = faces.textures_idx[None, ...] # (N, F, 3)
faces = faces.verts_idx[None, ...]
self.rasterizer = Pytorch3dRasterizer(image_size)
self.uv_rasterizer = Pytorch3dRasterizer(uv_size)
# faces
self.register_buffer('faces', faces)
self.register_buffer('raw_uvcoords', uvcoords)
        # uv coords
uvcoords = torch.cat([uvcoords, uvcoords[:, :, 0:1] * 0. + 1.], -1) # [bz, ntv, 3]
uvcoords = uvcoords * 2 - 1
uvcoords[..., 1] = -uvcoords[..., 1]
face_uvcoords = util.face_vertices(uvcoords, uvfaces)
self.register_buffer('uvcoords', uvcoords)
self.register_buffer('uvfaces', uvfaces)
self.register_buffer('face_uvcoords', face_uvcoords)
# shape colors
colors = torch.tensor([74, 120, 168])[None, None, :].repeat(1, faces.max() + 1, 1).float() / 255.
face_colors = util.face_vertices(colors, faces)
self.register_buffer('face_colors', face_colors)
## lighting
pi = np.pi
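        # Added comment: these are the band 0-2 spherical harmonics constants
        # (in the convention used here) that scale the 9 SH basis functions
        # assembled in add_SHlight below.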
constant_factor = torch.tensor(
[1 / np.sqrt(4 * pi), ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), \
((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), (pi / 4) * (3) * (np.sqrt(5 / (12 * pi))),
(pi / 4) * (3) * (np.sqrt(5 / (12 * pi))), \
(pi / 4) * (3) * (np.sqrt(5 / (12 * pi))), (pi / 4) * (3 / 2) * (np.sqrt(5 / (12 * pi))),
(pi / 4) * (1 / 2) * (np.sqrt(5 / (4 * pi)))])
self.register_buffer('constant_factor', constant_factor)
def forward(self, vertices, transformed_vertices, albedos, lights=None, light_type='point'):
        '''
        lights:
            spherical harmonic: [N, 9 (sh coeff), 3 (rgb)]
        vertices: [N, V, 3], vertices in world space, for calculating normals, then shading
        transformed_vertices: [N, V, 3], range(-1, 1), projected vertices, for rendering
        '''
batch_size = vertices.shape[0]
## rasterizer near 0 far 100. move mesh so minz larger than 0
# transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10
# Attributes
face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1))
face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1))
transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))
# render
attributes = torch.cat([self.face_uvcoords.expand(batch_size, -1, -1, -1), transformed_face_normals.detach(),
face_vertices.detach(), face_normals.detach()], -1)
# import ipdb;ipdb.set_trace()
rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)
alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
# albedo
uvcoords_images = rendering[:, :3, :, :]
grid = (uvcoords_images).permute(0, 2, 3, 1)[:, :, :, :2]
albedo_images = F.grid_sample(albedos, grid, align_corners=False)
# remove inner mouth region
transformed_normal_map = rendering[:, 3:6, :, :].detach()
pos_mask = (transformed_normal_map[:, 2:, :, :] < -0.05).float()
# shading
if lights is not None:
normal_images = rendering[:, 9:12, :, :].detach()
if lights.shape[1] == 9:
shading_images = self.add_SHlight(normal_images, lights)
else:
if light_type == 'point':
vertice_images = rendering[:, 6:9, :, :].detach()
shading = self.add_pointlight(vertice_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),
normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),
lights)
shading_images = shading.reshape(
[batch_size, lights.shape[1], albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 1,
4, 2,
3)
shading_images = shading_images.mean(1)
else:
shading = self.add_directionlight(normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),
lights)
shading_images = shading.reshape(
[batch_size, lights.shape[1], albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 1,
4, 2,
3)
shading_images = shading_images.mean(1)
images = albedo_images * shading_images
else:
images = albedo_images
shading_images = images.detach() * 0.
outputs = {
'images': images * alpha_images,
'albedo_images': albedo_images,
'alpha_images': alpha_images,
'pos_mask': pos_mask,
'shading_images': shading_images,
'grid': grid,
'normals': normals
}
return outputs
def add_SHlight(self, normal_images, sh_coeff):
'''
sh_coeff: [bz, 9, 3]
'''
N = normal_images
sh = torch.stack([
N[:, 0] * 0. + 1., N[:, 0], N[:, 1], \
N[:, 2], N[:, 0] * N[:, 1], N[:, 0] * N[:, 2],
N[:, 1] * N[:, 2], N[:, 0] ** 2 - N[:, 1] ** 2, 3 * (N[:, 2] ** 2) - 1
],
1) # [bz, 9, h, w]
sh = sh * self.constant_factor[None, :, None, None]
# import ipdb; ipdb.set_trace()
shading = torch.sum(sh_coeff[:, :, :, None, None] * sh[:, :, None, :, :], 1) # [bz, 9, 3, h, w]
return shading
def add_pointlight(self, vertices, normals, lights):
'''
vertices: [bz, nv, 3]
lights: [bz, nlight, 6]
returns:
shading: [bz, nv, 3]
'''
        light_positions = lights[:, :, :3]
        light_intensities = lights[:, :, 3:]
directions_to_lights = F.normalize(light_positions[:,:,None,:] - vertices[:,None,:,:], dim=3)
# normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)
normals_dot_lights = (normals[:,None,:,:]*directions_to_lights).sum(dim=3)
shading = normals_dot_lights[:,:,:,None]*light_intensities[:,:,None,:]
return shading.mean(1)
def add_directionlight(self, normals, lights):
'''
normals: [bz, nv, 3]
lights: [bz, nlight, 6]
returns:
            shading: [bz, nlight, nv, 3]
'''
        light_direction = lights[:, :, :3]
        light_intensities = lights[:, :, 3:]
directions_to_lights = F.normalize(light_direction[:, :, None, :].expand(-1, -1, normals.shape[1], -1), dim=3)
normals_dot_lights = (normals[:,None,:,:]*directions_to_lights).sum(dim=3)
shading = normals_dot_lights[:, :, :, None] * light_intensities[:, :, None, :]
return shading
def render_shape(self, vertices, transformed_vertices, images=None, lights=None):
batch_size = vertices.shape[0]
if lights is None:
light_positions = torch.tensor([[-0.1, -0.1, 0.2],
[0, 0, 1]]
)[None, :, :].expand(batch_size, -1, -1).float()
light_intensities = torch.ones_like(light_positions).float()
lights = torch.cat((light_positions, light_intensities), 2).to(vertices.device)
## rasterizer near 0 far 100. move mesh so minz larger than 0
transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10
# Attributes
face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1))
        face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1))
        transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))
# render
attributes = torch.cat(
[self.face_colors.expand(batch_size, -1, -1, -1), transformed_face_normals.detach(), face_vertices.detach(),
face_normals.detach()], -1)
rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)
# albedo
albedo_images = rendering[:, :3, :, :]
# shading
normal_images = rendering[:, 9:12, :, :].detach()
if lights.shape[1] == 9:
shading_images = self.add_SHlight(normal_images, lights)
else:
print('directional')
shading = self.add_directionlight(normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]), lights)
shading_images = shading.reshape(
[batch_size, lights.shape[1], albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 1, 4, 2, 3)
shading_images = shading_images.mean(1)
images = albedo_images * shading_images
return images
def render_normal(self, transformed_vertices, normals):
'''
-- rendering normal
'''
batch_size = normals.shape[0]
# Attributes
attributes = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
# rasterize
rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)
####
alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
normal_images = rendering[:, :3, :, :]
return normal_images
def world2uv(self, vertices):
'''
sample vertices from world space to uv space
uv_vertices: [bz, 3, h, w]
'''
batch_size = vertices.shape[0]
face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1)).clone().detach()
uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1),
self.uvfaces.expand(batch_size, -1, -1), face_vertices)[:, :3]
return uv_vertices
def save_obj(self, filename, vertices, textures):
'''
vertices: [nv, 3], tensor
texture: [3, h, w], tensor
'''
util.save_obj(filename, vertices, self.faces[0], textures=textures, uvcoords=self.raw_uvcoords[0],
uvfaces=self.uvfaces[0])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'AJay'
__mtime__ = '2019/5/9 0009'
"""
# Configuration information
'''
Crawl the mobile-side API:
"http://v2.sohu.com/integration-api/mix/region/84?size=100",
Crawling approach:
fetch the latest news every day; around 200 articles is usually enough,
and in most cases this yields links to 100+ news pages.
Check whether the corresponding set already contains a link;
if not, store it in the database of links waiting to be crawled,
then add the link to the set.
The crawl module periodically checks the database for pending links;
the time interval can be adjusted automatically by the program.
'''
# The URLs of articles 1-133 can be iterated over
new_url_list=["http://v2.sohu.com/integration-api/mix/region/"]
# Daily news volume
size=200
type_name='souhu'
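# Illustrative sketch (added, not part of the original project): the crawl flow
# described in the comment above could look roughly like this; seen_urls,
# fetch_region_page and save_pending are hypothetical helpers.
#
# def collect_new_links(region_id, seen_urls, fetch_region_page, save_pending):
#     """Fetch one region page and queue links that have not been seen yet."""
#     articles = fetch_region_page(new_url_list[0] + str(region_id), size)
#     for article in articles:
#         url = article.get('url')
#         if url and url not in seen_urls:
#             save_pending(url, type_name)   # store in the "to crawl" database
#             seen_urls.add(url)             # remember it to avoid duplicates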
|
import unittest
from unittest import mock
from pathlib import Path
import thonnycontrib.thonny_black_format as plugin
tests_folder = Path(__file__).parent
class TestPlugin(unittest.TestCase):
def test_plugin(self):
"""
Test that `show_info` is displaying the correct messages and
`load_plugin` is called with the expected arguments.
"""
with mock.patch(
"thonnycontrib.thonny_black_format.get_workbench"
) as mock_workbench:
filename = (
mock_workbench.return_value.get_editor_notebook.return_value.get_current_editor.return_value.get_filename
)
black_plugin = plugin.BlackFormat()
with mock.patch(
"thonnycontrib.thonny_black_format.showinfo"
) as mock_showinfo:
filename.side_effect = AttributeError
black_plugin.format_black()
mock_showinfo.assert_called_with(
title=plugin.NO_TEXT_TO_FORMAT.error_type,
message=plugin.NO_TEXT_TO_FORMAT.description,
)
filename.side_effect = None
filename.return_value = "notcompatible"
black_plugin.format_black()
mock_showinfo.assert_called_with(
title=plugin.NOT_COMPATIBLE.error_type,
message=plugin.NOT_COMPATIBLE.description,
)
filename.return_value = str(Path(f"{tests_folder}/unchanged.py"))
black_plugin.format_black()
mock_showinfo.assert_called_with(
title=plugin.SUCCESS, message="1 file left unchanged."
)
filename.return_value = str(Path(f"{tests_folder}/with_errors.py"))
black_plugin.format_black()
mock_showinfo.assert_called_with(
title="Oh no!",
message=f"Error: cannot format {filename()}: Cannot parse: 1:12: print(Hello world!)\n1 file failed to reformat.",
)
filename.return_value = str(Path(f"{tests_folder}/successful.py"))
black_plugin.format_black()
mock_showinfo.assert_called_with(
title="All done!", message="1 file reformatted."
)
with mock.patch(
"thonnycontrib.thonny_black_format.subprocess"
) as mock_subprocess:
            mock_subprocess.stderr = "No module named black"
black_plugin.format_black()
mock_showinfo.assert_called_with(
title=plugin.PACKAGE_NOT_FOUND.error_type,
message=plugin.PACKAGE_NOT_FOUND.description,
)
black_plugin.load_plugin()
mock_workbench.return_value.add_command.assert_called_with(
command_id="format_black",
menu_name="tools",
command_label="Format with Black",
handler=black_plugin.format_black,
default_sequence="<Control-Alt-c>",
extra_sequences=["<<CtrlAltCInText>>"],
)
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from builtins import range
import logging
import keras.backend as ker
from numpy import pi as pi_const
from keras.layers import Lambda, Multiply, Add
from keras.models import Input
from keras.models import Model
from .sampling import sample_standard_normal_noise
from .architectures import get_network_by_name
from ...utils.config import load_config
config = load_config('global_config.yaml')
logger = logging.getLogger(__name__)
class BaseEncoder(object):
def __init__(self, data_dim, noise_dim, latent_dim, network_architecture='synthetic', name='encoder'):
logger.info("Initialising {} model with {}-dimensional data and {}-dimensional noise input "
"and {} dimensional latent output".format(name, data_dim, noise_dim, latent_dim))
self.name = name
self.data_dim = data_dim
self.noise_dim = noise_dim
self.latent_dim = latent_dim
self.network_architecture = network_architecture
self.data_input = Input(shape=(data_dim,), name='enc_data_input')
self.standard_normal_sampler = Lambda(sample_standard_normal_noise, name='enc_standard_normal_sampler')
self.standard_normal_sampler.arguments = {'data_dim': self.data_dim, 'noise_dim': self.noise_dim,
'seed': config['seed']}
self.standard_normal_sampler2 = Lambda(sample_standard_normal_noise, name='enc_standard_normal_sampler2')
self.standard_normal_sampler2.arguments = {'data_dim': self.data_dim, 'noise_dim': self.noise_dim,
'seed': config['seed']}
def __call__(self, *args, **kwargs):
return None
class StandardEncoder(BaseEncoder):
"""
    An Encoder model is trained to parametrise an arbitrary approximate posterior distribution given some
input x, i.e. q(z|x). The model takes as input concatenated data samples and arbitrary noise and produces
a latent encoding:
Data Input
- - - - - - - - -
| Noise
| |
----------- <-- concatenation
| Encoder model
-----------
| Encoder |
-----------
|
Latent space Output
"""
def __init__(self, data_dim, noise_dim, latent_dim, network_architecture='synthetic'):
"""
Args:
data_dim: int, flattened data space dimensionality
noise_dim: int, flattened noise space dimensionality
latent_dim: int, flattened latent space dimensionality
network_architecture: str, the architecture name for the body of the Encoder model
"""
super(StandardEncoder, self).__init__(data_dim=data_dim, noise_dim=noise_dim, latent_dim=latent_dim,
network_architecture=network_architecture, name='Standard Encoder')
noise_input = self.standard_normal_sampler(self.data_input)
encoder_body_model = get_network_by_name['encoder'][network_architecture](data_dim, noise_dim, latent_dim)
latent_factors = encoder_body_model([self.data_input, noise_input])
self.encoder_model = Model(inputs=self.data_input, outputs=latent_factors, name='encoder')
def __call__(self, *args, **kwargs):
"""
Make the Encoder model callable on a list of Input layers.
Args:
*args: a list of input layers from the super-model or numpy arrays in case of test-time inference.
**kwargs:
Returns:
An Encoder model.
"""
return self.encoder_model(args[0])
class MomentEstimationEncoder(BaseEncoder):
"""
    An Encoder model is trained to parametrise an arbitrary approximate posterior distribution given some
input x, i.e. q(z|x). The model takes as input concatenated data samples and arbitrary noise and produces
a latent encoding. Additionally the first two moments (mean and variance) are estimated empirically, which is
necessary for the Adaptive Contrast learning algorithm. Schematically it can be represented as follows:
Data Noise
| |
| |
| |
-----------
| Encoder | ----> empirical mean and variance
-----------
|
Latent space
"""
def __init__(self, data_dim, noise_dim, noise_basis_dim, latent_dim, network_architecture='mnist'):
"""
Args:
data_dim: int, flattened data space dimensionality
noise_dim: int, flattened noise space dimensionality
noise_basis_dim: int, noise basis vectors dimensionality
latent_dim: int, flattened latent space dimensionality
network_architecture: str, the architecture name for the body of the moment estimation Encoder model
"""
super(MomentEstimationEncoder, self).__init__(data_dim=data_dim, noise_dim=noise_dim, latent_dim=latent_dim,
network_architecture=network_architecture,
name='Posterior Moment Estimation Encoder')
models = get_network_by_name['moment_estimation_encoder'][network_architecture](
data_dim=data_dim, noise_dim=noise_dim, noise_basis_dim=noise_basis_dim, latent_dim=latent_dim)
data_feature_extraction, noise_basis_extraction = models
self.standard_normal_sampler.arguments['n_basis'] = noise_basis_dim
noise = self.standard_normal_sampler(self.data_input)
noise_basis_vectors = noise_basis_extraction(noise)
coefficients_and_z0 = data_feature_extraction(self.data_input)
coefficients = coefficients_and_z0[:-1]
z_0 = coefficients_and_z0[-1]
latent_factors = []
for i, (a, v) in enumerate(zip(coefficients, noise_basis_vectors)):
latent_factors.append(Multiply(name='enc_elemwise_coeff_vecs_mult_{}'.format(i))([a, v]))
latent_factors = Add(name='enc_add_weighted_vecs')(latent_factors)
latent_factors = Add(name='add_z0_to_linear_combination')([z_0, latent_factors])
self.standard_normal_sampler2.arguments['n_basis'] = noise_basis_dim
self.standard_normal_sampler2.arguments['n_samples'] = 100
more_noise = self.standard_normal_sampler2(self.data_input)
sampling_basis_vectors = noise_basis_extraction(more_noise)
posterior_mean = []
posterior_var = []
for i in range(noise_basis_dim):
# compute empirical mean as the batchsize-wise mean of all sampling vectors for each basis dimension
mean_basis_vectors_i = Lambda(lambda x: ker.mean(x, axis=0),
name='enc_noise_basis_vectors_mean_{}'.format(i))(sampling_basis_vectors[i])
            # and do the same for the empirical variance
var_basis_vectors_i = Lambda(lambda x: ker.var(x, axis=0),
name='enc_noise_basis_vectors_var_{}'.format(i))(sampling_basis_vectors[i])
# and parametrise the posterior moment as described in the AVB paper
posterior_mean.append(Lambda(lambda x: x[0] * x[1],
name='enc_moments_mult_mean_{}'.format(i))([coefficients[i],
mean_basis_vectors_i]))
# compute similar posterior parametrization for the variance
posterior_var.append(Lambda(lambda x: x[0]*x[0]*x[1],
name='enc_moments_mult_var_{}'.format(i))([coefficients[i],
var_basis_vectors_i]))
posterior_mean = Add(name='enc_moments_mean')(posterior_mean)
posterior_mean = Add(name='enc_moments_mean_add_z0')([posterior_mean, z_0])
posterior_var = Add(name='enc_moments_var')(posterior_var)
normalised_latent_factors = Lambda(lambda x: (x[0] - x[1]) / ker.sqrt(x[2] + 1e-5),
name='enc_norm_posterior')([latent_factors, posterior_mean, posterior_var])
log_latent_space = Lambda(lambda x: -0.5 * ker.sum(x**2 + ker.log(2*pi_const), axis=1),
name='enc_log_approx_posterior')(latent_factors)
log_adaptive_prior = Lambda(lambda x: -0.5 * ker.sum(x[0]**2 + ker.log(x[1]) + ker.log(2*pi_const), axis=1),
name='enc_log_adaptive_prior')([normalised_latent_factors, posterior_var])
self.encoder_inference_model = Model(inputs=self.data_input, outputs=latent_factors,
name='encoder_inference_model')
self.encoder_trainable_model = Model(inputs=self.data_input,
outputs=[latent_factors, normalised_latent_factors,
posterior_mean, posterior_var,
log_adaptive_prior, log_latent_space],
name='encoder_trainable_model')
def __call__(self, *args, **kwargs):
"""
Make the Encoder model callable on a list of Input layers.
Args:
*args: a list of input layers from the super-model or numpy arrays in case of test-time inference.
**kwargs:
Returns:
An Encoder model.
"""
is_learning = kwargs.get('is_learning', True)
if is_learning:
return self.encoder_trainable_model(args[0])
return self.encoder_inference_model(args[0])
class ReparametrisedGaussianEncoder(BaseEncoder):
"""
    A ReparametrisedGaussianEncoder model is trained to parametrise Gaussian latent variables:
Data
|
-----------
| Encoder |
-----------
|
mu + sigma * Noise <--- Reparametrised Gaussian latent space
"""
def __init__(self, data_dim, noise_dim, latent_dim, network_architecture='synthetic'):
"""
Args:
data_dim: int, flattened data space dimensionality
noise_dim: int, flattened noise space dimensionality
latent_dim: int, flattened latent space dimensionality
network_architecture: str, the architecture name for the body of the reparametrised Gaussian Encoder model
"""
super(ReparametrisedGaussianEncoder, self).__init__(data_dim=data_dim,
noise_dim=noise_dim,
latent_dim=latent_dim,
network_architecture=network_architecture,
name='Reparametrised Gaussian Encoder')
latent_mean, latent_log_var = get_network_by_name['reparametrised_encoder'][network_architecture](
self.data_input, latent_dim)
noise = self.standard_normal_sampler(self.data_input)
# due to some BUG in Keras, the module name `ker` is not visible within the lambda expression
# as a workaround, define the function outside the Lambda layer
def lin_transform_standard_gaussian(params):
from keras.backend import exp
mu, log_sigma, z = params
transformed_z = mu + exp(log_sigma / 2.0) * z
return transformed_z
latent_factors = Lambda(lin_transform_standard_gaussian,
name='enc_reparametrised_latent')([latent_mean, latent_log_var, noise])
self.encoder_inference_model = Model(inputs=self.data_input, outputs=latent_factors, name='encoder_inference')
self.encoder_learning_model = Model(inputs=self.data_input,
outputs=[latent_factors, latent_mean, latent_log_var],
name='encoder_learning')
def __call__(self, *args, **kwargs):
"""
Make the Encoder model callable on a list of Input layers.
Args:
*args: a list of input layers from the super-model or numpy arrays in case of test-time inference.
Keyword Args:
is_learning: bool, whether the model is used for training or inference. The output is either
the latent space or the latent space and the means and variances from which it is reparametrised.
Returns:
An Encoder model.
"""
is_learning = kwargs.get('is_learning', True)
if is_learning:
return self.encoder_learning_model(args[0])
else:
return self.encoder_inference_model(args[0])
|