# from .test_api import *
# from .test_data import *
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.xmlWriter import XMLWriter
from fontTools.misc.loggingTools import CapturingLogHandler
import struct
import unittest
from fontTools.ttLib.tables._n_a_m_e import (
table__n_a_m_e, NameRecord, nameRecordFormat, nameRecordSize, makeName, log)
class NameTableTest(unittest.TestCase):
def test_getDebugName(self):
table = table__n_a_m_e()
table.names = [
makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English
makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French
makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German
makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese
]
self.assertEqual("Bold", table.getDebugName(258))
self.assertEqual("Sem Fracções", table.getDebugName(292))
self.assertEqual(None, table.getDebugName(999))
def test_setName(self):
table = table__n_a_m_e()
table.setName("Regular", 2, 1, 0, 0)
table.setName("Version 1.000", 5, 3, 1, 0x409)
table.setName("寬鬆", 276, 1, 2, 0x13)
self.assertEqual("Regular", table.getName(2, 1, 0, 0).toUnicode())
self.assertEqual("Version 1.000", table.getName(5, 3, 1, 0x409).toUnicode())
self.assertEqual("寬鬆", table.getName(276, 1, 2, 0x13).toUnicode())
self.assertTrue(len(table.names) == 3)
table.setName("緊縮", 276, 1, 2, 0x13)
self.assertEqual("緊縮", table.getName(276, 1, 2, 0x13).toUnicode())
self.assertTrue(len(table.names) == 3)
# passing bytes issues a warning
with CapturingLogHandler(log, "WARNING") as captor:
table.setName(b"abc", 0, 1, 0, 0)
self.assertTrue(
len([r for r in captor.records if "string is bytes" in r.msg]) == 1)
# anything other than unicode or bytes raises an error
with self.assertRaises(TypeError):
table.setName(1.000, 5, 1, 0, 0)
def test_addName(self):
table = table__n_a_m_e()
nameIDs = []
for string in ("Width", "Weight", "Custom"):
nameIDs.append(table.addName(string))
self.assertEqual(nameIDs[0], 256)
self.assertEqual(nameIDs[1], 257)
self.assertEqual(nameIDs[2], 258)
self.assertEqual(len(table.names), 6)
self.assertEqual(table.names[0].string, "Width")
self.assertEqual(table.names[1].string, "Width")
self.assertEqual(table.names[2].string, "Weight")
self.assertEqual(table.names[3].string, "Weight")
self.assertEqual(table.names[4].string, "Custom")
self.assertEqual(table.names[5].string, "Custom")
with self.assertRaises(ValueError):
table.addName('Invalid nameID', minNameID=32767)
with self.assertRaises(TypeError):
table.addName(b"abc") # must be unicode string
def test_decompile_badOffset(self):
# https://github.com/behdad/fonttools/issues/525
table = table__n_a_m_e()
badRecord = {
"platformID": 1,
"platEncID": 3,
"langID": 7,
"nameID": 1,
"length": 3,
"offset": 8765 # out of range
}
data = bytesjoin([
struct.pack(">HHH", 1, 1, 6 + nameRecordSize),
sstruct.pack(nameRecordFormat, badRecord)])
table.decompile(data, ttFont=None)
self.assertEqual(table.names, [])
class NameRecordTest(unittest.TestCase):
def test_toUnicode_utf16be(self):
name = makeName("Foo Bold", 111, 0, 2, 7)
self.assertEqual("utf_16_be", name.getEncoding())
self.assertEqual("Foo Bold", name.toUnicode())
def test_toUnicode_macroman(self):
name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman
self.assertEqual("mac_roman", name.getEncoding())
self.assertEqual("Foo Italic", name.toUnicode())
def test_toUnicode_macromanian(self):
name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian
self.assertEqual("mac_romanian", name.getEncoding())
self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode())
def test_toUnicode_UnicodeDecodeError(self):
name = makeName(b"\1", 111, 0, 2, 7)
self.assertEqual("utf_16_be", name.getEncoding())
self.assertRaises(UnicodeDecodeError, name.toUnicode)
def toXML(self, name):
writer = XMLWriter(BytesIO())
name.toXML(writer, ttFont=None)
xml = writer.file.getvalue().decode("utf_8").strip()
return xml.split(writer.newlinestr.decode("utf_8"))[1:]
def test_toXML_utf16be(self):
name = makeName("Foo Bold", 111, 0, 2, 7)
self.assertEqual([
'<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
' Foo Bold',
'</namerecord>'
], self.toXML(name))
def test_toXML_utf16be_odd_length1(self):
name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7)
self.assertEqual([
'<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
' Foo',
'</namerecord>'
], self.toXML(name))
def test_toXML_utf16be_odd_length2(self):
name = makeName(b"\0Fooz", 111, 0, 2, 7)
self.assertEqual([
'<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
' Fooz',
'</namerecord>'
], self.toXML(name))
def test_toXML_utf16be_double_encoded(self):
name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7)
self.assertEqual([
'<namerecord nameID="111" platformID="0" platEncID="2" langID="0x7">',
' Fo',
'</namerecord>'
], self.toXML(name))
def test_toXML_macroman(self):
name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman
self.assertEqual([
'<namerecord nameID="222" platformID="1" platEncID="0" langID="0x7" unicode="True">',
' Foo Italic',
'</namerecord>'
], self.toXML(name))
def test_toXML_macroman_actual_utf16be(self):
name = makeName("\0F\0o\0o", 222, 1, 0, 7)
self.assertEqual([
'<namerecord nameID="222" platformID="1" platEncID="0" langID="0x7" unicode="True">',
' Foo',
'</namerecord>'
], self.toXML(name))
def test_toXML_unknownPlatEncID_nonASCII(self):
name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID
self.assertEqual([
'<namerecord nameID="333" platformID="1" platEncID="9876" langID="0x7" unicode="False">',
' BŠrli',
'</namerecord>'
], self.toXML(name))
def test_toXML_unknownPlatEncID_ASCII(self):
name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID
self.assertEqual([
'<namerecord nameID="333" platformID="1" platEncID="9876" langID="0x7" unicode="True">',
' Barli',
'</namerecord>'
], self.toXML(name))
def test_encoding_macroman_misc(self):
name = makeName('', 123, 1, 0, 17) # Mac Turkish
self.assertEqual(name.getEncoding(), "mac_turkish")
name.langID = 37
self.assertEqual(name.getEncoding(), "mac_romanian")
name.langID = 45 # Other
self.assertEqual(name.getEncoding(), "mac_roman")
def test_extended_mac_encodings(self):
name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese
self.assertEqual(name.toUnicode(), unichr(0x2122))
def test_extended_unknown(self):
name = makeName(b'\xfe', 123, 10, 11, 12)
self.assertEqual(name.getEncoding(), "ascii")
self.assertEqual(name.getEncoding(None), None)
self.assertEqual(name.getEncoding(default=None), None)
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
import numpy as np
import pandas as pd
import multiprocessing
from multiprocessing import Pool
from datetime import date
def construct_OD(process_name, from_ind, to_ind, bart_data, stop_table, bart_OD):
print('Start process ' + process_name)
for i in bart_data.index[from_ind:to_ind]:
# [from_ind, to_ind)
d = bart_data.loc[i, 'Date']
doy = get_doy(d) # day of the year, starting from 1
doy -= 1
hod = bart_data.loc[i, 'Hour'] # hour of the day, starting from 0
hour_abs = doy * 24 + hod # hour index, the 3rd dim of bart_OD
org = bart_data.loc[i, 'Origin']
org_ind = stop_table.loc[org, 'stop_index']
dest = bart_data.loc[i, 'Dest']
dest_ind = stop_table.loc[dest, 'stop_index']
pax_flow = bart_data.loc[i, 'Count']
bart_OD[org_ind, dest_ind, hour_abs] = pax_flow
print('End process ' + process_name)
return bart_OD
def get_doy(d):
# input date, get day of the year
return date.fromisoformat(d).timetuple().tm_yday
if __name__ == '__main__':
bart_path = '/Volumes/Google Drive/My Drive/Graduate/SP22 CE 299/data/BART/hour data/date-hour-soo-dest-2020.csv'
bart_data = pd.read_csv(bart_path, header=None)
bart_data.columns = ['Date', 'Hour', 'Origin', 'Dest', 'Count']
print(bart_data.head(2))  # preview the first rows
stops = bart_data['Origin'].drop_duplicates()
num_stops = stops.shape[0] # =50
# dims: (org, dest, time[hr])
bart_OD = np.zeros([num_stops, num_stops, 24*366]) # 366 or 365 days
stop_table = pd.DataFrame(
range(num_stops),
index=stops.values,
columns=['stop_index']
)
num_interval = multiprocessing.cpu_count()
interval = len(bart_data)//num_interval * np.arange(num_interval)
interval = np.append(interval, bart_data.shape[0])
print(interval)  # row-index boundaries handed to each worker
n_cpu = num_interval
pool = Pool(processes=n_cpu)
params = []
for i in range(len(interval)-1):
from_ = interval[i]
to_ = interval[i+1]
process_name = 'P' + str(i)
params.append((process_name, from_, to_, bart_data, stop_table, bart_OD))
bart_OD_set = pool.starmap(func=construct_OD, iterable=params)
# please set a breakpoint here, then store the data manually
print('end')
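# Illustrative follow-up (not part of the original script): each worker fills
# its own copy of bart_OD over a disjoint slice of rows, so the per-process
# results in bart_OD_set can be merged element-wise before saving, e.g.:
#   bart_OD = np.maximum.reduce(bart_OD_set)
#   np.save('bart_OD_2020.npy', bart_OD)  # hypothetical output path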
|
from roblib import *
def draw_crank(x):
θ1=x[0,0]
θ2=x[1,0]
z=L1*array([[cos(θ1)],[sin(θ1)]])
y=z+L2*array([[cos(θ1+θ2)],[sin(θ1+θ2)]])
plot( [0,z[0,0],y[0,0]],[0,z[1,0],y[1,0]],'magenta', linewidth = 2)
draw_disk(c,r,ax,"cyan")
L1,L2 = 4,3
c = array([[1],[2]])
r=4
dt = 0.05
x = array([[-1],[1]])
def consigne(r,c,t):
w = c + r*array([[cos(t)],[sin(t)]])
dw = array([[-r*sin(t)], [r*cos(t)]])
return w,dw
def regulation(l1,l2,θ1,θ2,w,dw):
# Observation
z=l1*array([[cos(θ1)],[sin(θ1)]])
y=z+l2*array([[cos(θ1+θ2)],[sin(θ1+θ2)]])
# Regulation: A is the Jacobian dy/dθ of the tip position y, so the control
# u = inv(A) @ (w - y + dw) drives y toward the reference w (feedback linearisation)
A = array([[-y[1,0], -l2*sin(θ1+θ2)],    # -y[1,0] = -l1*sin(θ1) - l2*sin(θ1+θ2)
[y[0,0], l2*cos(θ1+θ2)]])                #  y[0,0] =  l1*cos(θ1) + l2*cos(θ1+θ2)
Ainv = inv(A)
v = w-y +dw
u = dot(Ainv,v)
return u
def f(x,w,dw):
θ1=x[0,0]
θ2=x[1,0]
u = regulation(L1,L2,θ1,θ2,w,dw)
dθ1=u[0,0]
dθ2=u[1,0]
return(array([[dθ1],[dθ2]]))
fig = figure(0)
ax = fig.add_subplot(111, aspect='equal')
for t in arange(0,10,dt) :
pause(0.01)
cla()
ax.set_xlim(-4,8)
ax.set_ylim(-4,8)
draw_crank(x)
w, dw = consigne(r,c,t)
x = x + dt*f(x,w,dw)
show()
|
#--------------------------------
# Name: et_numpy.py
# Purpose: NumPy ET functions
#--------------------------------
# import logging
import math
import numpy as np
try:
import et_common
import et_image
import et_numpy
import python_common as dripy
except ModuleNotFoundError:
import sys
sys.path.append('/home/dgketchum/PycharmProjects/pymetric/code')
from support import et_common
from support import et_image
from support import et_numpy
from support import python_common as dripy
def cos_theta_spatial_func(time, doy, dr, lon, lat):
"""
Parameters
----------
time
doy
dr
lon
lat
Returns
-------
"""
sc = et_common.seasonal_correction_func(doy)
delta = et_common.delta_func(doy)
omega = et_common.omega_func(et_common.solar_time_rad_func(time, lon, sc))
cos_theta = ((math.sin(delta) * np.sin(lat)) +
(math.cos(delta) * np.cos(lat) * np.cos(omega)))
return cos_theta
def cos_theta_mountain_func(time, doy, dr, lon, lat, slope, aspect):
"""
Parameters
----------
time
doy
dr
lon
lat
slope
aspect
Returns
-------
ndarray
"""
sc = et_common.seasonal_correction_func(doy)
delta = et_common.delta_func(doy)
omega = et_common.omega_func(et_common.solar_time_rad_func(time, lon, sc))
sin_omega = np.sin(omega)
cos_omega = np.cos(omega)
del omega
sin_slope = np.sin(slope)
cos_slope = np.cos(slope)
# Aspect is 0 as north, function is expecting 0 as south
sin_aspect = np.sin(aspect - math.pi)
cos_aspect = np.cos(aspect - math.pi)
sin_lat = np.sin(lat)
cos_lat = np.cos(lat)
cos_theta_unadjust_array = (
(math.sin(delta) * sin_lat * cos_slope) -
(math.sin(delta) * cos_lat * sin_slope * cos_aspect) +
(math.cos(delta) * cos_lat * cos_slope * cos_omega) +
(math.cos(delta) * sin_lat * sin_slope * cos_aspect * cos_omega) +
(math.cos(delta) * sin_aspect * sin_slope * sin_omega))
del sin_lat, cos_lat, sin_slope
del sin_aspect, cos_aspect, sin_omega, cos_omega
cos_theta_array = np.maximum(
(cos_theta_unadjust_array / cos_slope), 0.1)
del cos_slope
return cos_theta_array
# DEADBEEF - Trying to reduce memory usage in calculation
# def cos_theta_mountain_func(time, doy, dr, lon, lat, slope, aspect):
# """
#
# Parameters
# ----------
# time
# doy
# dr
# lon
# lat
# slope
# aspect
#
# Returns
# -------
#
# """
# cos_theta_array = 0
# # Term 1 (sin(Delta)*sin(Latitude)*cos(Slope))
# temp_array = math.sin(delta)
# temp_array *= np.sin(lat)
# temp_array *= np.cos(slope)
# temp_array *= np.cos(aspect)
# temp_array *= np.cos(omega)
# cos_theta_array += temp_array
# del temp_array
# # Term 2 (-sin(Delta)*cos(Latitude)*sin(Slope)*cos(Aspect))
# temp_array = math.sin(delta)
# temp_array *= np.cos(lat)
# temp_array *= np.sin(slope)
# temp_array *= np.cos(aspect
# cos_theta_array -= temp_array
# del temp_array
# # Term 3 (+cos(Delta)*cos(Latitude)*cos(Slope)*cos(Omega))
# temp_array = math.cos(delta)
# temp_array *= np.cos(lat)
# temp_array *= np.cos(slope)
# temp_array *= np.cos(omega)
# cos_theta_array += temp_array
# del temp_array
# # Term 4 (+cos(Delta)*sin(Latitude)*sin(Slope)*cos(Aspect)*cos(Omega))
# temp_array = math.cos(delta)
# temp_array *= np.sin(lat)
# temp_array *= np.sin(slope)
# temp_array *= np.cos(aspect)
# temp_array *= np.cos(omega)
# cos_theta_array += temp_array
# del temp_array
# # Term 5 (+cos(Delta)*sin(Slope)*sin(Aspect)*sin(Omega))
# temp_array = math.cos(delta)
# temp_array *= np.sin(slope)
# temp_array *= np.sin(aspect)
# temp_array *= np.sin(omega)
# cos_theta_array += temp_array
# del temp_array
# # Adjust
# cos_theta_array /= np.cos(slope)
# cos_theta_array = np.maximum(
# cos_theta_array, 0.1, dtype=np.float32)
# # ((sin(Delta)*sin(Latitude)*cos(Slope))
# # -(sin(Delta)*cos(Latitude)*sin(Slope)*cos(Aspect))
# # +(cos(Delta)*cos(Latitude)*cos(Slope)*cos(Omega))
# # +(cos(Delta)*sin(Latitude)*sin(Slope)*cos(Aspect)*cos(Omega))
# # +(cos(Delta)*sin(Slope)*sin(Aspect)*sin(Omega)))
# # cos_theta_array = (
# # (sin_delta * sin_lat * cos_slope) -
# # (sin_delta * cos_lat * sin_slope * cos_aspect) +
# # (cos_delta * cos_lat * cos_slope * cos_omega) +
# # (cos_delta * sin_lat * sin_slope * cos_aspect * cos_omega) +
# # (cos_delta * sin_slope * sin_aspect * sin_omega))
# # del sin_lat, cos_lat, sin_slope
# # del sin_aspect, cos_aspect, sin_omega, cos_omega
# # cos_theta_array /= cos_slope
# # del cos_slope
# # cos_theta_array = np.maximum(
# # cos_theta_array, 0.1, dtype=np.float32)
# return cos_theta_array
def l457_refl_toa_func(dn, cos_theta, dr, esun,
lmin, lmax, qcalmin, qcalmax,
band_toa_sur_mask):
"""Calculate Landsat 4, 5, or 7 TOA reflectance for all bands
Parameters
----------
dn : array_like
Landsat raw digital number values
cos_theta
dr
esun
lmin
lmax
qcalmin
qcalmax
band_toa_sur_mask
Returns
-------
ndarray
References
----------
.. [1] Chander, G., Markham, B., & Helder, D. (2009). Summary of current
radiometric calibration coefficients for Landsat MSS, TM, ETM+, and EO-1
ALI sensors. Remote Sensing of Environment, 113(5)
https://doi.org/10.1016/j.rse.2009.01.007
"""
refl_toa = np.copy(dn).astype(np.float64)
refl_toa -= qcalmin
refl_toa *= ((lmax - lmin) / (qcalmax - qcalmin))
refl_toa += lmin
refl_toa /= esun
refl_toa[:, :, band_toa_sur_mask] /= cos_theta[
:, :, np.newaxis].repeat(band_toa_sur_mask.size, 2)
refl_toa[:, :, band_toa_sur_mask] *= (math.pi / dr)
# Don't clip thermal band since it is not scaled from 0-1
refl_toa[:, :, band_toa_sur_mask] = np.clip(
refl_toa[:, :, band_toa_sur_mask], 0.0001, 1)
return refl_toa.astype(np.float32)
def l457_refl_toa_band_func(dn, cos_theta, dr, esun,
lmin, lmax, qcalmin, qcalmax):
"""Landsat 4, 5, or 7 DN -> TOA reflectance (single band)
Parameters
----------
dn : array_like
Landsat raw digital number values
cos_theta
dr
esun
lmin
lmax
qcalmin
qcalmax
Returns
-------
ndarray
References
----------
.. [1] Chander, G., Markham, B., & Helder, D. (2009). Summary of current
radiometric calibration coefficients for Landsat MSS, TM, ETM+, and EO-1
ALI sensors. Remote Sensing of Environment, 113(5)
https://doi.org/10.1016/j.rse.2009.01.007
"""
refl_toa = np.copy(dn).astype(np.float64)
refl_toa -= qcalmin
refl_toa *= ((lmax - lmin) / (qcalmax - qcalmin))
refl_toa += lmin
refl_toa /= cos_theta
refl_toa *= (math.pi / (dr * esun))
np.clip(refl_toa, 0.0001, 1, out=refl_toa)
return refl_toa.astype(np.float32)
def l457_ts_bt_band_func(dn, lmin, lmax, qcalmin, qcalmax, k1, k2):
"""Landsat 4, 5, or 7 DN -> brightness temperature (single band)
Parameters
----------
dn : ndarray
lmin : array_like
lmax : array_like
qcalmin : array_like
qcalmax : array_like
k1 : float
k2 : float
Returns
-------
ndarray
References
----------
.. [1] Chander, G., Markham, B., & Helder, D. (2009). Summary of current
radiometric calibration coefficients for Landsat MSS, TM, ETM+, and EO-1
ALI sensors. Remote Sensing of Environment, 113(5)
https://doi.org/10.1016/j.rse.2009.01.007
"""
ts_bt = np.copy(dn).astype(np.float64)
ts_bt -= qcalmin
ts_bt *= ((lmax - lmin) / (qcalmax - qcalmin))
ts_bt += lmin
return ts_bt_func(ts_bt, k1, k2).astype(np.float32)
def l8_refl_toa_band_func(dn, cos_theta, refl_mult, refl_add):
"""Landsat 8 DN -> TOA reflectance (single band)
Parameters
----------
dn : ndarray
cos_theta : array_like
refl_mult : array_like
Reflectance multiplicative scaling factors
refl_add : array_like
Reflectance additive scaling factors
Returns
-------
ndarray
References
----------
.. [1] Landsat 8 Data Users Handbook
https://landsat.usgs.gov/landsat-8-l8-data-users-handbook
"""
refl_toa = np.copy(dn).astype(np.float64)
refl_toa *= refl_mult
refl_toa += refl_add
refl_toa /= cos_theta
np.clip(refl_toa, 0.0001, 1, out=refl_toa)
return refl_toa
def l8_ts_bt_band_func(dn, rad_mult, rad_add, k1, k2):
"""Landsat 8 -> brightness temperature (single band)
Parameters
----------
dn
rad_mult
rad_add
k1
k2
Returns
-------
ndarray
References
----------
.. [1] Landsat 8 Data Users Handbook
https://landsat.usgs.gov/landsat-8-l8-data-users-handbook
"""
ts_bt = np.copy(dn).astype(np.float64)
ts_bt *= rad_mult
ts_bt += rad_add
return ts_bt_func(ts_bt, k1, k2).astype(np.float32)
def bqa_fmask_func(qa):
"""Construct Fmask array from Landsat Collection 1 TOA QA array
Parameters
----------
qa : ndarray
Returns
-------
ndarray
Notes
-----
https://landsat.usgs.gov/collectionqualityband
https://code.earthengine.google.com/356a3580096cca315785d0859459abbd
Confidence values:
00 = "Not Determined" = Algorithm did not determine the status of this condition
01 = "No" = Algorithm has low to no confidence that this condition exists
(0-33 percent confidence)
10 = "Maybe" = Algorithm has medium confidence that this condition exists
(34-66 percent confidence)
11 = "Yes" = Algorithm has high confidence that this condition exists
(67-100 percent confidence)
"""
# Extracting cloud masks from BQA using np.right_shift() and np.bitwise_and()
# Cloud (med & high confidence), then snow, then shadow, then fill
# Low confidence clouds tend to be the FMask buffer
fill_mask = np.bitwise_and(np.right_shift(qa, 0), 1) >= 1
cloud_mask = np.bitwise_and(np.right_shift(qa, 4), 1) >= 1 # cloud bit
cloud_mask &= np.bitwise_and(np.right_shift(qa, 5), 3) >= 2 # cloud conf.
cloud_mask |= np.bitwise_and(np.right_shift(qa, 11), 3) >= 3 # cirrus
shadow_mask = np.bitwise_and(np.right_shift(qa, 7), 3) >= 3
snow_mask = np.bitwise_and(np.right_shift(qa, 9), 3) >= 3
fmask = (fill_mask != True).astype(np.uint8)
fmask[shadow_mask] = 2
fmask[snow_mask] = 3
fmask[cloud_mask] = 4
return fmask
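# Illustrative check of the bit logic above (the QA value is made up, not from
# a real scene): a pixel with the cloud bit (4) set and cloud confidence bits
# (5-6) at "high" decodes to fmask class 4.
#   qa = np.array([(1 << 4) | (3 << 5)], dtype=np.uint16)
#   bqa_fmask_func(qa)  # -> array([4], dtype=uint8)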
def tau_broadband_func(pair, w, cos_theta, kt=1):
"""Broadband transmittance
Parameters
----------
pair : array_like
Air pressure [kPa].
w : array_like
Precipitable water in the atmosphere [mm]
cos_theta : array_like
kt : float
Returns
-------
ndarray
References
----------
"""
tau_broadband = tau_direct_func(pair, w, cos_theta, kt)
tau_broadband += tau_diffuse_func(tau_broadband)
return tau_broadband.astype(np.float32)
def tau_direct_func(pair, w, cos_theta, kt=1):
"""
Parameters
----------
pair : array_like
w : array_like
cos_theta : array_like
kt : float
Returns
-------
ndarray
Notes
-----
0.98 * np.exp((-0.00146 * pair / (kt * cos_theta)) - (0.075 * np.power(w / cos_theta, 0.4)))
References
----------
.. [1] Tasumi, M., Allen, R., and Trezza, R. (2008). At-surface reflectance
and albedo from satellite for operational calculation of land surface
energy balance. Journal of Hydrologic Engineering 13(2):51-63.
https://doi.org/10.1061/(ASCE)1084-0699(2008)13:2(51)
"""
t1 = np.copy(pair).astype(np.float64)
t1 /= kt
t1 *= -0.00146
t1 /= cos_theta
t2 = np.copy(w).astype(np.float64)
t2 /= cos_theta
np.power(t2, 0.4, out=t2)
t2 *= 0.075
t1 -= t2
del t2
np.exp(t1, out=t1)
t1 *= 0.98
return t1
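# Quick sanity check (illustrative inputs): near sea level (pair ~101.3 kPa),
# ~20 mm precipitable water, sun near zenith (cos_theta ~1) and kt=1 gives a
# direct-beam transmittance of roughly 0.66:
#   tau_direct_func(np.array([101.3]), np.array([20.0]), np.array([1.0]))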
def tau_diffuse_func(tau_direct):
"""
Parameters
----------
tau_direct : array_like
Returns
-------
ndarray
Notes
-----
Model differs from formulas in METRIC manual.
Eqn is not applied, per Rick Allen it is not needed.
References
----------
.. [1] Tasumi, M., Allen, R., and Trezza, R. (2008). At-surface reflectance
and albedo from satellite for operational calculation of land surface
energy balance. Journal of Hydrologic Engineering 13(2):51-63.
https://doi.org/10.1061/(ASCE)1084-0699(2008)13:2(51)
"""
tau = np.copy(tau_direct).astype(np.float64)
tau *= -0.36
tau += 0.35
return tau
# return np.where(tau_direct_array >= 0.15),
# (0.35-0.36*tau_direct_array),
# (0.18-0.82*tau_direct_array))
def tau_narrowband_func(pair, w, cos_theta, kt, c1, c2, c3, c4, c5):
"""Narrowband transmittance
Parameters
----------
pair : array_like
Air pressure [kPa].
w : array_like
Precipitable water in the atmosphere [mm]
cos_theta : array_like
kt : float
c1 : float
c2 : float
c3 : float
c4 : float
c5 : float
Returns
-------
ndarray
Notes
-----
IN: c1 * exp(((c2*pair) / (kt*cos_theta)) - ((c3*w+c4) / cos_theta)) + c5
OUT: c1 * exp(((c2*pair) / (kt*1.0)) - ((c3*w+c4) / 1.0)) + c5
References
----------
.. [1] Tasumi, M., Allen, R., and Trezza, R. (2008). At-surface reflectance
and albedo from satellite for operational calculation of land surface
energy balance. Journal of Hydrologic Engineering 13(2):51-63.
https://doi.org/10.1061/(ASCE)1084-0699(2008)13:2(51)
"""
t1 = np.copy(pair).astype(np.float64)
t1 /= kt
t1 *= c2
t2 = np.copy(w)
t2 *= c3
t2 += c4
t1 -= t2
del t2
t1 /= cos_theta
np.exp(t1, out=t1)
t1 *= c1
t1 += c5
return t1.astype(np.float32)
def refl_sur_tasumi_func(refl_toa, pair, w, cos_theta, kt,
c1, c2, c3, c4, c5, cb, band_cnt):
"""Tasumi at-surface reflectance
Parameters
----------
refl_toa : ndarray
Top-of-atmosphere reflectance.
pair : array_like
Air pressure [kPa].
w : array_like
Precipitable water in the atmosphere [mm]
cos_theta : array_like
kt : float
Clearness coefficient.
c1 : float
c2 : float
c3 : float
c4 : float
c5 : float
cb : float
band_cnt : int
Returns
-------
ndarray
Notes
-----
refl_sur = (refl_toa - cb * (1 - tau_in)) / (tau_in * tau_out)
References
----------
.. [1] Tasumi, M., Allen, R., and Trezza, R. (2008). At-surface reflectance
and albedo from satellite for operational calculation of land surface
energy balance. Journal of Hydrologic Engineering 13(2):51-63.
https://doi.org/10.1061/(ASCE)1084-0699(2008)13:2(51)
"""
if np.all(np.isnan(refl_toa)):
return refl_toa
# Reshape arrays to match the surface reflectance arrays
pair_mod = pair[:, :, np.newaxis].repeat(band_cnt, 2).astype(np.float64)
w_mod = w[:, :, np.newaxis].repeat(band_cnt, 2).astype(np.float64)
cos_theta_mod = cos_theta[:, :, np.newaxis].repeat(band_cnt, 2).astype(np.float64)
tau_in = tau_narrowband_func(
pair_mod, w_mod, cos_theta_mod, kt, c1, c2, c3, c4, c5)
tau_out = tau_narrowband_func(
pair_mod, w_mod, 1, kt, c1, c2, c3, c4, c5)
del cos_theta_mod, pair_mod, w_mod
refl_sur = np.copy(tau_in)
refl_sur *= -1
refl_sur += 1
refl_sur *= -cb
refl_sur += refl_toa
refl_sur /= tau_in
refl_sur /= tau_out
np.clip(refl_sur, 0.0001, 1, out=refl_sur)
return refl_sur.astype(np.float32)
def albedo_sur_func(refl_sur, wb):
"""Tasumi at-surface albedo
Parameters
----------
refl_sur : ndarray
wb : array_like
Band weighting coefficients.
Returns
-------
ndarray
References
----------
.. [1] Tasumi, M., Allen, R., and Trezza, R. (2008). At-surface reflectance
and albedo from satellite for operational calculation of land surface
energy balance. Journal of Hydrologic Engineering 13(2):51-63.
https://doi.org/10.1061/(ASCE)1084-0699(2008)13:2(51)
"""
return np.sum(refl_sur * wb, axis=2)
# Vegetation Indices
def ndi_func(a, b, l=0.0):
"""Normalized difference index function
Parameters
----------
a : array_like
b : array_like
l : float, optional
Soil adjustment factor (the default is 0.0, i.e. a plain normalized difference).
Returns
-------
array_like
Notes
-----
Function can be used to calculate SAVI ([1]_, [2]_) by setting l != 0.
References
----------
.. [1] Huete, A. (1988). A soil-adjusted vegetation index (SAVI).
Remote Sensing of Environment, 25(3).
https://doi.org/10.1016/0034-4257(88)90106-X
.. [2] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
ndi = ((1. + l) * (a - b) / (l + a + b))
# Manually set output value when a and b are zero
# ndi[((l+a+b) != 0)] = 0
return ndi
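# Illustrative usage (array names are hypothetical): NDVI uses the default
# l=0, SAVI passes a non-zero soil adjustment factor.
#   nir, red = np.array([0.4]), np.array([0.1])
#   ndvi = ndi_func(nir, red)           # (0.3 / 0.5) = 0.6
#   savi = ndi_func(nir, red, l=0.5)    # (1.5 * 0.3) / 1.0 = 0.45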
def savi_lai_func(savi):
"""Compute leaf area index (LAI) from SAVI
Parameters
----------
savi : array_like
Soil adjusted vegetation index.
Returns
-------
ndarray
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
return np.clip((11. * np.power(savi, 3)), 0, 6)
def ndvi_lai_func(ndvi):
"""Compute leaf area index (LAI) from NDVI
Parameters
----------
ndvi : array_like
Normalized difference vegetation index.
Returns
-------
ndarray
References
----------
.. [1] Trezza and Allen 2014?
"""
return np.clip((7. * np.power(ndvi, 3)), 0, 6)
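# Quick numeric sketch of the two LAI relations above (values illustrative);
# both are clipped to the 0-6 range:
#   savi_lai_func(np.array([0.5]))  # 11 * 0.125 = 1.375
#   ndvi_lai_func(np.array([0.8]))  # 7 * 0.512 = 3.584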
def ratio_func(a, b):
"""Compute ratio of two values
Parameters
----------
a : array_like
b : array_like
Returns
-------
array_like
"""
return a / b
def evi_func(blue, red, nir):
"""Compute enhanced vegetation index
Parameters
----------
blue : array_like
Blue band (band 1 on Landsat 5/7, band 2 on Landsat 8).
red : array_like
Red band (band 3 on Landsat 5/7, band 4 on Landsat 8).
nir : array_like
Near infrared band (band 4 on Landsat 5/7, band 5 on Landsat 8).
Returns
-------
array_like
References
----------
.. [1] Huete et al. (2002).
Overview of the radiometric and biophysical performance of the MODIS
vegetation indices. Remote Sensing of Environment, 83.
https://doi.org/10.1016/S0034-4257(02)00096-2
"""
return (2.5 * (nir - red)) / (nir + 6 * red - 7.5 * blue + 1)
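# Illustrative EVI computation with plausible (made-up) surface reflectances:
#   blue, red, nir = np.array([0.05]), np.array([0.08]), np.array([0.35])
#   evi_func(blue, red, nir)  # 2.5*0.27 / (0.35 + 0.48 - 0.375 + 1) ~= 0.46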
def tc_bright_func(reflectance, image_type='TOA'):
"""Tasseled cap brightness
Parameters
----------
reflectance : array_like
Reflectance.
image_type : {'TOA' (default), 'SUR'}, optional
Reflectance type.
Returns
-------
ndarray
References
----------
DEADBEEF - Check these URLs and generate actual references and copy
to all functions
LT04/LT05 - http://www.gis.usu.edu/~doug/RS5750/assign/OLD/RSE(17)-301.pdf
LE07 - http://landcover.usgs.gov/pdf/tasseled.pdf
LC08 - http://www.tandfonline.com/doi/abs/10.1080/2150704X.2014.915434
https://www.researchgate.net/publication/262005316_Derivation_of_a_tasselled_cap_transformation_based_on_Landsat_8_at-_satellite_reflectance
"""
if image_type == 'SUR':
tc_bright = np.array([0.2043, 0.4158, 0.5524, 0.5741, 0.3124, 0, 0.2303])
elif image_type == 'TOA':
tc_bright = np.array([0.3561, 0.3972, 0.3904, 0.6966, 0.2286, 0, 0.1596])
return np.sum(reflectance * tc_bright, axis=2).astype(np.float32)
def tc_green_func(reflectance, image_type='TOA'):
"""Tasseled cap greenness
Parameters
----------
reflectance : array_like
image_type : {'TOA' (default), 'SUR'}, optional
Reflectance type.
Returns
-------
ndarray
References
----------
"""
if image_type == 'SUR':
tc_green = np.array([-0.1063, -0.2819, -0.4934, 0.7940, -0.0002, 0, -0.1446])
elif image_type == 'TOA':
tc_green = np.array([-0.3344, -0.3544, -0.4556, 0.6966, -0.0242, 0, -0.2630])
return np.sum(reflectance * tc_green, axis=2).astype(np.float32)
def tc_wet_func(reflectance, image_type='TOA'):
"""Tasseled cap wetness
Parameters
----------
reflectance : array_like
image_type : {'TOA' (default), 'SUR'}, optional
Reflectance type.
Returns
-------
ndarray
References
----------
"""
if image_type == 'SUR':
tc_wet = np.array([
0.0315, 0.2021, 0.3102, 0.1594, -0.6806, 0, -0.6109])
elif image_type == 'TOA':
tc_wet = np.array([
0.2626, 0.2141, 0.0926, 0.06564, -0.7629, 0, -0.5388])
return np.sum(reflectance * tc_wet, axis=2).astype(np.float32)
def etstar_func(evi, etstar_type='mean'):
"""Compute ET*
Parameters
----------
evi : array_like
Enhanced vegetation index.
etstar_type : {'mean', 'lpi', 'upi', 'lci', 'uci'}, optional
Returns
-------
ndarray
References
----------
.. [1] Beamer, J., Huntington, J., Morton, C., & Pohll, G. (2011).
Estimating annual groundwater evapotranspiration from phreatophytes in
the Great Basin using Landsat and flux tower measurements.
Journal of the American Water Resources Association, 49(3).
https://doi.org/10.1111/jawr.12058
"""
c_dict = dict()
c_dict['mean'] = np.array([-0.1955, 2.9042, -1.5916]).astype(np.float32)
c_dict['lpi'] = np.array([-0.2871, 2.9192, -1.6263]).astype(np.float32)
c_dict['upi'] = np.array([-0.1039, 2.8893, -1.5569]).astype(np.float32)
c_dict['lci'] = np.array([-0.2142, 2.9175, -1.6554]).astype(np.float32)
c_dict['uci'] = np.array([-0.1768, 2.891, -1.5278]).astype(np.float32)
try:
c = c_dict[etstar_type]
except KeyError:
raise SystemExit()
# ET* calculation
etstar = np.copy(evi)
etstar *= c[2]
etstar += c[1]
etstar *= evi
etstar += c[0]
np.maximum(etstar, 0., out=etstar)
return etstar
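# Illustrative ET* evaluation (made-up EVI): the code above evaluates the
# quadratic c[0] + c[1]*evi + c[2]*evi**2, clipped at zero, so with the 'mean'
# coefficients:
#   etstar_func(np.array([0.4], dtype=np.float32))
#   # -0.1955 + 2.9042*0.4 - 1.5916*0.16 ~= 0.71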
def etstar_etg_func(etstar, eto, ppt):
"""Computed ET* based groundwater evapotranspiration (ETg)
Parameters
----------
etstar : array_like
ET*
eto : array_like
Reference ET [mm].
ppt : array_like
Precipitation [mm].
Returns
-------
ndarray
"""
return np.copy(etstar) * (eto - ppt)
def etstar_et_func(etstar, eto, ppt):
"""Compute ET* based evapotranspiration (ET)
Parameters
----------
etstar : array_like
ET*
eto : array_like
Reference ET [mm]
ppt : array_like
Precipitation [mm]
Returns
-------
ndarray
"""
return np.copy(etstar) * (eto - ppt) + ppt
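# Illustrative annual-scale usage (numbers made up): with ET* = 0.5,
# reference ET of 1200 mm and precipitation of 300 mm,
#   etstar_etg_func(0.5, 1200., 300.)  # ETg = 0.5 * 900 = 450 mm
#   etstar_et_func(0.5, 1200., 300.)   # ET  = 450 + 300 = 750 mm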
def em_nb_func(lai, water_index, water_threshold=0):
"""Narrowband emissivity
Parameters
----------
lai : array_like
Leaf area index
water_index : array_like
Normalized index used to identify water pixels (typically NDVI).
water_threshold : float, optional
Pixels with water_index values less than this value will have the water
emissivity value applied.
Returns
-------
ndarray
Notes
-----
em_nb = (0.97 + (lai / 300.)) for LAI <= 3
em_nb = 0.98 for LAI > 3
em_nb = 0.985 for water
DEADBEEF - Check 0.99 value in code for water
References
----------
.. [1] Tasumi, M. (2003). Progress in operational estimation of regional
evapotranspiration using satellite imagery. Ph.D. dissertation.
.. [2] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
em_nb = np.copy(lai).astype(np.float32)
em_nb /= 300.
em_nb += 0.97
em_nb[(water_index > water_threshold) & (lai > 3)] = 0.98
em_nb[water_index < water_threshold] = 0.99
return em_nb
def em_0_func(lai, water_index, water_threshold=0):
"""Broadband emissivity
Parameters
----------
lai : array_like
Leaf area index.
water_index : array_like
Normalized index used to identify water pixels (typically NDVI).
water_threshold : float, optional
Pixels with water_index values less than this value will have the water
emissivity value applied.
Returns
-------
ndarray
Notes
-----
em_0 = (0.95 + (lai / 100.)) for LAI <= 3
em_0 = 0.98 for LAI > 3
em_0 = 0.985 for water
References
----------
.. [1] Tasumi, M. (2003). Progress in operational estimation of regional
evapotranspiration using satellite imagery. Ph.D. dissertation.
.. [2] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
em_0 = np.copy(lai).astype(np.float32)
em_0 /= 100.
em_0 += 0.95
em_0[(water_index > water_threshold) & (lai > 3)] = 0.98
em_0[water_index <= water_threshold] = 0.985
return em_0
def rc_func(thermal_rad, em_nb, rp, tnb, rsky):
"""Corrected Radiance
Parameters
----------
thermal_rad : array_like
Thermal band spectral radiance [W m-2 sr-1 um-1].
em_nb : array_like
Narrow band emissivity.
rp : float
Path radiance (in the thermal band) [W m-2 sr-1 um-1].
tnb : float
Transmissivity of air (in the thermal band).
rsky : float
Clear sky downward thermal radiance [W m-2 sr-1 um-1].
Returns
-------
ndarray
Notes
-----
rc = ((thermal_rad - rp) / tnb) - ((1.0 - em_nb) * rsky)
References
----------
.. [1] Wukelic et al. (1989).
.. [2] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
# DEADBEEF - Why is ndmin=1 being set here?
rc = np.array(thermal_rad, copy=True, ndmin=1).astype(np.float64)
# rc = np.copy(thermal_rad_toa).astype(np.float32)
rc -= rp
rc /= tnb
rc -= rsky
rc += (em_nb * rsky)
return rc.astype(np.float32)
def ts_func(em_nb, rc, k1, k2):
"""Surface Temperature
Parameters
----------
em_nb : array_like
Narrow band emissivity.
rc : array_like
Corrected thermal radiance [W m-2 sr-1 um-1].
k1 : float
Calibration constant.
k2 : float
Calibration constant.
Returns
-------
ndarray
Notes
-----
ts = k2 / log(((em_nb * k1) / rc) + 1.0)
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
.. [2] Markham and Barker (1986).
"""
ts = np.copy(em_nb).astype(np.float64)
ts *= k1
ts /= rc
ts += 1.0
np.log(ts, out=ts)
np.reciprocal(ts, out=ts)
ts *= k2
return ts.astype(np.float32)
def ts_bt_func(thermal_rad, k1, k2):
"""Calculate brightness temperature from thermal radiance
Parameters
----------
thermal_rad : array_like
Thermal band spectral radiance [W m-2 sr-1 um-1].
k1 : float
Calibration constant.
k2 : float
Calibration constant.
Returns
-------
ndarray
Notes
-----
ts_bt = k2 / log((k1 / L) + 1.0)
References
----------
.. [1] Chander, G., Markham, B., & Helder, D. (2009). Summary of current
radiometric calibration coefficients for Landsat MSS, TM, ETM+, and EO-1
ALI sensors. Remote Sensing of Environment, 113(5)
https://doi.org/10.1016/j.rse.2009.01.007
"""
ts_bt = np.copy(thermal_rad).astype(np.float64)
ts_bt[ts_bt <= 0] = np.nan
np.reciprocal(ts_bt, out=ts_bt)
ts_bt *= k1
ts_bt += 1.0
np.log(ts_bt, out=ts_bt)
np.reciprocal(ts_bt, out=ts_bt)
ts_bt *= k2
return ts_bt.astype(np.float32)
def thermal_rad_func(ts_bt, k1, k2):
"""Back calculate thermal radiance from brightness temperature
Parameters
----------
ts_bt : array_like
Brightness temperature [K].
k1 : float
Calibration constant.
k2 : float
Calibration constant.
Returns
-------
ndarray
Notes
-----
thermal_rad = k1 / (exp(k2 / ts_bt) - 1.0)
References
----------
.. [1] Chander, G., Markham, B., & Helder, D. (2009). Summary of current
radiometric calibration coefficients for Landsat MSS, TM, ETM+, and EO-1
ALI sensors. Remote Sensing of Environment, 113(5)
https://doi.org/10.1016/j.rse.2009.01.007
"""
thermal_rad = np.copy(ts_bt).astype(np.float64)
np.reciprocal(thermal_rad, out=thermal_rad)
thermal_rad *= k2
np.exp(thermal_rad, out=thermal_rad)
thermal_rad -= 1.0
np.reciprocal(thermal_rad, out=thermal_rad)
thermal_rad *= k1
return thermal_rad.astype(np.float32)
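# ts_bt_func and thermal_rad_func are inverses of each other. Illustrative
# round trip using nominal Landsat 7 band 6 constants (K1 ~ 666.09,
# K2 ~ 1282.71; check the actual scene metadata):
#   rad = thermal_rad_func(np.array([300.0]), 666.09, 1282.71)
#   ts_bt_func(rad, 666.09, 1282.71)  # -> ~300.0 K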
def ts_lapsed_func(ts, elevation, datum, lapse_rate=6.0):
"""Lapse surface temperature based on elevation
Parameters
----------
ts : array_like
Surface temperature [K].
elevation : array_like
Elevation [m].
datum : float
lapse_rate : float
Returns
-------
ndarray
Notes
-----
References
----------
"""
ts_adjust = np.copy(elevation).astype(np.float64)
ts_adjust -= datum
ts_adjust *= (lapse_rate * -0.001)
ts_adjust += ts
return ts_adjust.astype(np.float32)
def ts_delapsed_func(ts, elevation, datum, lapse_rate=6.0):
"""Delapse surface temperature based on elevation
Parameters
----------
ts : array_like
Surface temperature [K].
elevation : array_like
Elevation [m].
datum : float
lapse_rate : float
Returns
-------
ndarray
Notes
-----
References
----------
"""
ts_adjust = np.copy(elevation).astype(np.float64)
ts_adjust -= datum
ts_adjust *= (lapse_rate * 0.001)
ts_adjust += ts
return ts_adjust.astype(np.float32)
def rl_in_func(tau, ts, ea_coef1=0.85, ea_coef2=0.09):
"""Incoming Longwave Radiation
Parameters
----------
tau : array_like
Broadband atmospheric transmissivity.
ts : array_like
Surface temperature [K].
ea_coef1 : float, optional
Empirical coefficient for computing ea (the default is 0.85 per [1]_).
ea_coef2 : float, optional
Empirical coefficient for computing ea (the default is 0.09 per [1]_).
Returns
-------
ndarray
Notes
-----
ea = 0.85 * (-log(tau)) ** 0.09
rl_in = 5.67E-8 * ea * (ts ** 4)
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
rl_in = np.copy(tau).astype(np.float64)
np.log(rl_in, out=rl_in)
np.negative(rl_in, out=rl_in)
np.power(rl_in, ea_coef2, out=rl_in)
rl_in *= (ea_coef1 * 5.67E-8)
rl_in *= np.power(ts, 4)
return rl_in.astype(np.float32)
def rl_out_func(rl_in, ts, em_0):
"""Outgoing Longwave Radiation (Emitted + Reflected)
Parameters
----------
rl_in : array_like
Incoming longwave radiation [W m-2].
ts : array_like
Surface temperature [K].
em_0 : array_like
Broadband surface emissivity.
Returns
-------
ndarray
Notes
-----
rl_out = 5.67E-8 * em_0 * (ts ** 4) + rl_in * (1 - em_0)
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
rl_out = np.copy(ts).astype(np.float64)
np.power(rl_out, 4, out=rl_out)
rl_out *= em_0
rl_out *= 5.67E-8
rl_out += rl_in
rl_out -= em_0 * rl_in
return rl_out.astype(np.float32)
def rs_in_func(cos_theta, tau, dr, gsc=1367.0):
"""Incoming Shortwave Radiation
Parameters
----------
cos_theta : array_like
tau : array_like
dr : float
gsc : float, optional
Solar constant [W m-2] (the default is 1367.0).
Returns
-------
ndarray
Notes
-----
rs_in = gsc * cos_theta * tau * dr
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
rs_in = np.copy(cos_theta).astype(np.float64)
rs_in *= tau
rs_in *= (gsc * dr)
return rs_in.astype(np.float32)
def rs_out_func(rs_in, albedo_sur):
"""Outgoing Shortwave Radiation
Parameters
----------
rs_in : array_like
Incoming shortwave radiation [W m-2].
albedo_sur : array_like
Surface albedo.
Returns
-------
ndarray
Notes
-----
rs_out = rs_in * albedo
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
rs_out = np.copy(rs_in).astype(np.float64)
rs_out *= albedo_sur
return rs_out.astype(np.float32)
def rn_func(rs_in, rs_out, rl_in, rl_out):
"""Net Radiation
Parameters
----------
rs_in : array_like
Incoming shortwave radiation [W m-2].
rs_out : array_like
Outgoing shortwave radiation [W m-2].
rl_in : array_like
Incoming longwave radiation [W m-2].
rl_out : array_like
Outgoing longwave radiation [W m-2].
Returns
-------
ndarray
Notes
-----
rn = rs_in - rs_out + rl_in - rl_out
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
rn = np.copy(rs_in)
rn -= rs_out
rn += rl_in
rn -= rl_out
return rn
def rn_24_func(albedo_sur, rs_in, lat, doy, cs=110):
"""Daily Net Radiation
Parameters
----------
albedo_sur : array_like
Surface albedo.
rs_in : array_like
Incoming shortwave radiation [W m-2]
lat : array_like
Latitude [rad].
doy : int
Day of year.
cs : float
Slob calibration coefficient (the default is 110 W m-2 [1]_).
Returns
-------
ndarray
Notes
-----
This function is calling the et_common.ra_daily_func() but could be
changed to use the refet.calcs._ra_daily() function instead.
rnl_24 = cs * (rs_in / ra)
rn_24 = (1 - albedo_sur) * rs_in - rnl_24
References
----------
.. [1] de Bruin, H.A.R. (1987). From Penman to Makkink. Proceedings and
Information: TNO Committee on Hydrological Research No. 39,
J. C. Hooghart, Ed., Netherlands Organization for Applied Scientific
Research, 5-30.
.. [2] de Bruin and Stricker (2000).
.. [3] Bastiaanssen, W., Noordman, E., Pelgrum, H., Davids, G., Thoreson, B.,
Allen, R. (2005). SEBAL model with remotely sensed data to improve
water-resources management under actual field conditions.
Journal of Irrigation and Drainage Engineering, 131(1).
https://doi.org/10.1061/(ASCE)0733-9437(2005)131:1(85)
.. [4] de Bruin, H.A.R, Trigo, I.F., Bosveld, F.C., & Meirink, J.F. (2016).
A thermodynamically based model for actual evapotranspiration of an
extensive grass field close to FAO reference, suitable for remote
sensing application. Journal of Hydrometeorology 17.
https://doi.org/10.1175/JHM-D-15-0006.1
"""
# Daily net longwave radiation via the Slob approach: rnl_24 = cs * (rs_in / ra)
rnl_24 = et_common.ra_daily_func(lat=lat, doy=doy)
np.reciprocal(rnl_24, out=rnl_24)
rnl_24 *= rs_in
rnl_24 *= cs
rn_24 = 1 - albedo_sur
rn_24 *= rs_in
rn_24 -= rnl_24
return rn_24
def g_ag_func(lai, ts, rn, coef1=1.80, coef2=0.084):
"""Calculate ground heat flux for agriculture using METRIC approach
Parameters
----------
lai : array_like
Leaf area index.
ts : array_like
Surface temperature [K].
rn : array_like
Net radiation [W m-2].
coef1 : float
Coefficient (the default is 1.80).
coef2 : float
Coefficient (the default is 0.084).
Returns
-------
ndarray
Notes
-----
Coef1 and coef2 are exposed in order to apply a custom G function.
g = np.where(
lai >= 0.5,
(0.05 + (0.18 * exp(-0.521 * lai))) * rn,
coef1 * (ts - 273.16) + (coef2 * rn))
References
----------
.. [1] Tasumi (2003)
.. [2] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
a = np.copy(lai).astype(np.float64)
a *= -0.521
np.exp(a, out=a)
a *= 0.18
a += 0.05
a *= rn
b = ts - 273.16
b *= coef1
b /= rn
b += coef2
b *= rn
return np.where(lai >= 0.5, a, b).astype(np.float32)
def g_sebal_func(ts, albedo_sur, ndvi):
"""Calculate ground heat flux using SEBAL approach
Parameters
----------
ts : array_like
Surface temperature [K].
albedo_sur : array_like
Surface albedo.
ndvi : array_like
Normalized difference vegetation index.
Returns
-------
ndarray
Notes
-----
In [1]_, ts is listed as "radiometric surface temperature".
g = (ts - 273.15) * (0.0038 + 0.0074 * albedo) * (1 - 0.98 * ndvi ** 4)
References
----------
.. [1] Bastiaanssen, W. (2000). SEBAL-based sensible and latent heat fluxes
in the irrigated Gediz Basin, Turkey. Journal of Hydrology, 229(1-2).
https://doi.org/10.1016/S0022-1694(99)00202-4
.. [2] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
g = np.copy(ndvi).astype(np.float64)
np.power(g, 4, out=g)
g *= -0.98
g += 1
g *= ts
g *= (albedo_sur * 0.0074 + 0.0038)
return g
def zom_func(lai, landuse, zom_remap):
"""Generate Zom (roughness) values based on the landuse type
Parameters
----------
lai : ndarray
Leaf area index.
landuse : ndarray
Landuse.
zom_remap : dict
Mapping of landuse types to zom values in JSON format with key/value
both string type (i.e. "11" : "0.005").
Returns
-------
ndarray
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
zom = np.full(lai.shape, 0.005, dtype=np.float32)
for lu_code in np.unique(landuse):
# What should be the default zom value?
# Convert the landuse array values to strings for now.
try:
lu_value = zom_remap[str(lu_code)]
except KeyError:
lu_value = 'lai'
if lu_value.lower() == 'perrier':
zom[landuse == lu_code] = perrier_zom_func(lai[landuse == lu_code])
elif lu_value.lower() == 'lai':
zom[landuse == lu_code] = np.maximum(
lai[landuse == lu_code] * 0.018, 0.005)
else:
zom[landuse == lu_code] = float(lu_value)
zom[np.isnan(lai)] = np.nan
return zom
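# Illustrative zom_remap usage (landuse codes and values here are hypothetical,
# not a calibrated lookup): keys are landuse codes as strings, values are either
# a fixed roughness in metres or the keywords 'lai' / 'perrier'.
#   zom_remap = {"82": "lai", "42": "perrier", "11": "0.0005"}
#   zom = zom_func(lai, landuse, zom_remap)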
def perrier_zom_func(lai):
"""Perrier Zom
Parameters
----------
lai : ndarray
Leaf area index.
Returns
-------
ndarray
Notes
-----
Minimum zom is 0.005 m equal to bare soil. Dec 28 09, JK
The use of the function is applicable for tall vegetation (forests).
The canopy distribution coefficient, a, is assumed to be a=0.6,
i.e. slightly top heavy canopy.
The vegetation height is estimated as h=2.5LAI (LAImax=6 -> 2.5*6=15 m),
compared to h=0.15LAI for agricultural crops.
References
----------
.. [1] Perrier, A. (1982). Land surface processes: Vegetation.
In Land Surface Processes in Atmospheric General Circulation Models;
Eagelson, P.S., Ed.; Cambridge University Press: Cambridge, UK;
pp. 395-448.
.. [2] Allen, R., Irmak, A., Trezza, R., Hendrickx, J., Bastiaanssen, W.,
& Kjaersgaard, J. (2011). Satellite-based ET estimation in agriculture
using SEBAL and METRIC. Hydrologic Processes, 25, 4011-4027.
https://doi.org/10.1002/hyp.8408
.. [3] Santos (2012)
"""
perrier = -1.2 * lai
perrier /= 2.
np.exp(perrier, out=perrier)
perrier = ((1 - perrier) * perrier) * (2.5 * lai)
return np.maximum(perrier, 0.005, dtype=np.float32)
# The following equations are float specific, separate from equations below.
# This is indicated by having "calibration" in the function name.
def le_calibration_func(etr, kc, ts):
"""Latent heat flux at the calibration points
Parameters
----------
etr : scalar or array_like
kc : scalar or array_like
ts : scalar or array_like
Surface temperature [K].
Returns
-------
scalar or array_like
Notes
-----
1000000 / 3600 in [1] was simplified to 2500 / 9
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
return etr * kc * (2.501 - 2.361E-3 * (ts - 273)) * 2500 / 9
def dt_calibration_func(h, rah, density):
"""
Parameters
----------
h : scalar or array_like
Sensible heat flux [W m-2].
rah : scalar or array_like
Aerodynamic resistance to heat transport [s m-1].
density : scalar or array_like
Air density [kg m-3].
Returns
-------
scalar or array_like
Notes
-----
The 1004.0 term is the specific heat capacity of air [J kg-1 K-1].
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
return (h * rah) / (density * 1004.)
def l_calibration_func(h, air_density, u_star, ts):
"""
Parameters
----------
h : scalar or array_like
Sensible heat flux [W m-2].
air_density : scalar or array_like
Air density [kg m-3].
u_star : scalar or array_like
Friction velocity [m s-1].
ts : scalar or array_like
Surface temperature [K].
Returns
-------
scalar or array_like
Notes
-----
Return -1000 if h is zero to avoid dividing by zero.
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
return np.where(
h != 0,
((-1004. * air_density * (u_star ** 3.0) * ts) / (0.41 * 9.81 * h)),
-1000)
def h_func(air_density, dt, rah):
"""Sensible Heat Flux [W/m^2]
Parameters
----------
air_density : array_like
Air density [kg m-3].
dt : array_like
Near surface temperature difference [K].
rah : array_like
Aerodynamic resistance to heat transport [s m-1].
Returns
-------
ndarray
Notes
-----
h = air_density * 1004 * dt / rah
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
h = np.array(air_density, copy=True, ndmin=1)
h *= 1004.
h *= dt
h /= rah
return h
def u_star_func(u3, z3, zom, psi_z3, wind_coef=1):
"""
Parameters
----------
u3 : array_like
z3 : array_like
zom : array_like
psi_z3 : array_like
wind_coef : float, optional
(the default is 1).
Returns
-------
u_star : ndarray
Friction velocity [m s-1].
Notes
-----
u_star = (u3 * wind_coef * 0.41) / (log(z3 / zom) - psi_z3)
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
u_star = np.array(zom, copy=True, ndmin=1)
np.reciprocal(u_star, out=u_star)
u_star *= z3
oldsettings = np.geterr()
np.seterr(invalid='ignore')
np.log(u_star, out=u_star)
np.seterr(invalid=oldsettings['invalid'])
u_star -= psi_z3
np.reciprocal(u_star, out=u_star)
u_star *= (u3 * wind_coef * 0.41)
return u_star
def rah_func(z_flt_dict, psi_z2, psi_z1, u_star):
"""
Parameters
----------
z_flt_dict : dict
psi_z2 : array_like
psi_z1 : array_like
u_star : array_like
Friction velocity [m s-1].
Returns
-------
rah : ndarray
Aerodynamic resistance to heat transport [s m-1].
Notes
-----
rah = ((log(z2 / z1) - psi_z2 + psi_z1) / (0.41 * u_star))
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
rah = np.array(psi_z1, copy=True, ndmin=1)
rah -= psi_z2
rah += math.log(z_flt_dict[2] / z_flt_dict[1])
rah /= 0.41
rah /= u_star
return rah
def density_func(elev, ts, dt):
"""
Parameters
----------
elev : array_like
Elevation [m].
ts : array_like
Surface temperature [K].
dt : array_like
Near surface temperature difference [K].
Returns
-------
air_density : ndarray
Air density [kg m-3].
Notes
-----
den = (1000. * 101.3 * (((293.15 - 0.0065 * elev) / 293.15) ** 5.26) /
(1.01 * (ts - dt) * 287))
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
air_density = np.array(elev, copy=True, ndmin=1).astype(np.float64)
air_density *= -0.0065
air_density += 293.15
air_density /= 293.15
np.power(air_density, 5.26, out=air_density)
air_density *= ((1000 * 101.3) / (1.01 * 287))
air_density /= (ts - dt)
return air_density.astype(np.float32)
def x_func(l, z):
"""
Parameters
----------
l : array_like
z : array_like
Returns
-------
ndarray
Notes
-----
x = np.where(l < 0, power((1 - 16 * z / l), 0.25), 0)
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
x = np.array(l, copy=True, ndmin=1)
l_mask = (x > 0)
np.reciprocal(x, out=x)
x *= (-16 * z)
x += 1
np.power(x, 0.25, out=x)
x[l_mask] = 0
del l_mask
return x
def psi_func(l, z_index, z):
"""
Parameters
----------
l : array_like
z_index : int
z : array_like
Returns
-------
ndarray
Notes
-----
psi(3) = np.where(
l > 0,
(-5 * 2 / l),
((2 * log((1 + x) / 2)) + log((1 + (x ** 2)) / 2) - (2 * atan(x)) + (pi / 2)))
psi = np.where(l > 0, (-5 * z / l), (2 * log((1 + (x ** 2)) / 2.)))
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
# Begin calculation of Psi unstable
x = x_func(l, z)
psi = np.array(x, copy=True, ndmin=1)
np.power(x, 2, out=psi)
psi += 1
psi /= 2.
oldsettings = np.geterr()
np.seterr(invalid='ignore')
np.log(psi, out=psi)
np.seterr(invalid=oldsettings['invalid'])
# Adjust Psi unstable calc based on height
if z_index == 3:
psi_temp = np.copy(x)
psi_temp += 1
psi_temp /= 2.
oldsettings = np.geterr()
np.seterr(invalid='ignore')
np.log(psi_temp, out=psi_temp)
np.seterr(invalid=oldsettings['invalid'])
psi_temp *= 2.
psi += psi_temp
del psi_temp
psi_temp = np.copy(x)
np.arctan(x, out=psi_temp)
psi_temp *= 2.
psi -= psi_temp
del psi_temp
psi += (0.5 * math.pi)
else:
psi *= 2.
del x
# Calculate Psi stable for all pixels
psi_stable = np.array(l, copy=True, ndmin=1)
np.reciprocal(psi_stable, out=psi_stable)
if z_index == 3:
psi_stable *= (-5 * 2)
else:
psi_stable *= (-5 * z)
# Only keep Psi stable for pixels with l > 0
l_mask = np.array(l, copy=True, ndmin=1) > 0
psi[l_mask] = psi_stable[l_mask]
return psi
# return np.where((l > 0), psi_stable, psi_unstable)
# The following equations are array specific and are separate from the
# "calibration" functions above
def dt_func(ts, a, b):
"""
Parameters
----------
ts : array_like
Surface temperature [K]. As described in [1]_, this should be the
delapsed surface temperature.
a : float
Calibration parameter.
b : float
Calibration parameter.
Returns
-------
ndarray
Notes
-----
dt = a * ts + b
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
dt = np.copy(ts)
dt *= a
dt += b
return dt
def l_func(dt, u_star, ts, rah):
"""
Parameters
----------
dt : array_like
Near surface temperature difference [K].
u_star : array_like
Friction velocity [m s-1].
ts : array_like
Surface temperature [K].
rah : array_like
Aerodynamic resistance to heat transport [s m-1].
Returns
-------
l : ndarray
Notes
-----
dt_mod = np.where((np.absolute(dt)==0.), -1000., dt)
l = -((u_star ** 3) * ts * rah) / (0.41 * 9.81 * dt_mod)
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
# Change zeros to -1000 to avoid divide by zero (work on a copy so the
# caller's dt array is not modified in place)
dt = np.where(dt == 0, -1000, dt)
l = np.power(u_star, 3)
l *= ts
l *= rah
l /= -(0.41 * 9.81)
l /= dt
return l
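# Sketch of how the functions above are typically iterated in a METRIC-style
# aerodynamic calibration (this loop is illustrative, not code from this
# module): starting from neutral stability, u* -> rah -> dT -> H -> L -> psi
# are recomputed until rah converges.
#   psi_z3 = psi_z2 = psi_z1 = 0.
#   for _ in range(n_iter):
#       u_star = u_star_func(u3, z3, zom, psi_z3)
#       rah = rah_func(z_flt_dict, psi_z2, psi_z1, u_star)
#       dt = dt_func(ts, a, b)
#       air_density = density_func(elev, ts, dt)
#       h = h_func(air_density, dt, rah)
#       l = l_func(dt, u_star, ts, rah)
#       psi_z3 = psi_func(l, 3, z3)
#       psi_z2 = psi_func(l, 2, z_flt_dict[2])
#       psi_z1 = psi_func(l, 1, z_flt_dict[1])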
def le_func(rn, g, h):
"""Latent Heat Flux [W/m^2]
Parameters
----------
rn : array_like
Net radiation [W m-2].
g : array_like
Ground heat flux [W m-2].
h : array_like
Sensible heat flux into the air [W m-2]
Returns
-------
ndarray
Notes
-----
le = rn - g - h
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
le = np.copy(rn)
le -= g
le -= h
return le
def ef_func(le, rn, g):
"""Evaporative fraction
Parameters
----------
le : array_like
Latent heat flux [W m-2].
rn : array_like
Net radiation [W m-2].
g : array_like
Ground heat flux [W m-2].
Returns
-------
ndarray
Notes
-----
ef = le / (rn - g)
References
----------
.. [1] Bastiaanssen, W., Noordman, E., Pelgrum, H., Davids, G., Thoreson, B.,
Allen, R. (2005). SEBAL model with remotely sensed data to improve
water-resources management under actual field conditions.
Journal of Irrigation and Drainage Engineering, 131(1).
https://doi.org/10.1061/(ASCE)0733-9437(2005)131:1(85)
.. [2] Allen, R., Irmak, A., Trezza, R., Hendrickx, J., Bastiaanssen, W.,
& Kjaersgaard, J. (2011). Satellite-based ET estimation in agriculture
using SEBAL and METRIC. Hydrologic Processes, 25, 4011-4027.
https://doi.org/10.1002/hyp.8408
"""
ef = np.copy(rn)
ef -= g
np.reciprocal(ef, out=ef)
ef *= le
return ef
def heat_vaporization_func(ts):
"""Latent heat of vaporization [J kg-1]
Parameters
----------
ts : array_like
Surface temperature [K].
Returns
-------
ndarray
Notes
-----
lambda = (2.501 - 0.00236 * (ts - 273.15)) * 1E6
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
heat_vaporization = np.copy(ts).astype(np.float64)
heat_vaporization -= 273.15
heat_vaporization *= -0.00236
heat_vaporization += 2.501
heat_vaporization *= 1E6
return heat_vaporization.astype(np.float32)
def et_inst_func(le, ts):
"""ET instantaneous [mm/hr]
Parameters
----------
le : array_like
Latent heat flux [W m-2].
ts : array_like
Surface temperature [K].
Returns
-------
ndarray
Notes
-----
et_inst = 3600 * le / heat_vaporization
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
et_inst = np.copy(le).astype(np.float64)
et_inst *= 3600
et_inst /= heat_vaporization_func(ts)
return et_inst.astype(np.float32)
def etrf_func(et_inst, etr):
"""ET Reference Fraction - ETrF
Parameters
----------
et_inst : array_like
ET at time of overpass [mm hr-1].
etr : array_like
Reference ET at time of overpass [mm hr-1].
Returns
-------
array_like
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
return et_inst / etr
def et_24_func(etr_24hr, etrf):
"""ET 24hr [mm/day]
Parameters
----------
etr_24hr : array_like
Daily reference ET [mm].
etrf : array_like
Fraction of reference ET (ETrF).
Returns
-------
array_like
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
return etr_24hr * etrf
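# Hedged usage sketch (not part of the original module): chains the array
# functions above on synthetic values to show the instantaneous-to-daily ET
# path. All numbers below are illustrative only.
if __name__ == "__main__":
    _rn = np.array([500.0, 450.0])        # net radiation [W m-2]
    _g = np.array([50.0, 40.0])           # ground heat flux [W m-2]
    _h = np.array([100.0, 120.0])         # sensible heat flux [W m-2]
    _ts = np.array([300.0, 305.0])        # surface temperature [K]
    _etr = np.array([0.8, 0.9])           # hourly reference ET [mm hr-1]
    _etr_24hr = np.array([7.5, 8.0])      # daily reference ET [mm]
    _le = le_func(_rn, _g, _h)            # latent heat flux [W m-2]
    _et_inst = et_inst_func(_le, _ts)     # instantaneous ET [mm hr-1]
    _etrf = etrf_func(_et_inst, _etr)     # reference ET fraction
    print(et_24_func(_etr_24hr, _etrf))   # daily ET [mm day-1]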
|
from PyQt5 import QtCore, QtGui, QtWidgets
class InstaLog(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(960, 540)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(960, 540))
MainWindow.setMaximumSize(QtCore.QSize(960, 540))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("script/gui/img/logo1.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setStyleSheet("background-color: rgb(245, 245, 245);")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(320, 80, 300, 120))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setMinimumSize(QtCore.QSize(200, 120))
self.label_2.setMaximumSize(QtCore.QSize(300, 300))
self.label_2.setStyleSheet("image:url(script/gui/img/instagram letter icon1.png);")
self.label_2.setText("")
self.label_2.setScaledContents(True)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.textEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.textEdit_3.setGeometry(QtCore.QRect(280, 205, 400, 40))
self.textEdit_3.setMinimumSize(QtCore.QSize(400, 40))
self.textEdit_3.setMaximumSize(QtCore.QSize(400, 40))
self.textEdit_3.setStyleSheet(" border-radius: 10px;\n"
"\n"
"background-color: rgb(234, 234, 234);")
self.textEdit_3.setObjectName("textEdit_3")
self.textEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.textEdit_4.setGeometry(QtCore.QRect(280, 270, 400, 40))
self.textEdit_4.setMinimumSize(QtCore.QSize(400, 40))
self.textEdit_4.setMaximumSize(QtCore.QSize(400, 40))
self.textEdit_4.setEchoMode(QtWidgets.QLineEdit.Password)
self.textEdit_4.setStyleSheet("\n"
"border-radius: 10px;\n"
"background-color: rgb(234, 234, 234);")
self.textEdit_4.setObjectName("textEdit_4")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(350, 330, 251, 41))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("background-color: rgb(16, 140, 255);\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 10px;")
self.pushButton.setAutoDefault(False)
self.pushButton.setDefault(False)
self.pushButton.setFlat(False)
self.pushButton.setObjectName("pushButton")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Albie"))
self.textEdit_3.setPlaceholderText(_translate("MainWindow", "Username"))
self.textEdit_4.setPlaceholderText(_translate("MainWindow", "Password"))
self.pushButton.setText(_translate("MainWindow", "Log in"))
if __name__ == "__main__":
import sys
import os
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = InstaLog()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-18 14:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('numbas_lti', '0015_attempt_deleted'),
]
operations = [
migrations.AddField(
model_name='resource',
name='report_incomplete_marks',
field=models.BooleanField(default=True, verbose_name='Count scores for incomplete attempts?'),
),
migrations.AlterField(
model_name='resource',
name='include_incomplete_attempts',
field=models.BooleanField(default=True, verbose_name='Include incomplete attempts in grading?'),
),
migrations.AlterField(
model_name='resource',
name='show_incomplete_marks',
field=models.BooleanField(default=True, verbose_name='Show score of in-progress attempts to students?'),
),
]
|
#!/usr/bin/env python
import serial
import signal
import sys
import time
import re
import config as cfg
import os
# Pretty print for debug messages
from debug_message import DebugMessage
dm = DebugMessage(enable_logging=True)
is_running = True
def signal_handler(*args):
dm.print_warning("SIGINT detected, closing...")
global is_running
is_running = False
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def make_data_folder(base_path):
# Make a new dir to store data.
base_path = os.path.expanduser(base_path)
session_dir_name = time.strftime('%Y_%m_%d__%H_%M_%S_%p')
session_full_path = os.path.join(base_path, session_dir_name)
logging_path = session_full_path + "_imu.log"
dm.init_logging(logging_path)
if not os.path.exists(session_full_path):
os.makedirs(session_full_path)
return session_full_path
# Helper for rate limiting
def rate_limit(start, rate=0.5):
end = time.time()
delta = end - start
sleep_for = rate - delta
    # Sleep only if there is time left in the period
    if sleep_for > 0:
        time.sleep(sleep_for)
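# Hedged usage sketch (not part of the original script): keep a loop running
# at roughly 50 Hz by sleeping out the remainder of each period. The loop
# body below is a placeholder for the real serial reads/writes.
def _rate_limit_demo(iterations=3):
    for _ in range(iterations):
        start = time.time()
        # ... real work (e.g. process_input / send_vehicle_commands) here ...
        rate_limit(start, rate=0.02)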
def setup_serial_ports():
port_in = serial.Serial(port=cfg.port_in,
baudrate= cfg.port_in_baud,
timeout=0.0)
port_out = serial.Serial(port=cfg.port_out,
baudrate= cfg.port_out_baud,
timeout=0.0)
imu_port = serial.Serial(port=cfg.imu_port,
baudrate=cfg.imu_baud,
timeout=0.0)
port_in.flush()
port_out.flush()
imu_port.flush()
dm.print_info("Serial port ready")
return port_in, port_out, imu_port
def send_vehicle_commands(old_steering, old_throttle, steering, throttle, port):
"""
Sends steering and throttle to the kart
Steering: Full CC - CW
(180 - 0)
Throttle: Dead - Full - Brake
(92 - 180 - <90)
"""
# dm.print_info("S: {} T: {}".format(steering, throttle))
# Steering
if old_steering != steering:
steering_out = ('S%d\n' % int(steering)).encode('ascii')
port.write(steering_out)
# Clamp throttle
if old_throttle != throttle:
if 88 <= throttle <= 92:
throttle = 90
else:
throttle = min(throttle, 110)
throttle_out = ('D%d\n' % int(throttle)).encode('ascii')
port.write(throttle_out)
port.flush()
# Shared serial-parsing state updated by process_input()
buffer_in = ''
buffer_out = ''
button_arduino_in = 0
button_arduino_out = 0
odometer_ticks = 0
milliseconds = 0
def process_input(port_in, port_out):
"""Reads steering, throttle, aux1 and button data reported from the arduinos.
Returns: (steering, throttle, button_arduino_in, button_arduino_out)
Return values may be None if the data from the arduino isn't related to the
steering or throttle.
"""
# Input is buffered because sometimes partial lines are read
global button_arduino_in, button_arduino_out, buffer_in, buffer_out, odometer_ticks, milliseconds
try:
buffer_in += port_in.read(port_in.in_waiting).decode('ascii')
buffer_out += port_out.read(port_out.in_waiting).decode('ascii')
except UnicodeDecodeError:
# We can rarely get bad data over the serial port. The error looks like this:
# buffer_in += port_in.read(port_in.in_waiting).decode('ascii')
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 0: ordinal not in range(128)
buffer_in = ''
buffer_out = ''
print("Mysterious serial port error. Let's pretend it didn't happen. :)")
# Init steering, throttle and aux1.
steering, throttle, aux1 = None, None, None
telemetry = None
# Read lines from input Arduino
while '\n' in buffer_in:
line, buffer_in = buffer_in.split('\n', 1)
match = re.search(r'(\d+) (\d+) (\d+)', line)
if match:
steering = int(match.group(1))
throttle = int(match.group(2))
aux1 = int(match.group(3))
if line[0:1] == 'S':
# This is just a toggle button
button_arduino_in = 1 - button_arduino_in
print "ButtonAIn toggle"
# Read lines from output Arduino
while '\n' in buffer_out:
line, buffer_out = buffer_out.split('\n', 1)
if line[0:3] == 'Mil':
sp = line.split('\t')
milliseconds = int(sp[1])
odometer_ticks += 1
if line[0:6] == 'Button':
sp = line.split('\t')
button_arduino_out = int(sp[1])
return steering, throttle, aux1, button_arduino_in, button_arduino_out
imu_stream = ''
def process_imu(imu_port):
global imu_stream
try:
imu_stream += imu_port.read(imu_port.in_waiting).decode('ascii')
except UnicodeDecodeError:
imu_stream = ''
print("Imu stream read error")
telemetry = None
while '\n' in imu_stream:
line, imu_stream = imu_stream.split('\n', 1)
if line[0:3] == 'IMU':
# quat.xyzw, gyro.xyz, acc.xyz
# IMU -0.0233 -0.0109 -0.0178 0.9995 0.0000 0.0000 0.0000 0.0400 -0.0400 0.1900
sp = line.split(' ')
try:
quat = [float(sp[1]), float(sp[2]), float(sp[3]), float(sp[4])]
except:
quat = [0.0, 0.0, 0.0, 0.0]
try:
gyro = [float(sp[5]), float(sp[6]), float(sp[7])]
except:
gyro = [0.0, 0.0, 0.0]
try:
accel = [float(sp[8]), float(sp[9]), float(sp[10])]
except:
accel = [0.0, 0.0, 0.0]
telemetry = quat + gyro + accel
return telemetry
def main():
make_data_folder("./data")
dm.print_info("Starting carputer passthrough")
dm.print_info("Setting up serial ports")
port_in, port_out, imu_port = setup_serial_ports()
# Init values
steering = 0
steering_old = 0
throttle = 0
throttle_old = 0
aux = 0
aux_old = 1000
telemetry = ["0", "1", "2"]
global is_running
while is_running:
start = time.time()
# Get the commanded input from the arduino
new_steering, new_throttle, new_aux, b1, b2 = process_input(port_in, port_out)
telemetry = process_imu(imu_port)
# Check for valid input
        if new_steering is not None:
            steering = new_steering
        if new_throttle is not None:
            throttle = new_throttle
        if new_aux is not None:
            aux = new_aux
        if telemetry is not None:
frames = [str(122).zfill(5)]
telemetry = frames + telemetry
dm.log_data(telemetry)
# dm.print_debug("S: {}, T: {}, aux: {}".format(steering, throttle, aux))
# Simple passthrough
send_vehicle_commands(steering_old, throttle_old, steering, throttle, port_out)
# Update the values
aux_old = aux
steering_old = steering
throttle_old = throttle
# Rate limit so we aren't destroying the CPU
rate_limit(start, 0.001)
if __name__ == "__main__":
main()
|
import os
import random
import string
from configparser import ConfigParser
import netifaces
from scale.logger import create_logger
from scale.network.node import Node
class VPNManager:
def __init__(self, config):
self.logger = create_logger('VPN')
self.config = config
self.nodes: list[Node] = []
self.iface = self.config.network['interface']
# Read the interfaces from the host
def get_interfaces(self):
return netifaces.interfaces()
def bootstrap(self):
# Check if the VPN is running
if (len(self.config.network['passKey']) == 0):
self.logger.info('No passkey found. Preparing one')
self.config.network['passKey'] = self.generate_pass_key(32)
self.config.save()
if (len(self.config.network['publicKey']) < 1 or len(self.config.network['privateKey']) < 1):
self.logger.info('No keys found. Generating...')
self.generate_keys()
self.generate_wg_config()
self.logger.info('Bootstrapped VPN...')
def check_if_wg_running(self):
        return self.iface in self.get_interfaces()
def generate_wg_config(self) -> None:
config_path = os.path.join(
'/etc/wireguard', '{}.conf'.format(self.iface))
self.logger.info('Generating WG config [{}]'.format(config_path))
with open(config_path, 'w') as f:
iconfig = ConfigParser()
iconfig.optionxform = str
iconfig.add_section('Interface')
iconfig.set('Interface', 'PrivateKey',
self.config.network['privateKey'])
            iconfig.set('Interface', 'Address', '10.0.1.1/24')
            iconfig.set('Interface', 'ListenPort', str(
                self.config.network['discoveryPort'] - 1))
            for node in self.nodes:
                # WireGuard peers belong in a [Peer] section; re-adding the
                # already-present [Interface] section would raise
                # DuplicateSectionError. ConfigParser allows only one section
                # per name, so this still supports a single peer entry.
                iconfig.add_section('Peer')
                iconfig.set('Peer', 'PublicKey', node.public_key)
                # TODO: Add support for multiple interfaces~
                for iface in node.interfaces:
                    if iface.name == self.iface:
                        # The peer's routable address goes in AllowedIPs
                        iconfig.set('Peer', 'AllowedIPs', iface.ip)
iconfig.write(f)
def generate_pass_key(self, length):
return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def generate_keys(self):
privateKey = os.popen('wg genkey').read().replace('\n', '')
publicKey = os.popen('echo {} | wg pubkey'.format(
privateKey)).read().replace('\n', '')
self.config.network['privateKey'] = privateKey
self.config.network['publicKey'] = publicKey
self.logger.info('Private key: {}'.format(privateKey))
self.logger.info('Public key: {}'.format(publicKey))
self.config.save()
def connect(self):
# Restart with the new config
if self.check_if_wg_running():
self.stop()
self.generate_wg_config()
exit_code = os.system('wg-quick up {}'.format(self.iface))
if (exit_code == 0):
self.logger.info('WireGuard started')
else:
self.logger.fatal('WireGuard failed to start')
def stop(self):
exit_code = os.system(
'wg-quick down {}'.format(self.iface))
if (exit_code == 0):
self.logger.info('WireGuard stopped')
else:
self.logger.fatal('WireGuard failed to stop')
|
import re
import htmlgenerator as hg
from django import forms
from django.contrib.auth.decorators import user_passes_test
from django.utils.translation import gettext_lazy as _
from django_celery_results.models import TaskResult
from bread import layout
from bread.layout import admin
from bread.layout.components.datatable import DataTableColumn
from bread.utils.urls import aslayout
from bread.views import BrowseView
R = layout.grid.Row
C = layout.grid.Col
F = layout.forms.FormField
TR = layout.datatable.DataTable.row
TD = layout.datatable.DataTableColumn
@user_passes_test(lambda user: user.is_superuser)
@aslayout
def maintenancesettings(request):
# Add the view's header
ret = layout.grid.Grid(R(C(hg.H3(_("Maintenance")))), gutter=False)
# Add the Package Information modal
ret.append(
R(
C(
hg.H4(_("Packages")),
admin.maintainance_package_layout(request),
),
C(
hg.H4(_("Optimize database")),
admin.maintenance_database_optimization(request),
hg.H4(_("Rebuild search index"), _style="margin-top: 3rem;"),
admin.maintenance_search_reindex(request),
),
)
)
return ret
@aslayout
def widgetpreview(request):
class ConfigForm(forms.Form):
with_label = forms.BooleanField(required=False)
with_helptext = forms.BooleanField(required=False)
with_errors = forms.BooleanField(required=False)
disabled = forms.BooleanField(required=False)
CHOICES = (
("choice1", "Choice 1"),
("choice2", "Choice 2"),
("choice3", "Choice 3"),
("choice4", "Choice 4"),
)
widgets = {
forms.TextInput: (forms.CharField, {"widget": forms.TextInput}),
forms.NumberInput: (forms.DecimalField, {"widget": forms.NumberInput}),
forms.EmailInput: (forms.EmailField, {"widget": forms.EmailInput}),
forms.URLInput: (forms.URLField, {"widget": forms.URLInput}),
forms.PasswordInput: (forms.CharField, {"widget": forms.PasswordInput}),
forms.HiddenInput: (forms.CharField, {"widget": forms.HiddenInput}),
forms.DateInput: (forms.DateField, {"widget": forms.DateInput}),
forms.DateTimeInput: (forms.DateTimeField, {"widget": forms.DateTimeInput}),
forms.TimeInput: (forms.TimeField, {"widget": forms.TimeInput}),
forms.Textarea: (forms.CharField, {"widget": forms.Textarea}),
forms.CheckboxInput: (forms.BooleanField, {"widget": forms.CheckboxInput}),
forms.Select: (forms.ChoiceField, {"widget": forms.Select, "choices": CHOICES}),
forms.NullBooleanSelect: (
forms.NullBooleanField,
{"widget": forms.NullBooleanSelect},
),
forms.SelectMultiple: (
forms.MultipleChoiceField,
{"widget": forms.SelectMultiple, "choices": CHOICES},
),
forms.RadioSelect: (
forms.ChoiceField,
{"widget": forms.RadioSelect, "choices": CHOICES},
),
forms.CheckboxSelectMultiple: (
forms.ChoiceField,
{"widget": forms.CheckboxSelectMultiple, "choices": CHOICES},
),
forms.FileInput: (forms.FileField, {"widget": forms.FileInput}),
forms.ClearableFileInput: (
forms.FileField,
{"widget": forms.ClearableFileInput},
),
}
HELPTEXT = "This is a piece of helptext, maximized for helpfulness"
ERRORS = [
"This is an example of an error",
"This is a second errors, but actually none of them are real errors, so do not worry",
]
def nicefieldname(cls):
return re.sub(r"(?<!^)(?=[A-Z])", "_", cls.__name__)
configform = ConfigForm(request.GET)
    # The config form only has optional BooleanFields, so it is effectively
    # always valid; fall back to the form's initial data otherwise.
    config = configform.initial
    if configform.is_valid():
        config = configform.cleaned_data
Form = type(
"Form",
(forms.Form,),
{
nicefieldname(widget): field[0](
**field[1],
**({"help_text": HELPTEXT} if config["with_helptext"] else {}),
disabled=config["disabled"]
)
for widget, field in widgets.items()
},
)
return hg.BaseElement(
hg.H3(_("Widget preview")),
layout.grid.Grid(
layout.grid.Row(
layout.grid.Col(
hg.H4(_("Widgets")),
layout.forms.Form(
Form(),
*[
F(
nicefieldname(w),
no_label=not config["with_label"],
errors=ERRORS if config["with_errors"] else None,
)
for w in widgets.keys()
]
),
),
layout.grid.Col(
hg.H4(_("Configure preview")),
layout.forms.Form(
configform,
F("with_label"),
F("with_helptext"),
F("with_errors"),
F("disabled"),
layout.forms.helpers.Submit(_("Apply")),
method="GET",
),
),
)
),
)
class TaskResultBrowseView(BrowseView):
columns = [
DataTableColumn(
layout.ObjectFieldLabel("task_id", TaskResult),
hg.DIV(
hg.C("row.task_id"),
),
"task_id",
),
DataTableColumn(
layout.ObjectFieldLabel("task_name", TaskResult),
hg.DIV(
hg.C("row.task_name"),
),
"task_name",
),
DataTableColumn(
_("Date Created"),
hg.DIV(
hg.C("row.date_created"),
),
"date_created",
),
DataTableColumn(
_("Date Completed"),
hg.DIV(
hg.C("row.date_done"),
),
"date_done",
),
"status",
"worker",
"content_type",
DataTableColumn(
_("Metadata"),
hg.DIV(
hg.C("row.meta"),
),
),
]
rowclickaction = BrowseView.gen_rowclickaction("read")
title = "Background Jobs"
|
# IMPORTATION STANDARD
from datetime import datetime
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.screener import yahoofinance_view
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "1598220000"),
("period2", "1635980400"),
],
}
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_historical(mocker):
# FORCE SINGLE THREADING
yf_download = yahoofinance_view.yf.download
def mock_yf_download(*args, **kwargs):
kwargs["threads"] = False
return yf_download(*args, **kwargs)
mocker.patch(
"openbb_terminal.stocks.screener.yahoofinance_view.yf.download",
side_effect=mock_yf_download,
)
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
# MOCK EXPORT_DATA
mocker.patch(
target="openbb_terminal.stocks.screener.finviz_view.export_data",
)
# MOCK PROGRESS_BAR
mocker.patch(
target="finvizfinance.screener.overview.progress_bar",
)
    # MOCK RANDOM.SHUFFLE
mocker.patch(
target="random.shuffle",
)
yahoofinance_view.historical(
preset_loaded="top_gainers",
limit=2,
start=datetime.strptime("2022-01-03", "%Y-%m-%d"),
type_candle="a",
normalize=True,
export="",
)
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_historical_no_d_signals(mocker):
# FORCE SINGLE THREADING
yf_download = yahoofinance_view.yf.download
def mock_yf_download(*args, **kwargs):
kwargs["threads"] = False
return yf_download(*args, **kwargs)
mocker.patch(
"openbb_terminal.stocks.screener.yahoofinance_view.yf.download",
side_effect=mock_yf_download,
)
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
# MOCK EXPORT_DATA
mocker.patch(
target="openbb_terminal.stocks.screener.finviz_view.export_data",
)
# MOCK PROGRESS_BAR
mocker.patch(
target="finvizfinance.screener.overview.progress_bar",
)
    # MOCK RANDOM.SHUFFLE
mocker.patch(
target="random.shuffle",
)
# MOCK D_SIGNALS
mocker.patch.object(
target=yahoofinance_view.finviz_model,
attribute="d_signals",
new=[],
)
yahoofinance_view.historical(
preset_loaded="oversold",
limit=2,
start=datetime.strptime("2022-01-03", "%Y-%m-%d"),
type_candle="a",
normalize=True,
export="",
)
|
#!/usr/bin/env python
from setuptools import setup
setup(
entry_points="""
[nose.plugins]
pylons = pylons.test:PylonsPlugin
"""
)
|
from ..utils import Object
class AuthenticationCodeTypeSms(Object):
"""
An authentication code is delivered via an SMS message to the specified phone number
Attributes:
ID (:obj:`str`): ``AuthenticationCodeTypeSms``
Args:
length (:obj:`int`):
Length of the code
Returns:
AuthenticationCodeType
Raises:
:class:`telegram.Error`
"""
ID = "authenticationCodeTypeSms"
def __init__(self, length, **kwargs):
self.length = length # int
@staticmethod
def read(q: dict, *args) -> "AuthenticationCodeTypeSms":
length = q.get('length')
return AuthenticationCodeTypeSms(length)
|
class StoreRequest(object):
def __init__(self):
self.op = None
self.records = None
self.filename = None
def getOp(self):
return self.op
def setOp(self, op):
self.op = op
def getRecords(self):
return self.records
def setRecords(self, records):
self.records = records
def getFilename(self):
return self.filename
def setFilename(self, filename):
self.filename = filename
|
from .. import db
import datetime
class AttemptsModel(db.Model):
"""
[summary]
Args:
AttemptsMixin ([type]): [description]
db ([type]): [description]
"""
__tablename__ = 'attempts'
id = db.Column(db.Integer, primary_key=True)
max_score = db.Column(db.Integer, nullable=False)
    # Pass the callable (not its result) so each row gets its own timestamp
    # instead of the time the module was imported
    created_at = db.Column(db.DateTime(timezone=True),
                           nullable=False, default=datetime.datetime.now)
contest_id = db.Column(db.Integer, nullable=False)
challenge_id = db.Column(db.Integer, nullable=True)
user_id = db.Column(db.Integer, db.ForeignKey(
'users.id'), nullable=False)
submission_id = db.Column(db.Integer, db.ForeignKey(
'submissions.id'), nullable=False)
|
#!/usr/bin/env python
"""
_SiblingSubscriptionsComplete_
MySQL implementation of Subscription.SiblingSubscriptionsComplete
"""
from WMCore.Database.DBFormatter import DBFormatter
class SiblingSubscriptionsComplete(DBFormatter):
"""
For each file in the input fileset count the number of subscriptions
(on the same input fileset) that have completed the file. If the number
of subscriptions that have completed the file is the same as the number
of subscriptions that processed the file (not counting this subscription)
we can say that processing of the file is complete and we can perform
some other action on it (usually deletion).
"""
sql = """SELECT wmbs_file_details.id,
wmbs_file_details.events,
wmbs_file_details.lfn,
wmbs_pnns.pnn
FROM (
SELECT wmbs_sub_files_available.fileid
FROM wmbs_sub_files_available
INNER JOIN wmbs_subscription ON
wmbs_subscription.id = wmbs_sub_files_available.subscription
LEFT OUTER JOIN wmbs_subscription sibling_subscription ON
sibling_subscription.fileset = wmbs_subscription.fileset AND
sibling_subscription.id != wmbs_subscription.id
LEFT OUTER JOIN wmbs_sub_files_complete ON
wmbs_sub_files_complete.fileid = wmbs_sub_files_available.fileid AND
wmbs_sub_files_complete.subscription = sibling_subscription.id
WHERE wmbs_sub_files_available.subscription = :subscription
GROUP BY wmbs_sub_files_available.fileid
HAVING COUNT(sibling_subscription.id) = COUNT(wmbs_sub_files_complete.fileid)
) available_files
INNER JOIN wmbs_file_details ON
wmbs_file_details.id = available_files.fileid
INNER JOIN wmbs_file_location ON
wmbs_file_location.fileid = available_files.fileid
INNER JOIN wmbs_pnns ON
wmbs_file_location.pnn = wmbs_pnns.id
"""
def execute(self, subscription, conn=None, transaction=False):
results = self.dbi.processData(self.sql, {'subscription': subscription},
conn=conn, transaction=transaction)
return self.formatDict(results)
|
import pytest
from lstchain.io import EventSelector, DL3FixedCuts, DataBinning
import numpy as np
import pandas as pd
from astropy.table import QTable
import astropy.units as u
def test_event_selection():
evt_fil = EventSelector()
data_t = QTable(
{
"a": u.Quantity([1, 2, 3], unit=u.kg),
"b": u.Quantity([np.nan, 2.2, 3.2], unit=u.m),
"c": u.Quantity([1, 3, np.inf], unit=u.s),
}
)
evt_fil.filters = dict(a=[0, 2.5], b=[0, 3], c=[0, 4])
evt_fil.finite_params = ["b"]
data_t = evt_fil.filter_cut(data_t)
data_t_df = evt_fil.filter_cut(data_t.to_pandas())
np.testing.assert_array_equal(
data_t_df, pd.DataFrame({"a": [2], "b": [2.2], "c": [3]})
)
np.testing.assert_array_equal(
data_t,
QTable(
{
"a": u.Quantity([2], unit=u.kg),
"b": u.Quantity([2.2], unit=u.m),
"c": u.Quantity([3], unit=u.s),
}
),
)
def test_dl3_fixed_cuts():
temp_cuts = DL3FixedCuts()
temp_cuts.fixed_gh_cut = 0.7
temp_cuts.fixed_theta_cut = 0.2
temp_cuts.allowed_tels = [1, 2]
temp_data = QTable({
"gh_score": u.Quantity(np.arange(0.1, 1.1, 0.1)),
"theta": u.Quantity(np.arange(0., 1., 0.1), unit=u.deg),
"tel_id": u.Quantity([1, 1, 2, 2, 1, 2, 1, 3, 4, 5])
})
assert len(temp_cuts.gh_cut(temp_data)) == 4
assert len(temp_cuts.theta_cut(temp_data)) == 2
assert len(temp_cuts.allowed_tels_filter(temp_data)) == 7
def test_data_binning():
tempbin = DataBinning()
tempbin.true_energy_min = 0.01
tempbin.true_energy_max = 100
tempbin.true_energy_n_bins_per_decade = 5.5
tempbin.reco_energy_min = 0.01
tempbin.reco_energy_max = 100
tempbin.reco_energy_n_bins_per_decade = 5.5
tempbin.energy_migration_min = 0.2
tempbin.energy_migration_max = 5
tempbin.energy_migration_n_bins = 15
tempbin.fov_offset_min = 0.1
tempbin.fov_offset_max = 1.1
tempbin.fov_offset_n_edges = 9
tempbin.bkg_fov_offset_min = 0
tempbin.bkg_fov_offset_max = 10
tempbin.bkg_fov_offset_n_edges = 11
tempbin.source_offset_min = 0
tempbin.source_offset_max = 1.0001
tempbin.source_offset_n_edges = 1001
e_true = tempbin.true_energy_bins()
e_reco = tempbin.reco_energy_bins()
e_migra = tempbin.energy_migration_bins()
fov_off = tempbin.fov_offset_bins()
bkg_fov = tempbin.bkg_fov_offset_bins()
src_off = tempbin.source_offset_bins()
assert len(e_true) == 22
assert len(e_reco) == 22
assert len(e_migra) == 15
assert len(fov_off) == 9
assert len(bkg_fov) == 11
assert len(src_off) == 1001
|
import numpy as np
from random import random
from math import log, ceil
from time import time, ctime
class Hyperband:
def __init__(self, data, get_params_function, try_params_function, max_iter=81):
self.data = data
self.get_params = get_params_function
self.try_params = try_params_function
self.max_iter = max_iter # maximum iterations per configuration
self.eta = 3 # defines configuration downsampling rate (default = 3)
self.logeta = lambda x: log(x) / log(self.eta)
self.s_max = int(self.logeta(self.max_iter))
self.B = (self.s_max + 1) * self.max_iter
self.results = [] # list of dicts
self.counter = 0
self.best_loss = np.inf
self.best_counter = -1
# can be called multiple times
def run(self, skip_last=0, dry_run=False):
for s in reversed(range(self.s_max + 1)):
# initial number of configurations
n = int(ceil(self.B / self.max_iter / (s + 1) * self.eta ** s))
# initial number of iterations per config
r = self.max_iter * self.eta ** (-s)
# n random configurations
T = [self.get_params() for i in range(n)]
for i in range((s + 1) - int(skip_last)): # changed from s + 1
# Run each of the n configs for <iterations>
# and keep best (n_configs / eta) configurations
n_configs = n * self.eta ** (-i)
n_iterations = r * self.eta ** (i)
print("\n*** {} configurations x {:.1f} iterations each".format(
n_configs, n_iterations))
val_losses = []
early_stops = []
for t in T:
self.counter += 1
print("\n{} | {} | lowest loss so far: {:.4f} (run {})\n".format(
self.counter, ctime(), self.best_loss, self.best_counter))
start_time = time()
if dry_run:
result = {'loss': random(), 'log_loss': random(), 'auc': random()}
else:
result = self.try_params(self.data, n_iterations, t) # <---
assert (type(result) == dict)
assert ('loss' in result)
seconds = int(round(time() - start_time))
print("\n{} seconds.".format(seconds))
loss = result['loss']
val_losses.append(loss)
early_stop = result.get('early_stop', False)
early_stops.append(early_stop)
# keeping track of the best result so far (for display only)
# could do it be checking results each time, but hey
if loss < self.best_loss:
self.best_loss = loss
self.best_counter = self.counter
result['counter'] = self.counter
result['seconds'] = seconds
result['params'] = t
result['iterations'] = n_iterations
self.results.append(result)
# select a number of best configurations for the next loop
# filter out early stops, if any
indices = np.argsort(val_losses)
T = [T[i] for i in indices if not early_stops[i]]
T = T[0:int(n_configs / self.eta)]
return self.results
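# Hedged usage sketch (not part of the original module): exercise the
# Hyperband bookkeeping with dry_run=True so no real training happens.
# The parameter sampler and evaluator below are illustrative stand-ins.
if __name__ == "__main__":
    def _get_params():
        return {'lr': 10 ** (-3 * random())}
    def _try_params(data, n_iterations, params):
        # A real evaluator would train `params` for `n_iterations` and
        # return its validation loss; a random placeholder is used here.
        return {'loss': random()}
    hb = Hyperband(data=None, get_params_function=_get_params,
                   try_params_function=_try_params, max_iter=27)
    hb.run(dry_run=True)
    print("{} runs recorded, best loss {:.4f}".format(len(hb.results), hb.best_loss))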
|
from contextlib import contextmanager
from pathlib import Path
from typing import IO, Any, Iterator, List, Optional, Tuple, Union, cast
from .gettable import Gettable
from .padded_text_file import SplittedPaddedTextFile
class ColumnNotFoundError(Exception):
pass
class _PaddedCSVFile(Gettable):
"""Represent a padded CSV file, where lines are reachable with O(1) complexity.
A padded CSV file is a CSV file where all lines have exactly the same length.
In general, lines are right padded with white spaces.
The last line MUST also contain a carriage return.
    Only the line(s) you request will be loaded in memory.
Usage:
        padded_csv_file = _PaddedCSVFile(<files_descriptor_and_size>,
                                         <column_and_type_tuples>)
Example: With the following file represented by <files_descriptor_and_size>:
a,b,c,d
1,2,3,4
5,6,7,8
9,10,11,12
13,14,15,16
17,18,19,20
        padded_csv_file = _PaddedCSVFile(<files_descriptor_and_size>,
                                         [("d", int), ("b", int)])
# Get the number of lines
len(padded_csv_file) # = 5
# Get the third line of the file
padded_csv_file[2] # = [12, 10]
# Get the last line of the file
padded_csv_file[-1] # = [20, 18]
# Get an iterator on lines between the third line (included) and the last line
# (excluded)
padded_csv_file.get(start=2, stop=-1)
# Get all lines between the third line (included) and the last line (excluded)
# Warning: All lines in the selected range will be loaded into memory.
# For example: padded_csv_file[:] will load all the file in memory.
# If possible, use padded_csv_file.get(start=a, stop=b) instead of
# padded_csv_file[a, b]
padded_csv_file[2:-1] # = [[12, 10], [16, 14]]
"""
def __init__(
self,
files_descriptor_and_size: List[Tuple[IO, int]],
columns_and_types: List[Tuple[str, type]],
unwrap_if_one_column=False,
) -> None:
"""Constructor.
files_descriptor_and_size:
            A list of tuples like:
                - The file descriptor pointing to the padded CSV file
                - The size (in bytes) of the padded CSV file pointed to by the
                  file descriptor
columns_and_types: A list of tuples where each tuple has:
- The name of the column
- The type of the column
        unwrap_if_one_column: If only one column is requested, unwrap the result.
                              Example: return [4, 5, 2] instead of
                              [[4], [5], [2]]
        If at least one line of the file pointed to by `file_descriptor` does not
        have the same length as the others, a `TextFileNotPaddedError` is raised.
"""
padded_text_file = SplittedPaddedTextFile(files_descriptor_and_size, offset=0)
header_line = cast(str, padded_text_file[0])
headers = header_line.split(",")
if columns_and_types == []:
raise ValueError("`column_and_type` is an empty list")
columns, _ = zip(*columns_and_types)
if not set(columns) <= set(headers):
raise ColumnNotFoundError(
"At least one column specified in `column_to_type` in not present in "
"the file"
)
header_to_index = {header: index for index, header in enumerate(headers)}
self.__column_indexes_type = [
(header_to_index[column], type) for column, type in columns_and_types
]
self.__padded_text_file = SplittedPaddedTextFile(
files_descriptor_and_size, offset=1
)
_, *others = columns_and_types
self.__has_to_unwrap = unwrap_if_one_column and others == []
def __len__(self):
"""Return the number of lines of the file (excluding the header)."""
return len(self.__padded_text_file)
def __getitem__(
self, line_number_or_slice: Union[int, slice]
) -> Union[Any, List, List[List]]:
"""Get given values or a given slice of values.
line_number_or_slice: The line number or the slice where values will be
retrieved
"""
def handle_line_number(line_number: int) -> Union[Any, List]:
line = cast(str, self.__padded_text_file[line_number])
items = line.split(",")
return self.__unwrap_if_needed_single(
[type(items[index]) for index, type in self.__column_indexes_type]
)
def handle_slice(slice: slice) -> List[Union[Any, List]]:
return self.__unwrap_if_needed_multi(
[
[
type(items.split(",")[index])
for index, type in self.__column_indexes_type
]
for items in self.__padded_text_file[slice]
]
)
if isinstance(line_number_or_slice, int):
return handle_line_number(line_number_or_slice)
elif isinstance(line_number_or_slice, slice):
return handle_slice(line_number_or_slice)
def __unwrap_if_needed_single(self, items: List) -> Union[List, Any]:
if self.__has_to_unwrap:
item, *trash = items
assert trash == []
return item
return items
def __unwrap_if_needed_multi(self, items: List[List]) -> List:
return (
[item for sublist in items for item in sublist]
if self.__has_to_unwrap
else items
)
def get(
self, start: Optional[int] = None, stop: Optional[int] = None
) -> Iterator[List]:
"""Return an iterator on a given slice of lines.
start: The first line of slice (included)
stop : The last line of slice (excluded)
"""
for line in self.__padded_text_file.get(start, stop):
items = line.split(",")
            values = [type(items[index]) for index, type in self.__column_indexes_type]
            yield self.__unwrap_if_needed_single(values)
@contextmanager
def padded_csv_file(
path: Path, columns_and_types: List[Tuple[str, type]]
) -> Iterator[_PaddedCSVFile]:
"""Represent a padded CSV file, where lines are reachable with O(1) complexity.
A padded CSV file is a CSV file where all lines have exactly the same length.
In general, lines are right padded with white spaces.
The last line MUST also contain a carriage return.
    Only the line(s) you request will be loaded in memory.
Usage:
with padded_csv_file(<file_path>, <columns_and_types>) as pcf:
...
Example: With the following file represented by <file_descriptor>:
a,b,c,d
1,2,3,4
5,6,7,8
9,10,11,12
13,14,15,16
17,18,19,20
with padded_csv_file(<file_path>, [("d", int), ("b", int)]) as pcf:
# Get the number of lines
len(pcf) # = 5
# Get the third line of the file
pcf[2] # = [12, 10]
# Get the last line of the file
pcf[-1] # = [20, 18]
# Get an iterator on lines between the third line (included) and the last line
# (excluded)
pcf.get(start=2, stop=-1)
# Get all lines between the third line (included) and the last line (excluded)
# Warning: All lines in the selected range will be loaded into memory.
# For example: padded_csv_file[:] will load all the file in memory.
# If possible, use pcf.get(start=a, stop=b) instead of
# pcf[a, b]
pcf[2:-1] # = [[12, 10], [16, 14]]
"""
with path.open() as file_descriptor:
yield _PaddedCSVFile(
[(file_descriptor, path.stat().st_size)], columns_and_types
)
|
load("//bazel/rules/cpp:main.bzl", "cpp_main")
load("@rules_pkg//:pkg.bzl", "pkg_deb", "pkg_tar")
load("//bazel/rules/data:package_data.bzl", "package_data")
def distributable_data(name, description, file_groups):
EVERYTHING_EXTENSION = "-debian-all"
MAINTAINER = "Trevor Hickey <TrevorJamesHickey@gmail.com>"
DEFAULT_VERSION = "1.0"
DEFAULT_HOMEPAGE = "none"
DATA_TARGET = ":" + name + "-data"
package_data(
name = name,
file_groups = file_groups,
)
all_name = name + EVERYTHING_EXTENSION
pkg_deb(
name = all_name,
data = DATA_TARGET,
package = name,
architecture = "all",
maintainer = MAINTAINER,
version = DEFAULT_VERSION,
description = description,
homepage = DEFAULT_HOMEPAGE,
)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pytext.exporters.exporter import ModelExporter
__all__ = ["ModelExporter"]
|
from tzlocal import get_localzone
def add_local_tz(date):
''' Add the local time zone to the given date and returns a new date.
Parameters
----------
date : :obj:`datetime`
Date to which to adjust with the local time zone.
Returns
-------
:obj:`datetime`
Date adjusted with local timezone.
'''
tz = get_localzone()
return tz.fromutc(date.replace(tzinfo=tz))
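def _add_local_tz_example():
    ''' Hedged usage sketch (not part of the original module): interpret a
    naive timestamp as UTC and express it in the machine's local time zone.
    '''
    from datetime import datetime
    return add_local_tz(datetime(2021, 6, 1, 12, 0, 0))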
def create_option_id_filter(option_type, underlying_id, expiry_date,
min_strike_price, max_strike_price):
''' Simple utility to generate an OptionIdFilter structure.
Parameters
----------
option_type : :obj:`str`, {'Call', 'Put'}
Option type.
underlying_id : :obj:`int`
Underlying ID.
expiry_date : :obj:`datetime`
Expiry date.
min_strike_price : :obj:`double`
Min strike price.
max_strike_price : :obj:`double`
Max strike price.
Note
----
More details on allowed `option_type` values can be found `here
<https://www.questrade.com/api/documentation/rest-operations/enumerat \
ions/enumerations#option-type>`__.
Returns
-------
:obj:`dict`
OptionIdFilter structure.
'''
option_id_filter = {
'optionType': option_type,
'underlyingId': underlying_id,
'expiry_date': expiry_date,
'minStrikePrice': min_strike_price,
        'maxStrikePrice': max_strike_price
}
return option_id_filter
def create_strategy_variant_request(variant_id, strategy, legs):
''' Simple utility to generate a StrategyVariantRequest structure.
Parameters
----------
variant_id : :obj:`int`
Variant ID.
strategy : :obj:`str`, {'CoveredCall', 'MarriedPuts', \
'VerticalCallSpread', 'VerticalPutSpread', 'CalendarCallSpread', \
'CalendarPutSpread', 'DiagonalCallSpread', 'DiagonalPutSpread', 'Collar', \
'Straddle', 'Strangle', 'ButterflyCall', 'ButterflyPut', 'IronButterfly', \
'CondorCall', 'Custom'}
Strategy type.
legs : :obj:`list` of :obj:`dict`
List of StrategyVariantLeg structures.
Note
----
More details on allowed `strategy` values can be found `here \
<https://www.questrade.com/api/documentation/rest-operations/enumerations/ \
enumerations#strategy-types>`__.
Returns
-------
:obj:`dict`
StrategyVariantRequest structure.
'''
strategy_variant_request = {
'variantId': variant_id,
'strategy': strategy,
'legs': legs
}
return strategy_variant_request
def create_strategy_variant_leg(symbol_id, action, ratio):
''' Simple utility function to generate a StrategyVariantLeg structure.
Parameters
----------
    symbol_id : :obj:`int`
Internal symbol identifier.
action : :obj:`str`, {'Buy', 'Sell'}
Order side.
ratio : :obj:`int`
        Numeric ratio of the leg strategy.
Note
----
More details on allowed `action` values can be found `here
<https://www.questrade.com/api/documentation/rest-operations/enumerations/ \
enumerations#order-action>`__.
Returns
-------
:obj:`dict`
StrategyVariantLeg structure.
'''
strategy_variant_leg = {
'symbolId': symbol_id,
        'action': action,
'ratio': ratio
}
return strategy_variant_leg
def create_bracket_order_component(quantity, action, limit_price, stop_price,
order_type, time_in_force, order_class,
order_id=0):
''' Simple utility to generate a BracketOrderComponent structure.
Parameters
----------
quantity : :obj:`double`
Order quantity.
action : :obj:`str`, {'Buy', 'Sell'}
Order side.
limit_price : :obj:`double`
Limit price.
stop_price : :obj:`double`
Stop price.
order_type : :obj:`str`, {'Market', 'Limit', 'Stop', 'StopLimit', \
'TrailStopInPercentage', 'TrailStopInDollar', \
'TrailStopLimitInPercentage', 'TrailStopLimitInDollar', 'LimitOnOpen', \
'LimitOnClose'}
Order type.
time_in_force : :obj:`str`, {'Day', 'GoodTillCanceled', \
'GoodTillExtendedDay', 'GoodTillDate', 'ImmediateOrCancel', 'FillOrKill'}
Order duration.
order_class : :obj:`str`, {'Primary', 'Limit', 'StopLoss'}
Type of component
Note
----
More details on allowed `action`, `order_type`, `time_in_force` and
`order_class` can be found `here <https://www.questrade.com/api/ \
documentation/rest-operations/enumerations/enumerations>`__
Returns
-------
:obj:`dict`
BracketOrderComponent structure.
'''
bracket_order_component = {
'orderId': order_id,
'quantity': quantity,
'action': action,
'limitPrice': limit_price,
'stopPrice': stop_price,
'orderType': order_type,
'timeInForce': time_in_force,
'orderClass': order_class
}
return bracket_order_component
def create_insert_order_leg_data(symbol_id, action, leg_quantity):
    ''' Simple utility function to generate an InsertOrderLegData structure.
Parameters
----------
symbol_id : :obj:`int`
Internal symbol identifier.
action : :obj:`str`, {'Buy', 'Sell'}
Leg action.
leg_quantity : :obj:`int`
Leg quantity.
Note
----
More details on allowed `action` values can be found `here
<https://www.questrade.com/api/documentation/rest-operations/enumerations/ \
enumerations#order-action>`__.
Returns
-------
:obj:`dict`
InsertOrderLegData structure.
'''
insert_order_leg_data = {
'symbolId': symbol_id,
'action': action,
'legQuantity': leg_quantity
}
return insert_order_leg_data
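def _bracket_order_example():
    ''' Hedged usage sketch (not part of the original module): compose a
    simple bracket order from its components. Quantities and prices are
    illustrative only.
    '''
    primary = create_bracket_order_component(
        quantity=100, action='Buy', limit_price=10.00, stop_price=0,
        order_type='Limit', time_in_force='Day', order_class='Primary')
    stop_loss = create_bracket_order_component(
        quantity=100, action='Sell', limit_price=0, stop_price=9.00,
        order_type='Stop', time_in_force='GoodTillCanceled',
        order_class='StopLoss')
    return [primary, stop_loss]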
|
#
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from xbb_tools.cluster_startup import attach_to_cluster
from xbb_tools.utils import (
benchmark,
tpcxbb_argparser,
run_query,
train_clustering_model
)
from dask import delayed
# -------- Q25 -----------
# -- store_sales and web_sales date
q25_date = "2002-01-02"
N_CLUSTERS = 8
CLUSTER_ITERATIONS = 20
N_ITER = 5
def get_clusters(client, ml_input_df):
import dask_cudf
ml_tasks = [
delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
for df in ml_input_df.to_delayed()
]
results_dict = client.compute(*ml_tasks, sync=True)
output = ml_input_df.index.to_frame().reset_index(drop=True)
labels_final = dask_cudf.from_cudf(
results_dict["cid_labels"], npartitions=output.npartitions
)
output["label"] = labels_final.reset_index()[0]
# Based on CDH6.1 q25-result formatting
results_dict["cid_labels"] = output
return results_dict
def read_tables(data_dir, bc):
bc.create_table("web_sales", data_dir + "web_sales/*.parquet")
bc.create_table("store_sales", data_dir + "store_sales/*.parquet")
bc.create_table("date_dim", data_dir + "date_dim/*.parquet")
def main(data_dir, client, bc, config):
benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
query = f"""
WITH concat_table AS
(
(
SELECT
ss_customer_sk AS cid,
count(distinct ss_ticket_number) AS frequency,
max(ss_sold_date_sk) AS most_recent_date,
CAST( SUM(ss_net_paid) AS DOUBLE) AS amount
FROM store_sales ss
JOIN date_dim d ON ss.ss_sold_date_sk = d.d_date_sk
WHERE CAST(d.d_date AS DATE) > DATE '{q25_date}'
AND ss_customer_sk IS NOT NULL
GROUP BY ss_customer_sk
) union all
(
SELECT
ws_bill_customer_sk AS cid,
count(distinct ws_order_number) AS frequency,
max(ws_sold_date_sk) AS most_recent_date,
CAST( SUM(ws_net_paid) AS DOUBLE) AS amount
FROM web_sales ws
JOIN date_dim d ON ws.ws_sold_date_sk = d.d_date_sk
WHERE CAST(d.d_date AS DATE) > DATE '{q25_date}'
AND ws_bill_customer_sk IS NOT NULL
GROUP BY ws_bill_customer_sk
)
)
SELECT
cid AS cid,
CASE WHEN 37621 - max(most_recent_date) < 60 THEN 1.0
ELSE 0.0 END AS recency, -- 37621 == 2003-01-02
CAST( SUM(frequency) AS BIGINT) AS frequency, --total frequency
CAST( SUM(amount) AS DOUBLE) AS amount --total amount
FROM concat_table
GROUP BY cid
ORDER BY cid
"""
cluster_input_ddf = bc.sql(query)
# Prepare df for KMeans clustering
cluster_input_ddf["recency"] = cluster_input_ddf["recency"].astype("int64")
cluster_input_ddf = cluster_input_ddf.repartition(npartitions=1)
cluster_input_ddf = cluster_input_ddf.persist()
cluster_input_ddf = cluster_input_ddf.set_index('cid')
results_dict = get_clusters(client=client, ml_input_df=cluster_input_ddf)
return results_dict
if __name__ == "__main__":
config = tpcxbb_argparser()
client, bc = attach_to_cluster(config, create_blazing_context=True)
run_query(config=config, client=client, query_func=main, blazing_context=bc)
|
def format_date(date):
"""format the date of blog to correct format"""
date = date.strftime('%H:%M:%S %m/%d/%Y')
return date
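def _format_date_example():
    """Hedged usage sketch (not part of the original helper)."""
    from datetime import datetime
    # Returns '09:30:00 05/17/2020'
    return format_date(datetime(2020, 5, 17, 9, 30))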
|
import pandas as pd
from datetime import datetime, timedelta
class JHUData(object):
url_pattern = (
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/"
"csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-{}.csv"
)
def __init__(self, refresh_rate=30):
self.refresh_rate = timedelta(minutes=refresh_rate)
self.data_sources = {
r: self.url_pattern.format(r) for r in ["Confirmed", "Deaths", "Recovered"]
}
self.auto_refresh(force=True)
def load_data(self):
df_list = [
pd.read_csv(data).assign(Record=record)
for record, data in self.data_sources.items()
]
df = pd.concat(df_list, ignore_index=True)
return df
def preprocess(self, df):
df.drop(columns=["Province/State", "Lat", "Long"], inplace=True)
df.rename(
columns=lambda c: pd.to_datetime(c)
if c not in ["Country/Region", "Record"]
else c,
inplace=True,
)
df_country = df.groupby(["Country/Region", "Record"]).sum()
df_country.reset_index(level=1, drop=False, inplace=True)
# calculate active cases
data_cols = df_country.columns.drop("Record")
df_confirmed = df_country[df_country["Record"] == "Confirmed"][data_cols]
df_recovered = df_country[df_country["Record"] == "Recovered"][data_cols]
df_dead = df_country[df_country["Record"] == "Deaths"][data_cols]
df_active = df_confirmed - df_recovered - df_dead
df_active["Record"] = "Active"
return pd.concat([df_country, df_active])
def auto_refresh(self, force=False):
if force or (datetime.utcnow() - self._ts > self.refresh_rate):
df = self.load_data()
self._df = self.preprocess(df)
self._ts = datetime.utcnow()
self.data_cols = self._df.columns.drop("Record").to_list()
def get_df(self):
self.auto_refresh()
return self._df
def get_country_data(self, country="Germany"):
df = self.get_df()
df_country = df.loc[country].reset_index(drop=True).set_index("Record")
return df_country.loc[:, (df_country != 0).any(axis=0)]
def get_country_total(self, record="Active"):
df = self.get_df()
df_record = df[df["Record"] == record]
return df_record.iloc[:, -1].sort_values(ascending=False)
def get_country_ranking(self):
country_total = self.get_country_total(record="Active")
return country_total.index.to_list()
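if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): requires network
    # access to the JHU CSSE repository (now archived), so the CSV URLs may no
    # longer resolve. Output depends entirely on the downloaded data.
    jhu = JHUData(refresh_rate=30)
    print(jhu.get_country_ranking()[:10])
    print(jhu.get_country_data("Germany").iloc[:, -5:])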
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
# This module is part of the package GTW.__test__.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# migration
#
# Purpose
# Test scope migrations
#
# Revision Dates
# 19-May-2010 (CT) Creation
# 1-Jul-2010 (CT) `race_results` as example of composite-collection added
# 13-Jul-2010 (CT) Changed to use `DB_Man` for migration
# (instead of `Scope.copy`)
# 2-Aug-2010 (MG) `Account_Anonymous` added to test an border case for
# the migration
# 16-Aug-2010 (MG) Test for a change with children added
# 17-Aug-2010 (CT) Use `unicode` instead of `str`
# 6-Sep-2010 (CT) Adapted to change of `Race_Result` from Composite-List
# to `Link1`
# 14-Jun-2011 (MG) `MYST` added to `Backend_Parameters`
# 19-Mar-2012 (CT) Adapt to `Boat_Class.name.ignore_case` now being `True`
# 19-Mar-2012 (CT) Adapt to reification of `SRM.Handicap`
# 1-Aug-2012 (MG) Add test type name change queries
# 24-Jan-2013 (CT) Change `nation` from `Austria` to `AUT`
# ««revision-date»»···
#--
from _GTW.__test__.model import *
class _Migration_Scaffold_ (Scaffold.__class__) :
Backend_Parameters = dict \
( Scaffold.Backend_Parameters
, HPS = "'hps:///test.hps'"
, SQL = "'sqlite:///test.sql'"
, sq = "'sqlite:///test.sql'"
)
# end class _Migration_Scaffold_
Scaffold = _Migration_Scaffold_ ()
_test_code = r"""
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> apt_s, url_s = scope.app_type, scope.db_url
>>> PAP = scope.PAP
>>> SRM = scope.SRM
>>> Auth = scope.Auth
>>> x = SRM.Boat_Class (u"29er", max_crew = 2)
>>> x = SRM.Boat_Class (u"420er", max_crew = 2)
>>> x = SRM.Boat_Class (u"470er", max_crew = 2)
>>> x = SRM.Boat_Class (u"49er", max_crew = 2)
>>> x = SRM.Boat_Class (u"Aquila Kiel", max_crew = 3)
>>> sw= x.copy (u"Aquila Schwert", max_crew = 3)
>>> x = SRM.Boat_Class (u"Fam", max_crew = 3)
>>> x = SRM.Boat_Class (u"Finn-Dinghy", max_crew = 1)
>>> x = SRM.Boat_Class (u"Korsar", max_crew = 2)
>>> x = SRM.Boat_Class (u"Laser", max_crew = 1)
>>> x = SRM.Boat_Class (u"Laser 4.7", max_crew = 1)
>>> x = SRM.Boat_Class (u"Laser Master", max_crew = 1)
>>> x = SRM.Boat_Class (u"Laser Radial", max_crew = 1)
>>> x = SRM.Boat_Class (u"O-Jolle", max_crew = 1)
>>> x = SRM.Boat_Class (u"Optimist", max_crew = 1)
>>> x = SRM.Boat_Class (u"Pirat Regatta", max_crew = 2)
>>> x = SRM.Boat_Class (u"Pirat Klassik", max_crew = 2)
>>> x = SRM.Boat_Class (u"Pirat Schulboot", max_crew = 2)
>>> x = SRM.Boat_Class (u"Pirat", max_crew = 2)
>>> x = SRM.Boat_Class (u"Robby Jolle", max_crew = 2)
>>> x = SRM.Boat_Class (u"Seascape 18", max_crew = 4)
>>> x = SRM.Boat_Class (u"Zoom8", max_crew = 1)
>>> sw.last_cid
7
>>> for c in scope.uncommitted_changes :
... print (c)
<Create SRM.Boat_Class ('29er', 'SRM.Boat_Class'), new-values = {'last_cid' : '1', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('420er', 'SRM.Boat_Class'), new-values = {'last_cid' : '2', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('470er', 'SRM.Boat_Class'), new-values = {'last_cid' : '3', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('49er', 'SRM.Boat_Class'), new-values = {'last_cid' : '4', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Aquila Kiel', 'SRM.Boat_Class'), new-values = {'last_cid' : '5', 'max_crew' : '3'}>
<Copy SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '7'}>
<Create SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '6', 'max_crew' : '3'}>
<Create SRM.Boat_Class ('Fam', 'SRM.Boat_Class'), new-values = {'last_cid' : '8', 'max_crew' : '3'}>
<Create SRM.Boat_Class ('Finn-Dinghy', 'SRM.Boat_Class'), new-values = {'last_cid' : '9', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Korsar', 'SRM.Boat_Class'), new-values = {'last_cid' : '10', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Laser', 'SRM.Boat_Class'), new-values = {'last_cid' : '11', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Laser 4.7', 'SRM.Boat_Class'), new-values = {'last_cid' : '12', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Laser Master', 'SRM.Boat_Class'), new-values = {'last_cid' : '13', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Laser Radial', 'SRM.Boat_Class'), new-values = {'last_cid' : '14', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('O-Jolle', 'SRM.Boat_Class'), new-values = {'last_cid' : '15', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Optimist', 'SRM.Boat_Class'), new-values = {'last_cid' : '16', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Pirat Regatta', 'SRM.Boat_Class'), new-values = {'last_cid' : '17', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Pirat Klassik', 'SRM.Boat_Class'), new-values = {'last_cid' : '18', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Pirat Schulboot', 'SRM.Boat_Class'), new-values = {'last_cid' : '19', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Pirat', 'SRM.Boat_Class'), new-values = {'last_cid' : '20', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Robby Jolle', 'SRM.Boat_Class'), new-values = {'last_cid' : '21', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Seascape 18', 'SRM.Boat_Class'), new-values = {'last_cid' : '22'}>
<Create SRM.Boat_Class ('Zoom8', 'SRM.Boat_Class'), new-values = {'last_cid' : '23', 'max_crew' : '1'}>
>>> scope.commit ()
>>> x = SRM.Boat (('Optimist',), 1, u"AUT")
>>> x = SRM.Boat (('Optimist',), 2, u"AUT")
>>> x = SRM.Boat (('Laser',), 3, u"AUT")
>>> x = SRM.Boat (('Seascape 18',), 14, u"AUT")
>>> scope.commit ()
>>> bc = SRM.Boat_Class.instance (u"Optimist")
>>> ys = SRM.Handicap ("Yardstick")
>>> b = SRM.Boat.instance_or_new ('Optimist', u"1107", u"AUT", raw = True)
>>> p = PAP.Person.instance_or_new (u"Tanzer", u"Christian")
>>> s = SRM.Sailor.instance_or_new (p.epk_raw, nation = u"AUT", mna_number = u"29676", raw = True) ### 1
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", dict (start = u"20080501", raw = True), raw = True)
>>> reg = SRM.Regatta_C (rev, bc)
>>> reh = SRM.Regatta_H (rev, ys)
>>> bir = SRM.Boat_in_Regatta (b, reg, skipper = s)
>>> rr1 = SRM.Race_Result (bir, 1, points = 8)
>>> rr2 = SRM.Race_Result (bir, 2, points = 4)
>>> scope.commit ()
>>> sw.last_cid
7
>>> scope.MOM.Id_Entity.count
36
>>> int (scope.query_changes (parent = None).count ())
36
>>> int (scope.query_changes ().count ())
37
>>> int (scope.ems.max_cid)
37
>>> bc.set (loa = 2.43)
1
>>> SRM.Boat_Class.instance (u"Laser").set (sail_area = 7.06, loa = 4.064, beam = 1.422)
3
>>> SRM.Boat_Class.instance (u"Seascape 18").set (loa = 5.45, beam = 2.45, sail_area = 23)
3
>>> scope.commit ()
>>> MOM.B = True
>>> print (sw.last_cid) ### X
7
>>> scope.MOM.Id_Entity.count
36
>>> print (sw.last_cid) ### Y
7
>>> MOM.B = False
>>> int (scope.query_changes ().count ())
40
>>> int (scope.ems.max_cid)
40
>>> len (scope.SRM.Regatta_Event.query ().first ().regattas)
2
>>> b = SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (b.last_cid, sw.last_cid, b is sw)
7 7 True
>>> c = scope.query_changes (cid = b.last_cid).one ()
>>> print (c) ### change in source scope
<Copy SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '7'}>
<Create SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '6', 'max_crew' : '3'}>
>>> len (c.children)
1
>>> int (c.cid), int (c.children [0].cid)
(7, 6)
>>> [s for s in scope if not s.last_cid] ### before expunge
[]
>>> sum ((not s.last_cid) for s in scope), sum (bool (s.last_cid) for s in scope) ### before expunge
(0, 36)
>>> if hasattr (scope.ems.session, "expunge") : scope.ems.session.expunge ()
>>> [s for s in scope if not s.last_cid] ### after expunge
[]
>>> sum ((not s.last_cid) for s in scope), sum (bool (s.last_cid) for s in scope) ### after expunge
(0, 36)
>>> scope.query_changes (type_name = "SRM.Boat_Class").count ()
26
>>> b = scope.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (b.last_cid) ### before migration
7
Save contents of scope to database and destroy scope:
>>> scope.ems.compact ()
>>> scope.destroy ()
Now, we migrate all objects and the change history to a new backend. All
entities, changes, cids, and pids should be identical afterwards:
>>> db_url = "hps:////tmp/gtw_test_migration.gtw"
>>> apt_t, url_t = Scaffold.app_type_and_url (db_url)
>>> apt_t.delete_database (url_t) # 1
>>> db_man_s = Scaffold.DB_Man.connect (apt_s, url_s)
>>> db_man_t = Scaffold.DB_Man.create (apt_t, url_t, db_man_s)
>>> db_man_s.destroy ()
>>> db_man_t.destroy ()
>>> scope_s = Scaffold.scope (url_s, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> scope_t = Scaffold.scope (url_t, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> tuple (s.MOM.Id_Entity.count for s in (scope_s, scope_t))
(36, 36)
>>> all (s.as_pickle_cargo () == t.as_pickle_cargo () for (s, t) in zip (scope_s, scope_t))
True
>>> int (scope_t.ems.max_cid)
40
>>> len (scope_t.SRM.Regatta_Event.query ().first ().regattas)
2
>>> [s for (s, t) in zip (scope_s, scope_t) if s.last_cid != t.last_cid or not s.last_cid]
[]
>>> [s.query_changes (type_name = "SRM.Boat_Class").count () for s in (scope_t, scope_s)]
[26, 26]
>>> bs = scope_s.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> bt = scope_t.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (bs.last_cid, bt.last_cid) ### migrated to HPS
7 7
Now we delete the original database and then migrate back into the
original app-type/backend. Again, all entities, changes, cids,
and pids should still be identical:
>>> scope_s.destroy ()
>>> scope_t.destroy ()
>>> apt_s.delete_database (url_s) # 2
>>> db_man_t = Scaffold.DB_Man.connect (apt_t, url_t)
>>> db_man_u = Scaffold.DB_Man.create (apt_s, url_s, db_man_t)
>>> db_man_t.destroy ()
>>> db_man_u.destroy ()
>>> scope_t = Scaffold.scope (url_t, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> scope_u = Scaffold.scope (url_s, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> tuple (s.MOM.Id_Entity.count for s in (scope_t, scope_u))
(36, 36)
>>> all (s.as_pickle_cargo () == t.as_pickle_cargo () for (s, t) in zip (scope_t, scope_u))
True
>>> int (scope_u.ems.max_cid)
40
>>> len (scope_u.SRM.Regatta_Event.query ().first ().regattas)
2
>>> [s for (s, t) in zip (scope_t, scope_u) if s.last_cid != t.last_cid or not s.last_cid]
[]
>>> b = scope_u.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (b.last_cid) ### after migration
7
>>> c = scope_u.query_changes (cid = b.last_cid).one () ### mig scope
>>> print (c)
<Copy SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '7'}>
<Create SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '6', 'max_crew' : '3'}>
>>> len (c.children)
1
>>> int (c.cid), int (c.children [0].cid)
(7, 6)
>>> scope_u.query_changes (type_name = "SRM.Boat_Class").count ()
26
Let's clean up::
>>> scope_t.destroy ()
>>> scope_u.destroy ()
>>> apt_t.delete_database (url_t) # 3
>>> apt_s.delete_database (url_s) # 4
"""
__test__ = Scaffold.create_test_dict \
( dict
( test_code = _test_code
)
, ignore = ("HPS", )
)
### __END__ migration
|
from typing import Any, List
import json
from time import sleep
from datetime import date
from os import path
from api import BilibiliApi
from writer import write_md, write_raw_data
BASE_PATH = './archive'
NAP_TIME = .5
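# Seconds to sleep between consecutive tag-API requests in summarize_tags,
# presumably to stay gentle on the Bilibili API.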
def generate_md(raw_data: BilibiliApi.RAW_DATA_T) -> str:
res = []
for video in raw_data:
line = '1. '
url = f'https://www.bilibili.com/video/{video["bvid"]}'
line += f'[{video["title"]}]({url})'
res.append(line)
return '\n'.join(res)
def generate_md_table_row(row: List[Any]) -> str:
return f'| {" | ".join(r for r in row)} |\n'
def summarize_tags(api: BilibiliApi, loc: str, name: str, aids: List[str]) -> BilibiliApi.RAW_DATA_T:
all_tags = {}
for aid in aids:
sleep(NAP_TIME)
tag_list = api.get_tag(aid)
for tag in tag_list:
if tag['tag_id'] in all_tags:
all_tags[tag['tag_id']]['day_count'] += 1
else:
all_tags[tag['tag_id']] = {'data': tag, 'day_count': 1}
write_raw_data(all_tags, path.join(loc, 'Tags', 'README.md'))
summary = []
for _, tag in all_tags.items():
tag_name = tag['data']['tag_name']
count = tag['day_count']
summary.append((tag_name, count))
summary.sort(key=lambda x: x[1], reverse=True)
summary_header = ['Tag', 'Count']
summary_md = '# Tag Distribution\n'
summary_md += generate_md_table_row(summary_header)
summary_md += generate_md_table_row(['---'] * len(summary_header))
for row in summary:
summary_md += generate_md_table_row(row)
write_md(summary_md, path.join(loc, 'Tags', name))
def summarize_highest_ranked(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
highest_ranked = api.get_highest_ranked()
write_raw_data(highest_ranked, path.join(loc, 'Raw', 'highest_ranked.json'))
aids = [video['aid'] for video in highest_ranked]
summarize_tags(api, loc, 'highest_ranked.json', aids)
return highest_ranked
def summarize_most_popular(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
most_popular = api.get_most_popular()
write_raw_data(most_popular, path.join(loc, 'Raw', 'most_popular.json'))
aids = [video['aid'] for video in most_popular]
summarize_tags(api, loc, 'most_popular.json', aids)
return most_popular
def summarize_today():
date_str = date.today().isoformat()
loc = path.join(BASE_PATH, 'Bilibili', date_str)
api = BilibiliApi()
highest_ranked = summarize_highest_ranked(api, loc)
most_popular = summarize_most_popular(api, loc)
md_str = '# Highest Ranked Videos\n'
md_str += generate_md(highest_ranked)
md_str += '\n\n'
md_str += '# Most Popular Videos\n'
md_str += generate_md(most_popular)
write_md(md_str, path.join(loc, 'README.md'))
if __name__ == '__main__':
summarize_today()
|
class Source:
def __init__(self, category, id, name, description, url):
self.category = category
self.id = id
self.name = name
self.description = description
self.url = url
|
from gpbasics import global_parameters as global_param
global_param.ensure_init()
import tensorflow as tf
from enum import Enum
class SimilarityType(Enum):
LINEAR = 0
SQRT_LINEAR = 1
LOG_LINEAR = 2
RECIPROCAL = 3
def get_similarity_based_distance(distance, similarity_type: SimilarityType):
if similarity_type is SimilarityType.LINEAR:
return get_linear_similarity(distance)
elif similarity_type is SimilarityType.SQRT_LINEAR:
return get_sqrt_linear_similarity(distance)
elif similarity_type is SimilarityType.LOG_LINEAR:
return get_log_linear_similarity(distance)
elif similarity_type is SimilarityType.RECIPROCAL:
return get_reciprocal_similarity(distance)
else:
raise Exception("SimilarityBased Distance type invalid: %s" % str(similarity_type))
def get_linear_similarity(distance):
return 1 - distance
def get_sqrt_linear_similarity(distance):
return tf.sqrt(get_linear_similarity(distance))
def get_log_linear_similarity(distance):
return tf.math.log(get_linear_similarity(distance))
def get_reciprocal_similarity(distance):
return tf.divide(1, distance) - 1
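# Hypothetical usage sketch (values are examples only, assuming eager TensorFlow):
# for a normalized distance of 0.75, LINEAR similarity yields 1 - 0.75 = 0.25,
# while RECIPROCAL yields 1 / 0.75 - 1 ≈ 0.333.
#
#   similarity = get_similarity_based_distance(tf.constant(0.75), SimilarityType.LINEAR)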
|
import sys,shutil,os,glob,re
use_comma_separated_values = False
def summarizeLoRaD(model):
lc_model = model.lower()
# set default values in case there are no results yet for this analysis
rnseed = 0
cov1 = 0.0
logL1 = 0.0
cov2 = 0.0
logL2 = 0.0
cov3 = 0.0
logL3 = 0.0
# read output files
outfilenames = glob.glob('%s/frt-%s.out' % (model,lc_model))
errfilenames = glob.glob('%s/frt-%s.err' % (model,lc_model))
if len(outfilenames) == 1 and len(errfilenames) == 1:
print('%s...' % lc_model)
outfn = outfilenames[0]
errfn = errfilenames[0]
# read output and error files
outstuff = open(outfn, 'r').read()
errstuff = open(errfn, 'r').read()
stuff = outstuff + errstuff
# get seed
m = re.search(r'Pseudorandom number seed: (\d+)', stuff, re.M | re.S)
if m is not None:
rnseed = int(m.group(1))
# grab times
#m = re.search('user-seconds\s+([.0-9]+)', stuff, re.M | re.S)
#if m is not None:
# secs = float(m.group(1))
# grab marginal likelihood estimate for each of the three coverage values
results = re.findall(r' Determining working parameter space for coverage = ([.0-9]+?)[.][.][.].+?log Pr\(data\)\s+=\s+([-.0-9]+)', stuff, re.M | re.S)
nresults = len(results)
if nresults == 3:
cov1 = float(results[0][0])
cov2 = float(results[1][0])
cov3 = float(results[2][0])
logL1 = float(results[0][1])
logL2 = float(results[1][1])
logL3 = float(results[2][1])
else:
print(' nresults was %d (expecting 3) so did not process' % nresults)
else:
print('%s: Did not process because there were %d outfilenames and %d errfilenames' % (lc_model,len(outfilenames),len(errfilenames)))
return {
'rnseed':rnseed,
'cov1':cov1,
'logL1':logL1,
'cov2':cov2,
'logL2':logL2,
'cov3':cov3,
'logL3':logL3
}
models = ['JC', 'JCI', 'JCG', 'JCIG', 'GTR', 'GTRI', 'GTRG', 'GTRIG', '3JC', '3JCI', '3JCG', '3JCIG', '3GTR', '3GTRI', '3GTRG', '3GTRIG']
lorad = {}
for m in models:
lorad[m] = summarizeLoRaD(m)
gss = {}
gss['JC'] = -2776.52
gss['JCI'] = -2744.59
gss['JCG'] = -2747.44
gss['JCIG'] = -2743.56
gss['GTR'] = -2714.20
gss['GTRI'] = -2681.00
gss['GTRG'] = -2682.73
gss['GTRIG'] = -2680.29
gss['3JC'] = -2681.79
gss['3JCI'] = -2668.38
gss['3JCG'] = -2668.99
gss['3JCIG'] = -2667.19
gss['3GTR'] = -2551.10
gss['3GTRI'] = -2535.57
gss['3GTRG'] = -2536.75
gss['3GTRIG'] = -2534.66
outf = open('output-summary.txt','w')
if use_comma_separated_values:
outf.write('model,seed,gss,cov1,lorad1,diff1,cov2,lorad2,diff2,cov3,lorad3,diff3\n')
for m in models:
outf.write('%s,%d,%.2f,%.3f,%.5f,%.5f,%.3f,%.5f,%.5f,%.3f,%.5f,%.5f\n' % (
m,
lorad[m]['rnseed'],
gss[m],
lorad[m]['cov1'],
lorad[m]['logL1'],
lorad[m]['logL1'] - gss[m],
lorad[m]['cov2'],
lorad[m]['logL2'],
lorad[m]['logL2'] - gss[m],
lorad[m]['cov3'],
lorad[m]['logL3'],
lorad[m]['logL3'] - gss[m]
))
else:
outf.write('model\tseed\tgss\tcov1\tlorad1\tdiff1\tcov2\tlorad2\tdiff2\tcov3\tlorad3\tdiff3\n')
for m in models:
outf.write('%s\t%d\t%.2f\t%.3f\t%.5f\t%.5f\t%.3f\t%.5f\t%.5f\t%.3f\t%.5f\t%.5f\n' % (
m,
lorad[m]['rnseed'],
gss[m],
lorad[m]['cov1'],
lorad[m]['logL1'],
lorad[m]['logL1'] - gss[m],
lorad[m]['cov2'],
lorad[m]['logL2'],
lorad[m]['logL2'] - gss[m],
lorad[m]['cov3'],
lorad[m]['logL3'],
lorad[m]['logL3'] - gss[m]
))
outf.close()
|
import copy
from django import forms
from django.contrib.admin.options import BaseModelAdmin
from django.contrib.admin.widgets import AutocompleteSelect, AutocompleteSelectMultiple
from django.db import models
from django.utils.translation import gettext as _
from paper_admin.admin import widgets
from paper_admin.monkey_patch import MonkeyPatchMeta
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
"form_class": forms.SplitDateTimeField,
"widget": forms.SplitDateTimeWidget,
},
models.TextField: {"widget": widgets.AdminTextarea},
models.GenericIPAddressField: {"widget": widgets.AdminIPInput},
models.UUIDField: {"widget": widgets.AdminUUIDInput},
models.BooleanField: {"widget": widgets.AdminCheckboxInput},
models.NullBooleanField: {"widget": forms.NullBooleanSelect},
models.FileField: {"widget": forms.ClearableFileInput},
models.ImageField: {"widget": forms.ClearableFileInput},
}
# MonkeyPatch metaclass for the BaseModelAdmin class.
ModelAdminMonkeyPatchMeta = type("ModelAdminMonkeyPatchMeta", (MonkeyPatchMeta, forms.MediaDefiningClass), {})
class PatchBaseModelAdmin(BaseModelAdmin, metaclass=ModelAdminMonkeyPatchMeta):
def __init__(self):
# Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
# rather than simply overwriting.
overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
for k, v in self.formfield_overrides.items():
overrides.setdefault(k, {}).update(v)
self.formfield_overrides = overrides
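# Hypothetical illustration (class and field names below are examples, not part of
# this module): because the defaults are merged via setdefault(...).update(...)
# instead of being replaced, a ModelAdmin that only overrides the TextField widget
# still inherits every other default from FORMFIELD_FOR_DBFIELD_DEFAULTS:
#
#   class ArticleAdmin(admin.ModelAdmin):
#       formfield_overrides = {
#           models.TextField: {"widget": forms.Textarea(attrs={"rows": 3})},
#       }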
def formfield_for_choice_field(self, db_field, request, **kwargs):
if db_field.name in self.radio_fields:
if "widget" not in kwargs:
kwargs["widget"] = widgets.AdminRadioSelect()
if "choices" not in kwargs:
kwargs["choices"] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[("", _("None"))]
)
return db_field.formfield(**kwargs)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
db = kwargs.get('using')
if db_field.name in self.get_autocomplete_fields(request):
kwargs["widget"] = AutocompleteSelect(
db_field.remote_field,
self.admin_site,
using=db
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.AdminForeignKeyRawIdWidget(
db_field.remote_field,
self.admin_site,
using=db
)
elif db_field.name in self.radio_fields:
kwargs["widget"] = widgets.AdminRadioSelect()
kwargs["empty_label"] = _("None") if db_field.blank else None
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get("using")
autocomplete_fields = self.get_autocomplete_fields(request)
if db_field.name in autocomplete_fields:
kwargs['widget'] = AutocompleteSelectMultiple(
db_field.remote_field,
self.admin_site,
using=db
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.AdminManyToManyRawIdWidget(
db_field.remote_field,
self.admin_site,
using=db
)
elif db_field.name in list(self.filter_vertical) + list(self.filter_horizontal):
kwargs["widget"] = widgets.FilteredSelectMultiple()
else:
kwargs.setdefault("widget", forms.SelectMultiple)
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
return db_field.formfield(**kwargs)
|
#!/usr/bin/python
with open("/dev/axis_fifo_0x0000000080002000", "r+b") as character:
writewords = []
#SPI config register
writewords.append("\x84\x0A\x03\x00")
#GEN config
writewords.append("\x80\x01\x04\x00")
#power down control
writewords.append("\x00\x00\x09\x00")
#DACRANGE
writewords.append("\xAA\xAA\x0A\x00")
#DACRANGE
writewords.append("\xAA\xAA\x0B\x00")
#DACRANGE
writewords.append("\xAA\xAA\x0C\x00")
#DACRANGE
writewords.append("\xAA\xAA\x0D\x00")
#broadcast register
writewords.append("\x00\x80\x0F\x00")
for word in writewords:
character.write(word)
print('Reading...')
reading = character.read(4)
print('Read {} bytes: {} {} {} {}'.format(len(reading), hex(ord(reading[0])), hex(ord(reading[1])), hex(ord(reading[2])), hex(ord(reading[3]))))
|
# Access to KEGG API
from bioservices.kegg import KEGG
import ora_msc
import matplotlib.pyplot as plt
# Define the path of metabolomics data
DATA_PATH = './data/'
# Stating the annotation files & modzscore files
pos_annot = DATA_PATH + 'annotation_pos.txt'
pos_mod = DATA_PATH + 'modzscore_pos_annotated.tsv'
neg_annot = DATA_PATH + 'annotation_neg.txt'
neg_mod = DATA_PATH + 'modzscore_neg_annotated.tsv'
# Initialise KEGG instance
kegg_instance = KEGG()
kegg_instance.organism = "eco"
# Initialise both backgrounds
test_compounds = ora_msc.get_all_compounds('eco')
zamboni_bg = ora_msc.loadTsv(DATA_PATH + 'annotation_all.txt')
# Remove metabolites detected in Zamboni but not in any E.coli pathway
zamboni_bg = zamboni_bg & test_compounds
# build {pathway: compounds} dictionary for E.coli
ecoli_pathways = kegg_instance.pathwayIds
pathway_2_compounds = dict()
for pathway in ecoli_pathways:
parsed_output = kegg_instance.parse(kegg_instance.get(pathway)) # parsed_output has lots of information about the pathway
try:
compounds = set(parsed_output['COMPOUND'].keys())
pathway_2_compounds[pathway] = compounds
except KeyError: # Some pathways do not have defined compounds
pass
# Translate KO number to gene name
sample_id_all = DATA_PATH + 'sample_id_modzscore.tsv'
all_knockouts = []  # End product
fh_sample_id_all = open(sample_id_all, 'r')
for knockout in fh_sample_id_all:
all_knockouts.append(knockout.rstrip())
fh_sample_id_all.close()
size_dist = []
for pathway in pathway_2_compounds:
#if len(pathway_2_compounds[pathway]) == 1:
# print(pathway)
size_dist.append(len(pathway_2_compounds[pathway]))
zamboni_size_dist = []
for pathway in pathway_2_compounds:
compounds = pathway_2_compounds[pathway]
cmpd_count = 0
for compound in compounds:
if compound in zamboni_bg:
cmpd_count += 1
zamboni_size_dist.append(cmpd_count)
plt.subplot(211)
plt.hist(zamboni_size_dist, bins=range(0, 145, 5))
plt.ylim(0, 40)
plt.xlabel('Pathway size')
plt.ylabel('Number of pathways')
plt.title('Pathway size distribution (Zamboni background)')
plt.subplot(212)
plt.hist(size_dist, bins=range(0, 145, 5))
plt.ylim(0, 40)
plt.xlabel('Pathway size')
plt.ylabel('Number of pathways')
plt.title('Pathway size distribution (all compounds)')
plt.tight_layout()
plt.show()
|
from abc import ABC, abstractmethod
from datetime import datetime
import os
from typing import Tuple
from omegaconf import DictConfig
import torch
from rlcycle.common.models.base import BaseModel
class LearnerBase(ABC):
"""Abstract base class for Learner"""
@abstractmethod
def update_model(
self, experience: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
pass
@abstractmethod
def get_policy(self, to_cuda: bool) -> BaseModel:
pass
class Learner(LearnerBase):
"""Abstract class for all learners
Attributes:
experiment_info (DictConfig): experiment info
hyper_params (DictConfig): algorithm hyperparameters
model_cfg (DictConfig): model configurations
use_cuda (bool): true if using gpu
"""
def __init__(
self,
experiment_info: DictConfig,
hyper_params: DictConfig,
model_cfg: DictConfig,
):
self.experiment_info = experiment_info
self.hyper_params = hyper_params
self.model_cfg = model_cfg
self.use_cuda = self.experiment_info.device == "cuda"
time_info = datetime.now()
timestamp = f"{time_info.year}-{time_info.month}-{time_info.day}"
self.ckpt_path = (
f"../../../../checkpoints/{self.experiment_info.env.name}"
f"/{self.experiment_info.experiment_name}/{timestamp}/"
)
os.makedirs(self.ckpt_path, exist_ok=True)
@abstractmethod
def _initialize(self):
pass
@abstractmethod
def update_model(
self, experience: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
pass
@abstractmethod
def get_policy(self, to_cuda: bool) -> BaseModel:
pass
class LearnerWrapper(LearnerBase):
"""Abstract base class for Learner Wrappers
Attributes:
learner (Learner): learner to be wrapped
"""
def __init__(self, learner: Learner):
self.learner = learner
def update_model(
self, experience: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
"""Call wrapped learner update_model()"""
return self.learner.update_model(experience)
def get_policy(self, to_cuda: bool) -> BaseModel:
"""Call wrapped learner get_policy()"""
return self.learner.get_policy(to_cuda)
|
import os
config = {
'CURRENT_DIR': os.getcwd(),
'IGNORED_DIRS': 'venv',
}
|
import os
true_strings = ['true', 'True', 't', '1']
# S3
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_DEFAULT_REGION = os.getenv('AWS_DEFAULT_REGION')
# Datastores
KAFKA_HOSTS = os.getenv('KAFKA_HOSTS', 'kafka:9092')
ZOOKEEPER_HOST = os.getenv('ZOOKEEPER_HOST', 'zookeeper:2181')
REDIS_HOST = os.getenv('REDIS_HOST', 'redis')
# Thumbnail size
TARGET_RESOLUTION = (640, 640)
# Number of tasks to schedule (but not necessarily execute) simultaneously.
# Each pending resize task takes ~3kb of memory, so scheduling 1MM events means
# consuming 3gb of memory in pending tasks alone.
SCHEDULE_SIZE = int(os.getenv('SCHEDULE_SIZE', '3000'))
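# Rough check of the note above (assuming ~3 KB per pending task): the default of
# 3000 scheduled tasks amounts to roughly 9 MB of pending-task memory.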
# Number of resize tasks to run concurrently.
# Each task requires a connection to Redis, so be mindful of the max connection
# limit. Also be mindful that running too many coroutines simultaneously can
# be detrimental to performance.
MAX_TASKS = 3000
PROFILE_MEMORY = os.getenv('PROFILE_MEMORY', 'False') in true_strings
|
from showml.deep_learning.layers import Activation
import numpy as np
class Sigmoid(Activation):
"""A layer which applies the Sigmoid operation to an input.
"""
def forward(self, X: np.ndarray) -> np.ndarray:
return 1 / (1 + np.exp(-X))
def backward(self, X: np.ndarray) -> np.ndarray:
return self.forward(X) * (1 - self.forward(X))
class Relu(Activation):
"""A layer which applies the ReLU operation to an input.
"""
def forward(self, X: np.ndarray) -> np.ndarray:
return abs(X) * (X > 0)
def backward(self, X: np.ndarray) -> np.ndarray:
return 1.0 * (X > 0)
class Softmax(Activation):
"""A layer which applies the Softmax operation to an input.
"""
def forward(self, X: np.ndarray) -> np.ndarray:
e_x = np.exp(X - np.max(X, axis=-1, keepdims=True))
return e_x / np.sum(e_x, axis=-1, keepdims=True)
def backward(self, X: np.ndarray) -> np.ndarray:
return self.forward(X) * (1 - self.forward(X))
|
class IFormattable:
""" Provides functionality to format the value of an object into a string representation. """
def ToString(self,format,formatProvider):
"""
ToString(self: IFormattable,format: str,formatProvider: IFormatProvider) -> str
Formats the value of the current instance using the specified format.
format: The format to use.-or- A null reference (Nothing in Visual Basic) to use the default format
defined for the type of the System.IFormattable implementation.
formatProvider: The provider to use to format the value.-or- A null reference (Nothing in Visual Basic) to
obtain the numeric format information from the current locale setting of the operating system.
Returns: The value of the current instance in the specified format.
"""
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
# Please keep this file so we can directly use this repo as a package.
|
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# pylint: disable=too-many-public-methods,cyclic-import
"""Base class for all message classes, abstracting out
common functionality and facilitating differences via
property and method overrides"""
import logging
from rbac.common import protobuf
from rbac.common.crypto.keys import Key
from rbac.common.crypto.keys import PUBLIC_KEY_PATTERN
from rbac.common.sawtooth import batcher
from rbac.common.sawtooth import client
from rbac.common.sawtooth import state_client
from rbac.common.base import base_processor as processor
from rbac.common.base.base_address import AddressBase
LOGGER = logging.getLogger(__name__)
class BaseMessage(AddressBase):
"""Base class for all message classes, abstracting out
common functionality and facilitating differences via
property and method overrides"""
def __init__(self):
AddressBase.__init__(self)
self._message_type_name = batcher.get_message_type_name(self.message_type)
if self.register_message:
processor.register_message_handler(self)
else:
processor.unregister_message_handler(self)
@property
def message_action_type(self):
"""The action type performed by this message"""
return None
# raise NotImplementedError("Class must implement this property")
@property
def message_subaction_type(self):
"""The subsequent action performed or proposed by this message"""
return None
@property
def message_object_type(self):
"""The object type this message acts upon"""
return self.object_type
@property
def message_related_type(self):
"""The related object type this message acts upon"""
return self.related_type
@property
def message_relationship_type(self):
"""The relationship type this message acts upon"""
return self.relationship_type
@property
def message_type_name(self):
"""The name of the message type, derives from the message properties
Example: ObjectType.USER MessageActionType.CREATE -> CREATE_USER
-or- ActionType.PROPOSE, SubActionType.ADD, MessageObjectType.USER,
RelationshipType.MANAGER -> PROPOSE_ADD_USER_MANAGER
Override where behavior differs"""
if (
self.message_action_type
and self.message_subaction_type
and self.message_relationship_type
):
return (
self.message_action_type.name
+ "_"
+ self.message_subaction_type.name
+ "_"
+ self.message_object_type.name
+ "_"
+ self.message_relationship_type.name
)
if self.message_action_type.name:
return self.message_action_type.name + "_" + self.message_object_type.name
return self._message_type_name
@property
def message_type(self):
"""The message type of this message, an atrribute enum of RBACPayload
Defaults to protobuf.rbac_payload_pb2.{message_type_name}
(see message_type_name) Override message_type_name where behavior differs"""
if not self.message_action_type:
raise NotImplementedError("Class must implement this property")
return getattr(protobuf.rbac_payload_pb2.RBACPayload, self.message_type_name)
@property
def message_proto(self):
"""The protobuf used to serialize this message type
Derives its name from the object type and message action type names.
Example: ObjectType.USER MessageActionType.CREATE
-> protobuf.user_transaction_pb2.CreateUser
(see message_type_name) Override where behavior differs"""
if not self.message_action_type:
raise NotImplementedError("Class must implement this property")
return getattr(
getattr(
protobuf, self.message_object_type.name.lower() + "_transaction_pb2"
),
self._camel_case(self.message_type_name),
)
@property
def message_fields_not_in_state(self):
"""Fields that are on the message but not stored on its state object"""
return []
@property
def register_message(self):
"""Whether to register this message handler with the transaction processor"""
return False # TODO: default will flip to True after TP refactor
def make(self, **kwargs):
"""Makes the message (protobuf) from the named arguments passed to make"""
# pylint: disable=not-callable
message = self.message_proto()
batcher.make_message(message, self.message_type, **kwargs)
if hasattr(message, self._name_id) and getattr(message, self._name_id) == "":
# sets the unique identifier field of the message to a unique_id if no identifier is provided
setattr(message, self._name_id, self.unique_id())
self.validate(message=message)
return message
def make_addresses(self, message, signer_keypair):
"""Make addresses returns the inputs (read) and output (write)
addresses that may be required in order to validate the message
and store the resulting data of a successful or failed execution"""
raise NotImplementedError("Class must implement this method")
def validate(self, message, signer=None):
"""Commmon validation for all messages"""
if not isinstance(message, self.message_proto):
raise TypeError("Expected message to be {}".format(self.message_proto))
if (
signer is not None
and not isinstance(signer, Key)
and not (isinstance(signer, str) and PUBLIC_KEY_PATTERN.match(signer))
):
raise TypeError("Expected signer to be a keypair or a public key")
if isinstance(signer, Key):
signer = signer.public_key
return signer
def validate_state(self, state, message, signer):
"""Common state validation for all messages"""
if signer is None:
raise ValueError("Signer is required")
if message is None:
raise ValueError("Message is required")
if not (isinstance(signer, str) and PUBLIC_KEY_PATTERN.match(signer)):
raise TypeError("Expected signer to be a public key")
if state is None:
raise ValueError("State is required")
def make_payload(self, message, signer_keypair=None):
"""Make a payload for the given message type"""
self.validate(message=message, signer=signer_keypair)
message_type = self.message_type
inputs, outputs = self.make_addresses(
message=message, signer_keypair=signer_keypair
)
return batcher.make_payload(
message=message, message_type=message_type, inputs=inputs, outputs=outputs
)
def create(self, signer_keypair, message, object_id=None, target_id=None):
"""Send a message to the blockchain"""
self.validate(message=message, signer=signer_keypair)
return self.send(
signer_keypair=signer_keypair,
payload=self.make_payload(message=message, signer_keypair=signer_keypair),
object_id=object_id,
target_id=target_id,
)
def send(self, signer_keypair, payload, object_id=None, target_id=None):
"""Sends a payload to the validator API"""
if not isinstance(signer_keypair, Key):
raise TypeError("Expected signer_keypair to be a Key")
if not isinstance(payload, protobuf.rbac_payload_pb2.RBACPayload):
raise TypeError("Expected payload to be an RBACPayload")
_, _, batch_list, _ = batcher.make(
payload=payload, signer_keypair=signer_keypair
)
got = None
status = client.send_batches_get_status(batch_list=batch_list)
if object_id is not None:
got = self.get(object_id=object_id, target_id=target_id)
return got, status
def get(self, object_id, target_id=None):
"""Gets an address from the blockchain from the validator API"""
address = self.address(object_id=object_id, target_id=target_id)
# pylint: disable=not-callable
container = self._state_container()
container.ParseFromString(client.get_address(address=address))
return self._find_in_state_container(
container=container,
address=address,
object_id=object_id,
target_id=target_id,
)
def message_to_storage(self, message):
"""Transforms the message into the state (storage) object"""
# pylint: disable=not-callable
return batcher.message_to_message(
self._state_object(), self._name_camel, message
)
def set_state(self, state, message, object_id, target_id=None):
"""Creates a new address in the blockchain state"""
store = self.message_to_storage(message=message)
# pylint: disable=no-member,not-callable
container = self._state_container()
container.users.extend([store])
address = self.address(object_id=object_id, target_id=target_id)
state_client.set_address(state=state, address=address, container=container)
def apply(self, header, payload, state):
"""Handles a message in the transaction processor"""
# pylint: disable=not-callable
message = self.message_proto()
message.ParseFromString(payload.content)
signer = header.signer_public_key
self.validate(message=message, signer=signer)
self.validate_state(state=state, message=message, signer=signer)
self.set_state(state=state, message=message, object_id=message.user_id)
|
from typing import List
class Solution:
def maximizeSweetness(self, sweetness: List[int], k: int) -> int:
left = 1
right = sum(sweetness) + 2
maximum = 0
while left < right:
mid = left + (right - left)// 2
if self.verify(sweetness, mid, k):
maximum = mid
left = mid + 1
else:
right = mid
return maximum
def verify(self, sweetness, candidateMin, k):
curChunk = 0
totalChunks = 0
for sweet in sweetness:
curChunk += sweet
if curChunk >= candidateMin:
curChunk = 0
totalChunks += 1
return totalChunks > k
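# Hypothetical sanity check, not part of the original solution: for the classic
# "divide chocolate" style input below, cutting [1..9] into k + 1 = 6 chunks
# gives a best achievable minimum chunk sweetness of 6.
if __name__ == '__main__':
    assert Solution().maximizeSweetness([1, 2, 3, 4, 5, 6, 7, 8, 9], 5) == 6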
|
import threading
import collections
import zephyr
class EventStream:
def __init__(self):
self.events = []
self.events_cleaned_up = 0
self.lock = threading.RLock()
def __iter__(self):
with self.lock:
return iter(self.events[:])
def __len__(self):
with self.lock:
corrected_length = len(self.events) + self.events_cleaned_up
return corrected_length
def __getitem__(self, index):
with self.lock:
assert 0 <= index < len(self)
assert index >= self.events_cleaned_up
corrected_index = index - self.events_cleaned_up
return self.events[corrected_index]
def append(self, value):
with self.lock:
self.events.append(value)
def clean_up_events_before(self, timestamp_lower_bound):
with self.lock:
cutoff_index = 0
for event_timestamp, event_value in self.events: #@UnusedVariable
if event_timestamp < timestamp_lower_bound:
cutoff_index += 1
else:
break
if cutoff_index:
self.events = self.events[cutoff_index:]
self.events_cleaned_up += cutoff_index
def iterate_samples(self, from_sample_index, to_end_timestamp):
sample_index = from_sample_index
while True:
with self.lock:
if self.events_cleaned_up > sample_index:
break
last_item = self[sample_index] if len(self) > sample_index else None
if last_item is not None:
event_timestamp, event_value = last_item
if event_timestamp <= to_end_timestamp:
yield event_value
sample_index += 1
continue
break
class SignalStream:
def __init__(self, signal_packet):
self.samplerate = signal_packet.samplerate
self.samples = []
self.lock = threading.RLock()
self.end_timestamp = None
self.append_signal_packet(signal_packet)
def append_signal_packet(self, signal_packet):
with self.lock:
assert signal_packet.samplerate == self.samplerate
self.samples.extend(signal_packet.samples)
self.end_timestamp = signal_packet.timestamp + len(signal_packet.samples) / float(signal_packet.samplerate)
def remove_samples_before(self, timestamp_lower_bound):
with self.lock:
samples_to_remove = max(0, int((timestamp_lower_bound - self.start_timestamp) * self.samplerate))
if samples_to_remove:
self.samples = self.samples[samples_to_remove:]
return samples_to_remove
@property
def start_timestamp(self):
return self.end_timestamp - len(self.samples) / float(self.samplerate)
def iterate_timed_samples(self, skip_samples=0):
with self.lock:
start_timestamp = self.start_timestamp
sample_period = 1.0 / self.samplerate
for sample_i, sample in enumerate(self.samples[skip_samples:], start=skip_samples):
sample_timestamp = start_timestamp + sample_i * sample_period
yield sample_timestamp, sample
class SignalStreamHistory:
def __init__(self):
self._signal_streams = []
self.samples_cleaned_up = 0
def append_signal_packet(self, signal_packet, starts_new_stream):
if starts_new_stream or not len(self._signal_streams):
signal_stream = SignalStream(signal_packet)
self._signal_streams.append(signal_stream)
else:
signal_stream = self._signal_streams[-1]
signal_stream.append_signal_packet(signal_packet)
def get_signal_streams(self):
return self._signal_streams
def _cleanup_signal_stream(self, signal_stream, timestamp_bound):
if timestamp_bound >= signal_stream.end_timestamp:
self._signal_streams.remove(signal_stream)
samples_removed = len(signal_stream.samples)
else:
samples_removed = signal_stream.remove_samples_before(timestamp_bound)
self.samples_cleaned_up += samples_removed
def clean_up_samples_before(self, history_limit):
for signal_stream in self._signal_streams[:]:
first_timestamp = signal_stream.start_timestamp
if first_timestamp >= history_limit:
break
self._cleanup_signal_stream(signal_stream, history_limit)
def iterate_samples(self, from_sample_index, to_end_timestamp):
from_sample_index = from_sample_index - self.samples_cleaned_up
signal_stream_start_index = 0
for signal_stream in self._signal_streams:
sample_count = len(signal_stream.samples)
next_signal_stream_start_index = signal_stream_start_index + sample_count
if from_sample_index < next_signal_stream_start_index:
samples_to_skip = max(0, from_sample_index - signal_stream_start_index)
for sample_timestamp, sample in signal_stream.iterate_timed_samples(samples_to_skip):
if sample_timestamp > to_end_timestamp:
break
yield sample
signal_stream_start_index = next_signal_stream_start_index
class MeasurementCollector:
def __init__(self, history_length_seconds=20.0):
self._signal_stream_histories = collections.defaultdict(SignalStreamHistory)
self._event_streams = collections.defaultdict(EventStream)
self.history_length_seconds = history_length_seconds
self.last_cleanup_time = 0.0
def get_signal_stream_history(self, stream_type):
return self._signal_stream_histories[stream_type]
def get_event_stream(self, stream_type):
return self._event_streams[stream_type]
def iterate_signal_stream_histories(self):
return self._signal_stream_histories.items()
def iterate_event_streams(self):
return self._event_streams.items()
def handle_signal(self, signal_packet, starts_new_stream):
signal_stream_history = self._signal_stream_histories[signal_packet.type]
signal_stream_history.append_signal_packet(signal_packet, starts_new_stream)
self.cleanup_if_needed()
def handle_event(self, stream_name, value):
self._event_streams[stream_name].append(value)
self.cleanup_if_needed()
def cleanup_if_needed(self):
now = zephyr.time()
if self.last_cleanup_time < now - 5.0:
history_limit = now - self.history_length_seconds
for signal_stream_history in self._signal_stream_histories.values():
signal_stream_history.clean_up_samples_before(history_limit)
for event_stream in self._event_streams.values():
event_stream.clean_up_events_before(history_limit)
self.last_cleanup_time = now
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from conversationinsights.channels.console import ConsoleInputChannel
def test_console_input():
import conversationinsights.channels.console
# Overwrites the input() function, so whenever something tries to read from the command line
# this function gets called instead. Rather than waiting for the user to type, it simulates the
# input "Test Input", so it looks as if the user always types "Test Input" whenever a cmd input is requested.
conversationinsights.channels.console.input = lambda _=None: "Test Input" # simulates cmdline input
recorded = []
def on_message(message):
recorded.append(message)
channel = ConsoleInputChannel()
channel._record_messages(on_message, max_message_limit=3)
assert [r.text for r in recorded] == ["Test Input", "Test Input", "Test Input"]
|
import math
import numpy as np
import time
from enum import Enum
PYGAME_DISPLAY = None
class Rotation(object):
"""Used to represent the rotation of an actor or obstacle.
Rotations are applied in the order: Roll (X), Pitch (Y), Yaw (Z).
A 90-degree "Roll" maps the positive Z-axis to the positive Y-axis.
A 90-degree "Pitch" maps the positive X-axis to the positive Z-axis.
A 90-degree "Yaw" maps the positive X-axis to the positive Y-axis.
Args:
pitch: Rotation about Y-axis.
yaw: Rotation about Z-axis.
roll: Rotation about X-axis.
Attributes:
pitch: Rotation about Y-axis.
yaw: Rotation about Z-axis.
roll: Rotation about X-axis.
"""
def __init__(self, pitch=0, yaw=0, roll=0):
self.pitch = pitch
self.yaw = yaw
self.roll = roll
@classmethod
def from_carla_rotation(cls, rotation):
"""Creates a pylot Rotation from a CARLA rotation.
Args:
rotation (carla.Rotation): An instance of a CARLA rotation.
Returns:
:py:class:`.Rotation`: A pylot rotation.
"""
import carla
if not isinstance(rotation, carla.Rotation):
raise ValueError('rotation should be of type carla.Rotation')
return cls(rotation.pitch, rotation.yaw, rotation.roll)
def as_carla_rotation(self):
""" Retrieves the rotation as an instance of a CARLA rotation.
Returns:
carla.Rotation: Instance representing the rotation.
"""
import carla
return carla.Rotation(self.pitch, self.yaw, self.roll)
def __repr__(self):
return self.__str__()
def __str__(self):
return 'Rotation(pitch={}, yaw={}, roll={})'.format(
self.pitch, self.yaw, self.roll)
class Vector3D(object):
"""Represents a 3D vector and provides useful helper functions.
Args:
x: The value of the first axis.
y: The value of the second axis.
z: The value of the third axis.
Attributes:
x: The value of the first axis.
y: The value of the second axis.
z: The value of the third axis.
"""
def __init__(self, x=0, y=0, z=0):
self.x, self.y, self.z = float(x), float(y), float(z)
@classmethod
def from_carla_vector(cls, vector):
"""Creates a pylot Vector3D from a CARLA 3D vector.
Args:
vector (carla.Vector3D): An instance of a CARLA 3D vector.
Returns:
:py:class:`.Vector3D`: A pylot 3D vector.
"""
import carla
if not isinstance(vector, carla.Vector3D):
raise ValueError('The vector must be a carla.Vector3D')
return cls(vector.x, vector.y, vector.z)
def __add__(self, other):
"""Adds the two vectors together and returns the result."""
return type(self)(x=self.x + other.x,
y=self.y + other.y,
z=self.z + other.z)
def __sub__(self, other):
"""Subtracts the other vector from self and returns the result."""
return type(self)(x=self.x - other.x,
y=self.y - other.y,
z=self.z - other.z)
def as_numpy_array(self):
"""Retrieves the 3D vector as a numpy array."""
return np.array([self.x, self.y, self.z])
def as_carla_vector(self):
"""Retrieves the 3D vector as an instance of CARLA 3D vector.
Returns:
carla.Vector3D: Instance representing the 3D vector.
"""
import carla
return carla.Vector3D(self.x, self.y, self.z)
def magnitude(self):
"""Returns the magnitude of the 3D vector."""
return np.linalg.norm(self.as_numpy_array())
def to_camera_view(self, extrinsic_matrix, intrinsic_matrix):
"""Converts the given 3D vector to the view of the camera using
the extrinsic and the intrinsic matrix.
Args:
extrinsic_matrix: The extrinsic matrix of the camera.
intrinsic_matrix: The intrinsic matrix of the camera.
Returns:
An instance with the coordinates converted to the camera view.
"""
position_vector = np.array([[self.x], [self.y], [self.z], [1.0]])
# Transform the points to the camera in 3D.
transformed_3D_pos = np.dot(np.linalg.inv(extrinsic_matrix),
position_vector)
# Transform the points to 2D.
position_2D = np.dot(intrinsic_matrix, transformed_3D_pos[:3])
# Normalize the 2D points.
location_2D = type(self)(float(position_2D[0] / position_2D[2]),
float(position_2D[1] / position_2D[2]),
float(position_2D[2]))
return location_2D
def rotate(self, angle):
""" Rotate the vector by a given angle.
Args:
angle (float): The angle to rotate the Vector by. (in degrees)
Returns:
An instance with the coordinates of the rotated vector.
"""
x_ = math.cos(math.radians(angle)) * self.x - math.sin(
math.radians(angle)) * self.y
y_ = math.sin(math.radians(angle)) * self.x + math.cos(
math.radians(angle)) * self.y
return type(self)(x_, y_, self.z)
def __repr__(self):
return self.__str__()
def __str__(self):
return 'Vector3D(x={}, y={}, z={})'.format(self.x, self.y, self.z)
class Vector2D(object):
"""Represents a 2D vector and provides helper functions."""
def __init__(self, x, y):
self.x = x
self.y = y
def get_angle(self, other):
"""Computes the angle between the vector and another vector."""
angle = math.atan2(self.y, self.x) - math.atan2(other.y, other.x)
if angle > math.pi:
angle -= 2 * math.pi
elif angle < -math.pi:
angle += 2 * math.pi
return angle
def get_vector_and_magnitude(self, other):
"""Calculates vector and magnitude between two vectors.
Args:
other (:py:class:`.Vector2D`): The other vector to be used to
calculate.
Returns:
:py:class:`.Vector2D`, :obj:`float`: A tuple comprising of a 2D
vector and its magnitude.
"""
vec = np.array([self.x - other.x, self.y - other.y])
magnitude = np.linalg.norm(vec)
if magnitude > 0.00001:
vec = vec / magnitude
return Vector2D(vec[0], vec[1]), magnitude
def l1_distance(self, other):
"""Calculates the L1 distance between the given point and the other
point.
Args:
other (:py:class:`~.Vector2D`): The other vector used to
calculate the L1 distance to.
Returns:
:obj:`float`: The L1 distance between the two points.
"""
return abs(self.x - other.x) + abs(self.y - other.y)
def l2_distance(self, other):
vec = np.array([self.x - other.x, self.y - other.y])
return np.linalg.norm(vec)
def __add__(self, other):
"""Adds the two vectors together and returns the result. """
return type(self)(x=self.x + other.x, y=self.y + other.y)
def __sub__(self, other):
"""Subtracts the other vector from self and returns the result. """
return type(self)(x=self.x - other.x, y=self.y - other.y)
def __repr__(self):
return self.__str__()
def __str__(self):
return 'Vector2D(x={}, y={})'.format(self.x, self.y)
class Location(Vector3D):
"""Stores a 3D location, and provides useful helper methods.
Args:
x: The value of the x-axis.
y: The value of the y-axis.
z: The value of the z-axis.
Attributes:
x: The value of the x-axis.
y: The value of the y-axis.
z: The value of the z-axis.
"""
def __init__(self, x=0, y=0, z=0):
super(Location, self).__init__(x, y, z)
@classmethod
def from_carla_location(cls, location):
"""Creates a pylot location from a CARLA location.
Args:
location (carla.Location): An instance of a CARLA location.
Returns:
:py:class:`.Location`: A pylot location.
"""
import carla
if not isinstance(location, carla.Location):
raise ValueError('The location must be a carla.Location')
return cls(location.x, location.y, location.z)
def distance(self, other):
"""Calculates the Euclidean distance between the given point and the
other point.
Args:
other (:py:class:`~.Location`): The other location used to
calculate the Euclidean distance to.
Returns:
:obj:`float`: The Euclidean distance between the two points.
"""
return (self - other).magnitude()
def l1_distance(self, other):
"""Calculates the L1 distance between the given point and the other
point.
Args:
other (:py:class:`~.Location`): The other location used to
calculate the L1 distance to.
Returns:
:obj:`float`: The L1 distance between the two points.
"""
return abs(self.x - other.x) + abs(self.y - other.y) + abs(self.z -
other.z)
def get_vector_and_magnitude(self, other):
"""Calculates vector and magnitude between two locations.
Args:
other (:py:class:`~.Location`): The other location to used to
calculate.
Returns:
:py:class:`.Vector2D`, :obj:`float`: A tuple comprising of a 2D
vector and its magnitude.
"""
vec = Vector2D(self.x, self.y)
other_vec = Vector2D(other.x, other.y)
return vec.get_vector_and_magnitude(other_vec)
def as_carla_location(self):
"""Retrieves the location as a carla location instance.
Returns:
carla.Location: Instance representing the location.
"""
import carla
return carla.Location(self.x, self.y, self.z)
def __repr__(self):
return self.__str__()
def __str__(self):
return 'Location(x={}, y={}, z={})'.format(self.x, self.y, self.z)
class Transform(object):
"""A class that stores the location and rotation of an obstacle.
It can be created from a carla.Transform, defines helper functions needed
in Pylot, and makes the carla.Transform serializable.
A transform object is instantiated with either a location and a rotation,
or using a matrix.
Args:
location (:py:class:`.Location`, optional): The location of the object
represented by the transform.
rotation (:py:class:`.Rotation`, optional): The rotation (in degrees)
of the object represented by the transform.
matrix: The transformation matrix used to convert points in the 3D
coordinate space with respect to the location and rotation of the
given object.
Attributes:
location (:py:class:`.Location`): The location of the object
represented by the transform.
rotation (:py:class:`.Rotation`): The rotation (in degrees) of the
object represented by the transform.
forward_vector (:py:class:`.Vector3D`): The forward vector of the
object represented by the transform.
matrix: The transformation matrix used to convert points in the 3D
coordinate space with respect to the location and rotation of the
given object.
"""
def __init__(self, location=None, rotation=None, matrix=None):
if matrix is not None:
self.matrix = matrix
self.location = Location(matrix[0, 3], matrix[1, 3], matrix[2, 3])
# Forward vector is retrieved from the matrix.
self.forward_vector = Vector3D(self.matrix[0, 0],
self.matrix[1, 0], self.matrix[2,
0])
pitch_r = math.asin(self.forward_vector.z)
yaw_r = math.acos(
np.clip(self.forward_vector.x / math.cos(pitch_r), -1, 1))
roll_r = math.asin(matrix[2, 1] / (-1 * math.cos(pitch_r)))
self.rotation = Rotation(math.degrees(pitch_r),
math.degrees(yaw_r), math.degrees(roll_r))
else:
self.location, self.rotation = location, rotation
self.matrix = Transform._create_matrix(self.location,
self.rotation)
# Forward vector is retrieved from the matrix.
self.forward_vector = Vector3D(self.matrix[0, 0],
self.matrix[1, 0], self.matrix[2,
0])
@classmethod
def from_carla_transform(cls, transform):
"""Creates a pylot transform from a carla transform.
Args:
transform (carla.Transform): Carla transform.
Returns:
:py:class:`.Transform`: An instance of a pylot transform.
"""
import carla
if not isinstance(transform, carla.Transform):
raise ValueError('transform should be of type carla.Transform')
return cls(Location.from_carla_location(transform.location),
Rotation.from_carla_rotation(transform.rotation))
@staticmethod
def _create_matrix(location, rotation):
"""Creates a transformation matrix to convert points in the 3D world
coordinate space with respect to the object.
Use the transform_points function to transpose a given set of points
with respect to the object.
Args:
location (:py:class:`.Location`): The location of the object
represented by the transform.
rotation (:py:class:`.Rotation`): The rotation of the object
represented by the transform.
Returns:
A 4x4 numpy matrix which represents the transformation matrix.
"""
matrix = np.identity(4)
cy = math.cos(np.radians(rotation.yaw))
sy = math.sin(np.radians(rotation.yaw))
cr = math.cos(np.radians(rotation.roll))
sr = math.sin(np.radians(rotation.roll))
cp = math.cos(np.radians(rotation.pitch))
sp = math.sin(np.radians(rotation.pitch))
matrix[0, 3] = location.x
matrix[1, 3] = location.y
matrix[2, 3] = location.z
matrix[0, 0] = (cp * cy)
matrix[0, 1] = (cy * sp * sr - sy * cr)
matrix[0, 2] = -1 * (cy * sp * cr + sy * sr)
matrix[1, 0] = (sy * cp)
matrix[1, 1] = (sy * sp * sr + cy * cr)
matrix[1, 2] = (cy * sr - sy * sp * cr)
matrix[2, 0] = (sp)
matrix[2, 1] = -1 * (cp * sr)
matrix[2, 2] = (cp * cr)
return matrix
def __transform(self, points, matrix):
"""Internal function to transform the points according to the
given matrix. This function either converts the points from
coordinate space relative to the transform to the world coordinate
space (using self.matrix), or from world coordinate space to the
space relative to the transform (using inv(self.matrix))
Args:
points: An n by 3 numpy array, where each row is the
(x, y, z) coordinates of a point.
matrix: The matrix of the transformation to apply.
Returns:
An n by 3 numpy array of transformed points.
"""
# Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]].
# So let's transpose the point matrix.
points = points.transpose()
# Add 1s row: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[1,..1]]
points = np.append(points, np.ones((1, points.shape[1])), axis=0)
# Point transformation (depends on the given matrix)
points = np.dot(matrix, points)
# Get all but the last row in array form.
points = np.asarray(points[0:3].transpose())
return points
def transform_points(self, points):
"""Transforms the given set of points (specified in the coordinate
space of the current transform) to be in the world coordinate space.
For example, if the transform is at location (3, 0, 0) and the
location passed to the argument is (10, 0, 0), this function will
return (13, 0, 0) i.e. the location of the argument in the world
coordinate space.
Args:
points: A (number of points) by 3 numpy array, where each row is
the (x, y, z) coordinates of a point.
Returns:
An n by 3 numpy array of transformed points.
"""
return self.__transform(points, self.matrix)
def inverse_transform_points(self, points):
"""Transforms the given set of points (specified in world coordinate
space) to be relative to the given transform.
For example, if the transform is at location (3, 0, 0) and the location
passed to the argument is (10, 0, 0), this function will return
(7, 0, 0) i.e. the location of the argument relative to the given
transform.
Args:
points: A (number of points) by 3 numpy array, where each row is
the (x, y, z) coordinates of a point.
Returns:
An n by 3 numpy array of transformed points.
"""
return self.__transform(points, np.linalg.inv(self.matrix))
def transform_locations(self, locations):
"""Transforms the given set of locations (specified in the coordinate
space of the current transform) to be in the world coordinate space.
This method has the same functionality as transform_points, and
is provided for convenience; when dealing with a large number of
points, it is advised to use transform_points to avoid the slow
conversion between a numpy array and list of locations.
Args:
locations (list(:py:class:`.Location`)): List of locations.
Returns:
list(:py:class:`.Location`): List of transformed points.
"""
points = np.array([loc.as_numpy_array() for loc in locations])
transformed_points = self.__transform(points, self.matrix)
return [Location(x, y, z) for x, y, z in transformed_points]
def inverse_transform_locations(self, locations):
"""Transforms the given set of locations (specified in world coordinate
space) to be relative to the given transform.
This method has the same functionality as inverse_transform_points,
and is provided for convenience; when dealing with a large number of
points, it is advised to use inverse_transform_points to avoid the slow
conversion between a numpy array and list of locations.
Args:
locations (list(:py:class:`.Location`)): List of locations.
Returns:
list(:py:class:`.Location`): List of transformed points.
"""
points = np.array([loc.as_numpy_array() for loc in locations])
transformed_points = self.__transform(points,
np.linalg.inv(self.matrix))
return [Location(x, y, z) for x, y, z in transformed_points]
def as_carla_transform(self):
"""Converts the transform to a carla transform.
Returns:
carla.Transform: Instance representing the current Transform.
"""
import carla
return carla.Transform(
carla.Location(self.location.x, self.location.y, self.location.z),
carla.Rotation(pitch=self.rotation.pitch,
yaw=self.rotation.yaw,
roll=self.rotation.roll))
def get_vector_magnitude_angle(self, target_loc):
"""Computes distance and relative angle between the transform and a
target location.
Args:
target_loc (:py:class:`.Location`): Location of the target.
Returns:
Tuple of the vector to the target, the distance to the target, and the relative angle.
"""
target_vec, magnitude = target_loc.get_vector_and_magnitude(
self.location)
if magnitude > 0:
forward_vector = Vector2D(
math.cos(math.radians(self.rotation.yaw)),
math.sin(math.radians(self.rotation.yaw)))
angle = target_vec.get_angle(forward_vector)
else:
angle = 0
return (target_vec, magnitude, angle)
def is_within_distance_ahead(self, dst_loc, max_distance):
"""Checks if a location is within a distance.
Args:
dst_loc (:py:class:`.Location`): Location to compute distance to.
max_distance (:obj:`float`): Maximum allowed distance.
Returns:
bool: True if other location is within max_distance.
"""
_, norm_dst, d_angle = self.get_vector_magnitude_angle(dst_loc)
# Return if the vector is too small.
if norm_dst < 0.001:
return True
# Return if the vector is greater than the distance.
if norm_dst > max_distance:
return False
return d_angle < 90.0
def __mul__(self, other):
new_matrix = np.dot(self.matrix, other.matrix)
return Transform(matrix=new_matrix)
def __repr__(self):
return self.__str__()
def __str__(self):
if self.location:
return "Transform(location: {}, rotation: {})".format(
self.location, self.rotation)
else:
return "Transform({})".format(str(self.matrix))
class Pose(object):
"""Class used to wrap ego-vehicle information.
Args:
transform (:py:class:`~pylot.utils.Transform`): Transform of the ego
vehicle.
forward_speed (:obj:`int`): Forward speed in m/s.
velocity_vector (:py:class:`~pylot.utils.Vector3D`): Velocity vector
in world frame
Attributes:
transform (:py:class:`~pylot.utils.Transform`): Transform of the ego
vehicle.
forward_speed (:obj:`int`): Forward speed in m/s.
velocity_vector (:py:class:`~pylot.utils.Vector3D`): Velocity vector
in world frame
"""
def __init__(self, transform, forward_speed, velocity_vector=None):
if not isinstance(transform, Transform):
raise ValueError(
'transform should be of type pylot.utils.Transform')
self.transform = transform
# Forward speed in m/s.
self.forward_speed = forward_speed
self.velocity_vector = velocity_vector
def __repr__(self):
return self.__str__()
def __str__(self):
return "Pose(transform: {}, forward speed: {}, velocity vector: {})"\
.format(self.transform, self.forward_speed, self.velocity_vector)
class LaneMarkingColor(Enum):
""" Enum that defines the lane marking colors according to OpenDrive 1.4.
The goal of this enum is to make sure that lane colors are correctly
    propagated from Carla to Pylot.
"""
WHITE = 0
BLUE = 1
GREEN = 2
RED = 3
YELLOW = 4
OTHER = 5
class LaneMarkingType(Enum):
""" Enum that defines the lane marking types according to OpenDrive 1.4.
The goal of this enum is to make sure that lane markings are correctly
    propagated from Carla to Pylot.
"""
OTHER = 0
BROKEN = 1
SOLID = 2
SOLIDSOLID = 3
SOLIDBROKEN = 4
BROKENSOLID = 5
BROKENBROKEN = 6
BOTTSDOTS = 7
GRASS = 8
CURB = 9
NONE = 10
class LaneChange(Enum):
""" Enum that defines the permission to turn either left, right, both or
none for a given lane.
The goal of this enum is to make sure that the lane change types are
    correctly propagated from Carla to Pylot.
"""
NONE = 0
RIGHT = 1
LEFT = 2
BOTH = 3
class LaneType(Enum):
""" Enum that defines the type of the lane according to OpenDrive 1.4.
    The goal of this enum is to make sure that the lane types are correctly
    propagated from Carla to Pylot.
"""
NONE = 1
DRIVING = 2
STOP = 4
SHOULDER = 8
BIKING = 16
SIDEWALK = 32
BORDER = 64
RESTRICTED = 128
PARKING = 256
BIDIRECTIONAL = 512
MEDIAN = 1024
SPECIAL1 = 2048
SPECIAL2 = 4096
SPECIAL3 = 8192
ROADWORKS = 16384
TRAM = 32768
RAIL = 65536
ENTRY = 131072
EXIT = 262144
OFFRAMP = 524288
ONRAMP = 1048576
ANY = 4294967294
class LaneMarking(object):
""" Used to represent a lane marking.
Args:
marking_color (:py:class:`carla.LaneMarkingColor`): The color of the
lane marking.
marking_type (:py:class:`carla.LaneMarkingType`): The type of the lane
marking.
lane_change (:py:class:`carla.LaneChange`): The type that defines the
permission to either turn left, right, both or none.
Attributes:
marking_color (:py:class:`.LaneMarkingColor`): The color of the lane
marking
marking_type (:py:class:`.LaneMarkingType`): The type of the lane
marking.
lane_change (:py:class:`.LaneChange`): The type that defines the
permission to either turn left, right, both or none.
"""
def __init__(self, marking_color, marking_type, lane_change):
self.marking_color = LaneMarkingColor(marking_color)
self.marking_type = LaneMarkingType(marking_type)
self.lane_change = LaneChange(lane_change)
@classmethod
def from_carla_lane_marking(cls, lane_marking):
"""Creates a pylot LaneMarking from a CARLA lane marking.
Args:
lane_marking (:py:class:`carla.LaneMarking`): An instance of a
CARLA lane marking.
Returns:
:py:class:`.LaneMarking`: A pylot lane-marking.
"""
return cls(lane_marking.color, lane_marking.type,
lane_marking.lane_change)
def __repr__(self):
return self.__str__()
def __str__(self):
return "LaneMarking(color: {}, type: {}, change: {})".format(
self.marking_color, self.marking_type, self.lane_change)
def add_timestamp(image_np, timestamp):
"""Adds a timestamp text to an image np array.
Args:
image_np: A numpy array of the image.
timestamp (:obj:`int`): The timestamp of the image.
"""
import cv2
txt_font = cv2.FONT_HERSHEY_SIMPLEX
timestamp_txt = '{}'.format(timestamp)
# Put timestamp text.
cv2.putText(image_np,
timestamp_txt, (5, 15),
txt_font,
0.5, (0, 0, 0),
thickness=1,
lineType=cv2.LINE_AA)
def get_top_down_transform(transform, top_down_lateral_view):
# Height calculation relies on the fact that the camera's FOV is 90.
top_down_location = (transform.location +
Location(0, 0, top_down_lateral_view))
return Transform(top_down_location, Rotation(-90, 0, 0))
def time_epoch_ms():
"""Get current time in milliseconds."""
return int(time.time() * 1000)
def set_tf_loglevel(level):
"""To be used to suppress TensorFlow logging."""
import logging
import os
    if level >= logging.FATAL:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    elif level >= logging.ERROR:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    elif level >= logging.WARNING:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
    else:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
logging.getLogger('tensorflow').setLevel(level)
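# Illustrative usage sketch: silence TensorFlow's C++ logging below ERROR.
#
#     import logging
#     set_tf_loglevel(logging.ERROR)   # TF_CPP_MIN_LOG_LEVEL becomes '2'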
def create_pygame_display(width, height):
global PYGAME_DISPLAY
import pygame
PYGAME_DISPLAY = pygame.display.set_mode((width, height))
|
import datetime
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class Question(models.Model):
question_text = models.CharField(max_length=500)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
def __str__(self):
return self.choice_text
class Answer(models.Model):
class Importance(models.IntegerChoices):
NOT_IMPORTANT = 0, _('doesn\'t matter at all')
SLIGHTLY = 1, _('a little')
MEDIUM = 50, _('average')
VERY_IMPORTANT = 250, _('very important')
MANDATORY = 300, _('mandatory')
user = models.ForeignKey(User, on_delete=models.CASCADE)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
answer_self = models.ForeignKey(Choice, on_delete=models.CASCADE, null=True)
answer_other = models.ManyToManyField(Choice, related_name='answer_other')
importance = models.IntegerField(choices=Importance.choices, default=50)
public_self = models.BooleanField(default=False)
public_other = models.BooleanField(default=False)
answer_date = models.DateTimeField('date answered', default=timezone.now)
def __str__(self):
return f'{self.user.username}: {self.question.question_text}'
class Matching(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
other_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='other_user')
forward_score = models.DecimalField(default=0, decimal_places=1, max_digits=5)
backward_score = models.DecimalField(default=0, decimal_places=1, max_digits=5)
combined_score = models.DecimalField(default=0, decimal_places=1, max_digits=5)
|
class RevStr(str):
    """String whose iterator walks the characters from last to first."""
    def __iter__(self):
        return ItRevStr(self)
class ItRevStr:
    """Iterator over a string that yields its characters in reverse order."""
    def __init__(self, chaine_a_parcourir):
        self.chaine_a_parcourir = chaine_a_parcourir
        self.position = len(self.chaine_a_parcourir)
    def __next__(self):
        # Stop once every character has been yielded.
        if self.position == 0:
            raise StopIteration
        self.position -= 1
        return self.chaine_a_parcourir[self.position]
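# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    for caractere in RevStr("Bonjour"):
        print(caractere, end="")  # prints "ruojnoB"
    print()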
|
import json
from gzip import (
compress,
decompress
)
import numpy as np
from slovnet.record import Record
from slovnet.tar import Tar, DumpTar
from slovnet.vocab import Vocab
PROTOCOL = 1
META = 'meta.json'
MODEL = 'model.json'
class Meta(Record):
__attributes__ = ['id', 'protocol']
def __init__(self, id, protocol=PROTOCOL):
self.id = id
self.protocol = protocol
def check_protocol(self):
if self.protocol != PROTOCOL:
raise ValueError('Expected protocol=%r, got %r' % (PROTOCOL, self.protocol))
#######
#
# ARRAY
#
#######
def array_name(id):
return 'arrays/%d.bin' % id
def array_bytes(array):
return array.tobytes()
def bytes_array(bytes, shape, dtype):
return np.frombuffer(bytes, dtype).reshape(shape)
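# Round-trip sketch (illustrative): tobytes()/frombuffer() restore the array
# as long as the original shape and dtype are supplied.
#
#     a = np.arange(6, dtype=np.float32).reshape(2, 3)
#     assert (bytes_array(array_bytes(a), (2, 3), np.float32) == a).all()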
#######
#
# VOCAB
#
#######
def vocab_name(id):
return 'vocabs/%s.gz' % id
def vocab_bytes(vocab):
content = '\n'.join(vocab.items)
bytes = content.encode('utf8')
return compress(bytes)
def bytes_vocab(bytes):
content = decompress(bytes).decode('utf8')
items = content.splitlines()
return Vocab(items)
#######
#
# PACK
#
#######
def json_bytes(data):
content = json.dumps(data, ensure_ascii=False, indent=2)
return content.encode('utf8')
def bytes_json(bytes):
return json.loads(bytes.decode('utf8'))
class Pack(Tar):
def load_record(self, name, Record):
bytes = self.read(name)
data = bytes_json(bytes)
return Record.from_json(data)
def load_meta(self):
return self.load_record(META, Meta)
def load_model(self, Model):
return self.load_record(MODEL, Model)
def load_arrays(self, weights):
for weight in weights:
if not weight.is_id:
continue
shape, dtype, id = weight
name = array_name(id)
bytes = self.read(name)
yield id, bytes_array(bytes, shape, dtype)
def load_vocab(self, id):
name = vocab_name(id)
bytes = self.read(name)
return bytes_vocab(bytes)
class DumpPack(DumpTar):
def dump_record(self, record, name):
bytes = json_bytes(record.as_json)
self.write(bytes, name)
def dump_meta(self, meta):
self.dump_record(meta, META)
def dump_model(self, model):
self.dump_record(model, MODEL)
def dump_arrays(self, arrays):
for id, array in arrays.items():
name = array_name(id)
bytes = array_bytes(array)
self.write(bytes, name)
def dump_vocab(self, vocab, id):
name = vocab_name(id)
bytes = vocab_bytes(vocab)
self.write(bytes, name)
|
import os
from datetime import datetime, timedelta
def create_folder_if_needed(path):
if not os.path.exists(path):
os.makedirs(path)
def format_time(hour: int, minute: int) -> str:
"""Turns hours and minutes to a string with the format 'HH:MM'. Assumes 24h clock"""
return f"{str(hour).rjust(2, '0')}:{str(minute).rjust(2, '0')}"
def time_now_with_tz(tz):
"""Timezone aware clock"""
assert tz is not None
now = datetime.utcnow() + timedelta(hours=tz)
return format_time(now.hour, now.minute)
def offset_format(utc_offset):
"""Display + or - in front of UTC offset number"""
return str(utc_offset) if utc_offset < 0 else f"+{str(utc_offset)}"
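# Illustrative usage (the time_now_with_tz value is, of course, time dependent):
if __name__ == "__main__":
    print(format_time(9, 5))     # "09:05"
    print(offset_format(2))      # "+2"
    print(offset_format(-3))     # "-3"
    print(time_now_with_tz(2))   # current UTC time shifted by +2 hours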
|
from codecs import open
from setuptools import setup, find_packages
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='blog.kottenator.com',
version='0.5.0.dev1',
description='Super simple blog engine',
long_description=long_description,
url='https://github.com/kottenator/blog.kottenator.com',
author='Rostyslav Bryzgunov',
author_email='kottenator@gmail.com',
license='MIT',
packages=find_packages('src'),
package_dir={'': 'src'},
scripts=['bin/manage.py'],
install_requires=[
'Django~=1.10.0',
'Pillow~=3.4',
'settings-overrider~=0.5',
'django-compressor~=2.0',
'django-compressor-toolkit~=0.5'
],
extras_require={
'dev': ['check-manifest'],
'docs': ['Sphinx'],
'test': [
'pytest~=3.0',
'pytest-django~=3.0',
'pytest-cov~=2.4'
]
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3'
]
)
|
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
# Imports from this application
from app import app
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## 🤑 How much could you make?
Imagine you work as a delivery driver for Domino's Pizza.
(I know -- the best job ever!) You are paid a direct wage,
but most of your earnings are received in tips.
Does your income depend solely on your excellent service and
the generosity of your customers? Or can we optimize
your earnings by planning out your work schedule?
You can use this interactive app to predict your daily take
home tips. Then once you're done, you'll be ready to join
the team!
"""
),
dcc.Link(dbc.Button("Let's try!", color='primary'), href='/predictions', className='mb-2'),
dcc.Markdown(" "),
dcc.Markdown(
"""
>> Ready to apply? »
[jobs.Dominos](https://jobs.dominos.com/dominos-careers/)
"""
)
],
md=6,
)
gapminder = px.data.gapminder()
fig = px.scatter(gapminder.query("year==2007"), x="gdpPercap", y="lifeExp", size="pop", color="continent",
hover_name="country", log_x=True, size_max=60)
column2 = dbc.Col(
[
#dcc.Graph(figure=fig),
html.Img(src='assets/domino.jpg', className='img-fluid', height="500", width="300"),
]
)
layout = dbc.Row([column1, column2])
|
PRM_Header = [
'Mass [m/z]', # MS1 m/z
'Formula [M]',
'Formula type',
'Species',
'CS [z]', # Integer
'Polarity', # "Positive"
'Start [min]',
'End [min]',
'(N)CE',
'(N)CE type',
'MSX ID',
'Comment',
]
|
import utils
utils.prepare_test_resources()
|
def format(number, total=7, decimal=4):
return "{{: {0}.{1}f}}".format(total, decimal).format(number)
def formatInt(number, spaces=4):
return '{{:{0}d}}'.format(spaces).format(number)
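# Illustrative: the outer .format() builds a spec such as "{: 7.4f}" or "{:4d}",
# which the inner call then applies to the number.
#
#     format(3.14159)   ->  " 3.1416"
#     formatInt(42)     ->  "  42"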
import math
s = 1/8
tot = 0
steps = []
while tot <=1:
steps.append(tot)
tot += s
size = 10
for step in steps:
x = size * math.cos(step * 2 * math.pi)
y = 2
z = -size * math.sin(step * 2 * math.pi)
print(f"{{x: {format(x)}, y: {format(y)}, z: {format(z)}}},")
|
__all__ = ["network_common",
"network_tests"
]
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from sklearn.metrics import confusion_matrix, classification_report
import os
cwd = os.path.dirname(os.path.realpath(__file__))
finalAudioPath = os.path.join(cwd, 'finalAudio', 'audio.csv')
def trainData(modelPath=os.path.join(cwd, 'savedModel', 'model.h5')):
"""
This function will make a model of the data that has been preprocessed.
"""
df = pd.read_pickle(finalAudioPath)
x = df["feature"].values
x = np.concatenate(x, axis=0).reshape(len(x), 40)
y = np.array(df["classLabel"].tolist())
y = to_categorical(y)
xTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size=0.2,
random_state=42)
model = Sequential([
Dense(256, input_shape=xTrain[0].shape),
Activation('relu'),
Dropout(0.5),
Dense(256),
Activation('relu'),
Dropout(0.5),
Dense(2, activation='softmax')
])
print(model.summary())
model.compile(
loss="categorical_crossentropy",
optimizer='adam',
metrics=['accuracy']
)
print("Model Score: \n")
model.fit(xTrain, yTrain, epochs=1000)
model.save(modelPath)
score = model.evaluate(xTest, yTest)
print(score)
print("Model Classification Report: \n")
yPred = np.argmax(model.predict(xTest), axis=1)
print(confusion_matrix(np.argmax(yTest, axis=1), yPred))
print(classification_report(np.argmax(yTest, axis=1), yPred))
|
from webbot import Browser
from baixar import baixar
import time
# Start the browser and open the Detran website
web = Browser()
web.go_to('https://acesso.detran.mg.gov.br/veiculos/leiloes/editais')
configuracao = input("Configurou? S/n: ")
i = 0
parar = False  # while-loop stop flag
texto_array = []
print("\n---------- Sistema de Downloads de Tabela de veiculos -------------\n")
print("Processando quantidade de leiloes.....")
bd_principal = web.find_elements(
    tag='div', classname='pd-subcategory')  # load the listing divs
print("..... Procesamento Finalizado com Sucesso\n")
contador = len(bd_principal)
print('O numero de leiloes: {}\n'.format(contador))
contador_aux = contador - 1
for y in range(contador_aux):
    texto = bd_principal[y].text
    # START - strip the trailing <small> text
    x = texto.split()
    x.pop()
    texto = ' '.join(x)
    # END - strip the trailing <small> text
    texto_array.append(texto)
print('----------------------- Iniciando Sistema ---------------------------\n')
while not parar:
    web.click(texto_array[i], tag='a')
    baixar(web)
    web.click('Editais de Leilões', tag='a')
    ################## Stop condition ############################
    if texto_array[i].find('/2014') == -1:
        i += 1
        print(i)
    else:
        # Close the tab
        # web.close_current_tab()
        parar = True  # stop the while loop
|
from .mobilenet_pretrained import mobilenet_v2
from .resnet_pretrained import resnet18, resnet34, resnet50, resnet101, resnet152, resnext50_32x4d, resnext101_32x8d
from .squeezenet_pretrained import squeezenet1_0, squeezenet1_1
|
"""
Semi-Quantum Conference Key Agreement (SQCKA)
Author:
- Ruben Andre Barreiro (r.barreiro@campus.fct.unl.pt)
Supervisors:
- Andre Nuno Souto (ansouto@fc.ul.pt)
- Antonio Maria Ravara (aravara@fct.unl.pt)
Acknowledgments:
- Paulo Alexandre Mateus (pmat@math.ist.utl.pt)
"""
# Class of Utilities
class Utilities:
# Compute the Hamming Weight of a given Binary String
@staticmethod
def compute_hamming_weight(binary_string):
# Initialise the Hamming Weight
hamming_weight = 0
# For each bit (binary digit)
for current_bit in range(len(binary_string)):
# If the current bit (binary digit) is set to 1
if binary_string[current_bit] == "1":
# Increase the Hamming Weight
hamming_weight += 1
# Return the computed Hamming Weight
return hamming_weight
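# Small illustrative check of the helper above.
if __name__ == "__main__":
    assert Utilities.compute_hamming_weight("10110") == 3
    print(Utilities.compute_hamming_weight("10110"))  # 3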
|
from astropy.table import QTable, join
from collections import defaultdict
from DRE.misc.read_catalog import cat_to_table
import os
class Summary:
def __init__(self, name):
self.name = name
self.parameters = defaultdict(list)
self.row_idx = 0
def append(self, params):
self.parameters['ROW'].append(self.row_idx)
self.row_idx += 1
for key, value in params.items():
self.parameters[key].append(value)
def save(self, save_dir='Summary', catalogs_dir='Sextracted'):
os.makedirs(save_dir, exist_ok=True)
table = QTable(self.parameters)
if table:
if os.path.isdir(os.path.join(catalogs_dir)):
if os.path.isdir(os.path.join(catalogs_dir, self.name)):
cat_file = os.path.join(catalogs_dir, self.name, f"{self.name}_cat.fits")
else:
cat_file = os.path.join(catalogs_dir, f"{self.name}_cat.fits")
table = join(table, cat_to_table(cat_file), join_type='inner')
if 'VIGNET' in table.colnames:
table.remove_column('VIGNET')
else:
print(f"Warning: Can't find catalogs in {catalogs_dir}")
table.write(os.path.join(save_dir, f"{self.name}_dre.fits"), overwrite=True)
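# Minimal usage sketch (hypothetical field name and parameter keys, not taken
# from the original pipeline): accumulate fitted parameters row by row, then
# write the summary table, joining the SExtractor catalog when it exists.
if __name__ == "__main__":
    summary = Summary("example_field")
    summary.append({"CHI_SQ": 1.2, "MAG": 18.4})
    summary.append({"CHI_SQ": 0.9, "MAG": 19.1})
    summary.save(save_dir="Summary", catalogs_dir="Sextracted")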
|
from future.utils import python_2_unicode_compatible
@python_2_unicode_compatible
class LazyStr:
def __init__(self, fn):
self.fn = fn
def __str__(self):
return self.fn()
def parse_list(s, sep=','):
s = s.strip()
if not s:
return []
return [item.strip() for item in s.split(sep)]
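# Illustrative usage: LazyStr defers building the string until it is rendered;
# parse_list splits on the separator and trims whitespace.
if __name__ == "__main__":
    lazy = LazyStr(lambda: "built on demand")
    print(lazy)                       # built on demand
    print(parse_list(" a, b , c "))   # ['a', 'b', 'c']
    print(parse_list(""))             # []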
|
"""This module contains `docker image rm` class"""
from docker.errors import APIError
from .command import Command
class Rm(Command):
"""This class implements `docker image rm` command"""
name = "image rm"
require = []
def __init__(self):
Command.__init__(self)
self.settings[self.name] = None
def eval_command(self, args):
try:
Images = []
images = args['images']
del args['images']
for Image in images:
Images.append(Image)
args['image'] = Image
self.client.remove_image(**args)
del args['image']
self.settings[self.name] = '\n'.join(Images)
except APIError as e:
raise e
def final(self):
return self.settings[self.name]
|
from datetime import datetime
from django.core.management.base import BaseCommand
from plugins.polio.models import Campaign, Preparedness
from plugins.polio.preparedness.calculator import get_preparedness_score
from plugins.polio.preparedness.parser import (
get_national_level_preparedness,
get_regional_level_preparedness,
open_sheet_by_url,
)
from logging import getLogger
logger = getLogger(__name__)
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
started_at = datetime.now()
campaigns_with_spreadsheet = Campaign.objects.only("id", "preperadness_spreadsheet_url").filter(
preperadness_spreadsheet_url__isnull=False
)
campaigns_with_spreadsheet.update(preperadness_sync_status="QUEUED")
logger.info(campaigns_with_spreadsheet)
for campaign in campaigns_with_spreadsheet:
campaign.preperadness_sync_status = "ONGOING"
campaign.save()
print(f"Campaign {campaign.pk} refresh started")
try:
sheet = open_sheet_by_url(campaign.preperadness_spreadsheet_url)
preparedness_data = {
"national": get_national_level_preparedness(sheet),
**get_regional_level_preparedness(sheet),
}
preparedness_data["totals"] = get_preparedness_score(preparedness_data)
preparedness = Preparedness.objects.create(
campaign=campaign,
spreadsheet_url=campaign.preperadness_spreadsheet_url,
national_score=preparedness_data["totals"]["national_score"],
district_score=preparedness_data["totals"]["district_score"],
regional_score=preparedness_data["totals"]["regional_score"],
payload=preparedness_data,
)
print(f"Campaign {campaign.pk} refreshed")
print(preparedness)
campaign.preperadness_sync_status = "FINISHED"
campaign.save()
except Exception as e:
logger.error(f"Campaign {campaign.pk} refresh failed")
logger.exception(e)
campaign.preperadness_sync_status = "FAILURE"
campaign.save()
finished_at = datetime.now()
print(
f"""
Started at: {started_at}
Finished at: {finished_at}
Duration in seconds: {(finished_at - started_at).total_seconds()}
"""
)
|
# -*- coding: utf-8 -*-
"""
biothings_explorer.dispatcher
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains code that biothings_explorer use to communicate to and receive from APIs. It serves as a glue between "apicall" module and "api_output_parser" module.
"""
from .json_transformer import Transformer
class OutputParser():
def __init__(self, res, mapping, batch_mode=False, api=None):
self.api = api
self.response = res
self.mapping = mapping
self.batch_mode = batch_mode
self.BIOTHINGS = ['mygene.info', 'myvariant.info',
'mychem.info', 'mydisease.info',
'semmeddisease', 'semmedanatomy',
'semmedbp', 'semmedchemical',
'semmedgene', 'semmedphenotype', 'bp',
'cc', 'mf', 'pathway', 'umlschem']
def parse_biothings_get_res(self):
"""Parse the API response from biothings API using GET method"""
if self.response['total'] == 0:
return None
else:
new_res = {}
for _res in self.response['hits']:
transformed_json = Transformer(_res, self.mapping).transform()
if type(transformed_json) == dict:
for k, v in transformed_json.items():
if k in ["@context", "@type"]:
new_res[k] = v
else:
if k not in new_res:
new_res[k] = []
if type(v) == list:
new_res[k] += v
else:
new_res[k].append(v)
else:
continue
return new_res
def parse_biothings_post_res(self):
"""Parse the API response from biothings API using POST method"""
new_res = {}
for _res in self.response:
if type(_res) != dict:
continue
# handle case where the queried item is not found
elif _res.get('notfound'):
# check if the item is already in final res
if _res['query'] in new_res:
continue
else:
new_res[_res['query']] = {}
else:
transformed_json = Transformer(_res, self.mapping).transform()
if _res['query'] not in new_res:
new_res[_res['query']] = transformed_json
else:
if type(transformed_json) == dict:
for k, v in transformed_json.items():
if k in ["@context", "@type"]:
new_res[_res['query']][k] = v
else:
if k not in new_res[_res['query']]:
new_res[_res['query']][k] = []
if type(v) == list:
new_res[_res['query']][k] += v
else:
new_res[_res['query']][k].append(v)
return dict(new_res)
def parse(self):
if not self.response:
return None
# parse the results from BioThings APIs
if self.api in self.BIOTHINGS:
if self.batch_mode:
return self.parse_biothings_post_res()
else:
return self.parse_biothings_get_res()
# parse the results from non-BioThings APIs
else:
return Transformer(self.response, self.mapping).transform()
|
import logging
from rest_framework import serializers
from helium.feed.models import ExternalCalendar
from helium.feed.services import icalexternalcalendarservice
from helium.feed.services.icalexternalcalendarservice import HeliumICalError
__author__ = "Alex Laird"
__copyright__ = "Copyright 2021, Helium Edu"
__version__ = "1.4.46"
logger = logging.getLogger(__name__)
class ExternalCalendarSerializer(serializers.ModelSerializer):
class Meta:
model = ExternalCalendar
fields = ('id', 'title', 'url', 'color', 'shown_on_calendar', 'user',)
read_only_fields = ('user',)
def validate(self, attrs):
"""
Ensure a valid ICAL URL is given. If not, disable the calendar.
:param attrs: the data to be saved
:return: the validated data
"""
url = attrs.get('url', None)
if not url and self.instance:
url = self.instance.url
if url and (not self.instance or (self.instance and url != self.instance.url)):
try:
icalexternalcalendarservice.validate_url(url)
except HeliumICalError:
logger.info(f"Unable to validate external ICAL URL {url}, so disabling the calendar.")
if self.instance:
self.instance.shown_on_calendar = False
self.instance.save()
attrs['shown_on_calendar'] = False
return attrs
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Pranay S. Yadav
"""
from hypno import read_raw_hypnogram, load_hypnogram, resample_hypnogram
from cycle_detection import detect_cycles, update_hypnogram_cycles
from visualize import plot_hypnogram, save_hypnogram_plot
|
import datetime
import random
import uuid
import requests
# basically everything in this file was generated by CoPilot !
def main():
print('Hello World')
dt = get_datetime()
print(f'The time and date is {dt}')
print('The random number is {rn}'.format(rn=get_random_number()))
print('The string "Hello World" in all caps is {uc}'.format(uc=to_upper('Hello World')))
print('The uuid is {uuid}'.format(uuid=generate_uuid()))
print('A joke: {joke}'.format(joke=get_joke()))
print('The temperature in Bristol is {temp}'.format(temp=get_bbc_temperature()))
print('A headline: {headline}'.format(headline=get_bbc_headline()))
print('The sorted numbers are {numbers}'.format(numbers=sort_numbers()))
print('The name of my new puppy is {name}'.format(name=get_puppy_name()))
print('The TV show Succession info is {info}'.format(info=get_tv_show_info()))
print('The xml is {xml}'.format(xml=generate_xml()))
# a function that returns the current datetime
def get_datetime():
return datetime.datetime.now()
# a function that returns a random number between 1 and 500
def get_random_number():
return random.randint(1, 500)
# a function that turns a string to all caps
def to_upper(string):
return string.upper()
# generate a uuid
def generate_uuid():
return str(uuid.uuid4())
# get a random joke from the web using a web service
def get_joke():
url = 'https://sv443.net/jokeapi/v2/joke/Any'
response = requests.get(url)
# if response json has joke property, return it
if 'joke' in response.json():
return response.json()['joke']
# if response has setup and delivery properties, return them
elif 'setup' in response.json() and 'delivery' in response.json():
return response.json()['setup'] + ' ' + response.json()['delivery']
# else return empty string
else:
return ''
# get the current temperature in Bristol, UK
def get_temperature():
url = 'http://api.openweathermap.org/data/2.5/weather?q=Bristol,uk&appid=b6907d289e10d714a6e88b30761fae22'
response = requests.get(url)
return response.json()['main']['temp']
# scrape the bbc website for the current temperature in Bristol UK
def get_bbc_temperature():
url = 'https://www.bbc.co.uk/weather/2654675'
response = requests.get(url)
return response.text.split('<span class="wr-value--temperature">')[1].split('</span>')[0]
# scrape the bbc news website for a random headline
def get_bbc_headline():
url = 'https://www.bbc.co.uk/news'
response = requests.get(url)
return response.text.split('<h3 class="gs-c-promo-heading__title gel-pica-bold nw-o-link-split__text">')[1].split('</h3>')[0]
# implement a quick sort of a list of numbers
def quick_sort(numbers):
if len(numbers) <= 1:
return numbers
else:
pivot = numbers[0]
less = [i for i in numbers[1:] if i <= pivot]
greater = [i for i in numbers[1:] if i > pivot]
return quick_sort(less) + [pivot] + quick_sort(greater)
# generate a list of 100 random numbers and sort it using quick sort
def sort_numbers():
numbers = [random.randint(1, 500) for i in range(100)]
return quick_sort(numbers)
# return a good name for my new puppy
def get_puppy_name():
return 'Fido'
# get information for the TV show Succession from wikipedia
def get_tv_show_info():
url = 'https://en.wikipedia.org/wiki/Succession_(TV_series)'
response = requests.get(url)
return response.text.split('<p>')[1].split('</p>')[0]
# generate some json, and then turn it into xml
def generate_xml():
json = {
'name': 'John Doe',
'age': '42',
'address': {
'street': '123 Main St',
'city': 'Bristol',
'state': 'CT'
},
'phone_numbers': [
'555-123-4567',
'555-987-6543'
]
}
xml = '<person>\n'
for key, value in json.items():
if isinstance(value, dict):
xml += '<{0}>\n'.format(key)
for subkey, subvalue in value.items():
xml += '<{0}>{1}</{0}>\n'.format(subkey, subvalue)
xml += '</{0}>\n'.format(key)
else:
xml += '<{0}>{1}</{0}>\n'.format(key, value)
xml += '</person>'
return xml
if __name__ == "__main__":
main()
|
"""Adiabatic evolution for the Ising Hamiltonian using linear scaling."""
import os
import argparse
import json
import time
import qibo
from qibo import callbacks, hamiltonians, models
parser = argparse.ArgumentParser()
parser.add_argument("--nqubits", default=4, type=int)
parser.add_argument("--dt", default=1e-2, type=float)
parser.add_argument("--solver", default="exp", type=str)
parser.add_argument("--dense", action="store_true")
parser.add_argument("--accelerators", default=None, type=str)
parser.add_argument("--backend", default="qibotf", type=str)
parser.add_argument("--filename", default=None, type=str)
def parse_accelerators(accelerators):
"""Transforms string that specifies accelerators to dictionary.
The string that is parsed has the following format:
n1device1,n2device2,n3device3,...
and is transformed to the dictionary:
{'device1': n1, 'device2': n2, 'device3': n3, ...}
Example:
2/GPU:0,2/GPU:1 --> {'/GPU:0': 2, '/GPU:1': 2}
"""
if accelerators is None:
return None
def read_digit(x):
i = 0
while x[i].isdigit():
i += 1
return x[i:], int(x[:i])
acc_dict = {}
for entry in accelerators.split(","):
device, n = read_digit(entry)
if device in acc_dict:
acc_dict[device] += n
else:
acc_dict[device] = n
return acc_dict
def main(nqubits, dt, solver, backend, dense=False, accelerators=None,
         filename=None):
    """Performs adiabatic evolution with critical TFIM as the "hard" Hamiltonian."""
    qibo.set_backend(backend)
    if accelerators is not None:
        dense = False
        solver = "exp"
    trotter = not dense
if filename is not None:
if os.path.isfile(filename):
with open(filename, "r") as file:
logs = json.load(file)
print("Extending existing logs from {}.".format(filename))
else:
print("Creating new logs in {}.".format(filename))
logs = []
else:
logs = []
logs.append({
"nqubits": nqubits, "dt": dt, "solver": solver, "trotter": trotter,
"backend": qibo.get_backend(), "precision": qibo.get_precision(),
"device": qibo.get_device(), "threads": qibo.get_threads(),
"accelerators": accelerators
})
print(f"Using {solver} solver and dt = {dt}.")
print(f"Accelerators: {accelerators}")
print("Backend:", logs[-1]["backend"])
start_time = time.time()
h0 = hamiltonians.X(nqubits, trotter=trotter)
h1 = hamiltonians.TFIM(nqubits, h=1.0, trotter=trotter)
logs[-1]["hamiltonian_creation_time"] = time.time() - start_time
print(f"\nnqubits = {nqubits}, solver = {solver}")
print(f"trotter = {trotter}, accelerators = {accelerators}")
print("Hamiltonians created in:", logs[-1]["hamiltonian_creation_time"])
start_time = time.time()
evolution = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=dt, solver=solver,
accelerators=accelerators)
logs[-1]["creation_time"] = time.time() - start_time
print("Evolution model created in:", logs[-1]["creation_time"])
start_time = time.time()
final_psi = evolution(final_time=1.0)
logs[-1]["simulation_time"] = time.time() - start_time
print("Simulation time:", logs[-1]["simulation_time"])
if filename is not None:
with open(filename, "w") as file:
json.dump(logs, file)
if __name__ == "__main__":
args = vars(parser.parse_args())
args["accelerators"] = parse_accelerators(args.pop("accelerators"))
main(**args)
|
import param
import panel as pn
PAGES = {
"About": pn.pane.Markdown("about " * 2500, sizing_mode="stretch_width", name="About"),
"Holoviews": pn.pane.Markdown(
"holoviews " * 2500, sizing_mode="stretch_width", name="Holoviews"
),
"Plotly": pn.pane.Markdown("plotly " * 2500, sizing_mode="stretch_width", name="Plotly"),
}
CSS = """\
body {
margin: 0px;
    width: calc(100vh - 500px);
}
"""
def main() -> pn.Pane:
pn.config.raw_css.append(CSS)
navigator = pn.widgets.RadioBoxGroup(name="RadioBoxGroup", options=list(PAGES))
sidebar = pn.Column(navigator, pn.layout.VSpacer(), width=300, background="lightgray")
tabs = pn.layout.Tabs(name="Tabs")
content = pn.Column(PAGES["About"], sizing_mode="stretch_both")
def page(event):
print("---------")
print(event)
print(PAGES[event.new])
content.clear()
content.append(PAGES[event.new])
navigator.param.watch(page, "value")
app = pn.Row(sidebar, content, sizing_mode="stretch_both")
return app
if __name__.startswith("bk_script"):
main().servable()
|
from tkinter import *
PROGRAM_NAME = "Footprint Editor"
root = Tk()
root.geometry('350x350')
root.title(PROGRAM_NAME)
menu_bar = Menu(root) # menu begins
file_menu = Menu(menu_bar, tearoff=0)
# all file menu-items will be added here next
menu_bar.add_cascade(label='File', menu=file_menu)
edit_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label='Edit', menu=edit_menu)
view_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label='View', menu=view_menu)
about_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label='About', menu=about_menu)
root.config(menu=menu_bar) # menu ends
root.mainloop()
|
"""rla_export: Export data from ColoradoRLA to allow public verification of the audit
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Examples
--------
With no options, the command will run queries using
all the standard .sql files provided in the package, and
put the resulting exported data in files in the current directory.
``rla_export``
The optional -p argument specifies connection information via
a database properties file, which should be the same file used
for the ``java jar`` command line. The output can also optionally be put
in a different output directory using the -e argument.
``rla_export [-p properties_file] [-e export_directory]``
Export a query in json and csv format for selected sql files:
``rla_export file.sql ...``
Full command line usage synopsis:
``rla_export -h``
See README.rst for documentation.
"""
from __main__ import main
|
from ...core.enum.ea_mode import EAMode
from ...core.models.assembly_parameter import AssemblyParameter
from ...core.enum import ea_mode_bin
from ...core.enum.ea_mode_bin import parse_ea_from_binary
from ...simulator.m68k import M68K
from ...core.util.split_bits import split_bits
from ...core.opcodes.opcode import Opcode
from ...core.util import opcode_util
from ...core.enum.op_size import OpSize
from ..util.parsing import parse_assembly_parameter
from ..enum.condition_status_code import ConditionStatusCode
from ..models.memory_value import MemoryValue
class Ori(Opcode): # Forward declaration
pass
class Ori(Opcode):
"""
ORI: Inclusive-OR
Operation: Immediate Data V Destination → Destination
    Syntax: ORI #<data>, <ea>
Attributes: Size = (Byte, Word, Long)
Description: Performs an inclusive-OR operation on the immediate data and the
destination operand and stores the result in the destination location. The size of the
operation is specified as byte, word, or long. The size of the immediate data matches
the operation size.
Condition Codes:
X — Not affected.
N — Set if the most significant bit of the result is set; cleared otherwise.
Z — Set if the result is zero; cleared otherwise.
V — Always cleared.
C — Always cleared.
Size field—Specifies the size of the operation.
00— Byte operation
01— Word operation
10— Long operation
Immediate field: —Data immediately following the instruction.
If size = 00, the data is the low-order byte of the immediate word.
If size = 01, the data is the entire immediate word.
If size = 10, the data is the next two immediate words.
"""
valid_sizes = [OpSize.BYTE, OpSize.WORD, OpSize.LONG]
def __init__(self, params: list, size: OpSize=OpSize.WORD):
assert len(params) == 2
assert isinstance(params[0], AssemblyParameter)
assert isinstance(params[1], AssemblyParameter)
# check src param is valid
assert params[0].mode == EAMode.IMM
self.src = params[0]
# check the dest param is valid
        assert params[1].mode != EAMode.ARD and params[1].mode != EAMode.IMM
self.dest = params[1]
assert size in Ori.valid_sizes
self.size = size
def assemble(self) -> bytearray:
"""
Assembles this opcode into hex to be inserted into memory
:return: The hex version of this opcode
"""
# The first 8 bits are always 0
ret_opcode = 0
if self.size == OpSize.BYTE:
ret_opcode |= 0b00 << 6
elif self.size == OpSize.WORD:
ret_opcode |= 0b01 << 6
elif self.size == OpSize.LONG:
ret_opcode |= 0b10 << 6
ret_opcode |= ea_mode_bin.parse_from_ea_mode_modefirst(self.dest) << 0
ret_bytes = bytearray(ret_opcode.to_bytes(OpSize.WORD.value, byteorder='big', signed=False))
ret_bytes.extend(opcode_util.ea_to_binary_post_op(self.src, self.size).get_value_bytearray())
if self.dest.mode == EAMode.AWA or self.dest.mode == EAMode.ALA:
ret_bytes.extend(opcode_util.ea_to_binary_post_op(self.dest, self.size).get_value_bytearray())
return ret_bytes
def execute(self, simulator: M68K):
"""
Executes this command in a simulator
:param simulator: The simulator to execute the command on
:return: Nothing
"""
# get the length
val_length = self.size.get_number_of_bytes()
# get the value of src from the simulator
src_val = self.src.get_value(simulator, val_length)
# get the value of dest from the simulator
dest_val = self.dest.get_value(simulator, val_length)
# increment the program counter by the length of the instruction (1 word)
to_increment = OpSize.WORD.value
# add the length of the size of the operation, in words
if self.size is OpSize.BYTE:
to_increment += OpSize.WORD.value
else:
to_increment += self.size.value
# repeat for the dest
if self.dest.mode in [EAMode.AbsoluteLongAddress]:
to_increment += OpSize.LONG.value
if self.dest.mode in [EAMode.AbsoluteWordAddress]:
to_increment += OpSize.WORD.value
result_unsigned = src_val.get_value_unsigned() | dest_val.get_value_unsigned()
msb_bit = 0
if self.size is OpSize.BYTE:
msb_bit = 0x80
elif self.size is OpSize.WORD:
msb_bit = 0x8000
elif self.size is OpSize.LONG:
msb_bit = 0x80000000
        # set the CCR flags
simulator.set_ccr_reg(None, (msb_bit & result_unsigned != 0), (result_unsigned == 0), False, False)
# and set the value
self.dest.set_value(simulator, MemoryValue(OpSize.LONG, unsigned_int=result_unsigned))
# set the program counter value
simulator.increment_program_counter(to_increment)
def __str__(self):
return 'Ori command: size {}, src {}, dest {}'.format(self.size, self.src, self.dest)
@classmethod
def command_matches(cls, command: str) -> bool:
"""
Checks whether a command string is an instance of this command type
:param command: The command string to check (e.g. 'MOVE.B', 'LEA', etc.)
:return: Whether the string is an instance of this command type
"""
return opcode_util.command_matches(command, 'ORI')
@classmethod
def get_word_length(cls, command: str, parameters: str) -> int:
"""
>>> Ori.get_word_length('ORI.B', '#$08, D1')
2
>>> Ori.get_word_length('ORI.B', '#$F1, D3')
2
>>> Ori.get_word_length('ORI.W', '#$ABCDE, D0')
2
>>> Ori.get_word_length('ORI.W', '#$000A, ($7000).W')
3
>>> Ori.get_word_length('ORI.L', '#$FFFF7000, ($1234).W')
4
>>> Ori.get_word_length('ORI.L', '#$FFFFFFFF, ($FFFF).L')
5
Gets what the end length of this command will be in memory
:param command: The text of the command itself (e.g. "LEA", "MOVE.B", etc.)
:param parameters: The parameters after the command
        :return: The length of this command in memory, in words
"""
parts = command.split('.') # Split the command by period to get the size of the command
if len(parts) == 1: # Use the default size
size = OpSize.WORD
else:
size = OpSize.parse(parts[1])
# Split the parameters into EA modes
params = parameters.split(',')
dest = parse_assembly_parameter(params[1].strip())
length = 1 # Always 1 word not counting additions to end
if size == OpSize.LONG:
length += 2 # Longs are 2 words long
else:
length += 1 # This is a word or byte, so only 1 word
if dest.mode == EAMode.AWA: # Appends a word
length += 1
if dest.mode == EAMode.ALA: # Appends a long, so 2 words
length += 2
return length
@classmethod
def is_valid(cls, command: str, parameters: str) -> (bool, list):
"""
Tests whether the given command is valid
>>> Ori.is_valid('ORI.B', '#$1, D1')[0]
True
>>> Ori.is_valid('ORI.W', 'A3, D7')[0]
False
>>> Ori.is_valid('ORI.L', '#$ABCD, D3')[0]
True
>>> Ori.is_valid('ORI.L', '#$A0008000, D5')[0]
True
>>> Ori.is_valid('ORR.W', '#AB, D3')[0]
False
>>> Ori.is_valid('OR.G', 'D0, D7')[0]
False
:param command: The command itself (e.g. 'MOVE.B', 'LEA', etc.)
:param parameters: The parameters after the command (such as the source and destination of a move)
:return: Whether the given command is valid and a list of issues/warnings encountered
"""
return opcode_util.n_param_is_valid(command, parameters, "ORI", 2, param_invalid_modes=[
[EAMode.DRD, EAMode.ARD, EAMode.ARI, EAMode.ARIPI, EAMode.ARIPD, EAMode.AWA, EAMode.ALA]
,[EAMode.ARD, EAMode.IMM]])[:2]
@classmethod
def disassemble_instruction(cls, data: bytearray) -> Opcode:
"""
This has a non-ORI opcode
>>> Ori.disassemble_instruction(bytearray.fromhex('D280'))
ORI.B #0, D1
>>> op = Ori.disassemble_instruction(bytearray.fromhex('00010000'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 0'
>>> str(op.dest)
'EA Mode: EAMode.DRD, Data: 1'
ORI.W #$A000, D0
>>> op = Ori.disassemble_instruction(bytearray.fromhex('0040A000'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 40960'
>>> str(op.dest)
'EA Mode: EAMode.DRD, Data: 0'
ORI.L #$FFFF0000, D7
>>> op = Ori.disassemble_instruction(bytearray.fromhex('0087FFFF0000'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 4294901760'
>>> str(op.dest)
'EA Mode: EAMode.DRD, Data: 7'
ORI.W #$FFFF, ($1234).W
>>> op = Ori.disassemble_instruction(bytearray.fromhex('0078FFFF1234'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 65535'
>>> str(op.dest)
'EA Mode: EAMode.AWA, Data: 4660'
Parses some raw data into an instance of the opcode class
:param data: The data used to convert into an opcode instance
:return: The constructed instance or none if there was an error and
the amount of data in words that was used (e.g. extra for immediate
data) or 0 for not a match
"""
assert len(data) >= 2, 'Opcode size is at least one word'
first_word = int.from_bytes(data[0:2], 'big')
[opcode_bin,
size_bin,
ea_mode_bin,
ea_reg_bin] = split_bits(first_word, [8, 2, 3, 3])
if opcode_bin != 0b00000000:
return None
# determine the size
if size_bin == 0b00:
size = OpSize.BYTE
elif size_bin == 0b01:
size = OpSize.WORD
elif size_bin == 0b10:
size = OpSize.LONG
else:
return None
# set the source
src = parse_ea_from_binary(0b111, 0b100, size, True, data[2:])[0]
# set the destination
dest = parse_ea_from_binary(ea_mode_bin, ea_reg_bin, size, False, data[4:])[0]
return cls([src, dest], size)
@classmethod
def from_str(cls, command: str, parameters: str):
"""
Parses a ORI command from text.
>>> str(Ori.from_str('ORI.B', '#$2, D1'))
'Ori command: size OpSize.BYTE, src EA Mode: EAMode.IMM, Data: 2, dest EA Mode: EAMode.DRD, Data: 1'
>>> str(Ori.from_str('ORI.L', '#$FFFF8000, (A0)'))
'Ori command: size OpSize.LONG, src EA Mode: EAMode.IMM, Data: 4294934528, dest EA Mode: EAMode.ARI, Data: 0'
:param command: The command itself (e.g. 'MOVE.B', 'LEA', etc.)
:param parameters: The parameters after the command (such as the source and destination of a move)
:return: The parsed command
"""
return opcode_util.n_param_from_str(command, parameters, Ori, 2, OpSize.WORD)
|
# -*- coding: utf-8 -*-
"""
/dms/usermanagementorg/help_form.py
.. contains the complete contextual help texts for the user management of institutions
Django content Management System
Hans Rauch
hans.rauch@gmx.net
The programs of the dms system may be used freely and adapted to
specific needs.
0.01 06.02.2007 Start of work
0.02 21.06.2007 Work resumed
"""
from django.utils.translation import ugettext as _
from dms.help_form_base import get_help_form
help_form = get_help_form()
# ----------------------------------------------------------------
help_form['name'] = {
'title' : _(u'Kurzname/ID'),
'help' : _(u"""<p>
Tragen Sie hier den Kurznamen ein. Dieser Kurzname wird beim Aufrufen
der Web-Adresse verwendet. Der Kurzname sollte den Inhalt
möglichst präzise beschreiben und gleichzeitig möglichst kurz
sein.
</p>
<p>
Beim Aufrufen der Seiten wird zwischen Groß- und Kleinschreibung unterschieden. Bitte
verwenden Sie beim Kurznamen ausschließlich Kleinbuchstaben. Leerzeichen werden durch
einen Unterstrich, Umlaute durch "ae", "oe" usw. ersetzt.
</p>""") }
# ----------------------------------------------------------------
help_form['title'] = {
'title' : _(u'Überschrift'),
'help' : _(u"""<p>
Tragen Sie hier die Überschrift der Community-Verwaltung ein. Unter dieser Überschrift wird
die Community-Verwaltung angezeigt. Dieser Titel erscheint ebenfalls im übergeordneten
Ordner.
</p>
<p>
Hinweis: Kurze Überschriften fördern die Lesbarkeit und verhindern
störende Zeilenumbrüche.
</p>""") }
# ----------------------------------------------------------------
help_form['sub_title'] = {
'title' : _(u'Unterüberschrift'),
'help' : _(u"""<p>
Falls erforderlich tragen Sie hier die Unterüberschrift Ihrer Community-Verwaltung ein.
Dieser Text wird direkt unterhalb der Überschrift angezeigt.
</p>""") }
# ----------------------------------------------------------------
help_form['text'] = {
'title' : _(u'Intro'),
'help' : _(u"""<p>
Mit diesem Eingabefeld legen Sie den Text fest, der unterhalb
des Überschrift im Sinne einer Einführung angezeigt wird. Sie sollten dieses
Feld beispielsweise aber auch dann nutzen, wenn Sie auf wichtiges Ereignis,
eine gravierende Änderung o.ä. hinweisen möchten.
</p>
<p>
In der Regel werden Sie dieses Feld aber leer lassen.
</p>""") }
# ----------------------------------------------------------------
help_form['text_more'] = {
'title' : _(u'Intro - "Mehr ..."'),
'help' : _(u"""<p>
Mit diesem Eingabefeld können Sie einen ausführlicheren Introtext
anbieten, der automatisch mit "Mehr ..." auf der erreichbar ist.
</p>""") }
# ----------------------------------------------------------------
help_form['image_url'] = {
'title' : _(u'Bild zum Intro'),
'help' : _(u"""<p>
Bei Bedarf können Sie links neben Ihrem Intro-Text ein Bild anzeigen lassen.
Da Sie hier die Web-Adresse (http://..) des Bildes angeben, muss sich diesen Bild bereits
auf dem Server befinden.
</p>""") }
# ----------------------------------------------------------------
help_form['image_url_url'] = {
'title' : _(u'URL zum Bild des Intros'),
'help' : _(u"""<p>
Falls Sie ein Bild zum Intro angegeben haben, können Sie das Bild
mit einer Web-Adresse (http://..) verknüpfen.
</p>""") }
# ----------------------------------------------------------------
help_form['image_extern'] = {
'title' : _(u'Verweis im eigenen Fenster'),
'help' : _(u"""<p>
Falls die mit dem Bild verknüpfte Seite in einem eigenen Fenster angezeigt werden soll,
müssen Sie dieses Feld aktivieren.
</p>""") }
# ----------------------------------------------------------------
help_form['is_wide'] = {
'title' : _(u'Intro mit voller Breite'),
'help' : _(u"""<p>
Mit diesem Feld werden die Intro-Information in voller Breite angezeigt.
</p>""") }
# ----------------------------------------------------------------
help_form['is_important'] = {
'title' : _(u'Intro mit Hervorhebung'),
'help' : _(u"""<p>
Dieses Feld hinterlegt die Intro-Information mit einem farbigen Block.
</p>""") }
# ----------------------------------------------------------------
help_form['info_slot_right'] = {
'title' : _(u'Seiteninfo'),
'help' : _(u"""<p>
In der rechten Spalte können Sie zusätziche Informationen anzeigen.
Diese werden in Blöcken organisiert, wobei ein Block aus einer
Überschrift sowie dem eigentlichen Text besteht. Für Zwischenüberschriften
verwenden Sie bitte das Format "Überschrift 4", da dieses automatisch umgewandelt wird.
</p>
<ul>
<li>
Falls Sie Bilder einbinden wollen, sollten dies nicht breiter als
120 Pixel sein.
</li>
<li>
Wegen der geringen Spaltenbreite sollten Ihre Texte möglichst
knapp gehalten werden. Bei sehr langen Worten stößt das
System an technische Grenzen.
</li>
</ul>""") }
# ----------------------------------------------------------------
help_form['section'] = {
'title' : _(u'Zuordnung beim <i>übergeordneten</i> Ordner'),
'help' : _(u"""<p>
Hier legen Sie fest, beim welchem Zwischentitel Ihre Community-Verwaltung im
<b>übergeordneten</b> Ordner angezeigt wird. Bei Bedarf können
Sie später mit der Aktion "umordnen" Ihre Community-Verwaltung weiter nach oben
oder nach unten verschieben.
</p>""") }
# ----------------------------------------------------------------
help_form['username'] = {
'title' : _(u'Zugangsname'),
'help' : _(u"""<p>
Geben Sie hier bitte den exakten Zugangsnamen ein.
</p>""") }
# ----------------------------------------------------------------
help_form['org_name'] = {
'title' : _(u'Name der Einrichtung'),
'auto_complete': True,
'help' : _(u"""<p>
Geben Sie hier den Namen der neuen Einrichtung ein..
</p>""") }
# ----------------------------------------------------------------
help_form['email'] = {
'title' : _(u'E-Mail-Adresse'),
'help' : _(u"""<p>
Geben Sie hier bitte die exakte E-Mail-Adresse ein.
</p>""") }
# ----------------------------------------------------------------
help_form['groupname'] = {
'title' : _(u'Gruppenname'),
'help' : _(u"""<p>
Tragen Sie hier bitte den neuen Gruppennamen.
</p>""") }
# ----------------------------------------------------------------
help_form['group_names'] = {
'title' : _(u'Vorhandene Gruppen'),
'help' : _(u"""<p>
Wählen Sie bitte die gewünschte Gruppe aus.
</p>""") }
# ----------------------------------------------------------------
help_form['group_names_target'] = {
'title' : _(u'Zielgruppen'),
'help' : _(u"""<p>
Wählen Sie bitte die Gruppen aus, denen Sie Mitglieder der betreffenden Basisgruppe
zuordnen möchten.
</p>""") }
# ----------------------------------------------------------------
help_form['group_names_primary'] = {
'title' : _(u'Basisgruppen'),
'help' : _(u"""<p>
Hier sehen die nicht-veränderbaren Basisgruppen Ihrer Organisation.
</p>""") }
# ----------------------------------------------------------------
help_form['group_names_del'] = {
'title' : _(u'Vorhandene Gruppen'),
'help' : _(u"""<p>
Aus dieser Liste können Sie eine oder mehrere Gruppen entfernen.
</p>""") }
# ----------------------------------------------------------------
help_form['fname'] = {
'title' : _(u'CSV-Datei'),
'help' : _(u"""<p>
Wählen Sie bitte auf Ihrer Festplatte Ihre CSV-Datei aus.
Die entsprechende CSV-Datei, die Sie mit Excel erzeugen können, muss folgenden Aufbau haben:
</p>
<ul>
<li>Jeweils eine Zeile pro Person.</li>
<li>Die einzelnen Angaben zur Person werden mit einem Semikolon getrennt.</li>
<li>Die Spalten haben folgenden Aufbau:<br />
<br />
<tt>Anrede;Nachname;Vorname;E-Mail</tt><br />
oder<br />
<tt>Anrede;Titel;Nachname;Vorname ; E-Mail</tt>
<br />oder<br />
<tt>Nr.;Anrede;Titel;Nachname;Vorname;E-Mail</tt><br />
</li>
</ul>
<p>Beachten Sie bitte, dass E-Mail-Adressen innerhalb der Community nur einmal auftreten
dürfen.</p>
""") }
# ----------------------------------------------------------------
help_form['sex'] = {
'title' : _(u'Anrede'),
'help' : _(u"""<p>
Tragen Sie hier bitte die Anrede ein.
</p>""") }
# ----------------------------------------------------------------
help_form['first_name'] = {
'title' : _(u'Vorname'),
'help' : _(u"""<p>
Tragen Sie hier bitte den Vornamen sein. Sollte der Vorname Akzente
oder Buchstaben eines anderen Alphabets enthalten, wandeln Sie diese
bitte in den zugehörigen deutschen Buchstaben um. Umlaute werden
automatisch gewandelt.
</p>""") }
# ----------------------------------------------------------------
help_form['last_name'] = {
'title' : _(u'Nachname'),
'help' : _(u"""<p>
Tragen Sie hier bitte den Nachnamen sein. Sollte der Nachname Akzente
oder Buchstaben eines anderen Alphabets enthalten, wandeln Sie diese
bitte in den zugehörigen deutschen Buchstaben um. Umlaute werden
automatisch gewandelt.
</p>""") }
# ----------------------------------------------------------------
help_form['title_name'] = {
'title' : _(u'Titel'),
'help' : _(u"""<p>
Falls vorhanden tragen Sie hier bitte den Titel ein. In der Regel
wird dieses Feld aber leer bleiben.
</p>""") }
# ----------------------------------------------------------------
help_form['email'] = {
'title' : _(u'E-Mail-Adresse'),
'help' : _(u"""<p>
Tragen Sie hier bitte die E-Mail-Adresse ein. Wichtig: Diese
E-Mail-Adresse muss wirklich existieren, da Community-Mitglieder
mit falschen E-Mail-Adressen periodisch gelöscht werden.
</p>""") }
# ----------------------------------------------------------------
help_form['tab_base'] = {
'title' : _(u'Basisdaten'),
'info' : _(u"""<p>
Mit diesem Formular legen Sie die wichtigsten Eigenschaften der
Community-Verwaltung einer Institution fest.
</p>""") }
help_form['tab_intro'] = {
'title' : _(u'Intro'),
'info' : _(u"""<p>
Sofern vorhanden, werden die Intro-Information zwischen der
Überschrift und dem eigentlichen Inhalt der Community-Verwaltung
angezeigt.</p>
<p>
Falls Sie bei "Intro mehr ..." Informationen eingeben, wird beim
Intro-Text automatisch ein "Mehr"-Verweis angefügt.
</p>""") }
help_form['tab_navigation'] = {
'title' : _(u'Navigation'),
'info' : _(u"""<p>
Tragen Sie hier bitte die Menüpunkte des linken Navigationsbereichs
jeweils in einer eigenen Zeile ein.
</p>
<p>
<tt>Verweis | Beschreibung | Erläterung | "ausgewählt" = 0 oder 1 | "optisches Merkmal" = 0 oder 1</tt>
</p>
""") }
help_form['tab_frame'] = {
'title' : _(u'Seiteninfo'),
'info' : _(u"""<p>
Im rechten Seitenbereich können Sie auf aktuelle Ereignisse,
neue Angebote usw. hinweisen.
</p>""") }
help_form['tab_visibility'] = {
'title' : _(u'Sichtbarkeit'),
'info' : _(u"""<p>
Sie können die Sichtbarkeit der Community-Verwaltung auf unterschiedliche Weisen steuern.
</p>""") }
help_form['tab_more'] = {
'title' : _(u'Weiteres'),
'info' : _(u"""<p>
Hier finden Sie Optionen, die eher selten gebraucht werden.
</p>""") }
help_form['tab_username'] = {
'title' : _(u'Zugangsname'),
'info' : _(u"""<p>
Geben Sie hier bitte den Zugangsname des entsprechenden Community-Mitglieds ein.
</p>""") }
help_form['tab_email'] = {
'title' : _(u'E-Mail'),
'info' : _(u"""<p>
Geben Sie hier bitte die E-Mail-Adresse des Community-Mitglieds ein.
</p>""") }
help_form['tab_group_name_add'] = {
'title' : _(u'Grupppenname'),
'info' : _(u"""<p>
Mit diesem Formular können Sie Gruppennamen Ihrer Institution wie z.B. "Lerngruppe xy" ergänzen.
</p>""") }
help_form['tab_group_name_delete'] = {
'title' : _(u'Grupppennamen'),
'info' : _(u"""<p>
Mit diesem Formular können Sie Gruppennamen Ihrer Institution löschen.
</p>""") }
help_form['tab_group_user_change'] = {
'title' : _(u'Basisgrupppe'),
'info' : _(u"""<p>
Mit diesem Formular wählen Sie Basisgruppe, aus deren Mitglieder Sie anderen
Gruppen zurodnen möchten.
</p>""") }
help_form['tab_member_user_insert'] = {
'title' : _(u'Mitglied aufnehmen'),
'info' : _(u"""<p>
Mit diesem Formular können Sie ein neues Community-Mitglied in Ihrer Institution aufnehmen.
""") }
help_form['tab_group_user_insert'] = {
'title' : _(u'Mitglieder aufnehmen'),
'info' : _(u"""<p>
Mit diesem Formular können Sie neue Community-Mitglieder in Ihrer Institution aufnehmen.
""") }
help_form['tab_group_user_change'] = {
'title' : _(u'Grupppen'),
'info' : _(u"""<p>
Hiermit ändern Sie die Gruppenzugehörigkeit Ihrer Community-Mitglieder. Legen Sie dazu
bitte die Basisgrupppe sowie die gewünschten Zielgruppen aus.
</p>""") }
help_form['tab_primary_group_user_change'] = {
'title' : _(u'Grupppen'),
'info' : _(u"""<p>
Hiermit ändern Sie die Primärgruppenzugehörigkeit von Community-Mitgliedern.
</p>""") }
help_form['tab_group_user_delete'] = {
'title' : _(u'Grupppe'),
'info' : _(u"""<p>
Mit diesem Formular wählen Sie Gruppe, aus der Sie Mitglieder entfernen möchten.
</p>""") }
|
import math
from typing import List
class Solution:
def minmaxGasDist(self, stations: List[int], K: int) -> float:
dists = self.getDists(stations)
left, right = min(dists) / K, max(dists)
while left + 10e-6 < right:
print(left, right)
count = 0
mid = (left + right) / 2
for i in range(len(stations) - 1):
d = abs(stations[i + 1] - stations[i])
count += math.ceil(d / mid) - 1
if count > K :
left = mid
else:
right = mid
count = 0
for i in range(len(stations) - 1):
d = abs(stations[i + 1] - stations[i])
count += math.ceil(d / left)
if count <= K:
return left
return right
def getDists(self, stations):
dists = []
for i in range(len(stations) - 1):
dists.append(abs(stations[i + 1] - stations[i]))
return dists
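# A minimal usage sketch (not part of the original snippet), using the widely
# cited LeetCode 774 example: with stations at 1..10 and K = 9 new stations,
# the smallest achievable maximum gap is 0.5.
if __name__ == "__main__":
    print(Solution().minmaxGasDist(list(range(1, 11)), 9))  # expected ~0.5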
|
"""
author: "Md. Sabuj Sarker"
copyright: "Copyright 2017-2018, The Synamic Project"
credits: ["Md. Sabuj Sarker"]
license: "MIT"
maintainer: "Md. Sabuj Sarker"
email: "md.sabuj.sarker@gmail.com"
status: "Development"
"""
import unittest
from synamic.core.standalones.functions import parse_front_matter
class TestFrontMatterParser(unittest.TestCase):
def setUp(self):
self.invalid_frontmatter1 = """---
u:ttt
hhhs-----
"""
self.empty_frontmatter = """
the content is here, no front matter around here.
"""
self.valid_frontmatter = """----
name: My name
curl: somehow/curl
----
"""
self.empty_text = ""
def test_invalid(self):
res = parse_front_matter(self.invalid_frontmatter1)
self.assertTupleEqual(res, (None, None, None), "This should not be considered as a valid frontmattered text")
def test_valid(self):
res = parse_front_matter(self.valid_frontmatter)
self.assertEqual(res[0], True)
self.assertEqual(res[1], """name: My name
curl: somehow/curl""")
self.assertEqual(res[2], """ """)
def test_empty_frontmatter(self):
res = parse_front_matter(self.empty_frontmatter)
self.assertEqual(res[0], False, "No frontmatter must return False")
self.assertEqual(res[2], self.empty_frontmatter)
def test_empty_text(self):
res = parse_front_matter(self.empty_text)
self.assertTupleEqual(res[:2], (False, None), "the frontmatter is empty")
self.assertEqual(res[2], "", "Body must be empty as the text is")
|
import glob
import os
import random
import re
import shutil
import sys
from typing import List, Tuple
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import RandomSampler, DistributedSampler, DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import trange, tqdm
from transformers import PreTrainedModel, PreTrainedTokenizer, AdamW, get_linear_schedule_with_warmup, logger
from training.pick_model import evaluate
def train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> Tuple[int, float]:
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
# tb_writer = SummaryWriter(log_dir=os.path.join(args.output_dir, "log", "train"))
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
def collate(examples):
inputs = [x[0] for x in examples]
labels = [x[1] for x in examples]
metadata = [x[2] for x in examples]
def collate_individual(samples):
length_of_first = samples[0].size(0)
are_tensors_same_length = all(x.size(0) == length_of_first for x in samples)
if are_tensors_same_length:
return torch.stack(samples, dim=0)
else:
if tokenizer.pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have one."
)
return pad_sequence(samples, batch_first=True, padding_value=tokenizer.pad_token_id)
inputs = collate_individual(inputs)
labels = collate_individual(labels)
metadata = collate_individual(metadata)
return inputs, labels, metadata
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if (
args.model_name_or_path
and os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if args.model_name_or_path and os.path.exists(args.model_name_or_path):
try:
            # set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss = 0.0, 0.0
model_to_resize = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training
model_to_resize.resize_token_embeddings(len(tokenizer))
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
set_seed(args) # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0], file=sys.stdout, mininterval=10)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs, labels, _ = batch
# torch.set_printoptions(profile="full")
# print(f"Inputs : {tokenizer.convert_ids_to_tokens(inputs.tolist())}")
# print(f"Labels : {tokenizer.convert_ids_to_tokens(labels.tolist())}")
# exit(0)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
model.train()
outputs = model(inputs, labels=labels)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
checkpoint_prefix = "checkpoint"
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
os.makedirs(output_dir, exist_ok=True)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
_rotate_checkpoints(args, checkpoint_prefix)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.continue_from_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
if len(checkpoints_sorted) <= args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
|
'''
Remove_json.py
Remove all json files in sub-directories.
Useful when you are cloning directories that have already been featurized
to get new feature embeddings with nlx-model repo.
'''
import os
def removejson(listdir):
    # delete every .json file among the given names (relative to the current directory)
    for name in listdir:
        if name.endswith('.json'):
            os.remove(name)
listdir = os.listdir()
hostdir = os.getcwd()
for entry in listdir:
    try:
        os.chdir(os.path.join(hostdir, entry))
        removejson(os.listdir())
    except OSError:
        # skip entries that are not directories
        pass
|
import model
import torch
import torchvision
import torchvision.transforms as transforms
import os
# Default parameter declarations
# batch_size = 64
# epochs = 60
# WORKERS = 4  # number of DataLoader worker threads
# ROOT = './dataset/'  # directory where the dataset is stored
# pth_dir = './model_pth/'  # directory where model weights are saved
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
img_size = 32  # input image size fed to the network
def create_dir_not_exist(path):
if not os.path.exists(path):
os.mkdir(path)
def train_loader(ROOT, batch_size, WORKERS):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.Resize([img_size, img_size]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_dataset = torchvision.datasets.CIFAR10(root=ROOT, train=True,
download=False, transform=transform_train)
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True, num_workers=WORKERS)
return trainloader
def test_loader(ROOT, batch_size, WORKERS):
transform_test = transforms.Compose([
transforms.Resize([img_size, img_size]),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
testset = torchvision.datasets.CIFAR10(root=ROOT, train=False,
download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=WORKERS)
return testloader
|
from djangobench.base_settings import * # NOQA
INSTALLED_APPS = ['model_save_new']
|
# bug 2
# Print the squares of the numbers 0 to 9:
for num in range(10):
x = num**2
print(x)
print("Done")
|
from .exceptions import (LookupNotFoundException, NetworkFailureException, # noqa
APIKeyException) # noqa
from .options import ScriptOptions # noqa
|
import pandas as pd
def add_column_length(table_name, table_data):
    indices = [('w', 'wld_id'), ('p', 'hs_id'), ('b', 'bra_id')]
    for index, column in indices:
        if index in table_name:
            table_data[column + "_len"] = pd.Series(
                [len(str(x)) for x in table_data.index.get_level_values(column)],
                index=table_data.index)
cols = table_data.columns.tolist()
cols = [column + "_len"] + cols[:-1]
table_data = table_data[cols]
return table_data
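# A small usage sketch with hypothetical data (the table name and index levels
# below are illustrative, not taken from the original module):
if __name__ == "__main__":
    idx = pd.MultiIndex.from_tuples([("USA", 101), ("BR", 2)], names=["wld_id", "hs_id"])
    demo = pd.DataFrame({"value": [1.0, 2.0]}, index=idx)
    print(add_column_length("pw_demo", demo))  # prepends "wld_id_len" and "hs_id_len"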
|
def main():
import webbrowser
recherche = 0
while True:
if recherche >= 2:
print("Vous avez fait " + str(recherche) + " recherches.")
recherche += 1
adresse = input("Quel adresse veut-tu ouvrir")
webbrowser.open(adresse)
if __name__ == "__main__":
main()
|
from datetime import timedelta
from pathlib import Path
from environs import Env
BASE_DIR = Path(__file__).resolve().parent.parent
env = Env()
env.read_env()
# Django environment
SECRET_KEY = env.str('SECRET_KEY')
DEBUG = env.bool("DEBUG", False)
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS')
DATABASES = {
'default': {
'ENGINE': env.str('POSTGRES_DRIVER'),
'NAME': env.str('POSTGRES_DB'),
'USER': env.str('POSTGRES_USER'),
'PASSWORD': env.str('POSTGRES_PASSWORD'),
'HOST': env.str('POSTGRES_HOST'),
'PORT': env.str('POSTGRES_PORT'),
}
}
INSTALLED_APPS = [
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'drf_yasg',
'rest_framework_simplejwt',
'rest_framework_simplejwt.token_blacklist',
'src.image_uploader',
]
MIDDLEWARE = [
    # IMPORTANT: the CORS middleware has to go before the other entries
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR / "staticfiles"
if DEBUG:
STATIC_DIR = BASE_DIR / 'static'
STATICFILES_DIRS = [STATIC_DIR, ]
else:
STATIC_ROOT = BASE_DIR / "staticfiles"
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media'
ALGORITHM = 'HS256'
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
}
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
}
REST_USE_JWT = True
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=50),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': False,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUDIENCE': None,
'ISSUER': None,
'AUTH_HEADER_TYPES': ('Bearer', 'JWT',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'AUTH_TOKEN_CLASSES': (
'rest_framework_simplejwt.tokens.AccessToken',
),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
'SLIDING_TOKEN_LIFETIME': timedelta(minutes=15),
'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
DJOSER = {
"LOGIN_FIELD": "username",
}
SITE_ID = 1
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_ALL_ORIGINS = True
|
#! /usr/bin/env python
""" Create files for ht unit test """
import nmrglue.fileio.pipe as pipe
import nmrglue.process.pipe_proc as p
d, a = pipe.read("1D_time_real.fid")
d, a = p.ht(d, a)
pipe.write("ht1.glue", d, a, overwrite=True)
d, a = pipe.read("1D_time_real.fid")
d, a = p.ht(d, a, td=True)
pipe.write("ht2.glue", d, a, overwrite=True)
d, a = pipe.read("1D_time_real.fid")
d, a = p.ht(d, a, mode="ps0-0")
pipe.write("ht3.glue", d, a, overwrite=True)
d, a = pipe.read("1D_time_real.fid")
d, a = p.ht(d, a, zf=True)
pipe.write("ht5.glue", d, a, overwrite=True)
d, a = pipe.read("1D_time_real.fid")
d, a = p.ht(d, a, auto=True)
pipe.write("ht6.glue", d, a, overwrite=True)
d, a = pipe.read("freq_real.ft2")
d, a = p.ht(d, a)
pipe.write("ht7.glue", d, a, overwrite=True)
d, a = pipe.read("freq_real.ft2")
d, a = p.ht(d, a, zf=True, td=True)
pipe.write("ht8.glue", d, a, overwrite=True)
|
# Copyright 2016 Jake Dube
#
# ##### BEGIN GPL LICENSE BLOCK ######
# This file is part of MeshTools.
#
# MeshTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MeshTools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MeshTools. If not, see <http://www.gnu.org/licenses/>.
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.types import Panel, Operator, PropertyGroup, Scene
from bpy.utils import register_class, unregister_class
from bpy.props import FloatProperty, PointerProperty
bl_info = {
"name": "Mesh Tools - Bforartists version",
"author": "Jake Dube",
"version": (1, 1),
"blender": (2, 80, 0),
"location": "View3D > Mesh > Transform > Set Dimensions",
"description": "Sets dimensions for selected vertices.",
"category": "Mesh"}
def calc_bounds():
"""Calculates the bounding box for selected vertices. Requires applied scale to work correctly. """
# for some reason we must change into object mode for the calculations
mode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
mesh = bpy.context.object.data
verts = [v for v in mesh.vertices if v.select]
# [+x, -x, +y, -y, +z, -z]
v = verts[0].co
bounds = [v.x, v.x, v.y, v.y, v.z, v.z]
for v in verts:
if bounds[0] < v.co.x:
bounds[0] = v.co.x
if bounds[1] > v.co.x:
bounds[1] = v.co.x
if bounds[2] < v.co.y:
bounds[2] = v.co.y
if bounds[3] > v.co.y:
bounds[3] = v.co.y
if bounds[4] < v.co.z:
bounds[4] = v.co.z
if bounds[5] > v.co.z:
bounds[5] = v.co.z
bpy.ops.object.mode_set(mode=mode)
return bounds
def safe_divide(a, b):
if b != 0:
return a / b
return 1
class ED_OT_SetDimensions(Operator):
bl_label = "Set Dimensions"
bl_idname = "mesh_tools_addon.set_dimensions"
bl_description = "Sets dimensions of selected vertices"
bl_options = {'REGISTER', 'UNDO'}
bl_context = "editmode"
new_x : FloatProperty(name="X", min=0, default=1, unit='LENGTH')
new_y : FloatProperty(name="Y", min=0, default=1, unit='LENGTH')
new_z : FloatProperty(name="Z", min=0, default=1, unit='LENGTH')
def invoke(self, context, event):
bounds = calc_bounds()
self.new_x = bounds[0] - bounds[1]
self.new_y = bounds[2] - bounds[3]
self.new_z = bounds[4] - bounds[5]
return {'FINISHED'}
def execute(self, context):
bounds = calc_bounds()
bpy.ops.object.mode_set(mode='EDIT')
x = safe_divide(self.new_x, (bounds[0] - bounds[1]))
y = safe_divide(self.new_y, (bounds[2] - bounds[3]))
z = safe_divide(self.new_z, (bounds[4] - bounds[5]))
bpy.ops.transform.resize(value=(x, y, z))
return {'FINISHED'}
def draw(self, context):
layout = self.layout
box = layout.box()
box.label(text = "New dimensions:")
box.prop(self, "new_x")
box.prop(self, "new_y")
box.prop(self, "new_z")
def add_button(self, context):
self.layout.operator(ED_OT_SetDimensions.bl_idname, icon="PLUGIN")
classes = (
ED_OT_SetDimensions,
)
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
bpy.types.VIEW3D_MT_transform.append(add_button)
#bpy.types.VIEW3D_PT_transform.append(add_button)
def unregister():
from bpy.utils import unregister_class
for cls in classes:
unregister_class(cls)
bpy.types.VIEW3D_MT_transform.remove(add_button)
#bpy.types.VIEW3D_PT_transform.remove(add_button)
if __name__ == "__main__":
register()
|
import pybullet_envs
from stable_baselines3 import TD3_PER
model = TD3_PER('MlpPolicy', 'MinitaurBulletEnv-v0', verbose=1, tensorboard_log="results/long_TD3_PER_MinitaurBullet/")
model.learn(total_timesteps=3000000)
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
from datetime import datetime
import pytest
from fastapi import FastAPI
from fastapi.params import Query
from fastapi.routing import APIRouter
from pydantic.types import PositiveFloat
@pytest.fixture
def app() -> FastAPI:
api_router = APIRouter()
@api_router.get("/")
def _get_root():
return {"name": __name__, "timestamp": datetime.utcnow().isoformat()}
@api_router.get("/data")
def _get_data(x: PositiveFloat, y: int = Query(..., gt=3, lt=4)):
pass
_app = FastAPI()
_app.include_router(api_router)
return _app
|
# coding: utf-8
from __future__ import unicode_literals
from uuid import uuid4
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
url_or_none,
ExtractorError,
)
class IplaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ipla\.tv/.+/(?P<id>[0-9a-fA-F]+)'
_TESTS = [{
'url': 'https://www.ipla.tv/wideo/serial/Swiat-wedlug-Kiepskich/759/Sezon-1/760/Swiat-wedlug-Kiepskich-Odcinek-88/4121?seasonId=760',
'info_dict': {
'id': '4121',
'ext': 'mp4',
'title': 'Świat według Kiepskich - Odcinek 88', # I love when my code works so well
'age_limit': 12,
},
}]
user_agent_data = {
'deviceType': 'mobile',
'application': 'native',
'os': 'android',
'build': 41002,
'widevine': False,
'portal': 'ipla',
'player': 'flexi',
}
device_id = {
'type': 'other',
'value': str(uuid4()),
}
def _real_extract(self, url):
video_id = self._match_id(url)
media = self.get_info(video_id)
formats = []
for ptrciscute in media['playback']['mediaSources']:
formats.append({
"url": url_or_none(self.get_url(video_id, ptrciscute['id'])),
"height": int_or_none(ptrciscute["quality"][:-1])
})
self._sort_formats(formats)
return {
'id': video_id,
'title': media["displayInfo"]["title"],
'formats': formats,
'age_limit': int_or_none(media["displayInfo"]["ageGroup"])
}
def rpc(self, method, params):
params['userAgentData'] = self.user_agent_data
params['deviceId'] = self.device_id
params['clientId'] = params['deviceId']['value']
params['cpid'] = 1
return bytes(json.dumps({
'method': method,
'id': '2137',
'jsonrpc': '2.0',
'params': params,
}), encoding='utf-8')
def get_info(self, media_id):
req = self.rpc('prePlayData', {
'mediaId': media_id
})
headers = {
'Content-type': 'application/json'
}
res = self._download_json('https://b2c-mobile.redefine.pl/rpc/navigation/', media_id, data=req, headers=headers)
if not res.get('result'):
if res['error']['code'] == 13404:
raise ExtractorError('Video requires DRM protection', expected=True)
raise ExtractorError(f"Ipla said: {res['error']['message']} - {res['error']['data']['userMessage']}")
return res['result']['mediaItem']
def get_url(self, media_id, source_id):
req = self.rpc('getPseudoLicense', {
'mediaId': media_id,
'sourceId': source_id
})
headers = {
'Content-type': 'application/json'
}
res = self._download_json('https://b2c-mobile.redefine.pl/rpc/drm/', media_id, data=req, headers=headers)
if not res.get('result'):
raise ExtractorError(f"Ipla said: {res['error']['message']} - {res['error']['data']['userMessage']}")
return res['result']['url']
|
from phase_diagram.phase_diagram import PhaseDiagram
from src.point_in_curve import point_in_function
import numpy as np
from functools import partial
water = PhaseDiagram('H2O')
ureg = water.ureg
Q_ = ureg.Quantity
water_clapeyron_sv = partial(water._clapeyron_sv_lv, curve='sv')
water_clapeyron_lv = partial(water._clapeyron_sv_lv, curve='lv')
@ureg.wraps(ureg.m, (ureg.m, ureg.m, None, ureg.m, ureg.m), strict=False)
def straight_line_y(x0, range_=0, points=1, a=1, b=0):
"""A function created to test point_in_function"""
x = np.linspace(x0, x0 + range_, points)
y = a * x + b
return y
def test_point_in_straight_line_function():
assert point_in_function((Q_('3 m'), Q_('3 m')), straight_line_y)
def test_point_not_in_straight_line_function():
assert not point_in_function((Q_('3 m'), Q_('4 m')), straight_line_y)
def test_point_in_straight_line_function_tolerance():
assert point_in_function((Q_('3 m'), Q_('3.001 m')), straight_line_y)
def test_point_not_in_straight_line_function_tolerance():
assert not point_in_function((Q_('3 m'), Q_('3.002 m')), straight_line_y)
def test_point_water_01():
assert point_in_function((Q_('400 K'),Q_('246493.814 Pa') ), water._antoine_lv)
def test_point_water_02():
assert not point_in_function((Q_('400 K'),Q_('246493.813 Pa') ), water._antoine_lv)
def test_point_water_03():
assert point_in_function((Q_('100 K'),Q_('1.64e-12 Pa')), water_clapeyron_sv)
def test_point_water_04():
assert not point_in_function((Q_('100 K'),Q_('1.64e-3 Pa')), water_clapeyron_sv)
def test_point_water_05():
assert point_in_function((Q_('200 K'),Q_('0.875 Pa')), water_clapeyron_lv)
def test_point_water_06():
assert not point_in_function((Q_('200 K'),Q_('0.877 Pa')), water_clapeyron_lv)
def test_point_water_07():
assert point_in_function((Q_('270 K'),Q_('44150144.527 Pa')), water._clapeyron_sl)
def test_point_water_08():
assert not point_in_function((Q_('270 K'),Q_('44150144.529 Pa')), water._clapeyron_sl)
|
import ast
import datetime
import json
import requests
# from django.conf import settings
from django.conf import settings
TASKS_PATH = 'api/tasks'
TASKS_INFO_PATH = 'api/task/info/'
TASKS_EXEC_PATH = 'api/task/send-task/'
TASKS_ABORT_PATH = 'api/task/abort/'
class Task:
args = None
children = None
client = None
clock = None
eta = None
exception = None
exchange = None
expires = None
failed = None
kwargs = None
name = None
parent = None
parent_id = None
received = None
rejected = None
result = None
retried = None
retries = None
revoked = None
root = None
root_id = None
routing_key = None
runtime = None
sent = None
started = None
state = None
succeeded = None
timestamp = None
traceback = None
uuid = None
worker = None
def __init__(self, **entries):
self.__dict__.update(entries)
def to_dict(self):
return self.__dict__
def get_args(self):
return ast.literal_eval("[" + self.args[1:-1] + "]")
def get_started_date(self):
return datetime.datetime.utcfromtimestamp(float(self.started))
def get_received_date(self):
return datetime.datetime.utcfromtimestamp(float(self.received))
def get_succeeded_date(self):
return datetime.datetime.utcfromtimestamp(float(self.succeeded))
class FlowerView:
server_uri = ''
def __init__(self): # =settings.FLOWER_URL
self.server_uri = settings.FLOWER_URL
def get_tasks(self, page=0, num_items=20):
offset = num_items * page
resp = requests.get(settings.FLOWER_URL + TASKS_PATH + "?offset=" + str(offset) + "&limit=" + str(num_items))
if 200 <= resp.status_code < 400:
return [Task(**v) for k, v in json.loads(resp.content).items()]
else:
return {'error': 'Unable to retrieve tasks'}
def get_task_info(self, uuid):
resp = requests.get(self.server_uri + TASKS_INFO_PATH + uuid)
if 200 <= resp.status_code < 400:
return Task(**json.loads(resp.content))
else:
return {'error': 'Unable to retrieve task'}
def restart_task(self, uuid):
task = self.get_task_info(uuid)
        resp = requests.post(self.server_uri + TASKS_EXEC_PATH + task.name,
                             json={"args": task.get_args() or []})
return 200 <= resp.status_code < 400
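# Usage sketch (assumes settings.FLOWER_URL points at a reachable Flower API,
# e.g. "http://localhost:5555/"); left commented out because it needs a live server:
#
#   view = FlowerView()
#   for task in view.get_tasks(page=0, num_items=10):
#       print(task.uuid, task.state)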
|
from typing import Dict
from domain.exceptions.models_exception import PathNotFound
from domain.models.models_info import ModelsInfo
from domain.models.paths import Paths
from domain.models.pretrained_models import PretrainedModels
from domain.services.contracts.abstract_path_service import AbstractPathService
from domain.services.contracts.abstract_models_architecture_service import AbstractModelsArchitectureService
from shared.helpers.json_helper import parse_json
class ModelsArchitectureService(AbstractModelsArchitectureService):
def __init__(self, path_service: AbstractPathService):
self.paths: Paths = path_service.get_paths()
def get_architectures(self) -> ModelsInfo:
try:
networks_dict: Dict = parse_json(self.paths.networks_path)
return ModelsInfo.parse_obj(networks_dict)
except Exception as e:
raise PathNotFound(additional_message=e.__str__(), path=self.paths.networks_path)
def get_pretrained_models(self) -> PretrainedModels:
try:
networks_dict: Dict = parse_json(self.paths.networks_path)
return PretrainedModels.parse_obj(networks_dict)
except Exception as e:
raise PathNotFound(additional_message=e.__str__(), path=self.paths.networks_path)
|
"""test_dependency_algorithm.py - tests :)
"""
from dependency_algorithm import Dependencies
import pytest
################################################################################
# Data structures to use in these tests
################################################################################
# Two mistakes: (1) Y doesn't exist, and (2) circular dependency between E and A
items_2_mistakes = {
'A': ['B', 'C', 'D'], # -- A is dependent on B, C, D,
'B': [], # -- B is dependent on nothing, etc.
'C': ['D'],
'D': ['B', 'E'],
'E': ['A'],
'F': [],
'Z': ['A', 'B', 'C', 'D', 'Y']
}
# One mistake: circular dependency between E and A
items_1_mistakes = {
'A': ['B', 'C', 'D'], # -- A is dependent on B, C, D,
'B': [], # -- B is dependent on nothing, etc.
'C': ['D'],
'D': ['B', 'E'],
'E': ['A'],
'F': [],
'Z': ['A', 'B', 'C', 'D']
}
# No mistakes
items_0_mistakes = {
'A': ['B', 'C', 'D'], # -- A is dependent on B, C, D,
'B': [], # -- B is dependent on nothing, etc.
'C': ['D'],
'D': ['B', 'E'],
'E': ['F'],
'F': [],
'Z': ['A', 'B', 'C', 'D']
}
# Correct version of items_0_mistakes where all dependencies are complete
items_0_mistakes_complete = {
'B': [],
'F': [],
'E': ['F'],
'D': ['E', 'F', 'B'],
'C': ['E', 'F', 'B', 'D'],
'A': ['C', 'B', 'D', 'F', 'E'],
'Z': ['A', 'C', 'D', 'B', 'F', 'E']
}
# All possible correct orderings of the items in items_0_mistakes such that all
# dependencies resolve correctly
items_0_mistakes_all_possible_correct = [
['F', 'E', 'B', 'D', 'C', 'A', 'Z'],
['F', 'B', 'E', 'D', 'C', 'A', 'Z'],
['B', 'F', 'E', 'D', 'C', 'A', 'Z']
]
################################################################################
# Test the items_ data structures above
################################################################################
def test_existing_dependencies():
"""Does the existing dependency check work?
"""
deps = Dependencies(items_2_mistakes)
assert not deps.dependencies_exist(verbose=False)
deps = Dependencies(items_1_mistakes)
assert deps.dependencies_exist(verbose=False)
deps = Dependencies(items_0_mistakes)
assert deps.dependencies_exist(verbose=False)
def test_circular_dependencies():
"""Ensure that the circular dependency checker works
"""
deps = Dependencies(items_1_mistakes)
assert not deps.no_circular_dependencies()
deps = Dependencies(items_0_mistakes)
assert deps.no_circular_dependencies()
def test_complete_dependencies_check():
"""Check that the complete_dependencies and complete_dependencies_dict
methods are working successfully
"""
deps = Dependencies(items_0_mistakes)
test_passed = True
# Does deps.complete_dependencies work?
    for item in items_0_mistakes_complete.keys():
if set(deps.complete_dependencies(item)) != \
set(items_0_mistakes_complete[item]):
test_passed = False
assert test_passed
# Does deps.complete_dependencies_dict work?
items_0_mistakes_complete_set_dict = \
{k: set(v) for k, v in items_0_mistakes_complete.items()}
class_set_dict = \
{k: set(v) for k, v in deps.complete_dependencies_dict().items()}
assert items_0_mistakes_complete_set_dict == class_set_dict
def test_dependency_resolution():
"""Dependencies are ordered correctly such that they successfully resolve?
"""
deps = Dependencies(items_0_mistakes)
dependency_order = deps.resolve_dependencies()
assert dependency_order in items_0_mistakes_all_possible_correct
def test_all_possible_correct_orderings():
"""Check to see if we can successfully produce all possbile orderings of the
dependencies that resolve them
"""
deps = Dependencies(items_0_mistakes)
all_possible_correct_orderings = deps.all_possible_resolution_orders()
assert set(all_possible_correct_orderings) == \
set([tuple(x) for x in items_0_mistakes_all_possible_correct])
|
import tensorflow as tf
from keras.layers import Dense, Flatten, Dropout, Lambda, Activation, MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
import helper
# parameters to adjust
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3
INPUT_SHAPE = (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)
NUMBER_OF_EPOCHS = 8
NUMBER_OF_SAMPLES_PER_EPOCH = 20224
NUMBER_OF_VALIDATION_SAMPLES = 6400
LEARNING_RATE = 1e-4
# the input is 66x200x3 directly; no resize to 64x64 is applied
# Our model is based on NVIDIA's "End to End Learning for Self-Driving Cars" paper
# Source: https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
def build_model():
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=INPUT_SHAPE))
# starts with five convolutional and maxpooling layers
model.add(Convolution2D(24, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(36, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(48, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Dropout(0.5))
model.add(Flatten())
# Next, five fully connected layers
model.add(Dense(1164))
model.add(Activation('relu'))
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dense(1))
model.summary()
return model
def train_model(model):
checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
monitor='val_loss',
verbose=0,
save_best_only=True,
mode='auto')
model.compile(optimizer=Adam(LEARNING_RATE), loss="mse", )
# create generators for training and validation
train_generator = helper.generator_training()
validation_generator = helper.generator_validation()
history = model.fit_generator(train_generator,
samples_per_epoch=NUMBER_OF_SAMPLES_PER_EPOCH,
nb_epoch=NUMBER_OF_EPOCHS,
validation_data=validation_generator,
nb_val_samples=NUMBER_OF_VALIDATION_SAMPLES,
callbacks=[checkpoint],
verbose=1)
def main():
model = build_model()
train_model(model)
helper.save_model(model)
if __name__ == '__main__':
main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
if __name__ == '__main__':
pad_layer = nn.ZeroPad2d(padding=(-1, 0, 0, 0))
input = torch.randn(1, 1, 3, 3)
print(pad_layer(input).shape)
class TVLoss(nn.Module):
def __init__(self):
super(TVLoss, self).__init__()
self.crop_l = nn.ZeroPad2d(padding=(-1, 0, 0, 0))
self.crop_r = nn.ZeroPad2d(padding=(0, -1, 0, 0))
self.crop_t = nn.ZeroPad2d(padding=(0, 0, -1, 0))
self.crop_b = nn.ZeroPad2d(padding=(0, 0, 0, -1))
def forward(self, inputs):
diff_lr = F.mse_loss(self.crop_l(inputs), self.crop_r(inputs))
diff_tb = F.mse_loss(self.crop_t(inputs), self.crop_b(inputs))
return diff_lr.mean() + diff_tb.mean()
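# A minimal sketch (the tensor shape is illustrative, not from the original file):
# applying TVLoss to a random batch penalizes differences between horizontally
# and vertically neighbouring pixels.
if __name__ == '__main__':
    tv_loss = TVLoss()
    images = torch.randn(4, 3, 64, 64)
    print(tv_loss(images))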
|
import itertools
from collections import OrderedDict
from pytest import raises
from triad.utils.iter import (
EmptyAwareIterable,
Slicer,
make_empty_aware,
slice_iterable,
to_kv_iterable,
)
def test_empty_aware_iterable():
i = _get_iterable("1,2,3")
e = make_empty_aware(i)
assert not e.empty
assert "1,2,3" == ",".join(e)
assert e.empty
i = _get_iterable("1")
e = EmptyAwareIterable(i)
assert not e.empty
assert not e.empty
assert "1" == ",".join(e)
assert e.empty
e = EmptyAwareIterable([])
assert e.empty
assert "" == ",".join(e)
assert e.empty
raises(StopIteration, lambda: e.peek())
i = _get_iterable("1,2,3")
e = EmptyAwareIterable(i)
assert not e.empty
assert "1,2" == ",".join(itertools.islice(e, 2))
assert not e.empty
assert "3" == ",".join(itertools.islice(e, 2))
assert e.empty
i = _get_iterable("1,2,3")
e = EmptyAwareIterable(iter(i))
assert not e.empty
assert "1" == e.peek()
assert "1,2" == ",".join(itertools.islice(e, 2))
assert not e.empty
assert "3" == e.peek()
assert "3" == ",".join(itertools.islice(e, 2))
assert e.empty
def test_empty_aware_iterable_recursive():
i = _get_iterable("1,2,3")
e = make_empty_aware(i)
ee = make_empty_aware(_wrap_iterable(e, True))
assert "1,2,3" == ",".join(ee)
i = _get_iterable("1,2,3")
e = make_empty_aware(i)
ee = make_empty_aware(_wrap_iterable(e, False))
assert "1t,2t,3t" == ",".join(ee)
def test_to_kv_iterable():
data1 = [(1, 1), (2, 2)]
data2 = OrderedDict(data1)
data3 = [[1, 1], (2, 2)]
data4 = [[1, 1, 3], (2, 2)]
data5 = [1, (2, 2)]
data6 = [(1, 1), (2, 2, 3)]
assert [] == list(to_kv_iterable(None, none_as_empty=True))
assert [] == list(to_kv_iterable(None))
assert [] == list(to_kv_iterable([]))
raises(ValueError, lambda: list(to_kv_iterable(None, none_as_empty=False)))
assert data1 == list(to_kv_iterable(data1))
assert data1 == list(to_kv_iterable(data2))
assert data1 == list(to_kv_iterable(data3))
raises(TypeError, lambda: list(to_kv_iterable(data4)))
raises(TypeError, lambda: list(to_kv_iterable(data5)))
raises(TypeError, lambda: list(to_kv_iterable(123)))
raises(ValueError, lambda: list(to_kv_iterable(data6)))
def test_slice_iterable():
# make sure empty iterable will yield no slice
ll = list(slice_iterable([], lambda n, c, l: n % 2 == 0))
assert 0 == len(ll)
assert_slice("", [], lambda n, c, l: n % 2 == 0, lambda x: x)
assert_slice("1,2-3,4-5", range(1, 6), lambda n, c, l: n % 2 == 0, lambda x: x)
assert_slice("1,2,3,4,5", range(1, 6), lambda n, c, l: c < l, lambda x: x)
assert_slice("1-2-3-4-5", range(1, 6), lambda n, c, l: c > l, lambda x: x)
# for each slice, only iterate some of them
assert_slice(
"1-3-5",
range(1, 6),
lambda n, c, l: n % 2 == 0,
lambda x: itertools.islice(x, 1),
)
assert_slice(
"1,2-3,4-5",
range(1, 6),
lambda n, c, l: n % 2 == 0,
lambda x: itertools.islice(x, 100),
)
assert_slice(
"--", range(1, 6), lambda n, c, l: n % 2 == 0, lambda x: itertools.islice(x, 0)
)
n = -1
def sl(it):
nonlocal n
n += 1
return itertools.islice(it, n)
assert_slice("-3-5", range(1, 6), lambda n, c, l: n % 2 == 0, sl)
def test_slicer():
assert_slicer("", [], 1, 0, lambda x: 1)
assert_slicer("", [], 0, 0, lambda x: 1)
assert_slicer("", [], 0, 1, lambda x: 1)
assert_slicer("", [], 1, 1, lambda x: 1)
assert_slicer(".000", [1, 1, 1], 0, 0, None)
assert_slicer(".000", [1, 1, 1], None, None, None)
assert_slicer(".0.1.2", [1, 1, 1], 1, 0, None)
assert_slicer(".00.1", [1, 1, 1], 2, 0, None)
assert_slicer(".0.1.2", [1, 1, 1], 0, 1, lambda x: 1)
assert_slicer(".0.1.2", [1, 1, 1], 0, 1, lambda x: 10)
assert_slicer(".00.1", [1, 1, 1], 0, 2, lambda x: 1)
assert_slicer(".0.1.2", [1, 1, 1], 1, 2, lambda x: 1)
assert_slicer(".00.1", [1, 1, 1], 10, 2, lambda x: 1)
assert_slicer(".000", [1, 1, 1], 10, 20, lambda x: 1)
assert_slicer(".0.1.2", [1, 1, 1], 1, "2k", lambda x: 1)
assert_slicer(".00.1", [1, 1, 1], None, "2k", lambda x: 1024)
class C(object):
def __init__(self):
self.arr = []
def c(self, no, current, last):
self.arr.append([current, last])
return current > last
c = C()
assert_slicer("", [], 1, 0, lambda x: 1, c.c)
assert_slicer("", [], 0, 0, lambda x: 1, c.c)
assert_slicer("", [], 0, 1, lambda x: 1, c.c)
assert_slicer("", [], 1, 1, lambda x: 1, c.c)
assert 0 == len(c.arr)
assert_slicer(".000", [1, 1, 1], 0, 0, None, c.c)
c = C()
assert_slicer(".0.1.2", [1, 2, 3], 0, 0, None, c.c)
assert [[2, 1], [3, 2]] == c.arr
c = C()
assert_slicer(".0.1.2", [1, 0, -1], 1, 0, None, c.c)
assert [[0, 1], [-1, 0]] == c.arr # is_boundary must be called anyway
c = C()
assert_slicer(".00.1", [1, 0, -1], 2, 0, None, c.c)
assert [[0, 1], [-1, 0]] == c.arr # is_boundary must be called anyway
c = C()
# size and row counters should reset after slicer taking effect
assert_slicer(".0.11", [1, 2, 1], 2, 0, None, c.c)
assert [[2, 1], [1, 2]] == c.arr
c = C()
assert_slicer(".00.1", [1, 0, -1], 0, 2, lambda x: 1, c.c)
assert [[0, 1], [-1, 0]] == c.arr
c = C()
assert_slicer(".0.1.2", [1, 1, 1], 1, 2, lambda x: 1, c.c)
assert [[1, 1], [1, 1]] == c.arr
c = C()
# size and row counters should reset after slicer taking effect
assert_slicer(".0.11", [1, 2, 1], 10, 2, lambda x: 1, c.c)
assert [[2, 1], [1, 2]] == c.arr
assert_slicer(".000", [1, 1, 1], 10, 20, lambda x: 1)
assert_slicer(".0.1.2", [1, 1, 1], 1, "2k", lambda x: 1)
assert_slicer(".00.1", [1, 1, 1], None, "2k", lambda x: 1024)
def assert_slice(expected, iterable, slicer, slice_proc):
ll = []
for x in slice_iterable(iterable, slicer):
assert not x.empty
assert isinstance(x, EmptyAwareIterable)
ll.append(",".join(map(str, slice_proc(x))))
s = "-".join(ll)
assert expected == s
def assert_slicer(expected, arr, max_row, max_size, sizer, slicer=None):
r = []
n = 0
c = Slicer(sizer, max_row, max_size, slicer=slicer)
for chunk in c.slice(arr):
assert isinstance(chunk, EmptyAwareIterable)
r.append(".")
for x in chunk:
r.append(str(n))
n += 1
assert expected == "".join(r)
def _get_iterable(s):
for ss in s.split(","):
yield ss
def _make_iterable(n):
while n > 0:
yield n
n -= 1
def _wrap_iterable(it, p):
if p:
for x in it:
yield x
else:
for x in it:
yield str(x) + "t"
|
import copy
import tarfile
import os
import os.path
import itertools
import math
import numpy as np
import scipy.signal as signal
from pysit.util.image_processing import blur_image
from pysit.gallery.gallery_base import GeneratedGalleryModel
from pysit import * #PML, Domain
from pysit.core.domain import PML
from pysit.util.io import write_data
from pysit.util.io import *
__all__ = ['LayeredMediumModel', 'layered_medium',
'Layer', 'three_layered_medium','four_layered_medium',
'set_model_from_file',]
class Layer(object):
def __init__(self, velocity, thickness, label=None, fixed=False):
self.velocity = velocity
self.thickness = thickness
self.label = label
self.fixed = fixed
_air = Layer(300.0, 30, 'air', fixed=True)
_water = Layer(1500.0, 120, 'water', fixed=True)
_rock = Layer(3200.0, 1800, 'rock')
water_rock = [_water, _rock]
_rock_velocitys = [-288.0, -150.0, -36.0, -18.0, -90.0, 360.0, -60.0, -450.0, 0.0, 78.0]
water_layered_rock = [_water] + [Layer(s+3300, 180, 'rock {0}'.format(i)) for s,i in zip(_rock_velocitys, itertools.count())]
class LayeredMediumModel(GeneratedGalleryModel):
""" Gallery model for a generic, flat, layered medium. """
model_name = "Layered"
valid_dimensions = (1,2,3)
@property #read only
def dimension(self):
return self.domain.dim
supported_physics = ('acoustic',)
@property
def z_length(self):
return round(float(sum([L.thickness for L in self.layers])),5)
def __init__(self, layers,
z_delta=None,
min_ppw_at_freq=(6,10.0), # 6ppw at 10hz
x_length=None, x_delta=None,
y_length=None, y_delta=None,
initial_model_style='smooth',
initial_config={'sigma':100.0, 'filtersize':100},
**kwargs):
""" Constructor for a constant background model with horizontal reflectors.
Parameters
----------
layers : list
List of Layer objects
z_delta : float, optional
Minimum mesh spacing in depth direction, see Notes.
min_ppw_at_freq : tuple (int, float)
Tuple with structure (min_ppw, peak_freq) to set the minimum points-per-wavelength at the given peak frequency.
x_length : float
Physical size in x direction
x_delta : float
Grid spacing in x direction
y_length : float
Physical size in y direction
y_delta : float
Grid spacing in y direction
initial_model_style : {'smooth', 'constant', 'gradient'}
Setup for the initial model.
initial_config : dict
Configuration parameters for initial models.
Notes
-----
* If z_delta is not set, min_ppw_at_freq is used. z_delta overrides use
of min_ppw_at_freq.
* Domain will be covered exactly, so z_delta is the maximum delta, it
might actually end up being smaller, as the delta is determined by the
mesh class.
"""
GeneratedGalleryModel.__init__(self)
self.layers = layers
self.min_z_delta = z_delta
self.min_ppw_at_freq = min_ppw_at_freq
self.x_length = x_length
self.x_delta = x_delta
self.y_length = y_length
self.y_delta = y_delta
self.initial_model_style = initial_model_style
self.initial_config = initial_config
# Set _domain and _mesh
self.build_domain_and_mesh(**kwargs)
# Set _initial_model and _true_model
self.rebuild_models()
def build_domain_and_mesh(self, **kwargs):
""" Constructs a mesh and domain for layered media. """
# Compute the total depth
z_length = self.z_length
x_length = self.x_length
x_delta = self.x_delta
y_length = self.y_length
y_delta = self.y_delta
# If the minimum z delta is not specified.
if self.min_z_delta is None: #use min_ppw & peak_frequency
min_ppw, peak_freq = self.min_ppw_at_freq
min_velocity = min([L.velocity for L in self.layers])
wavelength = min_velocity / peak_freq
z_delta = wavelength / min_ppw
else:
z_delta = self.min_z_delta
z_points = math.ceil((z_length+0.0000001)/z_delta)
        # Set default z boundary conditions
z_lbc = kwargs['z_lbc'] if ('z_lbc' in list(kwargs.keys())) else PML(0.1*z_length, 100.0)
z_rbc = kwargs['z_rbc'] if ('z_rbc' in list(kwargs.keys())) else PML(0.1*z_length, 100.0)
domain_configs = list()
mesh_args = list()
# If a size of the x direction is specified, determine those parameters
if x_length is not None:
if x_delta is None:
x_delta = z_delta
x_points = math.ceil(float(x_length+0.00000001)/x_delta)
            # Set default x boundary conditions
x_lbc = kwargs['x_lbc'] if ('x_lbc' in list(kwargs.keys())) else PML(0.1*x_length, 100.0)
x_rbc = kwargs['x_rbc'] if ('x_rbc' in list(kwargs.keys())) else PML(0.1*x_length, 100.0)
domain_configs.append((0, x_length, x_lbc, x_rbc))
mesh_args.append(x_points)
        # the y dimension only exists for the 3D problem, so only if x is defined
if y_length is not None:
if y_delta is None:
y_delta = z_delta
y_points = math.ceil(float(y_length)/y_delta)
                # Set default y boundary conditions
y_lbc = kwargs['y_lbc'] if ('y_lbc' in list(kwargs.keys())) else PML(0.1*y_length, 100.0)
y_rbc = kwargs['y_rbc'] if ('y_rbc' in list(kwargs.keys())) else PML(0.1*y_length, 100.0)
domain_configs.append((0, y_length, y_lbc, y_rbc))
mesh_args.append(y_points)
domain_configs.append((0, z_length, z_lbc, z_rbc))
mesh_args.append(z_points)
self._domain = RectangularDomain(*domain_configs)
# Build mesh
mesh_args = [self._domain] + mesh_args
self._mesh = CartesianMesh(*mesh_args)
def rebuild_models(self):
""" Rebuild the true and initial models based on the current configuration."""
sh = self._mesh.shape(as_grid=True)
_shape_tuple = tuple([1]*(len(sh)-1) + [sh[-1]]) # ones in each dimension except for Z
_pad_tuple = [(0,n-1) for n in sh]
_pad_tuple[-1] = (0,0)
_pad_tuple = tuple(_pad_tuple)
# Construct true velocity profile
vp = np.zeros(_shape_tuple)
grid = self._mesh.mesh_coords(sparse=True)
ZZ = grid[-1].reshape(_shape_tuple)
total_filled = 0
for L in self.layers[::-1]:
cutoff_depth = self.z_length - total_filled
vp[ZZ <= cutoff_depth] = L.velocity
total_filled += L.thickness
# Construct initial velocity profile:
if self.initial_model_style == 'constant': # initial_config = {'velocity': 3000.0}
vp0 = np.ones(_shape_tuple)*self.initial_config['velocity']
elif self.initial_model_style == 'true': # initial_config = {}, set the initial model as the true model
vp0 = vp.reshape(-1, )
vp0.shape = vp.shape
elif self.initial_model_style == 'smooth': #initial_config = {'sigma':50.0, 'filtersize':8}
vp0 = blur_image(vp.reshape(-1,),
self.initial_config['filtersize'],
self.initial_config['sigma'],
mesh_deltas=(self._mesh.z.delta,))
vp0.shape = vp.shape
elif self.initial_model_style == 'gradient': # initial_config = {'gradient_slope': 1.0}
gs = self.initial_config['gradient_slope']
# collect the non-fixed layers for choosing the gradient bounds
velocities = [L.velocity for L in self.layers if not L.fixed]
cutoff_depth = 0
# find the first non-fixed layer to start the gradient at.
for L in self.layers:
if L.fixed:
cutoff_depth += L.thickness
else:
break
vp0 = vp.copy()
loc = np.where(ZZ > cutoff_depth)
vp0[loc] = np.linspace(min(velocities), gs*np.max(velocities), loc[0].size)
elif self.initial_model_style == 'layer':
vels_init = self.initial_config['initial_velocity']
thick_init = self.initial_config['initial_thickness']
layer_init = [Layer(s, t, 'Layer_init_ {0}'.format(i)) for s,t,i in zip(vels_init, thick_init, itertools.count())]
vp0 = np.zeros(_shape_tuple)
grid = self._mesh.mesh_coords(sparse=True)
ZZ = grid[-1].reshape(_shape_tuple)
total_filled = 0
for L in layer_init[::-1]:
cutoff_depth = self.z_length - total_filled
vp0[ZZ <= cutoff_depth] = L.velocity
total_filled += L.thickness
# Fix the fixed layers
old_depth = 0
for L in self.layers:
depth = old_depth + L.thickness
if L.fixed:
vp0[(ZZ >= old_depth) & (ZZ < depth)] = L.velocity
old_depth = depth
# Construct final padded velocity profiles
C = np.pad(vp, _pad_tuple, 'edge').reshape(self._mesh.shape())
C0 = np.pad(vp0, _pad_tuple, 'edge').reshape(self._mesh.shape())
self._true_model = C
self._initial_model = C0
def layered_medium(layers=water_layered_rock, **kwargs):
""" Friendly wrapper for instantiating the layered medium model. """
# Setup the defaults
model_config = dict(z_delta=None,
min_ppw_at_freq=(6,10.0), # 6ppw at 10hz
x_length=None, x_delta=None,
y_length=None, y_delta=None,
initial_model_style='smooth',
initial_config={'sigma':100.0, 'filtersize':100})
# Make any changes
model_config.update(kwargs)
return LayeredMediumModel(layers, **model_config).get_setup()
def three_layered_medium(vels=(1.5, 2.5, 3.5), dx=0.02, dz=0.02,
nx=181, nz=61, nbx=10, nbz=10, pml_width=[0.5,0.5],
water_layer_depth = 0.05,
# water_layer_depth = 0.0,
# initial_model_style = 'smooth',
# initial_config={'sigma': 1.0, 'filtersize': 8},
initial_model_style = 'gradient',
initial_config={'gradient_slope': 1.0},
TrueModelFileName=None, InitialModelFileName=None,
**kwargs):
n_layer1 = nz // 3
n_layer2 = nz // 3
n_layer3 = nz - n_layer1 - n_layer2
n_layer1 = n_layer1 # + nbz
n_layer3 = n_layer3 # + nbz
nxt = nx # + 2*nbx
nzt = nz # + 2*nbz
Layer1 = Layer(vels[0], n_layer1*dz, 'Layer1', fixed=False)
Layer2 = Layer(vels[1], n_layer2*dz, 'Layer2', fixed=False)
Layer3 = Layer(vels[2], (n_layer3-1)*dz, 'Layer3', fixed=False)
Layerall = [Layer1] + [Layer2] + [Layer3]
x_lbc = kwargs['x_lbc'] if ('x_lbc' in kwargs) else PML(0.1, 100)
x_rbc = kwargs['x_rbc'] if ('x_rbc' in kwargs) else PML(0.1, 100)
z_lbc = kwargs['z_lbc'] if ('z_lbc' in kwargs) else PML(0.1, 100)
z_rbc = kwargs['z_rbc'] if ('z_rbc' in kwargs) else PML(0.1, 100)
kwargs['x_lbc'] = PML(pml_width[0], 100)
kwargs['x_rbc'] = PML(pml_width[0], 100)
kwargs['z_lbc'] = PML(pml_width[1], 100)
kwargs['z_rbc'] = PML(pml_width[1], 100)
model_config = dict(z_delta=dz,
x_length=dx*(nxt-1), x_delta=dx,
initial_model_style=initial_model_style,
initial_config=initial_config, **kwargs)
C, C0, m, d = LayeredMediumModel(Layerall, **model_config).get_setup()
if initial_model_style == 'gradient':
nz_water = int(water_layer_depth/dz) + 1
C1 = np.ones(m._shapes[(False,True)])*vels[0]
c_z = np.linspace(vels[0], vels[-1], nz-nz_water)
for i in range(nx):
C1[i, nz_water:nz] = c_z
C0 = np.reshape(C1, C0.shape)
if TrueModelFileName is not None:
ot = (0.0,0.0)
dt = (dz, dx)
nt = m._shapes[(False, True)]
B = C.reshape(nt).transpose()
nt = (nt[1], nt[0])
write_data(TrueModelFileName, B, ot, dt, nt)
if InitialModelFileName is not None:
ot = (0.0,0.0)
dt = (dz, dx)
nt = m._shapes[(False, True)]
B = C0.reshape(nt).transpose()
nt = (nt[1], nt[0])
write_data(InitialModelFileName, B, ot, dt, nt)
return C, C0, m, d
def four_layered_medium(model_param=None, initial_model_style=None, initial_config=None,
dx = 0.01, dz = 0.01, water_layer_depth = 0.05, TrueModelFileName=None,
InitialModelFileName=None, **kwargs):
x_length = model_param['x_length']
z_depth = model_param['z_depth']
layer_thickness = model_param['layer_thickness']
vels = model_param['velocity']
nbx = 10
nbz = 10
pml_width = [0.5,0.5]
nx = int(x_length/dx) + 1
nz = int(z_depth/dz) + 1
nl = np.shape(layer_thickness)[0]
n_layer = np.zeros(nl)
for i in range(nl):
n_layer[i] = int(layer_thickness[i]/dz)
n_layer[0] = n_layer[0] # + nbz
n_layer[nl-1]= n_layer[nl-1] # + 1 # + nbz
nxt = nx # + 2*nbx
nzt = nz # + 2*nbz
Layerall = list()
for i in range(nl):
Layers = Layer(vels[i], n_layer[i]*dz, fixed=False)
Layerall += [Layers]
x_lbc = kwargs['x_lbc'] if ('x_lbc' in kwargs) else PML(0.1, 100)
x_rbc = kwargs['x_rbc'] if ('x_rbc' in kwargs) else PML(0.1, 100)
z_lbc = kwargs['z_lbc'] if ('z_lbc' in kwargs) else PML(0.1, 100)
z_rbc = kwargs['z_rbc'] if ('z_rbc' in kwargs) else PML(0.1, 100)
kwargs['x_lbc'] = PML(pml_width[0], 100)
kwargs['x_rbc'] = PML(pml_width[0], 100)
kwargs['z_lbc'] = PML(pml_width[1], 100)
kwargs['z_rbc'] = PML(pml_width[1], 100)
model_config = dict(z_delta=dz,
x_length=dx*(nxt-1), x_delta=dx,
initial_model_style=initial_model_style,
initial_config=initial_config, **kwargs)
C, C0, m, d = LayeredMediumModel(Layerall, **model_config).get_setup()
if TrueModelFileName is not None:
ot = (0.0,0.0)
dt = (dz, dx)
nt = m._shapes[(False, True)]
B = C.reshape(nt).transpose()
nt = (nt[1], nt[0])
write_data(TrueModelFileName, B, ot, dt, nt)
if InitialModelFileName is not None:
ot = (0.0,0.0)
dt = (dz, dx)
nt = m._shapes[(False, True)]
B = C0.reshape(nt).transpose()
nt = (nt[1], nt[0])
write_data(InitialModelFileName, B, ot, dt, nt)
return C, C0, m, d
def set_model_from_file(Modelfile,
initial_config={'sigma': 1.0, 'filtersize': 8},
initial_model_style='smooth',
**kwargs):
"""
function to set up the model from the Modelfile
Input:
    Modelfile: The name of the model file. The file should have the following structure:
A.data - the data of the model
               A.o - the origins of each dimension
A.d - the delta of each dimension
A.n - the size of each dimension
Optional Input:
If you want to create a smooth model from the given velocity model, you can
use the following inputs:
initial_config: 'sigma' and 'filtersize' define the level of the smoothness
initial_model_style: default is 'smooth', you can also select 'gradient' then
you will get a linear velocity model
Key word arguments:
You can define the PML as follows:
kwargs['x_lbc'] = PML(0.1, 100)
kwargs['x_rbc'] = PML(0.1, 100)
kwargs['z_lbc'] = PML(0.1, 100)
kwargs['z_rbc'] = PML(0.1, 100)
"""
[vels, ot, dt, nt] = read_data(Modelfile)
C, C0, m, d = three_layered_medium(dx=dt[1], dz=dt[0],
nx=nt[1], nz=nt[0],
initial_model_style=initial_model_style,
initial_config=initial_config,
TrueModelFileName=None, InitialModelFileName=None,
**kwargs)
C = vels.transpose().reshape(C.shape)
# C0 = copy.deepcopy(C)
return C, m, d
if __name__ == '__main__':
import matplotlib.pyplot as plt
# ASD = LayeredMediumModel(water_layered_rock)
# ASD = LayeredMediumModel(water_layered_rock, initial_model_style='smooth', initial_config={'sigma':100, 'filtersize':150})
# ASD = LayeredMediumModel(water_layered_rock, initial_model_style='gradient')
# ASD = LayeredMediumModel(water_layered_rock, initial_model_style='constant', initial_config={'velocity':3000})
# SD = LayeredMediumModel(water_layered_rock, x_length=2000.0, y_length=1000.0)
C, C0, m, d = layered_medium(x_length=2000)
fig = plt.figure()
fig.add_subplot(2,1,1)
vis.plot(C, m)
fig.add_subplot(2,1,2)
vis.plot(C0, m)
plt.show()
C, C0, m, d = three_layered_medium(TrueModelFileName='testtrue.mat',InitialModelFileName='testInitial.mat',
initial_model_style='smooth',
initial_config={'sigma': 2.0, 'filtersize': 16})
C, m, d = set_model_from_file('testtrue.mat')
C0, m, d = set_model_from_file('testInitial.mat')
fig = plt.figure()
fig.add_subplot(2, 1, 1)
vis.plot(C, m)
fig.add_subplot(2, 1, 2)
vis.plot(C0, m)
plt.show()
# print(np.max(C-C0))
|
from . import gui, model
|
from teleapi import TelegramApi
api = TelegramApi(token='TOKEN')
api_proxy = TelegramApi(token='TOKEN', proxy='https://USERNAME:PASSWORD@IP:PORT')
message = api.send_message(-100, 'Hello')
print(message.text)
message = api.forward_message(-100, -100, 1)  # 1 is the message id
print(message.text)
photo = api.send_photo(-100, '/PATH/TO/FILE/SOME_FILE')
print(photo.text)
photo = api.send_photo(-100, 'https://example.com/test.jpg')
print(photo.text)
|
import sys
import unittest
import pynsive
class WhenCreatingThePluginManager(unittest.TestCase):
def setUp(self):
self.manager = pynsive.PluginManager()
def tearDown(self):
self.manager.destroy()
def test_correct_meta_path_insertion(self):
finder_index = sys.meta_path.index(self.manager.finder)
if sys.version_info >= (3, 1, 0):
self.assertEqual(0, finder_index)
else:
self.assertEqual(len(sys.meta_path) - 1, finder_index)
|
from .AccountAdapter import AccountAdapter
from .LocalFileSystemAccountAdapter import LocalFileSystemAccountAdapter
|