hexsha
stringlengths
40
40
size
int64
7
1.04M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
247
max_stars_repo_name
stringlengths
4
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
368k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
247
max_issues_repo_name
stringlengths
4
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
116k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
247
max_forks_repo_name
stringlengths
4
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
1
1.04M
avg_line_length
float64
1.77
618k
max_line_length
int64
1
1.02M
alphanum_fraction
float64
0
1
original_content
stringlengths
7
1.04M
filtered:remove_function_no_docstring
int64
-102
942k
filtered:remove_class_no_docstring
int64
-354
977k
filtered:remove_delete_markers
int64
0
60.1k
841500952b93f537e08fe02a7b887c918465fe6f
334
py
Python
pinauth/demo/models.py
jtiai/pinauth
f84d068c366f5a63e4ce4da3936d4c0577cb0da8
[ "BSD-2-Clause" ]
null
null
null
pinauth/demo/models.py
jtiai/pinauth
f84d068c366f5a63e4ce4da3936d4c0577cb0da8
[ "BSD-2-Clause" ]
null
null
null
pinauth/demo/models.py
jtiai/pinauth
f84d068c366f5a63e4ce4da3936d4c0577cb0da8
[ "BSD-2-Clause" ]
null
null
null
from django.conf import settings from django.db import models import pyotp class UserPSK(models.Model): """Strores custom secret key per user""" user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='psk', on_delete=models.CASCADE) secret_key = models.CharField(max_length=16, default=pyotp.random_base32)
30.363636
103
0.778443
from django.conf import settings from django.db import models import pyotp class UserPSK(models.Model): """Strores custom secret key per user""" user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='psk', on_delete=models.CASCADE) secret_key = models.CharField(max_length=16, default=pyotp.random_base32)
0
0
0
daa49ffdbfcd4ecb29504c0653edf67087038404
39,897
py
Python
sunspec2/tests/test_modbus_client.py
geofragkos/RBAC_Centralized
e1501e9437ef9f5712d802c2e680561d1a5a86ac
[ "Apache-2.0" ]
null
null
null
sunspec2/tests/test_modbus_client.py
geofragkos/RBAC_Centralized
e1501e9437ef9f5712d802c2e680561d1a5a86ac
[ "Apache-2.0" ]
null
null
null
sunspec2/tests/test_modbus_client.py
geofragkos/RBAC_Centralized
e1501e9437ef9f5712d802c2e680561d1a5a86ac
[ "Apache-2.0" ]
null
null
null
import sunspec2.modbus.client as client import pytest import socket import sunspec2.tests.mock_socket as MockSocket import serial import sunspec2.tests.mock_port as MockPort if __name__ == "__main__": pass
54.504098
120
0.559491
import sunspec2.modbus.client as client import pytest import socket import sunspec2.tests.mock_socket as MockSocket import serial import sunspec2.tests.mock_port as MockPort class TestSunSpecModbusClientPoint: def test_read(self, monkeypatch): monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) # tcp d_tcp = client.SunSpecModbusClientDeviceTCP(slave_id=1, ipaddr='127.0.0.1', ipport=8502) tcp_buffer = [b'\x00\x00\x00\x00\x00\t\x01\x03\x06', b'SunS\x00\x01', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00B', b'\x00\x00\x00\x00\x00\x8b\x01\x03\x88', b'\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00~', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00@', b'\x00\x00\x00\x00\x00\x87\x01\x03\x84', b'\x00~\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00\xff' b'\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80' b'\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\xff\xff'] d_tcp.client.connect() d_tcp.client.socket._set_buffer(tcp_buffer) d_tcp.scan() assert d_tcp.common[0].SN.value == 'sn-123456789' assert not d_tcp.common[0].SN.dirty d_tcp.common[0].SN.value = 'will be overwritten by 
read' assert d_tcp.common[0].SN.value == 'will be overwritten by read' assert d_tcp.common[0].SN.dirty d_tcp.client.socket.clear_buffer() tcp_p_buffer = [b'\x00\x00\x00\x00\x00#\x01\x03 ', b'sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] d_tcp.client.socket._set_buffer(tcp_p_buffer) d_tcp.common[0].SN.read() assert d_tcp.common[0].SN.value == 'sn-123456789' assert not d_tcp.common[0].SN.dirty # rtu d_rtu = client.SunSpecModbusClientDeviceRTU(slave_id=1, name="COM2") rtu_buffer = [b'\x01\x03\x06Su', b'nS\x00\x01\x8d\xe4', b'\x01\x03\x02\x00B', b'8u', b'\x01\x03\x88\x00\x01', b'\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x01\x00\x00M\xf9', b'\x01\x03\x02\x00~', b'8d', b'\x01\x03\x02\x00@', b'\xb9\xb4', b'\x01\x03\x84\x00~', b'\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00' b'\xff\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xffI', b'\x01\x03\x02\xff\xff', b'\xb9\xf4'] d_rtu.open() d_rtu.client.serial._set_buffer(rtu_buffer) d_rtu.scan() assert d_rtu.common[0].SN.value == 'sn-123456789' assert not d_rtu.common[0].SN.dirty d_rtu.common[0].SN.value = 'will be overwritten by read' assert d_rtu.common[0].SN.value == 'will be overwritten by read' assert 
d_rtu.common[0].SN.dirty d_rtu.client.serial.clear_buffer() tcp_p_buffer = [b'\x01\x03 sn', b'-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd5\xb8'] d_rtu.client.serial._set_buffer(tcp_p_buffer) d_rtu.common[0].SN.read() assert d_rtu.common[0].SN.value == 'sn-123456789' assert not d_rtu.common[0].SN.dirty def test_write(self, monkeypatch): monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) # tcp d_tcp = client.SunSpecModbusClientDeviceTCP(slave_id=1, ipaddr='127.0.0.1', ipport=8502) tcp_buffer = [b'\x00\x00\x00\x00\x00\t\x01\x03\x06', b'SunS\x00\x01', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00B', b'\x00\x00\x00\x00\x00\x8b\x01\x03\x88', b'\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00~', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00@', b'\x00\x00\x00\x00\x00\x87\x01\x03\x84', b'\x00~\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00\xff' b'\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80' b'\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\xff\xff'] d_tcp.client.connect() d_tcp.client.socket._set_buffer(tcp_buffer) 
d_tcp.scan() assert d_tcp.common[0].SN.value == 'sn-123456789' assert not d_tcp.common[0].SN.dirty d_tcp.common[0].SN.value = 'sn-000' assert d_tcp.common[0].SN.value == 'sn-000' assert d_tcp.common[0].SN.dirty tcp_write_buffer = [b'\x00\x00\x00\x00\x00\x06\x01\x10\x9c', b't\x00\x10'] d_tcp.client.socket.clear_buffer() d_tcp.client.socket._set_buffer(tcp_write_buffer) d_tcp.common[0].write() d_tcp.common[0].SN.value = 'will be overwritten by read' assert d_tcp.common[0].SN.value == 'will be overwritten by read' assert d_tcp.common[0].SN.dirty tcp_read_buffer = [b'\x00\x00\x00\x00\x00#\x01\x03 ', b'sn-000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] d_tcp.client.socket.clear_buffer() d_tcp.client.socket._set_buffer(tcp_read_buffer) d_tcp.common[0].SN.read() assert d_tcp.common[0].SN.value == 'sn-000' assert not d_tcp.common[0].SN.dirty # rtu d_rtu = client.SunSpecModbusClientDeviceRTU(slave_id=1, name="COM2") rtu_buffer = [b'\x01\x03\x06Su', b'nS\x00\x01\x8d\xe4', b'\x01\x03\x02\x00B', b'8u', b'\x01\x03\x88\x00\x01', b'\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x01\x00\x00M\xf9', b'\x01\x03\x02\x00~', b'8d', b'\x01\x03\x02\x00@', b'\xb9\xb4', b'\x01\x03\x84\x00~', b'\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00' b'\xff\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' 
b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xffI', b'\x01\x03\x02\xff\xff', b'\xb9\xf4'] d_rtu.open() d_rtu.client.serial._set_buffer(rtu_buffer) d_rtu.scan() assert d_rtu.common[0].SN.value == 'sn-123456789' assert not d_rtu.common[0].SN.dirty d_rtu.common[0].SN.value = 'sn-000' assert d_rtu.common[0].SN.value == 'sn-000' assert d_rtu.common[0].SN.dirty rtu_write_buffer = [b'\x01\x10\x9ct\x00', b'\x10\xaf\x8f'] d_rtu.client.serial.clear_buffer() d_rtu.client.serial._set_buffer(rtu_write_buffer) d_rtu.common[0].write() d_rtu.common[0].SN.value = 'will be overwritten by read' assert d_rtu.common[0].SN.value == 'will be overwritten by read' assert d_rtu.common[0].SN.dirty rtu_read_buffer = [b'\x01\x03 sn', b'-000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r\xfb'] d_rtu.client.serial.clear_buffer() d_rtu.client.serial._set_buffer(rtu_read_buffer) d_rtu.common[0].SN.read() assert d_rtu.common[0].SN.value == 'sn-000' assert not d_rtu.common[0].SN.dirty class TestSunSpecModbusClientGroup: def test_read(self, monkeypatch): monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) # tcp d_tcp = client.SunSpecModbusClientDeviceTCP(slave_id=1, ipaddr='127.0.0.1', ipport=8502) tcp_buffer = [b'\x00\x00\x00\x00\x00\t\x01\x03\x06', b'SunS\x00\x01', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00B', b'\x00\x00\x00\x00\x00\x8b\x01\x03\x88', b'\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' 
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00~', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00@', b'\x00\x00\x00\x00\x00\x87\x01\x03\x84', b'\x00~\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00\xff' b'\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80' b'\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\xff\xff'] d_tcp.client.connect() d_tcp.client.socket._set_buffer(tcp_buffer) d_tcp.scan() assert d_tcp.common[0].SN.value == "sn-123456789" assert d_tcp.common[0].Vr.value == "1.2.3" assert not d_tcp.common[0].SN.dirty assert not d_tcp.common[0].Vr.dirty d_tcp.common[0].SN.value = 'this will overwrite from read' d_tcp.common[0].Vr.value = 'this will overwrite from read' assert d_tcp.common[0].SN.value == 'this will overwrite from read' assert d_tcp.common[0].Vr.value == 'this will overwrite from read' assert d_tcp.common[0].SN.dirty assert d_tcp.common[0].Vr.dirty tcp_read_buffer = [b'\x00\x00\x00\x00\x00\x87\x01\x03\x84', b'\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] d_tcp.client.socket.clear_buffer() d_tcp.client.socket._set_buffer(tcp_read_buffer) d_tcp.common[0].read() assert d_tcp.common[0].SN.value == "sn-123456789" assert 
d_tcp.common[0].Vr.value == "1.2.3" assert not d_tcp.common[0].SN.dirty assert not d_tcp.common[0].Vr.dirty # rtu d_rtu = client.SunSpecModbusClientDeviceRTU(slave_id=1, name="COM2") rtu_buffer = [b'\x01\x03\x06Su', b'nS\x00\x01\x8d\xe4', b'\x01\x03\x02\x00B', b'8u', b'\x01\x03\x88\x00\x01', b'\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x01\x00\x00M\xf9', b'\x01\x03\x02\x00~', b'8d', b'\x01\x03\x02\x00@', b'\xb9\xb4', b'\x01\x03\x84\x00~', b'\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00' b'\xff\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xffI', b'\x01\x03\x02\xff\xff', b'\xb9\xf4'] d_rtu.open() d_rtu.client.serial._set_buffer(rtu_buffer) d_rtu.scan() assert d_rtu.common[0].SN.value == "sn-123456789" assert d_rtu.common[0].Vr.value == "1.2.3" assert not d_rtu.common[0].SN.dirty assert not d_rtu.common[0].Vr.dirty d_rtu.common[0].SN.value = 'this will overwrite from read' d_rtu.common[0].Vr.value = 'this will overwrite from read' assert d_rtu.common[0].SN.value == 'this will overwrite from read' assert d_rtu.common[0].Vr.value == 'this will overwrite from read' assert d_rtu.common[0].SN.dirty assert d_rtu.common[0].Vr.dirty rtu_read_buffer = [b'\x01\x03\x84\x00\x01', 
b'\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00H\xef'] d_rtu.client.serial.clear_buffer() d_rtu.client.serial._set_buffer(rtu_read_buffer) d_rtu.common[0].read() assert d_rtu.common[0].SN.value == "sn-123456789" assert d_rtu.common[0].Vr.value == "1.2.3" assert not d_rtu.common[0].SN.dirty assert not d_rtu.common[0].Vr.dirty def test_write(self, monkeypatch): monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) # tcp d_tcp = client.SunSpecModbusClientDeviceTCP(slave_id=1, ipaddr='127.0.0.1', ipport=8502) tcp_buffer = [b'\x00\x00\x00\x00\x00\t\x01\x03\x06', b'SunS\x00\x01', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00B', b'\x00\x00\x00\x00\x00\x8b\x01\x03\x88', b'\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00~', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00@', b'\x00\x00\x00\x00\x00\x87\x01\x03\x84', b'\x00~\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00\xff' b'\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80' 
b'\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\xff\xff'] d_tcp.client.connect() d_tcp.client.socket._set_buffer(tcp_buffer) d_tcp.scan() assert d_tcp.common[0].SN.value == "sn-123456789" assert d_tcp.common[0].Vr.value == "1.2.3" assert not d_tcp.common[0].SN.dirty assert not d_tcp.common[0].Vr.dirty d_tcp.common[0].SN.value = 'sn-000' d_tcp.common[0].Vr.value = 'v0.0.0' assert d_tcp.common[0].SN.value == "sn-000" assert d_tcp.common[0].Vr.value == "v0.0.0" assert d_tcp.common[0].SN.dirty assert d_tcp.common[0].Vr.dirty tcp_write_buffer = [b'\x00\x00\x00\x00\x00\x06\x01\x10\x9c', b'l\x00\x18'] d_tcp.client.socket.clear_buffer() d_tcp.client.socket._set_buffer(tcp_write_buffer) d_tcp.common[0].write() d_tcp.common[0].SN.value = 'this will overwrite from read' d_tcp.common[0].Vr.value = 'this will overwrite from read' assert d_tcp.common[0].SN.value == 'this will overwrite from read' assert d_tcp.common[0].Vr.value == 'this will overwrite from read' assert d_tcp.common[0].SN.dirty assert d_tcp.common[0].Vr.dirty tcp_read_buffer = [b'\x00\x00\x00\x00\x00\x87\x01\x03\x84', b'\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00opt_a_b_c' b'\x00\x00\x00\x00\x00\x00\x00v0.0.0\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00sn-000\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'] d_tcp.client.socket.clear_buffer() d_tcp.client.socket._set_buffer(tcp_read_buffer) d_tcp.common[0].read() assert d_tcp.common[0].SN.value == "sn-000" assert d_tcp.common[0].Vr.value == "v0.0.0" assert not d_tcp.common[0].SN.dirty assert 
not d_tcp.common[0].Vr.dirty # rtu d_rtu = client.SunSpecModbusClientDeviceRTU(slave_id=1, name="COM2") rtu_buffer = [b'\x01\x03\x06Su', b'nS\x00\x01\x8d\xe4', b'\x01\x03\x02\x00B', b'8u', b'\x01\x03\x88\x00\x01', b'\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x01\x00\x00M\xf9', b'\x01\x03\x02\x00~', b'8d', b'\x01\x03\x02\x00@', b'\xb9\xb4', b'\x01\x03\x84\x00~', b'\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00' b'\xff\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xffI', b'\x01\x03\x02\xff\xff', b'\xb9\xf4'] d_rtu.open() d_rtu.client.serial._set_buffer(rtu_buffer) d_rtu.scan() assert d_rtu.common[0].SN.value == "sn-123456789" assert d_rtu.common[0].Vr.value == "1.2.3" assert not d_rtu.common[0].SN.dirty assert not d_rtu.common[0].Vr.dirty d_rtu.common[0].SN.value = 'sn-000' d_rtu.common[0].Vr.value = 'v0.0.0' assert d_rtu.common[0].SN.value == "sn-000" assert d_rtu.common[0].Vr.value == "v0.0.0" assert d_rtu.common[0].SN.dirty assert d_rtu.common[0].Vr.dirty rtu_write_buffer = [b'\x01\x10\x9cl\x00', b'\x18.N'] d_rtu.client.serial.clear_buffer() d_rtu.client.serial._set_buffer(rtu_write_buffer) d_rtu.common[0].write() d_rtu.common[0].SN.value = 'this will overwrite from read' 
d_rtu.common[0].Vr.value = 'this will overwrite from read' assert d_rtu.common[0].SN.value == 'this will overwrite from read' assert d_rtu.common[0].Vr.value == 'this will overwrite from read' assert d_rtu.common[0].SN.dirty assert d_rtu.common[0].Vr.dirty rtu_read_buffer = [b'\x01\x03\x84\x00\x01', b'\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00opt_a_b_c\x00' b'\x00\x00\x00\x00\x00\x00v0.0.0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'sn-000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd4h'] d_rtu.client.serial.clear_buffer() d_rtu.client.serial._set_buffer(rtu_read_buffer) d_rtu.common[0].read() assert d_rtu.common[0].SN.value == "sn-000" assert d_rtu.common[0].Vr.value == "v0.0.0" assert not d_rtu.common[0].SN.dirty assert not d_rtu.common[0].Vr.dirty def test_write_points(self): pass class TestSunSpecModbusClientModel: def test___init__(self): c = client.SunSpecModbusClientModel(704) assert c.model_id == 704 assert c.model_addr == 0 assert c.model_len == 0 assert c.model_def['id'] == 704 assert c.error_info == '' assert c.gdef['name'] == 'DERCtlAC' assert c.mid is None assert c.device is None assert c.model == c def test_error(self): c = client.SunSpecModbusClientModel(704) c.add_error('test error') assert c.error_info == 'test error\n' class TestSunSpecModbusClientDevice: def test___init__(self): d = client.SunSpecModbusClientDevice() assert d.did assert d.retry_count == 2 assert d.base_addr_list == [40000, 0, 50000] assert d.base_addr is None def test_connect(self): pass def test_disconnect(self): pass def test_close(self): pass def test_read(self): pass def test_write(self): pass def test_scan(self, monkeypatch): # tcp scan monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) c_tcp = client.SunSpecModbusClientDeviceTCP() tcp_req_check 
= [b'\x00\x00\x00\x00\x00\x06\x01\x03\x9c@\x00\x03', b'\x00\x00\x00\x00\x00\x06\x01\x03\x9cC\x00\x01', b'\x00\x00\x00\x00\x00\x06\x01\x03\x9cB\x00D', b'\x00\x00\x00\x00\x00\x06\x01\x03\x9c\x86\x00\x01', b'\x00\x00\x00\x00\x00\x06\x01\x03\x9c\x87\x00\x01', b'\x00\x00\x00\x00\x00\x06\x01\x03\x9c\x86\x00B', b'\x00\x00\x00\x00\x00\x06\x01\x03\x9c\xc8\x00\x01'] tcp_buffer = [b'\x00\x00\x00\x00\x00\t\x01\x03\x06', b'SunS\x00\x01', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00B', b'\x00\x00\x00\x00\x00\x8b\x01\x03\x88', b'\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00~', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\x00@', b'\x00\x00\x00\x00\x00\x87\x01\x03\x84', b'\x00~\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00\xff' b'\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80' b'\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff', b'\x00\x00\x00\x00\x00\x05\x01\x03\x02', b'\xff\xff'] c_tcp.client.connect() c_tcp.client.socket._set_buffer(tcp_buffer) c_tcp.scan() assert c_tcp.common assert c_tcp.volt_var for req in range(len(tcp_req_check)): assert tcp_req_check[req] == c_tcp.client.socket.request[req] # rtu scan monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) c_rtu = 
client.SunSpecModbusClientDeviceRTU(1, "COMM2") rtu_req_check = [b'\x01\x03\x9c@\x00\x03*O', b'\x01\x03\x9cC\x00\x01[\x8e', b'\x01\x03\x9cB\x00D\xcb\xbd', b'\x01\x03\x9c\x86\x00\x01K\xb3', b'\x01\x03\x9c\x87\x00\x01\x1as', b'\x01\x03\x9c\x86\x00B\nB', b'\x01\x03\x9c\xc8\x00\x01+\xa4'] rtu_buffer = [b'\x01\x03\x06Su', b'nS\x00\x01\x8d\xe4', b'\x01\x03\x02\x00B', b'8u', b'\x01\x03\x88\x00\x01', b'\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00opt_a_b_c\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x01\x00\x00M\xf9', b'\x01\x03\x02\x00~', b'8d', b'\x01\x03\x02\x00@', b'\xb9\xb4', b'\x01\x03\x84\x00~', b'\x00@\x00\x03\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x80\x00\x80\x00' b'\xff\xff\xff\xff\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00' b'\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff' b'\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\xff\xff\x80\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xffI', b'\x01\x03\x02\xff\xff', b'\xb9\xf4'] c_rtu.open() c_rtu.client.serial._set_buffer(rtu_buffer) c_rtu.scan() assert c_rtu.common assert c_rtu.volt_var for req in range(len(rtu_req_check)): assert rtu_req_check[req] == c_rtu.client.serial.request[req] class TestSunSpecModbusClientDeviceTCP: def test___init__(self): d = client.SunSpecModbusClientDeviceTCP() assert d.slave_id == 1 assert d.ipaddr == '127.0.0.1' assert d.ipport == 502 assert d.timeout is None assert d.ctx is None assert d.trace_func is None assert d.max_count == 125 assert 
d.client.__class__.__name__ == 'ModbusClientTCP' def test_connect(self, monkeypatch): d = client.SunSpecModbusClientDeviceTCP() with pytest.raises(Exception) as exc: d.connect() assert 'Connection error' in str(exc.value) monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) d.connect() assert d.client.socket is not None assert d.client.socket.connected is True assert d.client.socket.ipaddr == '127.0.0.1' assert d.client.socket.ipport == 502 assert d.client.socket.timeout == 2 def test_disconnect(self, monkeypatch): monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) d = client.SunSpecModbusClientDeviceTCP() d.client.connect() assert d.client.socket d.client.disconnect() assert d.client.socket is None def test_read(self, monkeypatch): monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) d = client.SunSpecModbusClientDeviceTCP() buffer = [b'\x00\x00\x00\x00\x00\x8f\x01\x03\x8c', b'SunS\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00opt_a_b_c' b'\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x01\x00\x00'] check_req = b'\x00\x00\x00\x00\x00\x06\x01\x03\x9c@\x00F' d.client.connect() d.client.socket._set_buffer(buffer) assert d.read(40000, 70) == buffer[1] assert d.client.socket.request[0] == check_req def test_write(self, monkeypatch): monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket) d = client.SunSpecModbusClientDeviceTCP() d.client.connect() data_to_write = b'sn-000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' buffer = [b'\x00\x00\x00\x00\x00\x06\x01\x10\x9c', b't\x00\x10'] d.client.socket._set_buffer(buffer) d.client.write(40052, data_to_write) check_req = 
b"\x00\x00\x00\x00\x00'\x01\x10\x9ct\x00\x10 sn-000\x00\x00\x00\x00\x00\x00\x00" \ b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" assert d.client.socket.request[0] == check_req class TestSunSpecModbusClientDeviceRTU: def test___init__(self, monkeypatch): monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) d = client.SunSpecModbusClientDeviceRTU(1, "COMM2") assert d.slave_id == 1 assert d.name == "COMM2" assert d.client.__class__.__name__ == "ModbusClientRTU" assert d.ctx is None assert d.trace_func is None assert d.max_count == 125 def test_open(self, monkeypatch): monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) d = client.SunSpecModbusClientDeviceRTU(1, "COMM2") d.open() assert d.client.serial.connected def test_close(self, monkeypatch): monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) d = client.SunSpecModbusClientDeviceRTU(1, "COMM2") d.open() d.close() assert not d.client.serial.connected def test_read(self, monkeypatch): monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) d = client.SunSpecModbusClientDeviceRTU(1, "COMM2") d.open() in_buff = [b'\x01\x03\x8cSu', b'nS\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00opt_a_b_c\x00' b'\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\xb7d'] check_req = b'\x01\x03\x9c@\x00F\xeb\xbc' d.client.serial._set_buffer(in_buff) check_read = in_buff[0] + in_buff[1] assert d.read(40000, 70) == check_read[3:-2] assert d.client.serial.request[0] == check_req def test_write(self, monkeypatch): monkeypatch.setattr(serial, 'Serial', MockPort.mock_port) d = client.SunSpecModbusClientDeviceRTU(1, "COMM2") d.open() data_to_write = 
b'v0.0.0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00sn-000\x00\x00\x00\x00\x00\x00\x00' \ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' buffer = [b'\x01\x10\x9cl\x00', b'\x18.N'] d.client.serial._set_buffer(buffer) d.write(40044, data_to_write) check_req = b'\x01\x10\x9cl\x00\x180v0.0.0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00sn-000\x00' \ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \ b'\x00\x00\x00\x00\x00\xad\xff' assert d.client.serial.request[0] == check_req if __name__ == "__main__": pass
38,805
93
780
63d1e648c07ea50bbe78568f68efbb6c5cf12e1d
5,365
py
Python
frag_pele/Helpers/find_dihedrals.py
danielSoler93/FrAG_PELE
0203d8caf51400d2cd4961281c3cc497441ff6fb
[ "MIT" ]
26
2019-05-17T08:21:23.000Z
2022-03-17T22:27:30.000Z
frag_pele/Helpers/find_dihedrals.py
nostrumbiodiscovery/frag_pele
2809c071afa18322613d8432ab271cdf13739883
[ "MIT" ]
37
2019-09-04T08:47:51.000Z
2021-07-13T12:57:23.000Z
frag_pele/Helpers/find_dihedrals.py
nostrumbiodiscovery/frag_pele
2809c071afa18322613d8432ab271cdf13739883
[ "MIT" ]
9
2019-05-17T08:04:32.000Z
2021-04-07T03:54:53.000Z
import os import glob import numpy as np import networkx as nx import peleffy from frag_pele.constants import SCHRODINGER from peleffy.topology import Topology from peleffy.forcefield import OpenForceField, OPLS2005ForceField from peleffy.topology import Molecule from peleffy.utils import Logger from peleffy.topology import molecule from peleffy.utils.toolkits import RDKitToolkitWrapper class ComputeDihedrals(object): """ A class to produce a library of dihedral angles. """ def __init__(self, pdb_file, forcefield='OPLS2005'): """ Initializes a ComputeDihedrals object Parameters ---------- topology_obj : An peleffy.topology.Topology A Topology object that contains the ligand's information mode: str Whether to extract all dihedrals or only those marked as flexible """ self._pdb_file = pdb_file self._forcefield = forcefield self._topology, self._molecule = self.load_molecule() self.dihedral_library = {} def calculate_cluster_angles(self, dihedral_list): """ Calculate dihedral angles from pdb Parameters ---------- pdb_file: str Path to the cluster representative conformation dihedral_list: list List of the tuples containing the atoms that form the dihedrals match_indexes: bool Whether to use the atom indices from the dihedral list or match to the cluster structure before """ rdkit_wrapper = RDKitToolkitWrapper() pdb_dihedrals = [] # use the input molecule as template since the cluster structures # probably will not have proper stereochemistry mol = molecule.Molecule(self._pdb_file, connectivity_template=self._molecule.rdkit_molecule) # we use substructure matching to ensure that the indices in the # clusters pdb and the input ligand are the same for dihedral in dihedral_list: names = [self._topology.atoms[atom].PDB_name for atom in dihedral] angle = get_dihedral(mol, *dihedral, units="degrees") pdb_dihedrals.append(names+[angle]) self.dihedral_library[self._pdb_file] = pdb_dihedrals def calculate(self): """ Calculate dihedrals library from the bce output """ logger = 
Logger() logger.info(' - Calculating dihedral library') self._calculate_all_dihedrals() def get_dihedral(mol, atom1, atom2, atom3, atom4, units="radians"): """ It calculates the value of the dihedral angle in the specified units (default radians) Parameters ---------- molecule : an offpele.topology.Molecule The offpele's Molecule object atom1 : int Index of the first atom in the dihedral atom2 : int Index of the second atom in the dihedral atom3 : int Index of the third atom in the dihedral atom4 : int Index of the fourth atom in the dihedral units : str The units in which to calculate the angle (default is radians, can be radians or degrees) """ from rdkit.Chem import rdMolTransforms if units == "degrees": angle = rdMolTransforms.GetDihedralDeg(mol.rdkit_molecule.GetConformer(), atom1, atom2, atom3, atom4) else: angle = rdMolTransforms.GetDihedralRad(mol.rdkit_molecule.GetConformer(), atom1, atom2, atom3, atom4) return angle
38.876812
114
0.660205
import os import glob import numpy as np import networkx as nx import peleffy from frag_pele.constants import SCHRODINGER from peleffy.topology import Topology from peleffy.forcefield import OpenForceField, OPLS2005ForceField from peleffy.topology import Molecule from peleffy.utils import Logger from peleffy.topology import molecule from peleffy.utils.toolkits import RDKitToolkitWrapper class ComputeDihedrals(object): """ A class to produce a library of dihedral angles. """ def __init__(self, pdb_file, forcefield='OPLS2005'): """ Initializes a ComputeDihedrals object Parameters ---------- topology_obj : An peleffy.topology.Topology A Topology object that contains the ligand's information mode: str Whether to extract all dihedrals or only those marked as flexible """ self._pdb_file = pdb_file self._forcefield = forcefield self._topology, self._molecule = self.load_molecule() self.dihedral_library = {} def load_molecule(self): os.environ['SCHRODINGER'] = SCHRODINGER m = Molecule(self._pdb_file) if self._forcefield == 'OPLS2005': ff = OPLS2005ForceField() elif self._forcefield == 'OpenForceField': ff = OpenForceField('openff_unconstrained-1.2.0.offxml') else: raise ValueError("Not valid ForceField. 
Pick between 'OPLS2005' and 'OpenForceField'") parameters = ff.parameterize(m) top = peleffy.topology.Topology(m, parameters) return top, m def list_all_dihedrals(self): dihedrals = [] seen_dihedrals = set() for proper in self._topology.propers: dihedral_indexes = (proper.atom1_idx, proper.atom2_idx, proper.atom3_idx, proper.atom4_idx) if dihedral_indexes in seen_dihedrals: continue seen_dihedrals.add(dihedral_indexes) dihedrals.append(list(dihedral_indexes)) return dihedrals def calculate_cluster_angles(self, dihedral_list): """ Calculate dihedral angles from pdb Parameters ---------- pdb_file: str Path to the cluster representative conformation dihedral_list: list List of the tuples containing the atoms that form the dihedrals match_indexes: bool Whether to use the atom indices from the dihedral list or match to the cluster structure before """ rdkit_wrapper = RDKitToolkitWrapper() pdb_dihedrals = [] # use the input molecule as template since the cluster structures # probably will not have proper stereochemistry mol = molecule.Molecule(self._pdb_file, connectivity_template=self._molecule.rdkit_molecule) # we use substructure matching to ensure that the indices in the # clusters pdb and the input ligand are the same for dihedral in dihedral_list: names = [self._topology.atoms[atom].PDB_name for atom in dihedral] angle = get_dihedral(mol, *dihedral, units="degrees") pdb_dihedrals.append(names+[angle]) self.dihedral_library[self._pdb_file] = pdb_dihedrals def _calculate_all_dihedrals(self): dihedrals = self.list_all_dihedrals() self.calculate_cluster_angles(dihedrals) def calculate(self): """ Calculate dihedrals library from the bce output """ logger = Logger() logger.info(' - Calculating dihedral library') self._calculate_all_dihedrals() def get_dihedral(mol, atom1, atom2, atom3, atom4, units="radians"): """ It calculates the value of the dihedral angle in the specified units (default radians) Parameters ---------- molecule : an offpele.topology.Molecule The 
offpele's Molecule object atom1 : int Index of the first atom in the dihedral atom2 : int Index of the second atom in the dihedral atom3 : int Index of the third atom in the dihedral atom4 : int Index of the fourth atom in the dihedral units : str The units in which to calculate the angle (default is radians, can be radians or degrees) """ from rdkit.Chem import rdMolTransforms if units == "degrees": angle = rdMolTransforms.GetDihedralDeg(mol.rdkit_molecule.GetConformer(), atom1, atom2, atom3, atom4) else: angle = rdMolTransforms.GetDihedralRad(mol.rdkit_molecule.GetConformer(), atom1, atom2, atom3, atom4) return angle def select_dihedrals(input_dihedrals_list, selected_dihedrals_atoms): selected_dihedrals = [] for dihedrals in input_dihedrals_list: atom1, atom2, atom3, atom4, angle = dihedrals atoms = [atom1, atom2, atom3, atom4] for atoms_of_dih in selected_dihedrals_atoms: if atoms_of_dih == atoms or atoms_of_dih[::-1] == atoms: # Try in both reading senses print(f"Constraining dihedral: {atoms}") selected_dihedrals.append([*atoms, angle]) if len(selected_dihedrals) == 0: raise ValueError(f"The dihedral formed by {selected_dihedrals_atoms} were not found in ligand dihedrals.") return selected_dihedrals
1,759
0
108
5d7041b6db70633f288345561f480d112b593353
5,715
py
Python
tests/unit/lib/utils/test_handler_observer.py
zhuhaow/aws-sam-cli
59d82ec6848b5a0cdd544d8ada838d4d34052971
[ "Apache-2.0" ]
2,959
2018-05-08T21:48:56.000Z
2020-08-24T14:35:39.000Z
tests/unit/lib/utils/test_handler_observer.py
zhuhaow/aws-sam-cli
59d82ec6848b5a0cdd544d8ada838d4d34052971
[ "Apache-2.0" ]
1,469
2018-05-08T22:44:28.000Z
2020-08-24T20:19:24.000Z
tests/unit/lib/utils/test_handler_observer.py
zhuhaow/aws-sam-cli
59d82ec6848b5a0cdd544d8ada838d4d34052971
[ "Apache-2.0" ]
642
2018-05-08T22:09:19.000Z
2020-08-17T09:04:37.000Z
import re from unittest.case import TestCase from unittest.mock import MagicMock, patch, ANY from samcli.lib.utils.path_observer import HandlerObserver, PathHandler, StaticFolderWrapper
39.143836
107
0.707962
import re from unittest.case import TestCase from unittest.mock import MagicMock, patch, ANY from samcli.lib.utils.path_observer import HandlerObserver, PathHandler, StaticFolderWrapper class TestPathHandler(TestCase): def test_init(self): handler_mock = MagicMock() path_mock = MagicMock() create_mock = MagicMock() delete_mock = MagicMock() bundle = PathHandler(handler_mock, path_mock, True, True, create_mock, delete_mock) self.assertEqual(bundle.event_handler, handler_mock) self.assertEqual(bundle.path, path_mock) self.assertEqual(bundle.self_create, create_mock) self.assertEqual(bundle.self_delete, delete_mock) self.assertTrue(bundle.recursive) self.assertTrue(bundle.static_folder) class TestStaticFolderWrapper(TestCase): def setUp(self) -> None: self.observer = MagicMock() self.path_handler = MagicMock() self.initial_watch = MagicMock() self.wrapper = StaticFolderWrapper(self.observer, self.initial_watch, self.path_handler) def test_on_parent_change_on_delete(self): watch_mock = MagicMock() self.wrapper._watch = watch_mock self.wrapper._path_handler.path.exists.return_value = False self.wrapper._on_parent_change(MagicMock()) self.path_handler.self_delete.assert_called_once_with() self.observer.unschedule.assert_called_once_with(watch_mock) self.assertIsNone(self.wrapper._watch) def test_on_parent_change_on_create(self): watch_mock = MagicMock() self.observer.schedule_handler.return_value = watch_mock self.wrapper._watch = None self.wrapper._path_handler.path.exists.return_value = True self.wrapper._on_parent_change(MagicMock()) self.path_handler.self_create.assert_called_once_with() self.observer.schedule_handler.assert_called_once_with(self.wrapper._path_handler) self.assertEqual(self.wrapper._watch, watch_mock) @patch("samcli.lib.utils.path_observer.RegexMatchingEventHandler") @patch("samcli.lib.utils.path_observer.PathHandler") def test_get_dir_parent_path_handler(self, path_handler_mock, event_handler_mock): path_mock = MagicMock() 
path_mock.resolve.return_value.parent = "/parent/" path_mock.resolve.return_value.__str__.return_value = "/parent/dir/" self.path_handler.path = path_mock event_handler = MagicMock() event_handler_mock.return_value = event_handler path_handler = MagicMock() path_handler_mock.return_value = path_handler result = self.wrapper.get_dir_parent_path_handler() self.assertEqual(result, path_handler) path_handler_mock.assert_called_once_with(path="/parent/", event_handler=event_handler) escaped_path = re.escape("/parent/dir/") event_handler_mock.assert_called_once_with( regexes=[f"^{escaped_path}$"], ignore_regexes=[], ignore_directories=False, case_sensitive=True ) class TestHandlerObserver(TestCase): def setUp(self) -> None: self.observer = HandlerObserver() def test_schedule_handlers(self): bundle_1 = MagicMock() bundle_2 = MagicMock() watch_1 = MagicMock() watch_2 = MagicMock() schedule_handler_mock = MagicMock() schedule_handler_mock.side_effect = [watch_1, watch_2] self.observer.schedule_handler = schedule_handler_mock result = self.observer.schedule_handlers([bundle_1, bundle_2]) self.assertEqual(result, [watch_1, watch_2]) schedule_handler_mock.assert_any_call(bundle_1) schedule_handler_mock.assert_any_call(bundle_2) @patch("samcli.lib.utils.path_observer.StaticFolderWrapper") def test_schedule_handler_not_static(self, wrapper_mock: MagicMock): bundle = MagicMock() event_handler = MagicMock() bundle.event_handler = event_handler bundle.path = "dir" bundle.recursive = True bundle.static_folder = False watch = MagicMock() schedule_mock = MagicMock() schedule_mock.return_value = watch self.observer.schedule = schedule_mock result = self.observer.schedule_handler(bundle) self.assertEqual(result, watch) schedule_mock.assert_any_call(bundle.event_handler, "dir", True) wrapper_mock.assert_not_called() @patch("samcli.lib.utils.path_observer.StaticFolderWrapper") def test_schedule_handler_static(self, wrapper_mock: MagicMock): bundle = MagicMock() event_handler = MagicMock() 
bundle.event_handler = event_handler bundle.path = "dir" bundle.recursive = True bundle.static_folder = True watch = MagicMock() parent_bundle = MagicMock() event_handler = MagicMock() parent_bundle.event_handler = event_handler parent_bundle.path = "parent" parent_bundle.recursive = False parent_bundle.static_folder = False parent_watch = MagicMock() schedule_mock = MagicMock() schedule_mock.side_effect = [watch, parent_watch] self.observer.schedule = schedule_mock wrapper = MagicMock() wrapper_mock.return_value = wrapper wrapper.get_dir_parent_path_handler.return_value = parent_bundle result = self.observer.schedule_handler(bundle) self.assertEqual(result, parent_watch) schedule_mock.assert_any_call(bundle.event_handler, "dir", True) schedule_mock.assert_any_call(parent_bundle.event_handler, "parent", False) wrapper_mock.assert_called_once_with(self.observer, watch, bundle)
4,914
517
95
a2f1769aaf31e2b6656d870ee3954314a4c10aea
9,533
py
Python
brocade-ssh.py
IBM/ibm-qradar-brocade
6df41d74e23f3018fedcacd8f7d73a38a579a9ac
[ "Apache-2.0" ]
null
null
null
brocade-ssh.py
IBM/ibm-qradar-brocade
6df41d74e23f3018fedcacd8f7d73a38a579a9ac
[ "Apache-2.0" ]
null
null
null
brocade-ssh.py
IBM/ibm-qradar-brocade
6df41d74e23f3018fedcacd8f7d73a38a579a9ac
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # # Copyright IBM Corp. 2016 All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # ########################################################## # # # Disclaimer # # # # The script is provided here is only a sample. # # # # There is no official support on the script by me # # # # or IBM. # # # # Do NOT use the script in the production environment. # # # # You may use the script as guideline to create the # # # # custom action response best suited to your needs. # # # ########################################################## import re import sys import datetime import subprocess ## ** Jump to main() function ** def updatePolicy( lstAction ): """ IN: list( default_ipv4,ipv4,active,1,any,tcp,22,permit,#rules_in_policy ) """ global offence_ip, offence_port, max_rules, dry_run epoch = datetime.datetime.now().strftime('%s') # Add epoch timestamp to policy name. qradar_policy_name = 'qr_{0}' . format(epoch) policy_name,policy_type,p_active,\ rule_id,rule_ip,rule_proto,\ rule_port,rule_action,policy_rule_count = lstAction CLONERULE = 'ipfilter --clone {0} -from {1}' . \ format(qradar_policy_name, policy_name ) DELRULE = 'ipfilter --delrule {0} -rule {1}' . \ format(qradar_policy_name, rule_id ) ADDRULE_DENY = 'ipfilter --addrule {0} -rule {1} -sip {2} -dp {3} -proto {4} -act {5}'. 
\ format( qradar_policy_name, rule_id, offence_ip, offence_port, rule_proto, 'deny' ) new_rule_id = int(max_rules) + 1 ADDRULE_PERMIT = 'ipfilter --addrule {0} -rule {1} -sip {2} -dp {3} -proto {4} -act {5}'. \ format( qradar_policy_name, new_rule_id, 'any', offence_port, rule_proto, 'permit' ) SAVERULE = 'ipfilter --save {0}' . \ format(qradar_policy_name ) ACTIVATE = 'ipfilter --activate {0}' . \ format(qradar_policy_name ) print('{0}\n{1}\n{2}\n{3}\n{4}\n{5}'. format( CLONERULE, DELRULE, ADDRULE_DENY, ADDRULE_PERMIT, SAVERULE, ACTIVATE ) ) print('Blocking further connections from {0} on port {1}'. format( offence_ip, offence_port ) ) # Add ACTIVATE to following tuple SEQ = ( CLONERULE, DELRULE, ADDRULE_DENY, ADDRULE_PERMIT, SAVERULE, ACTIVATE ) switch_cmds = ' ; ' . join( SEQ ) runCli( switch_cmds ) def loadData( switch_cmd ): """ This function is written to get the currently active rules from the switch configuration. Once the active rules are retrieved, they are flattened in a list. 
Parameters - IN : Switch command to execute - OUT: list in following format for all rules found for all policies default_ipv4,ipv4,active,1,any,tcp,22,permit """ global active_policies active_policies.clear() spaces = re.compile('\s+') commas = re.compile(',') lst_rules = [] # the user has specified a # output file with switch output # load it (simulation mode) #f = open('switch-output', 'r') #data = f.read() #f.close() data = runCli( switch_cmd ) # Process the output received from # (ipfilter --show) switch command # or loaded from output file for line in data.split('\n'): # Skip empty lines & header line if re.search(r'^$|^Rule', line): continue # Extract policy_name, type and state # from line beginning with Name if re.search(r'^Name',line): line2 = commas.sub('',line) p_name = line2.split(':')[1].split(' ')[1].strip() p_type = line2.split(':')[2].split(' ')[1].strip() p_state = line2.split(':')[3].split(' ')[1].strip() continue # Consider only the active policy if p_state == 'active' and \ p_type == ip_type : # creating a set of policies active_policies.add( p_name ) # convert space to commas csv_line = spaces.sub(',',line.strip()) # create a tuple rec = ( p_name, p_type, p_state, csv_line ) # create a comma separated values record csv_rec = ',' . join( rec ) # store the value in a list lst_rules.append(csv_rec) return lst_rules def getRuleCount( lstRules, policy_name ): """ This function return the rule count for a given policy indicated by policy_name Parameters: - IN : 1. List containing all the rules 2. Name of the policy - Out: # of rules in the policy. 
""" count = 0 for x in lstRules: if x.split(',')[0] == policy_name: count +=1 return count def checkRules( lstRules ): """ IN: list ( default_ipv4,ipv4,active,1,any,tcp,22,permit ) OUT: list( default_ipv4,ipv4,active,1,any,tcp,22,permit,#rules_in_policy) """ global active_policies, offence_port, ip_type, max_rules lstAction = [] ipAlreadyBlocked = False # Iterate through the set with # active policy name for active_policy in active_policies: rule_count = 0 take_action= False # Iterate through all rules (ipv4 & ipv6) # stored in the list for rule in lstRules: p_name = rule.split(',')[0] p_type = rule.split(',')[1] p_state = rule.split(',')[2] rule_id = rule.split(',')[3] rule_ip = rule.split(',')[4] rule_proto = rule.split(',')[5] # account for port range here if rule.split(',')[7] == '-' : p_begin = rule.split(',')[6] p_end = rule.split(',')[8] rule_port = p_begin + '-' + p_end rule_action= rule.split(',')[9] else: rule_port = rule.split(',')[6] rule_action= rule.split(',')[7] # ip_type can be ipv4 or ipv6 # determined globally if p_type == ip_type and \ p_name == active_policy: # get total rules count for this policy if rule_count == 0: rule_count = getRuleCount( lstRules, p_name ) max_rules = rule_count port_matched = getPortMatch( rule_port ) # check if the offensive ip is alrady blocked # abort the further execution if port_matched and \ rule_ip == offence_ip and \ rule_action == 'deny': print('No policy change required') print('IP {0} is alredy blocked for port {1}'.\ format(offence_ip,rule_port) ) break if port_matched and \ rule_action == 'permit' and \ rule_ip == 'any': tup = ( rule, str(rule_count) ) action_rec = ',' . join( tup ) lstAction = list(action_rec.split(',')) # abort iner loop take_action = True break # abort outer loop if take_action: break return lstAction def runCli( cli_cmd ): """ Purpose: Run any external command and return the output Parameters: - IN 1. cli command to run - OUT 1. 
output of cli command """ global system, remote_user lst_cmd = [] lst_cmd.append( 'ssh' ) lst_cmd.append( '-o StrictHostKeyChecking=no' ) lst_cmd.append( remote_user + "@" + system ) lst_cmd.append( cli_cmd ) print(cli_cmd) try: # Command execution using subprocess stdout = subprocess.check_output( \ lst_cmd, universal_newlines = True, shell = False ) if stdout != None: return stdout except KeyboardInterrupt: print( "User abort ..\n" ) sys.exit( 1 ) except subprocess.CalledProcessError: print( "Error connecting to remote host !! aborting !!! \n") sys.exit( 1 ) if __name__ == '__main__': argc = len(sys.argv) - 1 if argc == 0 or argc > 3 : Usage() system,ip_address,command = sys.argv[1:] remote_user = 'qradaradmin' active_policies = {'index_ignore'} cmd_to_port_dict = { 'ssh' : 22, 'https' : 443, 'telnet' : 23, 'http' : 80 } max_rules = 0 offence_port = cmd_to_port_dict[command] offence_ip = ip_address dry_run = False if offence_ip.find('.') > 0: ip_type = 'ipv4' else: ip_type = 'ipv6' if dry_run: print('*** Simulation only ***' ) print('Switch IP: {0}\nOffence IP:{1}\nOffence Port:{2}\n'. format(system,ip_address,offence_port) ) MSG = """ NOTE: The current syslog configuration does not write protocol in the event. The switch on the other hand has rules for both tcp and udp protocols. As no protocol information is sent along with the event 1st rule matching the ip / port will be chosen irrespective of the protocol. This may lead to unintended blocking rule. Needs to be fixed in rsyslog event. """ print(MSG) main()
21.864679
92
0.634323
#!/usr/bin/env python3 # # Copyright IBM Corp. 2016 All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # ########################################################## # # # Disclaimer # # # # The script is provided here is only a sample. # # # # There is no official support on the script by me # # # # or IBM. # # # # Do NOT use the script in the production environment. # # # # You may use the script as guideline to create the # # # # custom action response best suited to your needs. # # # ########################################################## import re import sys import datetime import subprocess ## ** Jump to main() function ** def main(): lstAction = [] lstRules = loadData('ipfilter --show') lstAction = checkRules( lstRules ) if len(lstAction) > 0: updatePolicy( lstAction ) def updatePolicy( lstAction ): """ IN: list( default_ipv4,ipv4,active,1,any,tcp,22,permit,#rules_in_policy ) """ global offence_ip, offence_port, max_rules, dry_run epoch = datetime.datetime.now().strftime('%s') # Add epoch timestamp to policy name. qradar_policy_name = 'qr_{0}' . format(epoch) policy_name,policy_type,p_active,\ rule_id,rule_ip,rule_proto,\ rule_port,rule_action,policy_rule_count = lstAction CLONERULE = 'ipfilter --clone {0} -from {1}' . \ format(qradar_policy_name, policy_name ) DELRULE = 'ipfilter --delrule {0} -rule {1}' . \ format(qradar_policy_name, rule_id ) ADDRULE_DENY = 'ipfilter --addrule {0} -rule {1} -sip {2} -dp {3} -proto {4} -act {5}'. 
\ format( qradar_policy_name, rule_id, offence_ip, offence_port, rule_proto, 'deny' ) new_rule_id = int(max_rules) + 1 ADDRULE_PERMIT = 'ipfilter --addrule {0} -rule {1} -sip {2} -dp {3} -proto {4} -act {5}'. \ format( qradar_policy_name, new_rule_id, 'any', offence_port, rule_proto, 'permit' ) SAVERULE = 'ipfilter --save {0}' . \ format(qradar_policy_name ) ACTIVATE = 'ipfilter --activate {0}' . \ format(qradar_policy_name ) print('{0}\n{1}\n{2}\n{3}\n{4}\n{5}'. format( CLONERULE, DELRULE, ADDRULE_DENY, ADDRULE_PERMIT, SAVERULE, ACTIVATE ) ) print('Blocking further connections from {0} on port {1}'. format( offence_ip, offence_port ) ) # Add ACTIVATE to following tuple SEQ = ( CLONERULE, DELRULE, ADDRULE_DENY, ADDRULE_PERMIT, SAVERULE, ACTIVATE ) switch_cmds = ' ; ' . join( SEQ ) runCli( switch_cmds ) def loadData( switch_cmd ): """ This function is written to get the currently active rules from the switch configuration. Once the active rules are retrieved, they are flattened in a list. 
Parameters - IN : Switch command to execute - OUT: list in following format for all rules found for all policies default_ipv4,ipv4,active,1,any,tcp,22,permit """ global active_policies active_policies.clear() spaces = re.compile('\s+') commas = re.compile(',') lst_rules = [] # the user has specified a # output file with switch output # load it (simulation mode) #f = open('switch-output', 'r') #data = f.read() #f.close() data = runCli( switch_cmd ) # Process the output received from # (ipfilter --show) switch command # or loaded from output file for line in data.split('\n'): # Skip empty lines & header line if re.search(r'^$|^Rule', line): continue # Extract policy_name, type and state # from line beginning with Name if re.search(r'^Name',line): line2 = commas.sub('',line) p_name = line2.split(':')[1].split(' ')[1].strip() p_type = line2.split(':')[2].split(' ')[1].strip() p_state = line2.split(':')[3].split(' ')[1].strip() continue # Consider only the active policy if p_state == 'active' and \ p_type == ip_type : # creating a set of policies active_policies.add( p_name ) # convert space to commas csv_line = spaces.sub(',',line.strip()) # create a tuple rec = ( p_name, p_type, p_state, csv_line ) # create a comma separated values record csv_rec = ',' . join( rec ) # store the value in a list lst_rules.append(csv_rec) return lst_rules def getRuleCount( lstRules, policy_name ): """ This function return the rule count for a given policy indicated by policy_name Parameters: - IN : 1. List containing all the rules 2. Name of the policy - Out: # of rules in the policy. 
""" count = 0 for x in lstRules: if x.split(',')[0] == policy_name: count +=1 return count def checkRules( lstRules ): """ IN: list ( default_ipv4,ipv4,active,1,any,tcp,22,permit ) OUT: list( default_ipv4,ipv4,active,1,any,tcp,22,permit,#rules_in_policy) """ global active_policies, offence_port, ip_type, max_rules lstAction = [] ipAlreadyBlocked = False # Iterate through the set with # active policy name for active_policy in active_policies: rule_count = 0 take_action= False # Iterate through all rules (ipv4 & ipv6) # stored in the list for rule in lstRules: p_name = rule.split(',')[0] p_type = rule.split(',')[1] p_state = rule.split(',')[2] rule_id = rule.split(',')[3] rule_ip = rule.split(',')[4] rule_proto = rule.split(',')[5] # account for port range here if rule.split(',')[7] == '-' : p_begin = rule.split(',')[6] p_end = rule.split(',')[8] rule_port = p_begin + '-' + p_end rule_action= rule.split(',')[9] else: rule_port = rule.split(',')[6] rule_action= rule.split(',')[7] # ip_type can be ipv4 or ipv6 # determined globally if p_type == ip_type and \ p_name == active_policy: # get total rules count for this policy if rule_count == 0: rule_count = getRuleCount( lstRules, p_name ) max_rules = rule_count port_matched = getPortMatch( rule_port ) # check if the offensive ip is alrady blocked # abort the further execution if port_matched and \ rule_ip == offence_ip and \ rule_action == 'deny': print('No policy change required') print('IP {0} is alredy blocked for port {1}'.\ format(offence_ip,rule_port) ) break if port_matched and \ rule_action == 'permit' and \ rule_ip == 'any': tup = ( rule, str(rule_count) ) action_rec = ',' . 
join( tup ) lstAction = list(action_rec.split(',')) # abort iner loop take_action = True break # abort outer loop if take_action: break return lstAction def getPortMatch( rule_port ): global offence_port if re.search(r'-',rule_port): port_begin = rule_port.split('-')[0] port_end = rule_port.split('-')[1] if offence_port >= int(port_begin) and \ offence_port <= int(port_end): return True else: if int(rule_port) == offence_port: return True return False def runCli( cli_cmd ): """ Purpose: Run any external command and return the output Parameters: - IN 1. cli command to run - OUT 1. output of cli command """ global system, remote_user lst_cmd = [] lst_cmd.append( 'ssh' ) lst_cmd.append( '-o StrictHostKeyChecking=no' ) lst_cmd.append( remote_user + "@" + system ) lst_cmd.append( cli_cmd ) print(cli_cmd) try: # Command execution using subprocess stdout = subprocess.check_output( \ lst_cmd, universal_newlines = True, shell = False ) if stdout != None: return stdout except KeyboardInterrupt: print( "User abort ..\n" ) sys.exit( 1 ) except subprocess.CalledProcessError: print( "Error connecting to remote host !! aborting !!! \n") sys.exit( 1 ) def Usage(): global system,ip_address,command msg = """ Usage: {0} <system_ip|FQDN> <offense ip> <cmd(ssh|telnet|http|https)> """ print( msg . format(system,ip_address,command)) sys.exit() if __name__ == '__main__': argc = len(sys.argv) - 1 if argc == 0 or argc > 3 : Usage() system,ip_address,command = sys.argv[1:] remote_user = 'qradaradmin' active_policies = {'index_ignore'} cmd_to_port_dict = { 'ssh' : 22, 'https' : 443, 'telnet' : 23, 'http' : 80 } max_rules = 0 offence_port = cmd_to_port_dict[command] offence_ip = ip_address dry_run = False if offence_ip.find('.') > 0: ip_type = 'ipv4' else: ip_type = 'ipv6' if dry_run: print('*** Simulation only ***' ) print('Switch IP: {0}\nOffence IP:{1}\nOffence Port:{2}\n'. 
format(system,ip_address,offence_port) ) MSG = """ NOTE: The current syslog configuration does not write protocol in the event. The switch on the other hand has rules for both tcp and udp protocols. As no protocol information is sent along with the event 1st rule matching the ip / port will be chosen irrespective of the protocol. This may lead to unintended blocking rule. Needs to be fixed in rsyslog event. """ print(MSG) main()
631
0
72
5319787bd12168d5330cd1dd212c418d6ab71ea1
10,023
py
Python
contextily/plotting.py
martinfleis/contextily
fc00861c9756821f62de27bb06ef6771abc517d6
[ "BSD-3-Clause" ]
182
2020-04-08T15:56:50.000Z
2022-03-24T15:02:19.000Z
contextily/plotting.py
martinfleis/contextily
fc00861c9756821f62de27bb06ef6771abc517d6
[ "BSD-3-Clause" ]
66
2020-04-09T06:23:50.000Z
2022-02-20T19:04:38.000Z
contextily/plotting.py
martinfleis/contextily
fc00861c9756821f62de27bb06ef6771abc517d6
[ "BSD-3-Clause" ]
28
2020-04-08T12:53:44.000Z
2021-12-18T01:05:58.000Z
"""Tools to plot basemaps""" import warnings import numpy as np from . import providers from xyzservices import TileProvider from .tile import bounds2img, _sm2ll, warp_tiles, _warper from rasterio.enums import Resampling from rasterio.warp import transform_bounds from matplotlib import patheffects from matplotlib.pyplot import draw INTERPOLATION = "bilinear" ZOOM = "auto" ATTRIBUTION_SIZE = 8 def add_basemap( ax, zoom=ZOOM, source=None, interpolation=INTERPOLATION, attribution=None, attribution_size=ATTRIBUTION_SIZE, reset_extent=True, crs=None, resampling=Resampling.bilinear, **extra_imshow_args ): """ Add a (web/local) basemap to `ax`. Parameters ---------- ax : AxesSubplot Matplotlib axes object on which to add the basemap. The extent of the axes is assumed to be in Spherical Mercator (EPSG:3857), unless the `crs` keyword is specified. zoom : int or 'auto' [Optional. Default='auto'] Level of detail for the basemap. If 'auto', it is calculated automatically. Ignored if `source` is a local file. source : xyzservices.TileProvider object or str [Optional. Default: Stamen Terrain web tiles] The tile source: web tile provider or path to local file. The web tile provider can be in the form of a :class:`xyzservices.TileProvider` object or a URL. The placeholders for the XYZ in the URL need to be `{x}`, `{y}`, `{z}`, respectively. For local file paths, the file is read with `rasterio` and all bands are loaded into the basemap. IMPORTANT: tiles are assumed to be in the Spherical Mercator projection (EPSG:3857), unless the `crs` keyword is specified. interpolation : str [Optional. Default='bilinear'] Interpolation algorithm to be passed to `imshow`. See `matplotlib.pyplot.imshow` for further details. attribution : str [Optional. Defaults to attribution specified by the source] Text to be added at the bottom of the axis. This defaults to the attribution of the provider specified in `source` if available. 
Specify False to not automatically add an attribution, or a string to pass a custom attribution. attribution_size : int [Optional. Defaults to `ATTRIBUTION_SIZE`]. Font size to render attribution text with. reset_extent : bool [Optional. Default=True] If True, the extent of the basemap added is reset to the original extent (xlim, ylim) of `ax` crs : None or str or CRS [Optional. Default=None] coordinate reference system (CRS), expressed in any format permitted by rasterio, to use for the resulting basemap. If None (default), no warping is performed and the original Spherical Mercator (EPSG:3857) is used. resampling : <enum 'Resampling'> [Optional. Default=Resampling.bilinear] Resampling method for executing warping, expressed as a `rasterio.enums.Resampling` method **extra_imshow_args : Other parameters to be passed to `imshow`. Examples -------- >>> import geopandas >>> import contextily as ctx >>> db = geopandas.read_file(ps.examples.get_path('virginia.shp')) Ensure the data is in Spherical Mercator: >>> db = db.to_crs(epsg=3857) Add a web basemap: >>> ax = db.plot(alpha=0.5, color='k', figsize=(6, 6)) >>> ctx.add_basemap(ax, source=url) >>> plt.show() Or download a basemap to a local file and then plot it: >>> source = 'virginia.tiff' >>> _ = ctx.bounds2raster(*db.total_bounds, zoom=6, source=source) >>> ax = db.plot(alpha=0.5, color='k', figsize=(6, 6)) >>> ctx.add_basemap(ax, source=source) >>> plt.show() """ xmin, xmax, ymin, ymax = ax.axis() # If web source if ( source is None or isinstance(source, (dict, TileProvider)) or (isinstance(source, str) and source[:4] == "http") ): # Extent left, right, bottom, top = xmin, xmax, ymin, ymax # Convert extent from `crs` into WM for tile query if crs is not None: left, right, bottom, top = _reproj_bb( left, right, bottom, top, crs, {"init": "epsg:3857"} ) # Download image image, extent = bounds2img( left, bottom, right, top, zoom=zoom, source=source, ll=False ) # Warping if crs is not None: image, extent = 
warp_tiles(image, extent, t_crs=crs, resampling=resampling) # Check if overlay if _is_overlay(source) and 'zorder' not in extra_imshow_args: # If zorder was not set then make it 9 otherwise leave it extra_imshow_args['zorder'] = 9 # If local source else: import rasterio as rio # Read file with rio.open(source) as raster: if reset_extent: from rasterio.mask import mask as riomask # Read window if crs: left, bottom, right, top = rio.warp.transform_bounds( crs, raster.crs, xmin, ymin, xmax, ymax ) else: left, bottom, right, top = xmin, ymin, xmax, ymax window = [ { "type": "Polygon", "coordinates": ( ( (left, bottom), (right, bottom), (right, top), (left, top), (left, bottom), ), ), } ] image, img_transform = riomask(raster, window, crop=True) extent = left, right, bottom, top else: # Read full image = np.array([band for band in raster.read()]) img_transform = raster.transform bb = raster.bounds extent = bb.left, bb.right, bb.bottom, bb.top # Warp if (crs is not None) and (raster.crs != crs): image, bounds, _ = _warper( image, img_transform, raster.crs, crs, resampling ) extent = bounds.left, bounds.right, bounds.bottom, bounds.top image = image.transpose(1, 2, 0) # Plotting if image.shape[2] == 1: image = image[:, :, 0] img = ax.imshow( image, extent=extent, interpolation=interpolation, **extra_imshow_args ) if reset_extent: ax.axis((xmin, xmax, ymin, ymax)) else: max_bounds = ( min(xmin, extent[0]), max(xmax, extent[1]), min(ymin, extent[2]), max(ymax, extent[3]), ) ax.axis(max_bounds) # Add attribution text if source is None: source = providers.Stamen.Terrain if isinstance(source, (dict, TileProvider)) and attribution is None: attribution = source.get("attribution") if attribution: add_attribution(ax, attribution, font_size=attribution_size) return def _is_overlay(source): """ Check if the identified source is an overlay (partially transparent) layer. Parameters ---------- source : dict The tile source: web tile provider. 
Must be preprocessed as into a dictionary, not just a string. Returns ------- bool Notes ----- This function is based on a very similar javascript version found in leaflet: https://github.com/leaflet-extras/leaflet-providers/blob/9eb968f8442ea492626c9c8f0dac8ede484e6905/preview/preview.js#L56-L70 """ if not isinstance(source, dict): return False if source.get('opacity', 1.0) < 1.0: return True overlayPatterns = [ '^(OpenWeatherMap|OpenSeaMap)', 'OpenMapSurfer.(Hybrid|AdminBounds|ContourLines|Hillshade|ElementsAtRisk)', 'Stamen.Toner(Hybrid|Lines|Labels)', 'CartoDB.(Positron|DarkMatter|Voyager)OnlyLabels', 'Hydda.RoadsAndLabels', '^JusticeMap', 'OpenPtMap', 'OpenRailwayMap', 'OpenFireMap', 'SafeCast' ] import re return bool(re.match('(' + '|'.join(overlayPatterns) + ')', source.get('name', ''))) def add_attribution(ax, text, font_size=ATTRIBUTION_SIZE, **kwargs): """ Utility to add attribution text. Parameters ---------- ax : AxesSubplot Matplotlib axes object on which to add the attribution text. text : str Text to be added at the bottom of the axis. font_size : int [Optional. Defaults to 8] Font size in which to render the attribution text. **kwargs : Additional keywords to pass to the matplotlib `text` method. Returns ------- matplotlib.text.Text Matplotlib Text object added to the plot. """ # Add draw() as it resizes the axis and allows the wrapping to work as # expected. See https://github.com/darribas/contextily/issues/95 for some # details on the issue draw() text_artist = ax.text( 0.005, 0.005, text, transform=ax.transAxes, size=font_size, path_effects=[patheffects.withStroke(linewidth=2, foreground="w")], wrap=True, **kwargs, ) # hack to have the text wrapped in the ax extent, for some explanation see # https://stackoverflow.com/questions/48079364/wrapping-text-not-working-in-matplotlib wrap_width = ax.get_window_extent().width * 0.99 text_artist._get_wrap_line_width = lambda: wrap_width return text_artist
34.681661
128
0.594632
"""Tools to plot basemaps""" import warnings import numpy as np from . import providers from xyzservices import TileProvider from .tile import bounds2img, _sm2ll, warp_tiles, _warper from rasterio.enums import Resampling from rasterio.warp import transform_bounds from matplotlib import patheffects from matplotlib.pyplot import draw INTERPOLATION = "bilinear" ZOOM = "auto" ATTRIBUTION_SIZE = 8 def add_basemap( ax, zoom=ZOOM, source=None, interpolation=INTERPOLATION, attribution=None, attribution_size=ATTRIBUTION_SIZE, reset_extent=True, crs=None, resampling=Resampling.bilinear, **extra_imshow_args ): """ Add a (web/local) basemap to `ax`. Parameters ---------- ax : AxesSubplot Matplotlib axes object on which to add the basemap. The extent of the axes is assumed to be in Spherical Mercator (EPSG:3857), unless the `crs` keyword is specified. zoom : int or 'auto' [Optional. Default='auto'] Level of detail for the basemap. If 'auto', it is calculated automatically. Ignored if `source` is a local file. source : xyzservices.TileProvider object or str [Optional. Default: Stamen Terrain web tiles] The tile source: web tile provider or path to local file. The web tile provider can be in the form of a :class:`xyzservices.TileProvider` object or a URL. The placeholders for the XYZ in the URL need to be `{x}`, `{y}`, `{z}`, respectively. For local file paths, the file is read with `rasterio` and all bands are loaded into the basemap. IMPORTANT: tiles are assumed to be in the Spherical Mercator projection (EPSG:3857), unless the `crs` keyword is specified. interpolation : str [Optional. Default='bilinear'] Interpolation algorithm to be passed to `imshow`. See `matplotlib.pyplot.imshow` for further details. attribution : str [Optional. Defaults to attribution specified by the source] Text to be added at the bottom of the axis. This defaults to the attribution of the provider specified in `source` if available. 
Specify False to not automatically add an attribution, or a string to pass a custom attribution. attribution_size : int [Optional. Defaults to `ATTRIBUTION_SIZE`]. Font size to render attribution text with. reset_extent : bool [Optional. Default=True] If True, the extent of the basemap added is reset to the original extent (xlim, ylim) of `ax` crs : None or str or CRS [Optional. Default=None] coordinate reference system (CRS), expressed in any format permitted by rasterio, to use for the resulting basemap. If None (default), no warping is performed and the original Spherical Mercator (EPSG:3857) is used. resampling : <enum 'Resampling'> [Optional. Default=Resampling.bilinear] Resampling method for executing warping, expressed as a `rasterio.enums.Resampling` method **extra_imshow_args : Other parameters to be passed to `imshow`. Examples -------- >>> import geopandas >>> import contextily as ctx >>> db = geopandas.read_file(ps.examples.get_path('virginia.shp')) Ensure the data is in Spherical Mercator: >>> db = db.to_crs(epsg=3857) Add a web basemap: >>> ax = db.plot(alpha=0.5, color='k', figsize=(6, 6)) >>> ctx.add_basemap(ax, source=url) >>> plt.show() Or download a basemap to a local file and then plot it: >>> source = 'virginia.tiff' >>> _ = ctx.bounds2raster(*db.total_bounds, zoom=6, source=source) >>> ax = db.plot(alpha=0.5, color='k', figsize=(6, 6)) >>> ctx.add_basemap(ax, source=source) >>> plt.show() """ xmin, xmax, ymin, ymax = ax.axis() # If web source if ( source is None or isinstance(source, (dict, TileProvider)) or (isinstance(source, str) and source[:4] == "http") ): # Extent left, right, bottom, top = xmin, xmax, ymin, ymax # Convert extent from `crs` into WM for tile query if crs is not None: left, right, bottom, top = _reproj_bb( left, right, bottom, top, crs, {"init": "epsg:3857"} ) # Download image image, extent = bounds2img( left, bottom, right, top, zoom=zoom, source=source, ll=False ) # Warping if crs is not None: image, extent = 
warp_tiles(image, extent, t_crs=crs, resampling=resampling) # Check if overlay if _is_overlay(source) and 'zorder' not in extra_imshow_args: # If zorder was not set then make it 9 otherwise leave it extra_imshow_args['zorder'] = 9 # If local source else: import rasterio as rio # Read file with rio.open(source) as raster: if reset_extent: from rasterio.mask import mask as riomask # Read window if crs: left, bottom, right, top = rio.warp.transform_bounds( crs, raster.crs, xmin, ymin, xmax, ymax ) else: left, bottom, right, top = xmin, ymin, xmax, ymax window = [ { "type": "Polygon", "coordinates": ( ( (left, bottom), (right, bottom), (right, top), (left, top), (left, bottom), ), ), } ] image, img_transform = riomask(raster, window, crop=True) extent = left, right, bottom, top else: # Read full image = np.array([band for band in raster.read()]) img_transform = raster.transform bb = raster.bounds extent = bb.left, bb.right, bb.bottom, bb.top # Warp if (crs is not None) and (raster.crs != crs): image, bounds, _ = _warper( image, img_transform, raster.crs, crs, resampling ) extent = bounds.left, bounds.right, bounds.bottom, bounds.top image = image.transpose(1, 2, 0) # Plotting if image.shape[2] == 1: image = image[:, :, 0] img = ax.imshow( image, extent=extent, interpolation=interpolation, **extra_imshow_args ) if reset_extent: ax.axis((xmin, xmax, ymin, ymax)) else: max_bounds = ( min(xmin, extent[0]), max(xmax, extent[1]), min(ymin, extent[2]), max(ymax, extent[3]), ) ax.axis(max_bounds) # Add attribution text if source is None: source = providers.Stamen.Terrain if isinstance(source, (dict, TileProvider)) and attribution is None: attribution = source.get("attribution") if attribution: add_attribution(ax, attribution, font_size=attribution_size) return def _reproj_bb(left, right, bottom, top, s_crs, t_crs): n_l, n_b, n_r, n_t = transform_bounds(s_crs, t_crs, left, bottom, right, top) return n_l, n_r, n_b, n_t def _is_overlay(source): """ Check if the identified source 
is an overlay (partially transparent) layer. Parameters ---------- source : dict The tile source: web tile provider. Must be preprocessed as into a dictionary, not just a string. Returns ------- bool Notes ----- This function is based on a very similar javascript version found in leaflet: https://github.com/leaflet-extras/leaflet-providers/blob/9eb968f8442ea492626c9c8f0dac8ede484e6905/preview/preview.js#L56-L70 """ if not isinstance(source, dict): return False if source.get('opacity', 1.0) < 1.0: return True overlayPatterns = [ '^(OpenWeatherMap|OpenSeaMap)', 'OpenMapSurfer.(Hybrid|AdminBounds|ContourLines|Hillshade|ElementsAtRisk)', 'Stamen.Toner(Hybrid|Lines|Labels)', 'CartoDB.(Positron|DarkMatter|Voyager)OnlyLabels', 'Hydda.RoadsAndLabels', '^JusticeMap', 'OpenPtMap', 'OpenRailwayMap', 'OpenFireMap', 'SafeCast' ] import re return bool(re.match('(' + '|'.join(overlayPatterns) + ')', source.get('name', ''))) def add_attribution(ax, text, font_size=ATTRIBUTION_SIZE, **kwargs): """ Utility to add attribution text. Parameters ---------- ax : AxesSubplot Matplotlib axes object on which to add the attribution text. text : str Text to be added at the bottom of the axis. font_size : int [Optional. Defaults to 8] Font size in which to render the attribution text. **kwargs : Additional keywords to pass to the matplotlib `text` method. Returns ------- matplotlib.text.Text Matplotlib Text object added to the plot. """ # Add draw() as it resizes the axis and allows the wrapping to work as # expected. 
See https://github.com/darribas/contextily/issues/95 for some # details on the issue draw() text_artist = ax.text( 0.005, 0.005, text, transform=ax.transAxes, size=font_size, path_effects=[patheffects.withStroke(linewidth=2, foreground="w")], wrap=True, **kwargs, ) # hack to have the text wrapped in the ax extent, for some explanation see # https://stackoverflow.com/questions/48079364/wrapping-text-not-working-in-matplotlib wrap_width = ax.get_window_extent().width * 0.99 text_artist._get_wrap_line_width = lambda: wrap_width return text_artist
146
0
23
55f1899331577c5bb1215c2698fc08ac300abe62
6,356
py
Python
code/utils/math_graph.py
ytin16/STGCN-keras
055f33023bd0dda3090ad3dd61d8e967a3692907
[ "MIT" ]
32
2020-02-26T12:56:32.000Z
2022-03-21T13:04:55.000Z
code/utils/math_graph.py
ytin16/STGCN-keras
055f33023bd0dda3090ad3dd61d8e967a3692907
[ "MIT" ]
1
2020-07-14T04:32:26.000Z
2020-08-02T07:05:17.000Z
code/utils/math_graph.py
ytin16/STGCN-keras
055f33023bd0dda3090ad3dd61d8e967a3692907
[ "MIT" ]
13
2020-08-04T08:44:02.000Z
2022-02-21T08:36:31.000Z
# -*- coding: utf-8 -*-# ''' # Name: math_graph # Description: # Author: neu # Date: 2020/7/28 ''' import numpy as np import pandas as pd from scipy.sparse.linalg import eigs import scipy.sparse as sp from scipy.sparse.linalg.eigen.arpack import eigsh, ArpackNoConvergence def weight_matrix(file_path, sigma2=0.1, epsilon=0.5, scaling=True): ''' Load weight matrix function. 加载权重矩阵 :param file_path: str, the path of saved weight matrix file. :param sigma2: float, scalar of matrix W. :param epsilon: float, thresholds to control the sparsity of matrix W. :param scaling: bool, whether applies numerical scaling on W. :return: np.ndarray, [n_route, n_route]. ''' try: W = pd.read_csv(file_path, header=None).values except FileNotFoundError: print(f'ERROR: input file was not found in {file_path}.') # check whether W is a 0/1 matrix. if set(np.unique(W)) == {0, 1}: print('The input graph is a 0/1 matrix; set "scaling" to False.') scaling = False if scaling: # 根据真实距离计算邻接矩阵 n = W.shape[0] W = W / 10000. W2, W_mask = W * W, np.ones([n, n]) - np.identity(n) # refer to Eq.10 return np.exp(-W2 / sigma2) * (np.exp(-W2 / sigma2) >= epsilon) * W_mask else: return W # 对邻接矩阵进行归一化处理 # 在邻接矩阵中加入自连接 # 对拉普拉斯矩阵进行归一化处理 def scaled_laplacian(W): ''' Normalized graph Laplacian function. 归一化图拉普拉斯矩阵 :param W: np.ndarray, [n_route, n_route], weighted adjacency matrix of G. :return: np.matrix, [n_route, n_route]. ''' # d -> diagonal degree matrix n, d = np.shape(W)[0], np.sum(W, axis=1) # L -> graph Laplacian L = -W L[np.diag_indices_from(L)] = d for i in range(n): for j in range(n): if (d[i] > 0) and (d[j] > 0): L[i, j] = L[i, j] / np.sqrt(d[i] * d[j]) # lambda_max \approx 2.0, the largest eigenvalues of L. lambda_max = eigs(L, k=1, which='LR')[0][0].real return np.mat(2 * L / lambda_max - np.identity(n)) # 重新调整对称归一化的图拉普拉斯矩阵,得到其简化版本 def first_approx(W, n): ''' 1st-order approximation function. 1阶近似函数 :param W: np.ndarray, [n_route, n_route], weighted adjacency matrix of G. 
:param n: int, number of routes / size of graph. :return: np.ndarray, [n_route, n_route]. ''' A = W + np.identity(n) d = np.sum(A, axis=1) sinvD = np.sqrt(np.mat(np.diag(d)).I) # refer to Eq.5 return np.mat(np.identity(n) + sinvD * A * sinvD) # 计算直到k阶的切比雪夫多项式 def chebyshev_polynomial(X, k): # 返回一个稀疏矩阵列表 """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices.""" print("Calculating Chebyshev polynomials up to order {}...".format(k)) T_k = list() T_k.append(sp.eye(X.shape[0]).tocsr()) # T0(X) = I T_k.append(X) # T1(X) = L~ # 定义切比雪夫递归公式 def chebyshev_recurrence(T_k_minus_one, T_k_minus_two, X): """ :param T_k_minus_one: T(k-1)(L~) :param T_k_minus_two: T(k-2)(L~) :param X: L~ :return: Tk(L~) """ # 将输入转化为csr矩阵(压缩稀疏行矩阵) X_ = sp.csr_matrix(X, copy=True) # 递归公式:Tk(L~) = 2L~ * T(k-1)(L~) - T(k-2)(L~) return 2 * X_.dot(T_k_minus_one) - T_k_minus_two for i in range(2, k + 1): T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X)) # 返回切比雪夫多项式列表 return T_k def cheb_poly_approx(L, Ks, n): ''' Chebyshev polynomials approximation function. 切比雪夫多项式近似 :param L: np.matrix, [n_route, n_route], graph Laplacian. :param Ks: int, kernel size of spatial convolution. :param n: int, number of routes / size of graph. :return: np.ndarray, [n_route, Ks*n_route]. ''' L0, L1 = np.mat(np.identity(n)), np.mat(np.copy(L)) if Ks > 1: L_list = [np.copy(L0), np.copy(L1)] for i in range(Ks - 2): Ln = np.mat(2 * L * L1 - L0) L_list.append(np.copy(Ln)) L0, L1 = np.matrix(np.copy(L1)), np.matrix(np.copy(Ln)) # L_lsit [Ks, n*n], Lk [n, Ks*n] return np.concatenate(L_list, axis=-1) elif Ks == 1: return np.asarray(L0) else: raise ValueError(f'ERROR: the size of spatial kernel must be greater than 1, but received "{Ks}".') # 将稀疏矩阵转化为元组表示
31.156863
107
0.609346
# -*- coding: utf-8 -*-# ''' # Name: math_graph # Description: # Author: neu # Date: 2020/7/28 ''' import numpy as np import pandas as pd from scipy.sparse.linalg import eigs import scipy.sparse as sp from scipy.sparse.linalg.eigen.arpack import eigsh, ArpackNoConvergence def weight_matrix(file_path, sigma2=0.1, epsilon=0.5, scaling=True): ''' Load weight matrix function. 加载权重矩阵 :param file_path: str, the path of saved weight matrix file. :param sigma2: float, scalar of matrix W. :param epsilon: float, thresholds to control the sparsity of matrix W. :param scaling: bool, whether applies numerical scaling on W. :return: np.ndarray, [n_route, n_route]. ''' try: W = pd.read_csv(file_path, header=None).values except FileNotFoundError: print(f'ERROR: input file was not found in {file_path}.') # check whether W is a 0/1 matrix. if set(np.unique(W)) == {0, 1}: print('The input graph is a 0/1 matrix; set "scaling" to False.') scaling = False if scaling: # 根据真实距离计算邻接矩阵 n = W.shape[0] W = W / 10000. 
W2, W_mask = W * W, np.ones([n, n]) - np.identity(n) # refer to Eq.10 return np.exp(-W2 / sigma2) * (np.exp(-W2 / sigma2) >= epsilon) * W_mask else: return W # 对邻接矩阵进行归一化处理 def normalize_adj(adj, symmetric=True): # 如果邻接矩阵为对称矩阵,得到对称归一化邻接矩阵 # D^(-1/2) * A * D^(-1/2) if symmetric: # A.sum(axis=1):计算矩阵的每一行元素之和,得到节点的度矩阵D # np.power(x, n):数组元素求n次方,得到D^(-1/2) # sp.diags()函数根据给定的对象创建对角矩阵,对角线上的元素为给定对象中的元素 d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), 0) # tocsr()函数将矩阵转化为压缩稀疏行矩阵 a_norm = adj.dot(d).transpose().dot(d).tocsr() # 如果邻接矩阵不是对称矩阵,得到随机游走正则化拉普拉斯算子 # D^(-1) * A else: d = sp.diags(np.power(np.array(adj.sum(1)), -1).flatten(), 0) a_norm = d.dot(adj).tocsr() return a_norm # 在邻接矩阵中加入自连接 def preprocess_adj(adj, symmetric=True): adj = adj + sp.eye(adj.shape[0]) # 对加入自连接的邻接矩阵进行对称归一化处理 adj = normalize_adj(adj, symmetric) return adj # 对拉普拉斯矩阵进行归一化处理 def normalized_laplacian(adj, symmetric=True): # 对称归一化的邻接矩阵,D ^ (-1/2) * A * D ^ (-1/2) adj_normalized = normalize_adj(adj, symmetric) # 得到对称规范化的图拉普拉斯矩阵,L = I - D ^ (-1/2) * A * D ^ (-1/2) laplacian = sp.eye(adj.shape[0]) - adj_normalized return laplacian def scaled_laplacian(W): ''' Normalized graph Laplacian function. 归一化图拉普拉斯矩阵 :param W: np.ndarray, [n_route, n_route], weighted adjacency matrix of G. :return: np.matrix, [n_route, n_route]. ''' # d -> diagonal degree matrix n, d = np.shape(W)[0], np.sum(W, axis=1) # L -> graph Laplacian L = -W L[np.diag_indices_from(L)] = d for i in range(n): for j in range(n): if (d[i] > 0) and (d[j] > 0): L[i, j] = L[i, j] / np.sqrt(d[i] * d[j]) # lambda_max \approx 2.0, the largest eigenvalues of L. 
lambda_max = eigs(L, k=1, which='LR')[0][0].real return np.mat(2 * L / lambda_max - np.identity(n)) # 重新调整对称归一化的图拉普拉斯矩阵,得到其简化版本 def rescale_laplacian(laplacian): try: print('Calculating largest eigenvalue of normalized graph Laplacian...') # 计算对称归一化图拉普拉斯矩阵的最大特征值 largest_eigval = eigsh(laplacian, 1, which='LM', return_eigenvectors=False)[0] # 如果计算过程不收敛 except ArpackNoConvergence: print('Eigenvalue calculation did not converge! Using largest_eigval=2 instead.') largest_eigval = 2 # 调整后的对称归一化图拉普拉斯矩阵,L~ = 2 / Lambda * L - I scaled_laplacian = (2. / largest_eigval) * laplacian - sp.eye(laplacian.shape[0]) return scaled_laplacian def first_approx(W, n): ''' 1st-order approximation function. 1阶近似函数 :param W: np.ndarray, [n_route, n_route], weighted adjacency matrix of G. :param n: int, number of routes / size of graph. :return: np.ndarray, [n_route, n_route]. ''' A = W + np.identity(n) d = np.sum(A, axis=1) sinvD = np.sqrt(np.mat(np.diag(d)).I) # refer to Eq.5 return np.mat(np.identity(n) + sinvD * A * sinvD) # 计算直到k阶的切比雪夫多项式 def chebyshev_polynomial(X, k): # 返回一个稀疏矩阵列表 """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices.""" print("Calculating Chebyshev polynomials up to order {}...".format(k)) T_k = list() T_k.append(sp.eye(X.shape[0]).tocsr()) # T0(X) = I T_k.append(X) # T1(X) = L~ # 定义切比雪夫递归公式 def chebyshev_recurrence(T_k_minus_one, T_k_minus_two, X): """ :param T_k_minus_one: T(k-1)(L~) :param T_k_minus_two: T(k-2)(L~) :param X: L~ :return: Tk(L~) """ # 将输入转化为csr矩阵(压缩稀疏行矩阵) X_ = sp.csr_matrix(X, copy=True) # 递归公式:Tk(L~) = 2L~ * T(k-1)(L~) - T(k-2)(L~) return 2 * X_.dot(T_k_minus_one) - T_k_minus_two for i in range(2, k + 1): T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X)) # 返回切比雪夫多项式列表 return T_k def cheb_poly_approx(L, Ks, n): ''' Chebyshev polynomials approximation function. 切比雪夫多项式近似 :param L: np.matrix, [n_route, n_route], graph Laplacian. :param Ks: int, kernel size of spatial convolution. 
:param n: int, number of routes / size of graph. :return: np.ndarray, [n_route, Ks*n_route]. ''' L0, L1 = np.mat(np.identity(n)), np.mat(np.copy(L)) if Ks > 1: L_list = [np.copy(L0), np.copy(L1)] for i in range(Ks - 2): Ln = np.mat(2 * L * L1 - L0) L_list.append(np.copy(Ln)) L0, L1 = np.matrix(np.copy(L1)), np.matrix(np.copy(Ln)) # L_lsit [Ks, n*n], Lk [n, Ks*n] return np.concatenate(L_list, axis=-1) elif Ks == 1: return np.asarray(L0) else: raise ValueError(f'ERROR: the size of spatial kernel must be greater than 1, but received "{Ks}".') # 将稀疏矩阵转化为元组表示 def sparse_to_tuple(sparse_mx): if not sp.isspmatrix_coo(sparse_mx): # 将稀疏矩阵转化为coo矩阵形式 # coo矩阵采用三个数组分别存储行、列和非零元素值的信息 sparse_mx = sparse_mx.tocoo() # np.vstack()函数沿着数组的某条轴堆叠数组 # 获取非零元素的位置索引 coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose() # 获取矩阵的非零元素 values = sparse_mx.data # 获取矩阵的形状 shape = sparse_mx.shape return coords, values, shape
2,517
0
110
b202aa1277601365717ffb75ffe7653c293e80ad
4,454
py
Python
demos/emotion_transfer/missing_data.py
mathurinm/torch_itl
e3d92d753bd51ccf585029129110c93bbf9b5fd0
[ "MIT" ]
null
null
null
demos/emotion_transfer/missing_data.py
mathurinm/torch_itl
e3d92d753bd51ccf585029129110c93bbf9b5fd0
[ "MIT" ]
null
null
null
demos/emotion_transfer/missing_data.py
mathurinm/torch_itl
e3d92d753bd51ccf585029129110c93bbf9b5fd0
[ "MIT" ]
null
null
null
# This script illustrates the stability of learning vITL # for emotion transfer in the presence of missing data # Runtime ~1h on laptop # ---------------------------------- # Imports # ---------------------------------- import os import torch import matplotlib.pyplot as plt import sys import importlib if importlib.util.find_spec('torch_itl') is None: path_to_lib = os.getcwd()[:-23] sys.path.append(path_to_lib) from torch_itl.sampler import CircularEmoSampler from torch_itl.model import DecomposableIdentity from torch_itl.kernel import Gaussian from torch_itl.estimator import EmoTransfer from torch_itl.datasets import get_data_landmarks # %% # ---------------------------------- # Reading input/output data # ---------------------------------- # Please replace those values with the right path to # the extracted landmarks on your computer. # See utils/README.md path_to_rafd = '../../torch_itl/datasets/Rafd_Aligned/Rafd_LANDMARKS' path_to_kdef = '../../torch_itl/datasets/KDEF_Aligned/KDEF_LANDMARKS' # test of import data_train, data_test = get_data_landmarks('KDEF', path_to_kdef) n, m, nf = data_train.shape print('Testing import, data dimensions:', n, m, nf) # %% # ---------------------------------- # Defining our model # ---------------------------------- print('Defining the model') # define Landmarks kernel gamma_inp = 0.07 kernel_input = Gaussian(gamma_inp) # define emotion kernel gamma_out = 0.4 kernel_output = Gaussian(gamma_out) # define functional model model = DecomposableIdentity(kernel_input, kernel_output, nf) # define emotion sampler sampler = CircularEmoSampler() # define regularization lbda = 2e-5 # define the emotion transfer estimator est = EmoTransfer(model, lbda, sampler, inp_emotion='joint') #%% # ---------------------------------- # Learning in the presence of missing data -KDEF # ---------------------------------- print('Learning with missing data KDEF') # number of random masks of each size n_loops = 4 # results tensor test_losses_kdef = 
torch.zeros(10, n_loops, n) for kfold in range(10): get_data_landmarks('KDEF', path_to_kdef, kfold=kfold) mask_list = [torch.randperm(n * m).reshape(n, m) for j in range(n_loops)] for j in range(n_loops): mask_level = mask_list[j] for i in torch.arange(n * m)[::7]: mask = (mask_level >= i) est.fit_partial(data_train, mask) test_losses_kdef[kfold, j, i // 7] = est.risk(data_test) print('done with kfold ', kfold) # %% #torch.save(test_losses_kdef, 'kdef_partial.pt') # %% # ---------------------------------- # Learning in the presence of missing data -Rafd # ---------------------------------- print('Learning with missing data RaFD') # number of random masks of each size n_loops = 4 # results tensor n = 61 test_losses_rafd = torch.zeros(10, n_loops, n) for kfold in range(1, 11): get_data_landmarks('RaFD', path_to_rafd, kfold=kfold) n, m, _ = data_train.shape mask_list = [torch.randperm(n * m).reshape(n, m) for j in range(n_loops)] for j in range(n_loops): mask_level = mask_list[j] for i in torch.arange(n * m)[::7]: mask = (mask_level >= i) est.fit_partial(data_train, mask) test_losses_rafd[kfold - 1, j, i // 7] = est.risk(data_test) #%% #torch.save(test_losses_rafd, 'rafd_partial.pt') #%% idx_kdef = torch.arange(test_losses_kdef.shape[2]*m)[::7].float() / test_losses_kdef.shape[2] / m idx_rafd = torch.arange(test_losses_rafd.shape[2]*m)[::7].float() / n/m #%% mean_kdef = test_losses_kdef.mean(1).mean(0) max_kdef , _ = test_losses_kdef.mean(1).max(axis=0) min_kdef , _ = test_losses_kdef.mean(1).min(axis=0) mean_rafd = test_losses_rafd.mean(1).mean(0) max_rafd , _ = test_losses_rafd.mean(1).max(axis=0) min_rafd , _ = test_losses_rafd.mean(1).min(axis=0) #%% plt.figure() plt.xlabel("% of missing data") plt.ylabel("$\log_{10}$ Test MSE") plt.plot(idx_kdef, torch.log(mean_kdef), c='black', label='KDEF mean', marker=',') plt.plot(idx_kdef, torch.log(min_kdef), c='black', label='KDEF min-max', linestyle='--') plt.plot(idx_kdef, torch.log(max_kdef), c='black', linestyle='--') 
plt.plot(idx_rafd, torch.log(mean_rafd), c='grey', label='RaFD mean', marker=',') plt.plot(idx_rafd, torch.log(min_rafd), c='grey', label='RaFD min-max', linestyle='--') plt.plot(idx_rafd, torch.log(max_rafd), c='grey', linestyle='--') plt.legend(loc='upper left') plt.savefig('partial_observation.pdf') plt.show()
36.211382
97
0.656713
# This script illustrates the stability of learning vITL # for emotion transfer in the presence of missing data # Runtime ~1h on laptop # ---------------------------------- # Imports # ---------------------------------- import os import torch import matplotlib.pyplot as plt import sys import importlib if importlib.util.find_spec('torch_itl') is None: path_to_lib = os.getcwd()[:-23] sys.path.append(path_to_lib) from torch_itl.sampler import CircularEmoSampler from torch_itl.model import DecomposableIdentity from torch_itl.kernel import Gaussian from torch_itl.estimator import EmoTransfer from torch_itl.datasets import get_data_landmarks # %% # ---------------------------------- # Reading input/output data # ---------------------------------- # Please replace those values with the right path to # the extracted landmarks on your computer. # See utils/README.md path_to_rafd = '../../torch_itl/datasets/Rafd_Aligned/Rafd_LANDMARKS' path_to_kdef = '../../torch_itl/datasets/KDEF_Aligned/KDEF_LANDMARKS' # test of import data_train, data_test = get_data_landmarks('KDEF', path_to_kdef) n, m, nf = data_train.shape print('Testing import, data dimensions:', n, m, nf) # %% # ---------------------------------- # Defining our model # ---------------------------------- print('Defining the model') # define Landmarks kernel gamma_inp = 0.07 kernel_input = Gaussian(gamma_inp) # define emotion kernel gamma_out = 0.4 kernel_output = Gaussian(gamma_out) # define functional model model = DecomposableIdentity(kernel_input, kernel_output, nf) # define emotion sampler sampler = CircularEmoSampler() # define regularization lbda = 2e-5 # define the emotion transfer estimator est = EmoTransfer(model, lbda, sampler, inp_emotion='joint') #%% # ---------------------------------- # Learning in the presence of missing data -KDEF # ---------------------------------- print('Learning with missing data KDEF') # number of random masks of each size n_loops = 4 # results tensor test_losses_kdef = 
torch.zeros(10, n_loops, n) for kfold in range(10): get_data_landmarks('KDEF', path_to_kdef, kfold=kfold) mask_list = [torch.randperm(n * m).reshape(n, m) for j in range(n_loops)] for j in range(n_loops): mask_level = mask_list[j] for i in torch.arange(n * m)[::7]: mask = (mask_level >= i) est.fit_partial(data_train, mask) test_losses_kdef[kfold, j, i // 7] = est.risk(data_test) print('done with kfold ', kfold) # %% #torch.save(test_losses_kdef, 'kdef_partial.pt') # %% # ---------------------------------- # Learning in the presence of missing data -Rafd # ---------------------------------- print('Learning with missing data RaFD') # number of random masks of each size n_loops = 4 # results tensor n = 61 test_losses_rafd = torch.zeros(10, n_loops, n) for kfold in range(1, 11): get_data_landmarks('RaFD', path_to_rafd, kfold=kfold) n, m, _ = data_train.shape mask_list = [torch.randperm(n * m).reshape(n, m) for j in range(n_loops)] for j in range(n_loops): mask_level = mask_list[j] for i in torch.arange(n * m)[::7]: mask = (mask_level >= i) est.fit_partial(data_train, mask) test_losses_rafd[kfold - 1, j, i // 7] = est.risk(data_test) #%% #torch.save(test_losses_rafd, 'rafd_partial.pt') #%% idx_kdef = torch.arange(test_losses_kdef.shape[2]*m)[::7].float() / test_losses_kdef.shape[2] / m idx_rafd = torch.arange(test_losses_rafd.shape[2]*m)[::7].float() / n/m #%% mean_kdef = test_losses_kdef.mean(1).mean(0) max_kdef , _ = test_losses_kdef.mean(1).max(axis=0) min_kdef , _ = test_losses_kdef.mean(1).min(axis=0) mean_rafd = test_losses_rafd.mean(1).mean(0) max_rafd , _ = test_losses_rafd.mean(1).max(axis=0) min_rafd , _ = test_losses_rafd.mean(1).min(axis=0) #%% plt.figure() plt.xlabel("% of missing data") plt.ylabel("$\log_{10}$ Test MSE") plt.plot(idx_kdef, torch.log(mean_kdef), c='black', label='KDEF mean', marker=',') plt.plot(idx_kdef, torch.log(min_kdef), c='black', label='KDEF min-max', linestyle='--') plt.plot(idx_kdef, torch.log(max_kdef), c='black', linestyle='--') 
plt.plot(idx_rafd, torch.log(mean_rafd), c='grey', label='RaFD mean', marker=',') plt.plot(idx_rafd, torch.log(min_rafd), c='grey', label='RaFD min-max', linestyle='--') plt.plot(idx_rafd, torch.log(max_rafd), c='grey', linestyle='--') plt.legend(loc='upper left') plt.savefig('partial_observation.pdf') plt.show()
0
0
0
0db29654cc31afa1db743481460a2490c2bc30e4
293
py
Python
frappe/integration_broker/doctype/integration_service/test_integration_service.py
badili/frappe
8177d7e745f511fcc4da50fed9e291a58172a613
[ "MIT" ]
null
null
null
frappe/integration_broker/doctype/integration_service/test_integration_service.py
badili/frappe
8177d7e745f511fcc4da50fed9e291a58172a613
[ "MIT" ]
null
null
null
frappe/integration_broker/doctype/integration_service/test_integration_service.py
badili/frappe
8177d7e745f511fcc4da50fed9e291a58172a613
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest # test_records = frappe.get_test_records('Integration Service')
22.538462
63
0.788396
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest # test_records = frappe.get_test_records('Integration Service') class TestIntegrationService(unittest.TestCase): pass
0
33
23
6acd30c910e000241963b44f2b671c682200da94
2,997
py
Python
api_1.2/containerd/events/content_pb2.py
siemens/pycontainerd
9b1184ecbcc91144ad6903403818b5b8989a32f3
[ "Apache-2.0" ]
24
2019-12-16T12:38:51.000Z
2022-02-16T18:44:20.000Z
api_1.2/containerd/events/content_pb2.py
siemens/pycontainerd
9b1184ecbcc91144ad6903403818b5b8989a32f3
[ "Apache-2.0" ]
9
2020-03-03T07:42:40.000Z
2021-09-01T10:11:18.000Z
api_1.4/containerd/events/content_pb2.py
siemens/pycontainerd
9b1184ecbcc91144ad6903403818b5b8989a32f3
[ "Apache-2.0" ]
10
2019-12-16T11:20:23.000Z
2022-01-24T01:53:13.000Z
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: containerd/events/content.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from containerd.vendor.gogoproto import gogo_pb2 as containerd_dot_vendor_dot_gogoproto_dot_gogo__pb2 from containerd.protobuf.plugin import fieldpath_pb2 as containerd_dot_protobuf_dot_plugin_dot_fieldpath__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='containerd/events/content.proto', package='containerd.events', syntax='proto3', serialized_options=b'Z2github.com/containerd/containerd/api/events;events\240\364\036\001', create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x1f\x63ontainerd/events/content.proto\x12\x11\x63ontainerd.events\x1a&containerd/vendor/gogoproto/gogo.proto\x1a*containerd/protobuf/plugin/fieldpath.proto\"S\n\rContentDelete\x12\x42\n\x06\x64igest\x18\x01 \x01(\tB2\xda\xde\x1f*github.com/opencontainers/go-digest.Digest\xc8\xde\x1f\x00\x42\x38Z2github.com/containerd/containerd/api/events;events\xa0\xf4\x1e\x01X\x00X\x01\x62\x06proto3' , dependencies=[containerd_dot_vendor_dot_gogoproto_dot_gogo__pb2.DESCRIPTOR,containerd_dot_protobuf_dot_plugin_dot_fieldpath__pb2.DESCRIPTOR,]) _CONTENTDELETE = _descriptor.Descriptor( name='ContentDelete', full_name='containerd.events.ContentDelete', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='digest', full_name='containerd.events.ContentDelete.digest', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=b'\332\336\037*github.com/opencontainers/go-digest.Digest\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=138, serialized_end=221, ) DESCRIPTOR.message_types_by_name['ContentDelete'] = _CONTENTDELETE _sym_db.RegisterFileDescriptor(DESCRIPTOR) ContentDelete = _reflection.GeneratedProtocolMessageType('ContentDelete', (_message.Message,), { 'DESCRIPTOR' : _CONTENTDELETE, '__module__' : 'containerd.events.content_pb2' # @@protoc_insertion_point(class_scope:containerd.events.ContentDelete) }) _sym_db.RegisterMessage(ContentDelete) DESCRIPTOR._options = None _CONTENTDELETE.fields_by_name['digest']._options = None # @@protoc_insertion_point(module_scope)
39.434211
409
0.797798
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: containerd/events/content.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from containerd.vendor.gogoproto import gogo_pb2 as containerd_dot_vendor_dot_gogoproto_dot_gogo__pb2 from containerd.protobuf.plugin import fieldpath_pb2 as containerd_dot_protobuf_dot_plugin_dot_fieldpath__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='containerd/events/content.proto', package='containerd.events', syntax='proto3', serialized_options=b'Z2github.com/containerd/containerd/api/events;events\240\364\036\001', create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x1f\x63ontainerd/events/content.proto\x12\x11\x63ontainerd.events\x1a&containerd/vendor/gogoproto/gogo.proto\x1a*containerd/protobuf/plugin/fieldpath.proto\"S\n\rContentDelete\x12\x42\n\x06\x64igest\x18\x01 \x01(\tB2\xda\xde\x1f*github.com/opencontainers/go-digest.Digest\xc8\xde\x1f\x00\x42\x38Z2github.com/containerd/containerd/api/events;events\xa0\xf4\x1e\x01X\x00X\x01\x62\x06proto3' , dependencies=[containerd_dot_vendor_dot_gogoproto_dot_gogo__pb2.DESCRIPTOR,containerd_dot_protobuf_dot_plugin_dot_fieldpath__pb2.DESCRIPTOR,]) _CONTENTDELETE = _descriptor.Descriptor( name='ContentDelete', full_name='containerd.events.ContentDelete', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='digest', full_name='containerd.events.ContentDelete.digest', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=b'\332\336\037*github.com/opencontainers/go-digest.Digest\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=138, serialized_end=221, ) DESCRIPTOR.message_types_by_name['ContentDelete'] = _CONTENTDELETE _sym_db.RegisterFileDescriptor(DESCRIPTOR) ContentDelete = _reflection.GeneratedProtocolMessageType('ContentDelete', (_message.Message,), { 'DESCRIPTOR' : _CONTENTDELETE, '__module__' : 'containerd.events.content_pb2' # @@protoc_insertion_point(class_scope:containerd.events.ContentDelete) }) _sym_db.RegisterMessage(ContentDelete) DESCRIPTOR._options = None _CONTENTDELETE.fields_by_name['digest']._options = None # @@protoc_insertion_point(module_scope)
0
0
0
bde9c13c3f3cd04175f01be8067a6fc020128c14
1,858
py
Python
embeded_img_processing.py
disanda/MSV
066ed236a4c5df8b4b5e366020fe2954b7a6915a
[ "Apache-2.0", "MIT" ]
31
2021-08-21T10:05:26.000Z
2021-12-22T12:36:46.000Z
embeded_img_processing.py
disanda/MSV
066ed236a4c5df8b4b5e366020fe2954b7a6915a
[ "Apache-2.0", "MIT" ]
null
null
null
embeded_img_processing.py
disanda/MSV
066ed236a4c5df8b4b5e366020fe2954b7a6915a
[ "Apache-2.0", "MIT" ]
6
2021-08-21T10:05:38.000Z
2021-12-10T14:11:27.000Z
#Embedded_ImageProcessing, Just for StyleGAN_v1 FFHQ import numpy as np import math import torch import torchvision import model.E.E_Blur as BE from model.stylegan1.net import Generator, Mapping #StyleGANv1 #Params use_gpu = False device = torch.device("cuda" if use_gpu else "cpu") img_size = 1024 GAN_path = './checkpoint/stylegan_v1/ffhq1024/' direction = 'eyeglasses' #smile, eyeglasses, pose, age, gender direction_path = './latentvectors/directions/stylegan_ffhq_%s_w_boundary.npy'%direction w_path = './latentvectors/faces/i3_cxx2.pt' #Loading Pre-trained Model, Directions Gs = Generator(startf=16, maxf=512, layer_count=int(math.log(img_size,2)-1), latent_size=512, channels=3) Gs.load_state_dict(torch.load(GAN_path+'Gs_dict.pth', map_location=device)) # E = BE.BE() # E.load_state_dict(torch.load('./checkpoint/E/styleganv1.pth',map_location=torch.device('cpu'))) direction = np.load(direction_path) #[[1, 512] interfaceGAN direction = torch.tensor(direction).float() direction = direction.expand(18,512) print(direction.shape) w = torch.load(w_path, map_location=device).clone().squeeze(0) print(w.shape) # discovering face semantic attribute dirrections bonus= 70 #bonus (-10) <- (-5) <- 0 ->5 ->10 start= 0 # default 0, if not 0, will be bed performance end= 3 # default 3 or 4. if 3, it will keep face features (glasses). if 4, it will keep dirrection features (Smile). w[start:start+end] = (w+bonus*direction)[start:start+end] #w = w + bonus*direction w = w.reshape(1,18,512) with torch.no_grad(): img = Gs.forward(w,8) # 8->1024 torchvision.utils.save_image(img*0.5+0.5, './img_bonus%d_start%d_end%d.png'%(bonus,start,end)) ## end=3 人物ID的特征明显,end=4 direction的特征明显, end>4 空间纠缠严重 #smile: bonue*100, start=0, end=4(end不到4作用不大,end或bonus越大越猖狂) #glass: bonue*200, start=0, end=4(end超过6开始崩,bonus也不宜过大) #pose: bonue*5-10, start=0, end=3
39.531915
116
0.74704
#Embedded_ImageProcessing, Just for StyleGAN_v1 FFHQ import numpy as np import math import torch import torchvision import model.E.E_Blur as BE from model.stylegan1.net import Generator, Mapping #StyleGANv1 #Params use_gpu = False device = torch.device("cuda" if use_gpu else "cpu") img_size = 1024 GAN_path = './checkpoint/stylegan_v1/ffhq1024/' direction = 'eyeglasses' #smile, eyeglasses, pose, age, gender direction_path = './latentvectors/directions/stylegan_ffhq_%s_w_boundary.npy'%direction w_path = './latentvectors/faces/i3_cxx2.pt' #Loading Pre-trained Model, Directions Gs = Generator(startf=16, maxf=512, layer_count=int(math.log(img_size,2)-1), latent_size=512, channels=3) Gs.load_state_dict(torch.load(GAN_path+'Gs_dict.pth', map_location=device)) # E = BE.BE() # E.load_state_dict(torch.load('./checkpoint/E/styleganv1.pth',map_location=torch.device('cpu'))) direction = np.load(direction_path) #[[1, 512] interfaceGAN direction = torch.tensor(direction).float() direction = direction.expand(18,512) print(direction.shape) w = torch.load(w_path, map_location=device).clone().squeeze(0) print(w.shape) # discovering face semantic attribute dirrections bonus= 70 #bonus (-10) <- (-5) <- 0 ->5 ->10 start= 0 # default 0, if not 0, will be bed performance end= 3 # default 3 or 4. if 3, it will keep face features (glasses). if 4, it will keep dirrection features (Smile). w[start:start+end] = (w+bonus*direction)[start:start+end] #w = w + bonus*direction w = w.reshape(1,18,512) with torch.no_grad(): img = Gs.forward(w,8) # 8->1024 torchvision.utils.save_image(img*0.5+0.5, './img_bonus%d_start%d_end%d.png'%(bonus,start,end)) ## end=3 人物ID的特征明显,end=4 direction的特征明显, end>4 空间纠缠严重 #smile: bonue*100, start=0, end=4(end不到4作用不大,end或bonus越大越猖狂) #glass: bonue*200, start=0, end=4(end超过6开始崩,bonus也不宜过大) #pose: bonue*5-10, start=0, end=3
0
0
0
edbac87d22696c5f1f737f4d1358a21a6926a523
373
py
Python
board/urls.py
suomiy/message-board
0ad469c618acabfed872e3fb2ccb1ab21512bd47
[ "MIT" ]
null
null
null
board/urls.py
suomiy/message-board
0ad469c618acabfed872e3fb2ccb1ab21512bd47
[ "MIT" ]
null
null
null
board/urls.py
suomiy/message-board
0ad469c618acabfed872e3fb2ccb1ab21512bd47
[ "MIT" ]
null
null
null
from django.conf.urls import url from . import views from board.views import PostDelete urlpatterns = [ url(r'^$', views.index, name='index'), url(r'^login/$', views.login, name='login'), url(r'^logout/$', views.logout, name='logout'), url(r'^register/$', views.register, name='register'), url(r'^delete/(?P<pk>\d+)$', PostDelete.as_view(), name='delete') ]
31.083333
66
0.651475
from django.conf.urls import url from . import views from board.views import PostDelete urlpatterns = [ url(r'^$', views.index, name='index'), url(r'^login/$', views.login, name='login'), url(r'^logout/$', views.logout, name='logout'), url(r'^register/$', views.register, name='register'), url(r'^delete/(?P<pk>\d+)$', PostDelete.as_view(), name='delete') ]
0
0
0
59146e826514dfd29d5834e02f858113ef763390
555
py
Python
app/planes/jet_gx_plane.py
egenerat/gae-django
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
[ "MIT" ]
3
2016-07-08T23:49:32.000Z
2018-04-15T22:55:01.000Z
app/planes/jet_gx_plane.py
egenerat/gae-django
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
[ "MIT" ]
27
2017-02-05T15:57:04.000Z
2018-04-15T22:57:26.000Z
app/planes/jet_gx_plane.py
egenerat/gae-django
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from app.common.target_urls import SHOP_GX_ID from app.planes.jet_plane import JetPlane
25.227273
51
0.695495
# -*- coding: utf-8 -*- from app.common.target_urls import SHOP_GX_ID from app.planes.jet_plane import JetPlane class JetGXPlane(JetPlane): engines_nb = 2 consumption_per_hour = 2014 fuel_capacity = 25382 minimum_kerosene_before_mission = fuel_capacity # 7 (max hours one way) * speed * 2 (2 ways) plane_range = 6335 plane_range_stopover = 9955 price = 2650000 shop_plane_type = SHOP_GX_ID plane_capacity = 19 speed = 905 def __init__(self, **kwargs): super(JetGXPlane, self).__init__(**kwargs)
59
360
23
d51a4194265a656c05b009484ed30285707f4645
1,038
py
Python
answer/models.py
Jonas1015/codingpride
0418186ee55456cb7035f3afd50120116746f8d7
[ "MIT" ]
1
2021-01-24T18:22:55.000Z
2021-01-24T18:22:55.000Z
answer/models.py
CodingPridePlatform/codingpride
0418186ee55456cb7035f3afd50120116746f8d7
[ "MIT" ]
16
2021-01-24T18:58:46.000Z
2021-05-01T11:02:46.000Z
answer/models.py
Jonas1015/codingpride
0418186ee55456cb7035f3afd50120116746f8d7
[ "MIT" ]
8
2021-01-19T18:56:56.000Z
2021-02-01T11:36:33.000Z
from uuid import uuid4 from ckeditor_uploader.fields import RichTextUploadingField from django.conf import settings from django.db import models from question.models import * User = settings.AUTH_USER_MODEL
32.4375
76
0.688825
from uuid import uuid4 from ckeditor_uploader.fields import RichTextUploadingField from django.conf import settings from django.db import models from question.models import * User = settings.AUTH_USER_MODEL class Answer(models.Model): question = models.ForeignKey( Question, related_name='answers', on_delete=models.CASCADE) description = RichTextUploadingField(blank=False) slug = models.SlugField(max_length=250) date_answered = models.DateTimeField( auto_now_add=True, verbose_name="date published") date_updated = models.DateTimeField( auto_now=True, verbose_name="date updated") author = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True) def __str__(self): return self.question.title + " - " + "Answer" + " - " + str(self.id) def save(self, *args, **kwargs): self.slug = slugify( 'answer - ' + self.question.title) + "-" + str(uuid4()) return super().save(*args, **kwargs)
227
577
23
6c7b67d4465a0cf6e9ace534ff92c738613c1ec5
532
py
Python
Task/FizzBuzz/Python/fizzbuzz-7.py
djgoku/RosettaCodeData
91df62d46142e921b3eacdb52b0316c39ee236bc
[ "Info-ZIP" ]
null
null
null
Task/FizzBuzz/Python/fizzbuzz-7.py
djgoku/RosettaCodeData
91df62d46142e921b3eacdb52b0316c39ee236bc
[ "Info-ZIP" ]
null
null
null
Task/FizzBuzz/Python/fizzbuzz-7.py
djgoku/RosettaCodeData
91df62d46142e921b3eacdb52b0316c39ee236bc
[ "Info-ZIP" ]
null
null
null
>>> ' '.join(''.join(''.join(['' if i%3 else 'F', '' if i%5 else 'B']) or str('00')) for i in range(1,16)) '00 00 F 00 B F 00 00 F B 00 F 00 00 FB' >>> _ '00 00 F 00 B F 00 00 F B 00 F 00 00 FB' >>> _.replace('FB','11').replace('F','01').replace('B','10').split()[::-1] ['11', '00', '00', '01', '00', '10', '01', '00', '00', '01', '10', '00', '01', '00', '00'] >>> '0b' + ''.join(_) '0b110000010010010000011000010000' >>> eval(_) 810092048 >>>
35.466667
90
0.415414
>>> ' '.join(''.join(''.join(['' if i%3 else 'F', '' if i%5 else 'B']) or str('00')) for i in range(1,16)) '00 00 F 00 B F 00 00 F B 00 F 00 00 FB' >>> _ '00 00 F 00 B F 00 00 F B 00 F 00 00 FB' >>> _.replace('FB','11').replace('F','01').replace('B','10').split()[::-1] ['11', '00', '00', '01', '00', '10', '01', '00', '00', '01', '10', '00', '01', '00', '00'] >>> '0b' + ''.join(_) '0b110000010010010000011000010000' >>> eval(_) 810092048 >>>
0
0
0
2cd1a279b7cf7df392db12b7769200319e3e1dbe
1,534
py
Python
demo/demo.py
derkan/flask-locale
f94e1bb6bcbd658f1c729ec6de58a8a4f5afd86d
[ "BSD-3-Clause" ]
46
2015-10-09T14:42:41.000Z
2021-11-16T12:44:33.000Z
demo/demo.py
derkan/flask-locale
f94e1bb6bcbd658f1c729ec6de58a8a4f5afd86d
[ "BSD-3-Clause" ]
3
2018-02-05T21:22:26.000Z
2021-10-20T14:24:24.000Z
demo/demo.py
derkan/flask-locale
f94e1bb6bcbd658f1c729ec6de58a8a4f5afd86d
[ "BSD-3-Clause" ]
6
2017-02-24T06:51:37.000Z
2020-09-21T08:55:04.000Z
# -*- coding: utf-8 -*- from flask import Flask, request, render_template, g, session, redirect, current_app from flask_locale import Locale, _ app = Flask(__name__) # DEFAULT_LOCALE is the language used for keys ins translation files: app.config['DEFAULT_LOCALE'] = 'tr_TR' app.config['LOCALE_PATH'] = 'translations' app.config['SECRET_KEY'] = 'translations****' locale = Locale(app) @locale.localeselector @app.route("/") @app.route("/locale") if __name__ == '__main__': app.run(debug=True)
31.306122
98
0.702086
# -*- coding: utf-8 -*- from flask import Flask, request, render_template, g, session, redirect, current_app from flask_locale import Locale, _ app = Flask(__name__) # DEFAULT_LOCALE is the language used for keys ins translation files: app.config['DEFAULT_LOCALE'] = 'tr_TR' app.config['LOCALE_PATH'] = 'translations' app.config['SECRET_KEY'] = 'translations****' locale = Locale(app) @locale.localeselector def get_locale(): # if a user is logged in, use the locale from the session # define a default value instead of None to set it to specific locale if not setting is found. locale_code = session.get('locale', None) if locale_code is not None: current_app.logger.info("Locale is: %s" % locale_code) return locale_code # otherwise try to guess the language from the user accept # header the browser transmits. We support tr/fr/en in this # example. The best match wins. locale_code = request.accept_languages.best_match(['tr_TR', 'fr_FR', 'en_US']) current_app.logger.info("Locale match: %s" % locale_code) return locale_code @app.route("/") def index(): # How we do translation in python code: py_translated = _('Hello') # How we do translation in template: return render_template('locale.html', name='Erkan', py_translated=py_translated) @app.route("/locale") def change_locale(): new_locale = request.args.get('locale', None) session['locale'] = new_locale return redirect('/') if __name__ == '__main__': app.run(debug=True)
960
0
66
3c33d204c42708208bd9e270753c1a0f71dfe90d
13,017
py
Python
models/model/seq2seq_nl_with_frames.py
Chucooleg/alfred
250cdc8b1e75dd6acb9e20d3c616beec63307a46
[ "MIT" ]
1
2021-07-19T01:58:51.000Z
2021-07-19T01:58:51.000Z
models/model/seq2seq_nl_with_frames.py
Chucooleg/alfred
250cdc8b1e75dd6acb9e20d3c616beec63307a46
[ "MIT" ]
null
null
null
models/model/seq2seq_nl_with_frames.py
Chucooleg/alfred
250cdc8b1e75dd6acb9e20d3c616beec63307a46
[ "MIT" ]
null
null
null
import os import torch import numpy as np import nn.vnn as vnn import collections from torch import nn from torch.nn import functional as F from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence from model.seq2seq import Module as Base from models.utils.metric import compute_f1, compute_exact from nltk.translate.bleu_score import sentence_bleu # time import time from collections import defaultdict
40.30031
178
0.571791
import os import torch import numpy as np import nn.vnn as vnn import collections from torch import nn from torch.nn import functional as F from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence from model.seq2seq import Module as Base from models.utils.metric import compute_f1, compute_exact from nltk.translate.bleu_score import sentence_bleu # time import time from collections import defaultdict class Module(Base): def __init__(self, args, vocab, object_vocab): ''' Seq2Seq agent ''' super().__init__(args, vocab, object_vocab) self.predict_high_level_goal = args.predict_goal_level_instruction # encoder and self-attention encoder = vnn.ActionFrameAttnEncoderFullSeq self.enc = encoder( emb=self.emb_action_low, dframe=args.dframe, dhid=args.dhid, act_dropout=args.act_dropout, vis_dropout=args.vis_dropout, bidirectional=True) # language decoder decoder = vnn.LanguageDecoder self.dec = decoder(self.emb_word, 2*args.dhid, attn_dropout=args.attn_dropout, hstate_dropout=args.hstate_dropout, word_dropout=args.word_dropout, input_dropout=args.input_dropout, train_teacher_forcing=args.train_teacher_forcing, train_student_forcing_prob=args.train_student_forcing_prob) # dropouts self.vis_dropout = nn.Dropout(args.vis_dropout) self.act_dropout = nn.Dropout(args.act_dropout, inplace=True) # internal states self.state_t = None self.e_t = None self.test_mode = False # paths self.root_path = os.getcwd() self.feat_pt = 'feat_conv.pt' # params self.max_subgoals = 25 # reset model self.reset() def featurize(self, batch): '''tensoroze and pad batch input''' # time time_report = defaultdict(int) device = torch.device('cuda') if self.args.gpu else torch.device('cpu') feat = collections.defaultdict(list) for ex in batch: ######### # outputs ######### # time start_time = time.time() # serialize segments self.serialize_lang_action(ex) if not (self.test_mode or self.demo_mode): # goal and instr language lang_goal, lang_instr = ex['num']['lang_goal'], 
ex['num']['lang_instr'] # zero inputs if specified lang_goal = self.zero_input(lang_goal) if self.args.zero_goal else lang_goal lang_instr = self.zero_input(lang_instr) if self.args.zero_instr else lang_instr if self.predict_high_level_goal: # append goal feat['lang_instr'].append(lang_goal) else: # append instr feat['lang_instr'].append(lang_instr) # time time_report['featurize_outputs'] += time.time() - start_time ######### # inputs ######### # time start_time = time.time() # low-level action feat['action_low'].append([a['action'] for a in ex['num']['action_low']]) # time time_report['featurize_input_action_low'] += time.time() - start_time # time start_time = time.time() # load Resnet features from disk root = self.get_task_root(ex) # time torch_load_start_time = time.time() # shape (num gold frames for task, 512, 7, 7) im = torch.load(os.path.join(root, self.feat_pt)) # time time_report['featurize_torch_load_time'] += time.time() - torch_load_start_time num_low_actions = len(ex['plan']['low_actions']) num_feat_frames = im.shape[0] if num_low_actions != num_feat_frames: keep = [None] * len(ex['plan']['low_actions']) for i, d in enumerate(ex['images']): # only add frames linked with low-level actions # (i.e. skip filler frames like smooth rotations and dish washing) if keep[d['low_idx']] is None: keep[d['low_idx']] = im[i] # keep has shape (num gold low-level actions L, 512, 7, 7) keep.append(keep[-1]) # stop frame feat['frames'].append(torch.stack(keep, dim=0)) else: feat['frames'].append(torch.cat([im, im[-1].unsqueeze(0)], dim=0)) # add stop frame # time time_report['featurize_input_resnet_features'] += time.time() - start_time # time start_time = time.time() feat['action_low_seq_lengths'] = [] # tensorization and padding for k, v in feat.items(): # input if k in {'action_low'}: # action embedding and padding # list of length B. each shaped (L,), value is integer action index. 
seqs = [torch.tensor(vv, device=device) for vv in v] # (B, T) with T = max(L) pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad) # (B,). Each value is L for the example seq_lengths = np.array(list(map(len, v))) feat[k] = pad_seq feat[k+'_seq_lengths'] = seq_lengths elif not 'seq_lengths' in k: # default: tensorize and pad sequence # list of length B. each shaped (L,), value is integer action index. seqs = [torch.tensor(vv, device=device, dtype=torch.float if ('frames' in k) else torch.long) for vv in v] # (B, T, *) with T = max(L) # (B, T, 512, 7, 7) for k='frames' pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad) feat[k] = pad_seq # time time_report['featurize_tensorization_and_padding'] += time.time() - start_time return feat, time_report def serialize_lang_action(self, feat): ''' append segmented instr language and low-level actions into single sequences ''' is_serialized = not isinstance(feat['num']['action_low'][0], list) if not is_serialized: feat['num']['action_low'] = [a for a_group in feat['num']['action_low'] for a in a_group] if not (self.test_mode or self.demo_mode): feat['num']['lang_instr'] = [word for desc in feat['num']['lang_instr'] for word in desc] def forward(self, feat, max_decode=300, validate_teacher_forcing=False, validate_sample_output=False, **kwparams): # encode entire sequence of low-level actions cont_act, enc_act = self.enc(feat) # run decoder until entire sentence is finished state_0 = cont_act, torch.zeros_like(cont_act) res, _ = self.dec(enc_act, feat, max_decode=max_decode, state_0=state_0, validate_teacher_forcing=validate_teacher_forcing, validate_sample_output=validate_sample_output) feat.update(res) return feat def reset(self): ''' reset internal states (used for real-time execution during eval) ''' self.r_state = { 'state_t': None, 'e_t': None, 'cont_act': None, 'enc_act': None } def step(self, feat, prev_word=None): ''' forward the model for a single time-step (used for real-time 
execution during eval) ''' # encode action features if self.r_state['cont_act'] is None and self.r_state['enc_act'] is None: self.r_state['cont_act'], self.r_state['enc_act'] = self.enc(feat) # initialize embedding and hidden states if self.r_state['e_t'] is None and self.r_state['state_t'] is None: self.r_state['e_t'] = self.dec.go.repeat(self.r_state['enc_act'].size(0), 1) self.r_state['state_t'] = self.r_state['cont_act'], torch.zeros_like(self.r_state['cont_act']) # previous e_t = self.embed_lang(prev_word) if prev_word is not None else self.r_state['e_t'] # decode and save embedding and hidden states out_word_low, state_t, *_ = self.dec.step(self.r_state['enc_act'], e_t=e_t, state_tm1=self.r_state['state_t']) # save states self.r_state['state_t'] = state_t self.r_state['e_t'] = self.dec.emb(out_word_low.max(1)[1]) # output formatting feat['out_word_low'] = out_word_low.unsqueeze(0) return feat def extract_preds(self, out, batch, feat, clean_special_tokens=True): ''' output processing ''' pred = {} # feat['out_lang_instr'] has shape (B, T, vocab size) for ex, lang_instr, probs in zip(batch, feat['out_lang_instr'].max(2)[1].tolist(), feat['out_lang_probs'].tolist()): # remove padding tokens if self.pad in lang_instr: pad_start_idx = lang_instr.index(self.pad) lang_instr = lang_instr[:pad_start_idx] probs = probs[:pad_start_idx] if clean_special_tokens: if self.word_stop_token in lang_instr: stop_start_idx = lang_instr.index(self.word_stop_token) lang_instr = lang_instr[:stop_start_idx] probs = probs[:stop_start_idx] # index to word tokens words = self.vocab['word'].index2word(lang_instr) task_id_ann = self.get_task_and_ann_id(ex) pred[task_id_ann] = { 'lang_instr': ' '.join(words), 'lang_probs': probs, } return pred def embed_word(self, lang): ''' embed language called only in step -- eval_* modules ''' device = torch.device('cuda') if self.args.gpu else torch.device('cpu') lang_num = torch.tensor(self.vocab['word'].word2index(lang), device=device) # point back 
to self.emb_word lang_emb = self.dec.emb(lang_num).unsqueeze(0) return lang_emb def compute_loss(self, out, batch, feat): ''' loss function for Seq2Seq agent ''' losses = dict() # GT and predictions if self.training: # (B*T, Vocab Size), raw unormalized scores p_lang_instr = out['out_lang_instr'].view(-1, len(self.vocab['word'])) else: # Trim prediction to match sequence lengths first gold_lang_instr_length = feat['lang_instr'].shape[1] p_lang_instr = out['out_lang_instr'][:, :gold_lang_instr_length, :].reshape(-1, len(self.vocab['word'])) l_lang_instr = feat['lang_instr'].view(-1) # language instruction loss pad_valid = (l_lang_instr != self.pad) lang_instr_loss = F.cross_entropy(p_lang_instr, l_lang_instr, reduction='none') lang_instr_loss *= pad_valid.float() lang_instr_loss = lang_instr_loss.mean() losses['lang_instr'] = lang_instr_loss perplexity = 2**lang_instr_loss return losses, perplexity def compute_metric(self, preds, data): ''' compute BLEU score for output ''' # how does this work during training with teacher forcing !? m = collections.defaultdict(list) if self.predict_high_level_goal: flatten_isntr = lambda instr: [word.strip() for word in instr] else: flatten_isntr = lambda instr: [word.strip() for sent in instr for word in sent] all_pred_id_ann = list(preds.keys()) for task in data: # find matching prediction pred_id_ann = '{}_{}'.format(task['task'].split('/')[1], task['repeat_idx']) # grab task data for ann_0, ann_1 and ann_2 exs = self.load_task_jsons(task) # a list of 3 lists of word tokens. (1 for each human annotation, so total 3) if self.predict_high_level_goal: ref_lang_instrs = [flatten_isntr(ex['ann']['goal']) for ex in exs] else: ref_lang_instrs = [flatten_isntr(ex['ann']['instr']) for ex in exs] # compute bleu score m['BLEU_mean'].append(sentence_bleu(ref_lang_instrs, preds[pred_id_ann]['lang_instr'].split(' '))) all_pred_id_ann.remove(pred_id_ann) assert len(all_pred_id_ann) == 0 return {k: sum(v)/len(v) for k, v in m.items()}
525
12,035
23
b97907bf04908f82140d9fe06af904e47945d70a
951
py
Python
stellar_poe/stellar/utils.py
erickgnavar/stellar-poe
dd98f57f7f946f26cf84d412f3ed1136e29c95ce
[ "MIT" ]
null
null
null
stellar_poe/stellar/utils.py
erickgnavar/stellar-poe
dd98f57f7f946f26cf84d412f3ed1136e29c95ce
[ "MIT" ]
null
null
null
stellar_poe/stellar/utils.py
erickgnavar/stellar-poe
dd98f57f7f946f26cf84d412f3ed1136e29c95ce
[ "MIT" ]
1
2019-02-13T07:49:17.000Z
2019-02-13T07:49:17.000Z
import logging import requests from stellar_base.builder import Builder from stellar_base.keypair import Keypair logger = logging.getLogger(__name__)
26.416667
76
0.705573
import logging import requests from stellar_base.builder import Builder from stellar_base.keypair import Keypair logger = logging.getLogger(__name__) def create_new_address(): kp = Keypair.random() public_key = kp.address().decode() seed = kp.seed().decode() return seed, public_key def ask_for_coins(public_key): url = f"https://horizon-testnet.stellar.org/friendbot?addr={public_key}" response = requests.get(url) if response.status_code == 200: return True, None else: data = response.json() logger.warning(data["detail"], extra={"response_data": data}) return False, data["detail"] def create_transaction(sender_seed, recipient_key, hash_value): builder = Builder(secret=sender_seed) builder.append_payment_op(recipient_key, "1", "XLM") builder.add_hash_memo(hash_value.encode()) builder.sign() # TODO: handle possible errors return builder.submit()
726
0
69
c3b49b47c0a0e99572a9c193a453ee565c642387
494
py
Python
tests/python/test_scheduler.py
Erotemic/misc
6f8460a690d05e7e0117becc6cae9902cbe2cedd
[ "Apache-2.0" ]
5
2021-04-29T21:07:18.000Z
2021-09-29T08:46:08.000Z
tests/python/test_scheduler.py
Erotemic/misc
6f8460a690d05e7e0117becc6cae9902cbe2cedd
[ "Apache-2.0" ]
null
null
null
tests/python/test_scheduler.py
Erotemic/misc
6f8460a690d05e7e0117becc6cae9902cbe2cedd
[ "Apache-2.0" ]
1
2018-04-07T12:26:21.000Z
2018-04-07T12:26:21.000Z
import torch parameters = [torch.autograd.Variable(torch.FloatTensor([0, 0, 0]), requires_grad=True)] optimizer = torch.optim.SGD(parameters, lr=0.1, momentum=0.9) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True) for _ in range(100): scheduler.step(.1) lrs = get_lrs() lr_str = ','.join(['{:.2g}'.format(lr) for lr in lrs]) # print(lr_str)
24.7
88
0.684211
import torch parameters = [torch.autograd.Variable(torch.FloatTensor([0, 0, 0]), requires_grad=True)] optimizer = torch.optim.SGD(parameters, lr=0.1, momentum=0.9) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True) def get_lrs(): lrs = set(map(lambda group: group['lr'], optimizer.param_groups)) return lrs for _ in range(100): scheduler.step(.1) lrs = get_lrs() lr_str = ','.join(['{:.2g}'.format(lr) for lr in lrs]) # print(lr_str)
78
0
23
236cf56846b02f7fe0f86940d0bcf6ca533893b6
6,713
py
Python
fairdiplomacy/utils/game_scoring.py
facebookresearch/diplomacy_searchbot
44d6f3272be7567060ba7d0e41f4e44b1bb8b5ca
[ "MIT" ]
32
2021-05-04T17:05:19.000Z
2022-03-21T07:56:53.000Z
fairdiplomacy/utils/game_scoring.py
facebookresearch/diplomacy_searchbot
44d6f3272be7567060ba7d0e41f4e44b1bb8b5ca
[ "MIT" ]
3
2022-01-22T19:44:10.000Z
2022-03-02T23:20:52.000Z
fairdiplomacy/utils/game_scoring.py
facebookresearch/diplomacy_searchbot
44d6f3272be7567060ba7d0e41f4e44b1bb8b5ca
[ "MIT" ]
10
2021-05-07T11:51:29.000Z
2022-02-18T18:29:57.000Z
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Sequence, Tuple, List import collections import json import re from fairdiplomacy.models.consts import POWERS, N_SCS GameScores = collections.namedtuple( "GameScores", [ "center_ratio", # Ratio of the player's SCs to N_SCS. "draw_score", # 1. / num alive players, but 1/0 if clear win/loss. "square_ratio", # Ratio of the squure of the SCs to sum of squares. "square_score", # Same as square_ratio, but 1 if is_clear_win. "is_complete_unroll", # 0/1 whether last phase is complete. "is_clear_win", # 0/1 whether the player has more than half SC. "is_clear_loss", # 0/1 whether another player has more than half SC. "is_eliminated", # 0/1 whether has 0 SC. "is_leader", # 0/1 whether the player has at least as many SCs as anyone else. "can_draw", # 0/1 whether the player is alive and nobody wins solo. "num_games", # Number of games being averaged ], ) def get_power_one(game_json_path): """This function is depreccated. Use fairdiplomacy.compare_agents_array.""" name = re.findall("game.*\.json", game_json_path)[0] for power in POWERS: if power[:3] in name: return power raise ValueError(f"Couldn't parse power name from {name}") def get_game_result_from_json(game_json_path): """This function is depreccated. 
Use fairdiplomacy.compare_agents_array.""" power_one = get_power_one(game_json_path) try: with open(game_json_path) as f: j = json.load(f) except Exception as e: print(e) return None rl_rewards = compute_game_scores(POWERS.index(power_one), j) counts = {k: len(v) for k, v in j["phases"][-1]["state"]["centers"].items()} for p in POWERS: if p not in counts: counts[p] = 0 powers_won = {p for p, v in counts.items() if v == max(counts.values())} power_won = power_one if power_one in powers_won else powers_won.pop() if counts[power_one] == 0: return "six", power_one, power_won, rl_rewards winner_count, winner = max([(c, p) for p, c in counts.items()]) if winner_count < 18: return "draw", power_one, power_won, rl_rewards if winner == power_one: return "one", power_one, power_won, rl_rewards else: return "six", power_one, power_won, rl_rewards
41.95625
112
0.682556
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Sequence, Tuple, List import collections import json import re from fairdiplomacy.models.consts import POWERS, N_SCS GameScores = collections.namedtuple( "GameScores", [ "center_ratio", # Ratio of the player's SCs to N_SCS. "draw_score", # 1. / num alive players, but 1/0 if clear win/loss. "square_ratio", # Ratio of the squure of the SCs to sum of squares. "square_score", # Same as square_ratio, but 1 if is_clear_win. "is_complete_unroll", # 0/1 whether last phase is complete. "is_clear_win", # 0/1 whether the player has more than half SC. "is_clear_loss", # 0/1 whether another player has more than half SC. "is_eliminated", # 0/1 whether has 0 SC. "is_leader", # 0/1 whether the player has at least as many SCs as anyone else. "can_draw", # 0/1 whether the player is alive and nobody wins solo. 
"num_games", # Number of games being averaged ], ) def compute_game_sos_from_state(game_state: Dict) -> List[float]: center_counts = [len(game_state["centers"].get(p, [])) for p in POWERS] clear_wins = [c > (N_SCS / 2) for c in center_counts] if any(clear_wins): return [float(w) for w in clear_wins] center_squares = [x ** 2 for x in center_counts] sum_sq = sum(center_squares) return [c / sum_sq for c in center_squares] def compute_game_dss_from_state(game_state: Dict) -> List[float]: center_counts = [len(game_state["centers"].get(p, [])) for p in POWERS] clear_wins = [c > (N_SCS / 2) for c in center_counts] if any(clear_wins): return [float(w) for w in clear_wins] alive = [c > 0 for c in center_counts] n_alive = sum(alive) return [a / n_alive for a in alive] def compute_phase_scores(power_id: int, phase_json: Dict) -> GameScores: return compute_game_scores_from_state(power_id, phase_json["state"]) def compute_game_scores(power_id: int, game_json: Dict) -> GameScores: return compute_game_scores_from_state(power_id, game_json["phases"][-1]["state"]) def compute_game_scores_from_state(power_id: int, game_state: Dict) -> GameScores: center_counts = [len(game_state["centers"].get(p, [])) for p in POWERS] center_squares = [x ** 2 for x in center_counts] complete_unroll = game_state["name"] == "COMPLETED" is_clear_win = center_counts[power_id] > N_SCS / 2 someone_wins = any(c > N_SCS / 2 for c in center_counts) is_eliminated = center_counts[power_id] == 0 is_clear_loss = is_eliminated or (not is_clear_win and someone_wins) metrics = dict( center_ratio=center_counts[power_id] / N_SCS, square_ratio=center_squares[power_id] / sum(center_squares, 1e-5), is_complete_unroll=float(complete_unroll), is_clear_win=float(is_clear_win), is_clear_loss=float(is_clear_loss), is_eliminated=float(is_eliminated), is_leader=float(center_counts[power_id] == max(center_counts)), can_draw=float(not someone_wins and not is_eliminated), ) metrics["square_score"] = ( 1.0 if is_clear_win else (0 
if is_clear_loss else metrics["square_ratio"]) ) is_alive = not is_eliminated num_alive = sum(x > 0 for x in center_counts) metrics["draw_score"] = float(is_clear_win) if someone_wins else float(is_alive) / num_alive return GameScores(**metrics, num_games=1) def compute_game_scores(power_id: int, game_json: Dict) -> GameScores: return compute_phase_scores(power_id, game_json["phases"][-1]) def add_offset_to_square_score(game_scores: GameScores, offset: float) -> GameScores: return game_scores._replace(square_score=game_scores.square_score + offset) def average_game_scores(many_games_scores: Sequence[GameScores]) -> Tuple[GameScores, GameScores]: assert many_games_scores, "Must be non_empty" avgs, stderrs = {}, {} tot_n_games = sum(scores.num_games for scores in many_games_scores) # In theory, we could get much better stderrs by taking into account that the means for each # different powers vary, and when we have enough data, computing per-power variances and combining them. # We don't do this since it's messy and much tricker, statistically. for key in GameScores._fields: if key == "num_games": continue avgs[key] = ( sum(getattr(scores, key) * scores.num_games for scores in many_games_scores) / tot_n_games ) # Divide by N-1 for unbiased estimate, with hack to not crash on divide by 0. # We could do better things here given that we also know that most of our values are bounded in [0,1] # and that our null hypothesis for many is 1/7, but again, that's messier and harder and not worth much. sample_variance = sum( (getattr(scores, key) - avgs[key]) ** 2 * scores.num_games for scores in many_games_scores ) / max(0.5, tot_n_games - 1) stderrs[key] = (sample_variance / tot_n_games) ** 0.5 return GameScores(**avgs, num_games=tot_n_games), GameScores(**stderrs, num_games=tot_n_games) def get_power_one(game_json_path): """This function is depreccated. 
Use fairdiplomacy.compare_agents_array.""" name = re.findall("game.*\.json", game_json_path)[0] for power in POWERS: if power[:3] in name: return power raise ValueError(f"Couldn't parse power name from {name}") def get_game_result_from_json(game_json_path): """This function is depreccated. Use fairdiplomacy.compare_agents_array.""" power_one = get_power_one(game_json_path) try: with open(game_json_path) as f: j = json.load(f) except Exception as e: print(e) return None rl_rewards = compute_game_scores(POWERS.index(power_one), j) counts = {k: len(v) for k, v in j["phases"][-1]["state"]["centers"].items()} for p in POWERS: if p not in counts: counts[p] = 0 powers_won = {p for p, v in counts.items() if v == max(counts.values())} power_won = power_one if power_one in powers_won else powers_won.pop() if counts[power_one] == 0: return "six", power_one, power_won, rl_rewards winner_count, winner = max([(c, p) for p, c in counts.items()]) if winner_count < 18: return "draw", power_one, power_won, rl_rewards if winner == power_one: return "one", power_one, power_won, rl_rewards else: return "six", power_one, power_won, rl_rewards
3,954
0
184
3374490ae81fb09cb4ae79b69a10d63387eb70f1
1,540
py
Python
Data-Wrangling-with-Pandas/code.py
chiragThakur62/ga-learner-dsmp-repo
a525732fad0da0ed2a73bced16b8006621097e2b
[ "MIT" ]
null
null
null
Data-Wrangling-with-Pandas/code.py
chiragThakur62/ga-learner-dsmp-repo
a525732fad0da0ed2a73bced16b8006621097e2b
[ "MIT" ]
null
null
null
Data-Wrangling-with-Pandas/code.py
chiragThakur62/ga-learner-dsmp-repo
a525732fad0da0ed2a73bced16b8006621097e2b
[ "MIT" ]
null
null
null
# -------------- # Import packages import numpy as np import pandas as pd from scipy.stats import mode bank=pd.read_csv(path) #print(bank) # code starts here categorical_var=bank.select_dtypes(include = 'object') print(categorical_var) numerical_var=bank.select_dtypes(include = 'number') print(numerical_var) # code ends here # -------------- # code starts here #code ends here banks=bank.drop('Loan_ID', axis=1) #print(banks) sums=banks.isnull().sum() print(sums) bank_mode=banks.mode print(bank_mode) banks=banks.fillna(bank_mode) print(banks) # -------------- # code starts here # check the avg_loan_amount avg_loan_amount = banks.pivot_table(index=["Gender","Married","Self_Employed"],values="LoanAmount") print (avg_loan_amount) # code ends here # -------------- # code starts here loan_approved_se=len(banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status']== 'Y')]) loan_approved_nse=len(banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status']== 'Y')]) # code ends here percentage_se=(loan_approved_se/614)*100 print(percentage_se) percentage_nse=(loan_approved_nse/614)*100 print(percentage_nse) # -------------- # code starts here loan_term=banks['Loan_Amount_Term'].apply(lambda x:x/12) #print(loan_term) big_loan_term=len(loan_term[loan_term >=25]) print(big_loan_term) # code ends here # -------------- # code starts here loan_groupby =banks.groupby('Loan_Status')['ApplicantIncome', 'Credit_History'] mean_values=loan_groupby.agg(np.mean) # code ends here
14.666667
99
0.694805
# -------------- # Import packages import numpy as np import pandas as pd from scipy.stats import mode bank=pd.read_csv(path) #print(bank) # code starts here categorical_var=bank.select_dtypes(include = 'object') print(categorical_var) numerical_var=bank.select_dtypes(include = 'number') print(numerical_var) # code ends here # -------------- # code starts here #code ends here banks=bank.drop('Loan_ID', axis=1) #print(banks) sums=banks.isnull().sum() print(sums) bank_mode=banks.mode print(bank_mode) banks=banks.fillna(bank_mode) print(banks) # -------------- # code starts here # check the avg_loan_amount avg_loan_amount = banks.pivot_table(index=["Gender","Married","Self_Employed"],values="LoanAmount") print (avg_loan_amount) # code ends here # -------------- # code starts here loan_approved_se=len(banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status']== 'Y')]) loan_approved_nse=len(banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status']== 'Y')]) # code ends here percentage_se=(loan_approved_se/614)*100 print(percentage_se) percentage_nse=(loan_approved_nse/614)*100 print(percentage_nse) # -------------- # code starts here loan_term=banks['Loan_Amount_Term'].apply(lambda x:x/12) #print(loan_term) big_loan_term=len(loan_term[loan_term >=25]) print(big_loan_term) # code ends here # -------------- # code starts here loan_groupby =banks.groupby('Loan_Status')['ApplicantIncome', 'Credit_History'] mean_values=loan_groupby.agg(np.mean) # code ends here
0
0
0
8fc4d0cd377ffeebcd3f89f11edf4e340a6920f1
4,231
py
Python
baseline_modify/utils/auxiliary.py
lxchtan/DSTC9-Track1
26c3d36df1ab13a766767989434b79894b5317c5
[ "Apache-2.0" ]
7
2021-04-20T09:04:59.000Z
2022-03-07T03:42:09.000Z
baseline_modify/utils/auxiliary.py
lxchtan/DSTC9-Track1
26c3d36df1ab13a766767989434b79894b5317c5
[ "Apache-2.0" ]
null
null
null
baseline_modify/utils/auxiliary.py
lxchtan/DSTC9-Track1
26c3d36df1ab13a766767989434b79894b5317c5
[ "Apache-2.0" ]
null
null
null
import torch import torch.nn.functional as F from torch.utils.data import Sampler import math def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')): """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (..., vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. threshold: a minimal threshold to keep logits """ top_k = min(top_k, logits.size(-1)) if top_k > 0: # Remove all tokens with a probability less than the last token in the top-k tokens indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: # Compute cumulative probabilities of sorted tokens sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probabilities > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # Back to unsorted indices and set them to -infinity indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value indices_to_remove = logits < threshold logits[indices_to_remove] = filter_value return logits class SequentialDistributedSampler(Sampler): """ Distributed Sampler that subsamples indicies sequentially, making it easier to collate all results at the end. 
Even though we only use this sampler for eval and predict (no training), which means that the model params won't have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. """
39.175926
110
0.708816
import torch import torch.nn.functional as F from torch.utils.data import Sampler import math def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')): """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (..., vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. threshold: a minimal threshold to keep logits """ top_k = min(top_k, logits.size(-1)) if top_k > 0: # Remove all tokens with a probability less than the last token in the top-k tokens indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: # Compute cumulative probabilities of sorted tokens sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probabilities > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # Back to unsorted indices and set them to -infinity indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value indices_to_remove = logits < threshold logits[indices_to_remove] = filter_value return logits class SequentialDistributedSampler(Sampler): """ Distributed Sampler that subsamples indicies sequentially, making it easier to collate all results at the end. 
Even though we only use this sampler for eval and predict (no training), which means that the model params won't have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. """ def __init__(self, dataset, num_replicas=None, rank=None): if num_replicas is None: if not torch.distributed.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = torch.distributed.get_world_size() if rank is None: if not torch.distributed.is_available(): raise RuntimeError("Requires distributed package to be available") rank = torch.distributed.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas def __iter__(self): indices = list(range(len(self.dataset))) # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] assert len(indices) == self.total_size # subsample indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples] assert len(indices) == self.num_samples return iter(indices) def __len__(self): return self.num_samples def checkeq(num, target_list, eps=1e-2): if type(target_list) == int: target_list = [target_list] for n in target_list: if abs(num - n) < eps: return True return False class RunningAverage(object): def __init__(self, alpha: float = 0.98): self.alpha = alpha self.reset() def reset(self): self._value = None def add(self, now_value): if self._value is None: self._value = now_value else: self._value = self._value * self.alpha + (1.0 - self.alpha) * now_value return self._value
1,471
8
195
688b0da582dadeec1c8665f0835e9c4fe7d30b74
1,176
py
Python
DepthFirstSearch.py
sprasadhpy/Python_Data_structures
c8058b67cd8be0a3198f97b727822c38f7a13df7
[ "MIT" ]
null
null
null
DepthFirstSearch.py
sprasadhpy/Python_Data_structures
c8058b67cd8be0a3198f97b727822c38f7a13df7
[ "MIT" ]
null
null
null
DepthFirstSearch.py
sprasadhpy/Python_Data_structures
c8058b67cd8be0a3198f97b727822c38f7a13df7
[ "MIT" ]
null
null
null
if __name__ == '__main__': # first we have to create the vertices (nodes) node1 = Node("A") node2 = Node("B") node3 = Node("C") node4 = Node("D") node5 = Node("E") # handle and set the neighbors accordingly node1.adjacency_list.append(node2) node1.adjacency_list.append(node3) node2.adjacency_list.append(node4) node4.adjacency_list.append(node5) # run the DFS depth_first_search(node1)
25.021277
79
0.62415
class Node: def __init__(self, name): self.name = name self.adjacency_list = [] self.visited = False def depth_first_search(start_node): # that we need a LIFO: last item we insert is the first one we take out stack = [start_node] # let's iterate until the stack becomes empty while stack: # the pop() function returns with the last item we have inserted - O(1) actual_node = stack.pop() actual_node.visited = True print(actual_node.name) for n in actual_node.adjacency_list: # if the node has not been visited so far if not n.visited: # insert the item into the stack stack.append(n) if __name__ == '__main__': # first we have to create the vertices (nodes) node1 = Node("A") node2 = Node("B") node3 = Node("C") node4 = Node("D") node5 = Node("E") # handle and set the neighbors accordingly node1.adjacency_list.append(node2) node1.adjacency_list.append(node3) node2.adjacency_list.append(node4) node4.adjacency_list.append(node5) # run the DFS depth_first_search(node1)
668
-10
73
6016e5097628258ac085efa94a0c5801a1ce49c5
706
py
Python
bin-seeds.py
maikthulhu/erl-matter
911c809712ef3e780b934e89d45237fa4aa8b2a7
[ "BSD-3-Clause" ]
63
2017-09-15T15:47:27.000Z
2022-03-30T21:29:42.000Z
bin-seeds.py
maikthulhu/erl-matter
911c809712ef3e780b934e89d45237fa4aa8b2a7
[ "BSD-3-Clause" ]
4
2020-07-20T00:31:29.000Z
2021-11-22T16:06:32.000Z
bin-seeds.py
maikthulhu/erl-matter
911c809712ef3e780b934e89d45237fa4aa8b2a7
[ "BSD-3-Clause" ]
10
2018-09-06T14:55:58.000Z
2022-03-26T09:10:01.000Z
#!/usr/bin/env python2.7 import sys import json data = load_seeds(sys.stdin) n = len(data) intervals = [] for (a, b, count) in make_bins(data): intervals.append({'start': a, 'stop': b, 'prob': (100.0*count)/n}) print(json.dumps(intervals))
21.393939
75
0.645892
#!/usr/bin/env python2.7 import sys import json def load_seeds(f): data = [int(x) for x in f.readlines()] return data def make_bins(data): _min = min(data) _max = max(data) _delta = _max-_min+1 _epsilon = 1000000 _bins = [0 for x in range(int(_delta//_epsilon) + 1)] for d in data: _bins[int((d-_min)//_epsilon)] += 1 indexes = sorted(range(len(_bins)), key=lambda x: _bins[x], reverse=True) for i in indexes: yield (_min+i*int(_epsilon), _min+(i+1)*int(_epsilon), _bins[i]) data = load_seeds(sys.stdin) n = len(data) intervals = [] for (a, b, count) in make_bins(data): intervals.append({'start': a, 'stop': b, 'prob': (100.0*count)/n}) print(json.dumps(intervals))
415
0
46
e2d9699f8770940f7a5e41871f86f0a2b79cb1a7
743
py
Python
examples/serializer.py
gorzechowski/flask-restly
54f28b66f35b0ab12ba4ee37bcd6d39aaf24111a
[ "MIT" ]
16
2018-10-16T20:07:02.000Z
2021-01-07T13:01:05.000Z
examples/serializer.py
gorzechowski/flask-restly
54f28b66f35b0ab12ba4ee37bcd6d39aaf24111a
[ "MIT" ]
16
2018-10-16T14:09:55.000Z
2020-01-16T07:52:22.000Z
examples/serializer.py
gorzechowski/flask-restly
54f28b66f35b0ab12ba4ee37bcd6d39aaf24111a
[ "MIT" ]
1
2019-04-17T03:20:41.000Z
2019-04-17T03:20:41.000Z
from flask import Flask from flask_restly import FlaskRestly from flask_restly.decorator import resource, get app = Flask(__name__) # json is default serializer # from flask_restly.serializer import json # app.config['RESTLY_SERIALIZER'] = json rest = FlaskRestly(app) rest.init_app(app) @resource(name='employees') with app.app_context(): EmployeesResource() if __name__ == "__main__": app.run(host='127.0.0.1', port=5001, debug=True)
21.228571
68
0.666218
from flask import Flask from flask_restly import FlaskRestly from flask_restly.decorator import resource, get app = Flask(__name__) # json is default serializer # from flask_restly.serializer import json # app.config['RESTLY_SERIALIZER'] = json rest = FlaskRestly(app) rest.init_app(app) @resource(name='employees') class EmployeesResource: @get('/<id>', serialize=lambda result, _: str(result.get('id'))) def get_employee(self, id): return dict(id=int(id)) @get('/') def get_employees(self): return dict(entites=[ dict(id=1), dict(id=2) ]) with app.app_context(): EmployeesResource() if __name__ == "__main__": app.run(host='127.0.0.1', port=5001, debug=True)
129
139
22
7c53c1f3c87c5a3e8d711d49440ac0b2f7015b4b
5,333
py
Python
anyioc/ioc_service_info.py
Cologler/anyioc-python
87aaf52208b8c510c9128f89359300a2158e3637
[ "MIT" ]
null
null
null
anyioc/ioc_service_info.py
Cologler/anyioc-python
87aaf52208b8c510c9128f89359300a2158e3637
[ "MIT" ]
null
null
null
anyioc/ioc_service_info.py
Cologler/anyioc-python
87aaf52208b8c510c9128f89359300a2158e3637
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright (c) 2018~2999 - Cologler <skyoflw@gmail.com> # ---------- # # ---------- from abc import abstractmethod, ABC from enum import Enum from inspect import signature, Parameter from typing import Any from threading import RLock import inspect import contextlib from .symbols import Symbols from .utils import update_wrapper class ServiceInfo(IServiceInfo): '''generic `IServiceInfo`.''' __slots__ = ( '_key', '_lifetime', '_factory', # for not transient '_lock', # for singleton '_cache_value', '_service_provider', # options '_options', ) def _create(self, provider): ''' return the finally service instance. ''' service = self._factory(provider) if self._options['auto_enter']: wrapped = getattr(self._factory, '__anyioc_wrapped__', self._factory) if isinstance(wrapped, type) and hasattr(wrapped, '__enter__') and hasattr(wrapped, '__exit__'): service = provider.enter(service) return service class ProviderServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get current `ServiceProvider`.''' __slots__ = () class GetAttrServiceInfo(IServiceInfo): '''getattr from current `ServiceProvider`.''' __slots__ = ('_attr_info') class ValueServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get fixed value.''' __slots__ = ('_value') class GroupedServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get multi values as a tuple from keys list.''' __slots__ = ('_keys') class BindedServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get value from target key.''' __slots__ = ('_target_key') class CallerFrameServiceInfo(IServiceInfo): 'a `IServiceInfo` use for get caller frameinfo' __slots__ = ()
27.489691
108
0.617101
# -*- coding: utf-8 -*- # # Copyright (c) 2018~2999 - Cologler <skyoflw@gmail.com> # ---------- # # ---------- from abc import abstractmethod, ABC from enum import Enum from inspect import signature, Parameter from typing import Any from threading import RLock import inspect import contextlib from .symbols import Symbols from .utils import update_wrapper class LifeTime(Enum): transient = 0 scoped = 1 singleton = 2 class IServiceInfo(ABC): __slots__ = () @abstractmethod def get(self, provider) -> Any: raise NotImplementedError class ServiceInfo(IServiceInfo): '''generic `IServiceInfo`.''' __slots__ = ( '_key', '_lifetime', '_factory', # for not transient '_lock', # for singleton '_cache_value', '_service_provider', # options '_options', ) def __init__(self, service_provider, key, factory, lifetime): sign = signature(factory) if not sign.parameters: self._factory = update_wrapper(lambda _: factory(), factory) elif len(sign.parameters) == 1: arg_0 = list(sign.parameters.values())[0] if arg_0.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD): self._factory = factory elif arg_0.kind == Parameter.KEYWORD_ONLY: arg_0_name = arg_0.name self._factory = update_wrapper(lambda _arg: factory(**{arg_0_name: _arg}), factory) else: raise ValueError(f'unsupported factory signature: {sign}') else: raise TypeError('factory has too many parameters.') self._key = key self._lifetime = lifetime self._cache_value = None self._service_provider = service_provider self._options: dict = service_provider[Symbols.provider_options] if self._lifetime != LifeTime.transient: self._lock = RLock() else: self._lock = None if self._lifetime == LifeTime.singleton: # service_provider is required when lifetime == singleton assert self._service_provider is not None def get(self, provider): if self._lifetime is LifeTime.transient: return self._create(provider) if self._lifetime is LifeTime.scoped: return self._from_scoped(provider) if self._lifetime is LifeTime.singleton: return 
self._from_singleton() raise NotImplementedError(f'what is {self._lifetime}?') def _from_scoped(self, provider): cache: dict = provider[Symbols.cache] try: return cache[self] except KeyError: pass with self._lock: try: return cache[self] except KeyError: service = self._create(provider) cache[self] = service return service def _from_singleton(self): if self._cache_value is None: with self._lock: if self._cache_value is None: self._cache_value = (self._create(self._service_provider), ) return self._cache_value[0] def _create(self, provider): ''' return the finally service instance. ''' service = self._factory(provider) if self._options['auto_enter']: wrapped = getattr(self._factory, '__anyioc_wrapped__', self._factory) if isinstance(wrapped, type) and hasattr(wrapped, '__enter__') and hasattr(wrapped, '__exit__'): service = provider.enter(service) return service class ProviderServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get current `ServiceProvider`.''' __slots__ = () def get(self, provider): return provider class GetAttrServiceInfo(IServiceInfo): '''getattr from current `ServiceProvider`.''' __slots__ = ('_attr_info') def __init__(self, *attr_info: tuple): super().__init__() self._attr_info = attr_info def get(self, provider): return getattr(provider, *self._attr_info) class ValueServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get fixed value.''' __slots__ = ('_value') def __init__(self, value): self._value = value def get(self, provider): return self._value class GroupedServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get multi values as a tuple from keys list.''' __slots__ = ('_keys') def __init__(self, keys: list): self._keys = keys def get(self, provider): return tuple(provider[k] for k in self._keys) class BindedServiceInfo(IServiceInfo): '''a `IServiceInfo` use for get value from target key.''' __slots__ = ('_target_key') def __init__(self, target_key): self._target_key = target_key def get(self, provider): return 
provider[self._target_key] class CallerFrameServiceInfo(IServiceInfo): 'a `IServiceInfo` use for get caller frameinfo' __slots__ = () def get(self, _): frs = inspect.getouterframes(inspect.currentframe()) for fr in frs[2:]: mo = inspect.getmodule(fr.frame) if mo is None or mo.__name__.partition('.')[0] != 'anyioc': return fr
2,916
120
424
29818413c4e0e423211aa6c50ec9f07e237070d7
38
py
Python
ctzzy/__init__.py
zzylydx/ctzzy
d51b785f67acae8da84b646e15a8f89634af9445
[ "MIT" ]
null
null
null
ctzzy/__init__.py
zzylydx/ctzzy
d51b785f67acae8da84b646e15a8f89634af9445
[ "MIT" ]
null
null
null
ctzzy/__init__.py
zzylydx/ctzzy
d51b785f67acae8da84b646e15a8f89634af9445
[ "MIT" ]
null
null
null
from ctzzy._version import __version__
38
38
0.894737
from ctzzy._version import __version__
0
0
0
2b9be5e1f667665236c1af0380b6966278583201
1,927
py
Python
tests/testsniff.py
4788665/pcap-ct
dbedb178b82b1f17a371e7ae191074a1c8f870f1
[ "BSD-3-Clause" ]
17
2018-01-28T03:42:41.000Z
2022-01-30T01:47:54.000Z
tests/testsniff.py
4788665/pcap-ct
dbedb178b82b1f17a371e7ae191074a1c8f870f1
[ "BSD-3-Clause" ]
6
2019-11-22T23:06:34.000Z
2022-03-01T01:09:16.000Z
tests/testsniff.py
4788665/pcap-ct
dbedb178b82b1f17a371e7ae191074a1c8f870f1
[ "BSD-3-Clause" ]
3
2020-03-14T22:22:36.000Z
2022-03-01T00:53:53.000Z
#!/usr/bin/env python import getopt import sys import dpkt import pcap if __name__.rpartition(".")[-1] == "__main__": main()
24.0875
85
0.580695
#!/usr/bin/env python import getopt import sys import dpkt import pcap def usage(): sys.stderr.write('Usage: {} [-i device] [-l] [-n] [pattern]'.format(sys.argv[0])) sys.stderr.write(""" Options: \t-i device - Use the specific device. \t-l - Use pcap.loop() method. \t-n - Report timestamps in nanoseconds. Available devices:""") sys.stderr.write('\t' + '\n\t'.join(pcap.findalldevs())) sys.exit(1) def format_packet(ts, pkt, decode_fn): if isinstance(ts, float): msg = '%.6f %r' % (ts, decode_fn(pkt)) else: msg = '%d.%09d %r' % (ts // 1000000000, ts % 1000000000, decode_fn(pkt)) return msg def iter(pc, decode_fn): for ts, pkt in pc: print(format_packet(ts, pkt, decode_fn)) def loop(pc, decode_fn): def cb(ts, pkt, *args): print(format_packet(ts, pkt, decode_fn)) pc.loop(0, cb) def main(argv=sys.argv[1:]): opts, args = getopt.getopt(argv, 'i:hln') name = None use_loop = False timestamp_in_ns = False for o, a in opts: if o == '-i': name = a elif o == '-l': use_loop = True elif o == '-n': timestamp_in_ns = True else: usage() pc = pcap.pcap(name, timeout_ms=50, timestamp_in_ns=timestamp_in_ns) pc.setfilter(' '.join(args)) decode = { pcap.DLT_LOOP: dpkt.loopback.Loopback, pcap.DLT_NULL: dpkt.loopback.Loopback, pcap.DLT_EN10MB: dpkt.ethernet.Ethernet }[pc.datalink()] print('listening on {}: {}'.format(pc.name, pc.filter)) try: if use_loop: loop(pc, decode) else: iter(pc, decode) except KeyboardInterrupt: nrecv, ndrop, nifdrop = pc.stats() print('\n{:d} packets received by filter'.format(nrecv)) print('{:d} packets dropped by kernel'.format(ndrop)) if __name__.rpartition(".")[-1] == "__main__": main()
1,674
0
115
ab5108b4bd24a6bbd45ef080854fe71f7d639754
1,368
py
Python
microcosm_sagemaker/factories.py
globality-corp/microcosm-sagemaker
c112ea2c1f5c40c1973c292b73ca0fadbf461280
[ "Apache-2.0" ]
null
null
null
microcosm_sagemaker/factories.py
globality-corp/microcosm-sagemaker
c112ea2c1f5c40c1973c292b73ca0fadbf461280
[ "Apache-2.0" ]
15
2019-04-22T19:46:32.000Z
2022-02-11T17:31:43.000Z
microcosm_sagemaker/factories.py
globality-corp/microcosm-sagemaker
c112ea2c1f5c40c1973c292b73ca0fadbf461280
[ "Apache-2.0" ]
null
null
null
""" Consumer factories. """ from microcosm.api import defaults from microcosm.object_graph import ObjectGraph from microcosm_sagemaker.artifact import RootInputArtifact @defaults( perform_load=True, ) def load_active_bundle_and_dependencies(graph: ObjectGraph): """ Loads the active bundle and its dependencies immediately upon instantation. """ if not graph.config.load_active_bundle_and_dependencies.perform_load: return root_input_artifact = RootInputArtifact(graph.config.root_input_artifact_path) graph.bundle_and_dependencies_loader( bundle=graph.active_bundle, root_input_artifact=root_input_artifact, ) def configure_sagemaker(graph): """ Instantiates all the necessary sagemaker factories. """ graph.use( "active_bundle", "active_evaluation", "bundle_and_dependencies_loader", "bundle_and_dependencies_trainer", "random", "training_initializers", "experiment_metrics", )
24.428571
82
0.722953
""" Consumer factories. """ from microcosm.api import defaults from microcosm.object_graph import ObjectGraph from microcosm_sagemaker.artifact import RootInputArtifact def configure_active_bundle(graph): if not getattr(graph.config, "active_bundle", ""): return None return getattr(graph, graph.config.active_bundle) def configure_active_evaluation(graph): if not getattr(graph.config, "active_evaluation", ""): return None return getattr(graph, graph.config.active_evaluation) @defaults( perform_load=True, ) def load_active_bundle_and_dependencies(graph: ObjectGraph): """ Loads the active bundle and its dependencies immediately upon instantation. """ if not graph.config.load_active_bundle_and_dependencies.perform_load: return root_input_artifact = RootInputArtifact(graph.config.root_input_artifact_path) graph.bundle_and_dependencies_loader( bundle=graph.active_bundle, root_input_artifact=root_input_artifact, ) def configure_sagemaker(graph): """ Instantiates all the necessary sagemaker factories. """ graph.use( "active_bundle", "active_evaluation", "bundle_and_dependencies_loader", "bundle_and_dependencies_trainer", "random", "training_initializers", "experiment_metrics", )
298
0
46
36d73bf673396d4a70a41c5cc62df04a70906dc7
1,271
py
Python
brother-printer.py
c99koder/telegraf-brother-printer
ff7fada79f4e5fc6f41e43446f3c495e607f2c7f
[ "Apache-2.0" ]
null
null
null
brother-printer.py
c99koder/telegraf-brother-printer
ff7fada79f4e5fc6f41e43446f3c495e607f2c7f
[ "Apache-2.0" ]
null
null
null
brother-printer.py
c99koder/telegraf-brother-printer
ff7fada79f4e5fc6f41e43446f3c495e607f2c7f
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3 # Copyright (C) 2021 Sam Steele # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio, json, sys from brother import Brother, SnmpError, UnsupportedModel loop = asyncio.get_event_loop() loop.run_until_complete(main()) loop.close()
31
75
0.696302
#!/usr/bin/python3 # Copyright (C) 2021 Sam Steele # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio, json, sys from brother import Brother, SnmpError, UnsupportedModel async def main(): if len(sys.argv) < 2: print(f"Usage: {sys.argv[0]} <hostname> [laser | ink]") sys.exit() host = sys.argv[1] kind = sys.argv[2] if len(sys.argv) > 2 else "laser" brother = Brother(host, kind=kind) try: await brother.async_update() except (ConnectionError, SnmpError, UnsupportedModel) as error: print(f"{error}") return brother.shutdown() if brother.available: print(json.dumps(brother.data, default=str)) loop = asyncio.get_event_loop() loop.run_until_complete(main()) loop.close()
484
0
23
49580ae540eae9c0df2ff0b69fb3a765f77e4ff4
4,415
py
Python
spectrocrunch/io/tests/test_excel.py
woutdenolf/spectrocrunch
fde4b6e0f462f464ce7af6a942b355d3d8f39f77
[ "MIT" ]
3
2018-04-16T15:51:36.000Z
2019-12-16T11:21:05.000Z
spectrocrunch/io/tests/test_excel.py
woutdenolf/spectrocrunch
fde4b6e0f462f464ce7af6a942b355d3d8f39f77
[ "MIT" ]
null
null
null
spectrocrunch/io/tests/test_excel.py
woutdenolf/spectrocrunch
fde4b6e0f462f464ce7af6a942b355d3d8f39f77
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import unittest import numpy as np import pandas as pd import random from collections import OrderedDict from testfixtures import TempDirectory import os from .. import excel def test_suite(): """Test suite including all test suites""" testSuite = unittest.TestSuite() testSuite.addTest(test_excel("test_dataframe")) testSuite.addTest(test_excel("test_save")) return testSuite if __name__ == "__main__": import sys mysuite = test_suite() runner = unittest.TextTestRunner() if not runner.run(mysuite).wasSuccessful(): sys.exit(1)
32.226277
86
0.521631
# -*- coding: utf-8 -*- import unittest import numpy as np import pandas as pd import random from collections import OrderedDict from testfixtures import TempDirectory import os from .. import excel class test_excel(unittest.TestCase): def setUp(self): self.dir = TempDirectory() self.nrow, self.ncol = 8, 6 self._rows = [chr(c + ord("a")) for c in range(0, self.nrow)] self._columns = [chr(c + ord("a")) for c in range(13, 13 + self.ncol)] def rows(self, n=None): rows = [chr(c + ord("a")) for c in range(0, self.nrow)] if n: np.random.shuffle(rows) rows = rows[:n] return rows def columns(self, n=None): columns = [chr(c + ord("a")) for c in range(13, 13 + self.ncol)] if n: np.random.shuffle(columns) columns = columns[:n] return columns def tearDown(self): self.dir.cleanup() def _generate_df(self, writer=None): data = np.random.random(self.nrow * self.ncol).reshape((self.nrow, self.ncol)) priority = random.choice(list(excel.DataFrame.priorities)) df = excel.DataFrame( writer=writer, data=data, columns=self.columns(), index=self.rows(), priority=priority, ) for i in range(5): m = random.choice([0, 1, 2]) rows = self.rows(4) columns = self.columns(4) if m == 2: df.addrow_formula(rows[0], "({}+{}+{})/3.", rows[1:]) elif m == 1: df.addcolumn_formula(columns[0], "({}+{}+{})/3.", columns[1:]) else: df.addcell_formula( rows[0], columns[0], "({}+{}+{})/3.", rows[1:], columns[1:] ) return df def test_save(self): filename = os.path.join(self.dir.path, "test.xlsx") with excel.Writer(filename) as writer: df1 = self._generate_df(writer=writer) df1.save() df2 = next(iter(excel.DataFrame.fromexcel(filename, index_col=0).values())) np.testing.assert_array_almost_equal(df1.df.values, df2.df.values) def test_dataframe(self): data = np.arange(self.nrow * self.ncol).reshape((self.nrow, self.ncol)) rows = self.rows() columns = self.columns() for equal in [True, False]: df2 = pd.DataFrame(data=data, columns=columns, index=rows) if equal: priority = excel.DataFrame.priorities.column else: 
priority = excel.DataFrame.priorities.row df1 = excel.DataFrame(columns=columns, index=rows, priority=priority) for index, row in zip(rows, data): df1.addrow(index, row) for i in [2, 3]: j, k = i - 2, i - 1 df1.addrow_formula(rows[i], "({}+{})/2.", [rows[j], rows[k]]) df2.loc[rows[i]] = (df2.loc[rows[j]] + df2.loc[rows[k]]) / 2.0 for i in [2, 3]: j, k = i - 2, i - 1 df1.addcolumn_formula( columns[i], "({}+{})/3.", [columns[j], columns[k]] ) df2[columns[i]] = (df2[columns[j]] + df2[columns[k]]) / 3.0 i = 2 df1.addcell_formula( rows[i], columns[i], "{}", rows[self.nrow - 1], columns[self.ncol - 1] ) df2.loc[rows[i], columns[i]] = df2.loc[ rows[self.nrow - 1], columns[self.ncol - 1] ] # dfequal = np.array_equal(df1.df.values,df2.values) # dfequal = df1.df.equals(df2) if equal: np.testing.assert_array_equal(df1.df.values, df2.values) else: np.testing.assert_raises( AssertionError, np.testing.assert_array_equal, df1.df.values, df2.values, ) def test_suite(): """Test suite including all test suites""" testSuite = unittest.TestSuite() testSuite.addTest(test_excel("test_dataframe")) testSuite.addTest(test_excel("test_save")) return testSuite if __name__ == "__main__": import sys mysuite = test_suite() runner = unittest.TextTestRunner() if not runner.run(mysuite).wasSuccessful(): sys.exit(1)
3,584
15
211
f0340e9b622398ae8fb557f894bad5796c9ce19d
4,814
py
Python
examples/B_basic_platform/bsp03_1plat4gw_noAuth_Demo/demonstrator/potentiometer/pott1.py
N5GEH/n5geh.services.rvk_simulator
4a2c2882ddfedd7fcb260be3a86b9088f29b17c6
[ "MIT" ]
null
null
null
examples/B_basic_platform/bsp03_1plat4gw_noAuth_Demo/demonstrator/potentiometer/pott1.py
N5GEH/n5geh.services.rvk_simulator
4a2c2882ddfedd7fcb260be3a86b9088f29b17c6
[ "MIT" ]
null
null
null
examples/B_basic_platform/bsp03_1plat4gw_noAuth_Demo/demonstrator/potentiometer/pott1.py
N5GEH/n5geh.services.rvk_simulator
4a2c2882ddfedd7fcb260be3a86b9088f29b17c6
[ "MIT" ]
null
null
null
import paho.mqtt.client as mqtt import ssl import json import utils #======================================================================= # program that replaces te physical potentiometer # it sends a value between 0 and 255 that emulates the signal from potentiometer # to the topic sub_topic defined in the general section of this file # demo1.py listens on this topic and uses the sent value to control the rest of simulation #======================================================================= # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ================================================================== # ================================================================== # ================================================================== # ================================================================== # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- #####mqtt_username = "rvksim" #####mqtt_password = "7TvpyxEQBsMHSHaF7E8m6Hd7Sux2mpQx" #####mqtt_endpoint = "fiware.n5geh.de" #####mqtt_endpoint = "127.0.0.1" #####mqtt_port = 1026 #####TLS_CONNECTION = True #####TLS_CONNECTION = False #####ACT_AUTH = False ##### #####sub_topic = "/rvksim/lastgang/cmd" #####pub_topic = "/rvksim/lastgang/attrs" ##### #####filepath = "./static_lastgang.dat" ##### ###### persistent data object #####data = {} if __name__ == "__main__": main()
40.116667
141
0.585584
import paho.mqtt.client as mqtt import ssl import json import utils #======================================================================= # program that replaces te physical potentiometer # it sends a value between 0 and 255 that emulates the signal from potentiometer # to the topic sub_topic defined in the general section of this file # demo1.py listens on this topic and uses the sent value to control the rest of simulation #======================================================================= # ---------------------------------------------------------------------- def initialize_components(config_file): mqtt_broker = config_file['calculation']['platform_mode']['mqtt_broker'] mqtt_port_nr = config_file['calculation']['platform_mode']['mqtt_port_nr'] # authentication authentication = config_file['calculation']['demonstrator_mode']['authentication']['activate'] mqtt_username = config_file['calculation']['demonstrator_mode']['authentication']['mqtt_username'] mqtt_password = config_file['calculation']['demonstrator_mode']['authentication']['mqtt_password'] tls_connection = config_file['calculation']['demonstrator_mode']['authentication']['tls_connection'] sub_topic = config_file['calculation']['demonstrator_mode']['mqtt_topic_pott_sub'] return (authentication, mqtt_username, mqtt_password, tls_connection, mqtt_broker, mqtt_port_nr, sub_topic) # ---------------------------------------------------------------------- def create_mqtt_client(broker, port_nr, client_name, authentication, mqtt_username, mqtt_password, tls_connection, mytag): print('{} create client {}'.format(mytag, client_name)) client = mqtt.Client(client_name) client.on_connect = on_connect client.on_message = on_message client.on_publish = on_publish client.on_disconnect = on_disconnect print('{} connect client {} to broker'.format(mytag, client_name)) if(authentication): client.username_pw_set(mqtt_username, password=mqtt_password) if tls_connection: 
client.tls_set(tls_version=ssl.PROTOCOL_TLSv1_2) client.tls_insecure_set(False) client.connect(broker, port=port_nr, keepalive=60, bind_address="") # connect return client # end create_mqtt_client # ---------------------------------------------------------------------- # ================================================================== def on_message(client, userdata, message): print('\nDEMO ON MESSAGE') # end on_message # ================================================================== def on_connect(client, userdata, flags, rc): print('ON CONNECT') if rc == 0: client.connected_flag = True else: print('Bad connection returned code {}'.format(rc)) client.loop_stop() # end on_connect # ================================================================== def on_disconnect(client, userdata, rc): print('client has disconnected') # end on_disconnect # ================================================================== def on_publish(client, userdata, message): print("ON PUBLISH") #print("received message =", str(message.payload.decode("utf-8"))) # end on_publish # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- #####mqtt_username = "rvksim" #####mqtt_password = "7TvpyxEQBsMHSHaF7E8m6Hd7Sux2mpQx" #####mqtt_endpoint = "fiware.n5geh.de" #####mqtt_endpoint = "127.0.0.1" #####mqtt_port = 1026 #####TLS_CONNECTION = True #####TLS_CONNECTION = False #####ACT_AUTH = False ##### #####sub_topic = "/rvksim/lastgang/cmd" #####pub_topic = "/rvksim/lastgang/attrs" ##### #####filepath = "./static_lastgang.dat" ##### ###### persistent data object #####data = {} def main(): mytag = "potenTIOmeter" config_file_path = "./config.json" config_file = utils.check_and_open_json_file(config_file_path) (authentication, mqtt_username, mqtt_password, tls_connection, mqtt_broker, mqtt_port_nr, sub_topic) = 
initialize_components(config_file) # listen to rvk - initialize mqtt subscription - read times of every data set mqtt_client_name = 'Potentiometer_1' mqtt_client = create_mqtt_client(mqtt_broker, mqtt_port_nr, mqtt_client_name, authentication, mqtt_username, mqtt_password, tls_connection, mytag) myvalue = 127.5 myvalue = 0.0 myvalue = 255 print('myvalue = {}'.format(myvalue)) payload = json.dumps({"offset": myvalue}) mqtt_client.publish(sub_topic, payload) print('{} published {} at topic {}'.format(mqtt_client_name, payload, sub_topic)) if __name__ == "__main__": main()
2,944
0
161
0421b42e9cdf39350afeaa5ecfd3fccbeb7ed91b
4,545
py
Python
synmod/aggregators.py
Craven-Biostat-Lab/synmod
433e2f1726e68acbc45f226b1235f15508156de1
[ "MIT" ]
null
null
null
synmod/aggregators.py
Craven-Biostat-Lab/synmod
433e2f1726e68acbc45f226b1235f15508156de1
[ "MIT" ]
null
null
null
synmod/aggregators.py
Craven-Biostat-Lab/synmod
433e2f1726e68acbc45f226b1235f15508156de1
[ "MIT" ]
null
null
null
"""Feature temporal aggregation functions""" from abc import ABC import numpy as np class Aggregator(): """Aggregates temporal values""" def update_statistics(self, instances): """Identify statistics to standardize each feature""" for fidx, _ in enumerate(self._aggregation_fns): left, right = self._windows[fidx] vec = self.operate_on_feature(fidx, instances[:, fidx, left: right + 1]) self._means[fidx] = np.mean(vec) self._stds[fidx] = np.std(vec) if self._stds[fidx] < 1e-10: # FIXME: features can pass the variance test earlier but fail it here, since the samples used are different self._stds[fidx] = 1 def operate_on_feature(self, fidx, sequences): """Operate on sequences for given feature""" return (self._aggregation_fns[fidx].operate(sequences) - self._means[fidx]) / self._stds[fidx] # sequences: instances X timesteps def operate(self, sequences): """Apply feature-wise operations to sequence data""" # TODO: when perturbing a feature, other values do not need to be recomputed. # But this seems unavoidable under the current design (analysis only calls model.predict, doesn't provide other info) num_instances, num_features, _ = sequences.shape # sequences: instances X features X timesteps matrix = np.zeros((num_instances, num_features)) for fidx in range(num_features): (left, right) = self._windows[fidx] matrix[:, fidx] = self.operate_on_feature(fidx, sequences[:, fidx, left: right + 1]) return matrix class TabularAggregator(Aggregator): """Returns input as-is without aggregation (for tabular features)""" class AggregationFunction(ABC): """Aggregation function base class""" NONLINEARITY_OPERATORS = [lambda x: x, np.abs, np.square] def operate(self, sequences): """Operate on sequences for given feature""" return self._nonlinearity_operator(np.apply_along_axis(self._sequence_operator, 1, sequences)) # sequences: instances X timesteps class Max(AggregationFunction): """Computes max of inputs""" class Average(AggregationFunction): """Computes average of inputs""" class 
MonotonicWeightedAverage(AggregationFunction): """Computes weighted average of inputs with monotically increasing weights""" class RandomWeightedAverage(AggregationFunction): """Computes weighted average of inputs with random weights""" AGGREGATION_OPERATORS = [Max, Average, MonotonicWeightedAverage, RandomWeightedAverage] def get_aggregation_fn_cls(rng): """Sample aggregation function for feature""" return rng.choice(AGGREGATION_OPERATORS)
39.868421
138
0.682948
"""Feature temporal aggregation functions""" from abc import ABC import numpy as np class Aggregator(): """Aggregates temporal values""" def __init__(self, aggregation_fns, windows, instances=None, standardize_features=False): self._aggregation_fns = aggregation_fns # Temporal aggregation functions for all features self._windows = windows # Windows over time for all features (list of tuples) if instances is None: return # Identify statistics to standardize each feature num_features = len(self._aggregation_fns) self._means = np.zeros(num_features) self._stds = np.ones(num_features) if standardize_features: self.update_statistics(instances) def update_statistics(self, instances): """Identify statistics to standardize each feature""" for fidx, _ in enumerate(self._aggregation_fns): left, right = self._windows[fidx] vec = self.operate_on_feature(fidx, instances[:, fidx, left: right + 1]) self._means[fidx] = np.mean(vec) self._stds[fidx] = np.std(vec) if self._stds[fidx] < 1e-10: # FIXME: features can pass the variance test earlier but fail it here, since the samples used are different self._stds[fidx] = 1 def operate_on_feature(self, fidx, sequences): """Operate on sequences for given feature""" return (self._aggregation_fns[fidx].operate(sequences) - self._means[fidx]) / self._stds[fidx] # sequences: instances X timesteps def operate(self, sequences): """Apply feature-wise operations to sequence data""" # TODO: when perturbing a feature, other values do not need to be recomputed. 
# But this seems unavoidable under the current design (analysis only calls model.predict, doesn't provide other info) num_instances, num_features, _ = sequences.shape # sequences: instances X features X timesteps matrix = np.zeros((num_instances, num_features)) for fidx in range(num_features): (left, right) = self._windows[fidx] matrix[:, fidx] = self.operate_on_feature(fidx, sequences[:, fidx, left: right + 1]) return matrix class TabularAggregator(Aggregator): """Returns input as-is without aggregation (for tabular features)""" def __init__(self): super().__init__(None, None) def operate(self, sequences): return sequences class AggregationFunction(ABC): """Aggregation function base class""" NONLINEARITY_OPERATORS = [lambda x: x, np.abs, np.square] def __init__(self, rng, window): self._nonlinearity_operator = rng.choice(AggregationFunction.NONLINEARITY_OPERATORS) self._window = window self._sequence_operator = None self.ordering_important = False def operate(self, sequences): """Operate on sequences for given feature""" return self._nonlinearity_operator(np.apply_along_axis(self._sequence_operator, 1, sequences)) # sequences: instances X timesteps class Max(AggregationFunction): """Computes max of inputs""" def __init__(self, rng, window): super().__init__(rng, window) self._sequence_operator = np.max class Average(AggregationFunction): """Computes average of inputs""" def __init__(self, rng, window): super().__init__(rng, window) self._sequence_operator = np.average class MonotonicWeightedAverage(AggregationFunction): """Computes weighted average of inputs with monotically increasing weights""" def __init__(self, rng, window): super().__init__(rng, window) window_size = window[1] - window[0] + 1 weights = np.linspace(1, 2, window_size) self._sequence_operator = lambda seq: seq.dot(weights) self.ordering_important = window_size > 1 class RandomWeightedAverage(AggregationFunction): """Computes weighted average of inputs with random weights""" def __init__(self, 
rng, window): super().__init__(rng, window) window_size = window[1] - window[0] + 1 weights = np.linspace(1, 2, window_size) rng.shuffle(weights) self._sequence_operator = lambda seq: seq.dot(weights) self.ordering_important = window_size > 1 AGGREGATION_OPERATORS = [Max, Average, MonotonicWeightedAverage, RandomWeightedAverage] def get_aggregation_fn_cls(rng): """Sample aggregation function for feature""" return rng.choice(AGGREGATION_OPERATORS)
1,590
0
210
a83c92714df8528ee549b10de22f772dd1b8e28d
2,611
py
Python
attic/rojo_scripts/pull_charlesreid1.py
charlesreid1/debian-dotfiles
eb94148598665bf9bc7dd9534c28d90a594ab590
[ "MIT" ]
2
2021-03-08T01:13:01.000Z
2021-07-26T09:20:35.000Z
dotfiles/rojo_scripts/pull_charlesreid1.py
charlesreid1/debian-dotfiles
eb94148598665bf9bc7dd9534c28d90a594ab590
[ "MIT" ]
null
null
null
dotfiles/rojo_scripts/pull_charlesreid1.py
charlesreid1/debian-dotfiles
eb94148598665bf9bc7dd9534c28d90a594ab590
[ "MIT" ]
null
null
null
#!/usr/bin/python3 import getpass import subprocess import glob import time import os import re import socket from datetime import datetime from pprint import pprint """ Pull Charlesreid1.com - Rojo This script pulls the latest version of charlesreid1.com source. """ if __name__=="__main__": host = socket.gethostname() user = getpass.getuser() if(host!="rojo"): print("You aren't on rojo - you probably didn't mean to run this script!") elif(user!="charles"): print("You aren't charles - you should run this script as charles!") else: pull()
27.197917
97
0.617388
#!/usr/bin/python3 import getpass import subprocess import glob import time import os import re import socket from datetime import datetime from pprint import pprint """ Pull Charlesreid1.com - Rojo This script pulls the latest version of charlesreid1.com source. """ def extract_output(cmd, cwd): result = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE).stdout.read().decode('utf-8') return result def pull(): htdocs_dir = "/www/charlesreid1.com/htdocs" log_dir = "/home/charles/.logs/pull_charlesreid1" theme_dir = "/home/charles/codes/charlesreid1/charlesreid1.com-theme" src_dir = "/www/charlesreid1.com/charlesreid1-src" pelican_dir = src_dir+"/pelican" output_dir = pelican_dir + "/output" try: output = "" # ensure log dir exists mkdircmd = ["mkdir","-p",log_dir] output += extract_output(mkdircmd, "/") # update theme themepullcmd = ["git","pull","origin","master"] output += extract_output(themepullcmd, theme_dir) pelicanthemescmd = ["pelican-themes","-U",theme_dir] output += extract_output(pelicanthemescmd, "/") # make site gitpullcmd = ["git","pull","origin","charlesreid1-src"] output += extract_output(gitpullcmd, src_dir) pelicancmd = ["pelican","content"] output += extract_output(pelicancmd, pelican_dir) everything = glob.glob(output_dir+"/*") cpcmd = ["/bin/cp","-r",*everything,htdocs_dir+"/."] output += extract_output(cpcmd, output_dir) now = datetime.now() day = now.date().isoformat() hr = re.sub(":","-",now.time().isoformat()[0:8]) timestamp = day + "_" + hr logfile = log_dir+"/SUCCESS_"+timestamp+".log" touchcmd = ["touch",logfile] subprocess.call(touchcmd) except subprocess.CalledProcessError: print("Encountered error: logging to %s"%logfile) now = datetime.now() day = now.date().isoformat() hr = re.sub(":","-",now.time().isoformat()[0:8]) timestamp = day + "_" + hr print("Dumping out results") print(len(output)) logfile = log_dir+"/FAIL_"+timestamp+".log" with open(logfile,'w') as f: f.write(output) if __name__=="__main__": host = socket.gethostname() 
user = getpass.getuser() if(host!="rojo"): print("You aren't on rojo - you probably didn't mean to run this script!") elif(user!="charles"): print("You aren't charles - you should run this script as charles!") else: pull()
1,967
0
46
8f341c0710c4095ba94bf4daadfe0f4e5d54d280
18
py
Python
test-data/python/editor/codeCompletion/emptyCompletions.py
kiteco/intellij-plugin
dc1361bb3da33f4cc2e2b7ea8aeb9fa4c3b6e0ab
[ "BSD-3-Clause" ]
3
2021-11-16T17:01:23.000Z
2021-12-03T21:18:37.000Z
test-data/python/editor/codeCompletion/emptyCompletions.py
kiteco/intellij-plugin
dc1361bb3da33f4cc2e2b7ea8aeb9fa4c3b6e0ab
[ "BSD-3-Clause" ]
null
null
null
test-data/python/editor/codeCompletion/emptyCompletions.py
kiteco/intellij-plugin
dc1361bb3da33f4cc2e2b7ea8aeb9fa4c3b6e0ab
[ "BSD-3-Clause" ]
2
2022-01-18T20:41:13.000Z
2022-02-23T08:17:04.000Z
a1=1 a2=1+a<caret>
9
13
0.666667
a1=1 a2=1+a<caret>
0
0
0
74aee2a3f6534452e177d21eddc3fb66b9dd5746
2,294
py
Python
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/Games4Kids/MyGamesEventsComponent.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
12
2015-10-20T10:22:01.000Z
2021-07-19T10:09:44.000Z
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/Games4Kids/MyGamesEventsComponent.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
2
2015-10-20T10:22:55.000Z
2017-02-13T11:05:25.000Z
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/Games4Kids/MyGamesEventsComponent.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
6
2015-03-09T12:51:59.000Z
2020-03-01T13:06:21.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- from Kamaelia.UI.Pygame.EventHandler import EventHandler from Axon.Component import component import pygame from Kamaelia.UI.Pygame.KeyEvent import KeyEvent
41.709091
78
0.564952
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- from Kamaelia.UI.Pygame.EventHandler import EventHandler from Axon.Component import component import pygame from Kamaelia.UI.Pygame.KeyEvent import KeyEvent def MyGamesEventsComponent(up="p", down="l", left="a", right="s"): if len(left)>1: left = left.upper() else: left = left.lower() if len(right)>1: right = right.upper() else: right = right.lower() if len(up)>1: up = up.upper() else: up = up.lower() if len(down)>1: down = down.upper() else: down = down.lower() return KeyEvent(outboxes = { "outbox" : "Normal place for message", "signal" : "Normal place for message", }, key_events = { eval("pygame.K_"+up): ("start_up", "outbox"), eval("pygame.K_"+down): ("start_down", "outbox"), eval("pygame.K_"+left): ("start_left", "outbox"), eval("pygame.K_"+right): ("start_right", "outbox"), }, key_up_events = { eval("pygame.K_"+up): ("stop_up", "outbox"), eval("pygame.K_"+down): ("stop_down", "outbox"), eval("pygame.K_"+left): ("stop_left", "outbox"), eval("pygame.K_"+right): ("stop_right", "outbox"), } )
1,211
0
22
380c2cf00245ccc7b89a78072a936330c9023bbf
652
py
Python
problems/exercism/book-store/book_store.py
JayMonari/py-personal
ef16d030cc7fe2266d661e1378d95f588229b746
[ "MIT" ]
null
null
null
problems/exercism/book-store/book_store.py
JayMonari/py-personal
ef16d030cc7fe2266d661e1378d95f588229b746
[ "MIT" ]
null
null
null
problems/exercism/book-store/book_store.py
JayMonari/py-personal
ef16d030cc7fe2266d661e1378d95f588229b746
[ "MIT" ]
null
null
null
from typing import List PRICES = { 1: 800, 2: 1520, 3: 2160, 4: 2560, 5: 3000, }
19.176471
50
0.578221
from typing import List PRICES = { 1: 800, 2: 1520, 3: 2160, 4: 2560, 5: 3000, } def find_sets(basket: List[int]) -> List[int]: copy = basket.copy() sets = [] uniq = set(copy) while copy: sets.append(len(uniq)) for b in uniq: copy.remove(b) uniq = set(copy) return sets def total(basket: List[int]) -> int: set_lengths = find_sets(basket) # Amazing optimizations!!! while 3 in set_lengths and 5 in set_lengths: set_lengths.remove(3) set_lengths.remove(5) set_lengths.extend([4, 4]) return sum(PRICES[set] for set in set_lengths)
501
0
46
330e5b10c753cb624566baa31391572cd4ef88de
3,877
py
Python
architecture/model.py
Xenovortex/INN_Embedding_Classification
df31ec3dcf70780cae5140a69ffafdd64f218e5f
[ "MIT" ]
null
null
null
architecture/model.py
Xenovortex/INN_Embedding_Classification
df31ec3dcf70780cae5140a69ffafdd64f218e5f
[ "MIT" ]
null
null
null
architecture/model.py
Xenovortex/INN_Embedding_Classification
df31ec3dcf70780cae5140a69ffafdd64f218e5f
[ "MIT" ]
null
null
null
from FrEIA import framework as fr from FrEIA import modules as la import torch import torchvision.models as models import torch.nn as nn import numpy as np def get_vgg16(): """ Get features from pretrained VGG16 model. :return: partial VGG16 model, which takes ImageNet images and return feature0 """ vgg = models.vgg16(pretrained=True) vgg_feature = vgg.features[:17] return vgg_feature def inn_model(img_dims=4): """ Return INN model. :param img_dims: size of the model input images. Default: Size of MNIST images :return: INN model """ inp = fr.InputNode(img_dims, name='input') fc1 = fr.Node([inp.out0], la.GLOWCouplingBlock, {'subnet_constructor': fc_constr, 'clamp': 2}, name='fc1') fc2 = fr.Node([fc1.out0], la.GLOWCouplingBlock, {'subnet_constructor': fc_constr, 'clamp': 2}, name='fc2') fc3 = fr.Node([fc2.out0], la.GLOWCouplingBlock, {'subnet_constructor': fc_constr, 'clamp': 2}, name='fc3') outp = fr.OutputNode([fc3.out0], name='output') nodes = [inp, outp, fc1, fc2, fc3] model = fr.ReversibleGraphNet(nodes) return model fc_width = 512 conv_width = 256 n_coupling_blocks_fc = 12 n_coupling_blocks_conv_0 = 2 n_coupling_blocks_conv_1 = 2 clamp = 2.0
30.769841
138
0.653598
from FrEIA import framework as fr from FrEIA import modules as la import torch import torchvision.models as models import torch.nn as nn import numpy as np def get_vgg16(): """ Get features from pretrained VGG16 model. :return: partial VGG16 model, which takes ImageNet images and return feature0 """ vgg = models.vgg16(pretrained=True) vgg_feature = vgg.features[:17] return vgg_feature def fc_constr(c_in, c_out): net = [nn.Linear(c_in, 2048), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(2048, c_out), ] net[-1].weight.data *= 0.1 return nn.Sequential(*net) def inn_model(img_dims=4): """ Return INN model. :param img_dims: size of the model input images. Default: Size of MNIST images :return: INN model """ inp = fr.InputNode(img_dims, name='input') fc1 = fr.Node([inp.out0], la.GLOWCouplingBlock, {'subnet_constructor': fc_constr, 'clamp': 2}, name='fc1') fc2 = fr.Node([fc1.out0], la.GLOWCouplingBlock, {'subnet_constructor': fc_constr, 'clamp': 2}, name='fc2') fc3 = fr.Node([fc2.out0], la.GLOWCouplingBlock, {'subnet_constructor': fc_constr, 'clamp': 2}, name='fc3') outp = fr.OutputNode([fc3.out0], name='output') nodes = [inp, outp, fc1, fc2, fc3] model = fr.ReversibleGraphNet(nodes) return model fc_width = 512 conv_width = 256 n_coupling_blocks_fc = 12 n_coupling_blocks_conv_0 = 2 n_coupling_blocks_conv_1 = 2 clamp = 2.0 def conv_constr_0(c_in, c_out): net = [nn.Conv2d(c_in, conv_width//2, 3, padding=1), nn.BatchNorm2d(conv_width//2, track_running_stats=False), nn.ReLU(), nn.Conv2d(conv_width//2, c_out, 3, padding=1)] net[-1].weight.data *= 0.003 return nn.Sequential(*net) def conv_constr_1(c_in, c_out): net = [nn.Conv2d(c_in, conv_width, 3, padding=1), nn.BatchNorm2d(conv_width, track_running_stats=False), nn.Dropout2d(p=0.2), nn.ReLU(), nn.Conv2d(conv_width, c_out, 3, padding=1)] net[-1].weight.data *= 0.003 return nn.Sequential(*net) def fc_constr(c_in, c_out): net = [nn.Linear(c_in, fc_width), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(fc_width, c_out), ] net[-1].weight.data 
*= 0.1 return nn.Sequential(*net) def random_orthog(n): w = np.zeros((n,n)) for i,j in enumerate(np.random.permutation(n)): w[i,j] = 0.25 return torch.FloatTensor(w) def constuct_inn(img_dims=[3, 32, 32]): split_nodes = [] nodes = [fr.InputNode(*img_dims, name='input')] nodes.append(fr.Node(nodes[-1].out0, la.Reshape, {'target_dim':(img_dims[0], *img_dims[1:])}, name='reshape')) nodes.append(fr.Node(nodes[-1].out0, la.HaarDownsampling, {'rebalance':0.5}, name='haar_down_1')) for k in range(n_coupling_blocks_conv_0): nodes.append(fr.Node(nodes[-1], la.GLOWCouplingBlock, {'subnet_constructor':conv_constr_0, 'clamp':clamp}, name=f'CB CONV_0_{k}')) if k%2: nodes.append(fr.Node(nodes[-1], la.Fixed1x1Conv, {'M':random_orthog(12)})) nodes.append(fr.Node(nodes[-1].out0, la.HaarDownsampling, {'rebalance':0.5}, name='haar_down_2')) for k in range(n_coupling_blocks_conv_1): nodes.append(fr.Node(nodes[-1], la.GLOWCouplingBlock, {'subnet_constructor':conv_constr_1, 'clamp':clamp}, name=f'CB CONV_1_{k}')) if k%2: nodes.append(fr.Node(nodes[-1], la.Fixed1x1Conv, {'M':random_orthog(48)})) nodes.append(fr.Node(nodes[-1].out0, la.Flatten, {}, name='flatten')) for k in range(n_coupling_blocks_fc): nodes.append(fr.Node(nodes[-1], la.GLOWCouplingBlock, {'subnet_constructor':fc_constr, 'clamp':clamp}, name=f'CB FC_{k}')) if k%2: nodes.append(fr.Node(nodes[-1], la.PermuteRandom, {'seed':k})) nodes.append(fr.OutputNode(nodes[-1], name='output')) return fr.ReversibleGraphNet(nodes)
2,471
0
138
55b85aac450f05367cce62caf658be6a08c09028
3,165
py
Python
services/web/server/src/simcore_service_webserver/application_proxy.py
oetiker/osparc-simcore
00918bf8f000840cc70cc49458780a55858d52ea
[ "MIT" ]
null
null
null
services/web/server/src/simcore_service_webserver/application_proxy.py
oetiker/osparc-simcore
00918bf8f000840cc70cc49458780a55858d52ea
[ "MIT" ]
2
2018-05-13T09:10:57.000Z
2019-03-06T08:10:40.000Z
services/web/server/src/simcore_service_webserver/application_proxy.py
oetiker/osparc-simcore
00918bf8f000840cc70cc49458780a55858d52ea
[ "MIT" ]
null
null
null
""" Sets up reverse proxy in the application - app's reverse proxy dynamically reroutes communication between web-server's client and dynamic-backend services (or dyb's) - couples director with reverse_proxy subsystems Use case - All requests to `/x/{serviceId}/{proxyPath}` are re-routed to a dyb service - dy-services are managed by the director service who monitors and controls its lifetime """ import logging import os import attr from aiohttp import web from aiohttp.client import ClientSession from yarl import URL from servicelib.rest_responses import unwrap_envelope from .director.config import APP_DIRECTOR_API_KEY from .reverse_proxy import setup_reverse_proxy from .reverse_proxy.abc import ServiceResolutionPolicy THIS_CLIENT_SESSION = __name__ + ".session" logger = logging.getLogger(__name__) @attr.s(auto_attribs=True) # alias setup_app_proxy = setup __all__ = ( 'setup_app_proxy' )
31.336634
111
0.688468
""" Sets up reverse proxy in the application - app's reverse proxy dynamically reroutes communication between web-server's client and dynamic-backend services (or dyb's) - couples director with reverse_proxy subsystems Use case - All requests to `/x/{serviceId}/{proxyPath}` are re-routed to a dyb service - dy-services are managed by the director service who monitors and controls its lifetime """ import logging import os import attr from aiohttp import web from aiohttp.client import ClientSession from yarl import URL from servicelib.rest_responses import unwrap_envelope from .director.config import APP_DIRECTOR_API_KEY from .reverse_proxy import setup_reverse_proxy from .reverse_proxy.abc import ServiceResolutionPolicy THIS_CLIENT_SESSION = __name__ + ".session" logger = logging.getLogger(__name__) @attr.s(auto_attribs=True) class ServiceMonitor(ServiceResolutionPolicy): app: web.Application base_url: URL @property def session(self): return self.app[THIS_CLIENT_SESSION] async def _request_info(self, service_identifier: str): data = {} url = self.base_url / ("running_interactive_services/%s" % service_identifier) # TODO: see if client can cache consecutive calls. 
SEE self.cli.api_client.last_response is a # https://docs.aiohttp.org/en/stable/client_reference.html#response-object async with self.session.get(url, ssl=False) as resp: payload = await resp.json() data, error = unwrap_envelope(payload) if error: raise RuntimeError(str(error)) return data # override async def get_image_name(self, service_identifier: str) -> str: data = await self._request_info(service_identifier) return data.get('service_key') # override async def find_url(self, service_identifier: str) -> URL: """ Returns the url = origin + mountpoint of the backend dynamic service identified """ data = await self._request_info(service_identifier) base_url = URL.build(scheme="http", host=data.get('service_host'), port=data.get('service_port'), path=data.get('service_basepath')) if not os.environ.get('IS_CONTAINER_CONTEXT'): # If server is not in swarm (e.g. during testing) then host:port = localhost:data['published_port'] base_url = base_url.with_host('127.0.0.1') \ .with_port(data['published_port']) return base_url async def _cleanup_ctx(app: web.Application): app[THIS_CLIENT_SESSION] = session = ClientSession(loop=app.loop) yield session = app.get(THIS_CLIENT_SESSION) if session: await session.close() def setup(app: web.Application): monitor = ServiceMonitor(app, base_url=app[APP_DIRECTOR_API_KEY]) setup_reverse_proxy(app, monitor) assert "reverse_proxy" in app.router app["reverse_proxy.basemount"] = monitor.base_mountpoint app.cleanup_ctx.append(_cleanup_ctx) # alias setup_app_proxy = setup __all__ = ( 'setup_app_proxy' )
1,206
951
68
91418e522d5fd515e0642c6ff5e6d41b16f1b4ec
256
py
Python
therandy/rules/yarn_command_replaced.py
benmonro/therandy
b7c7c4ffc8f82b27f284ba90621a47baa5dfcb03
[ "MIT" ]
null
null
null
therandy/rules/yarn_command_replaced.py
benmonro/therandy
b7c7c4ffc8f82b27f284ba90621a47baa5dfcb03
[ "MIT" ]
null
null
null
therandy/rules/yarn_command_replaced.py
benmonro/therandy
b7c7c4ffc8f82b27f284ba90621a47baa5dfcb03
[ "MIT" ]
null
null
null
import re from therandy.utils import for_app regex = re.compile(r'Run "(.*)" instead') @for_app('yarn', at_least=1)
18.285714
43
0.726563
import re from therandy.utils import for_app regex = re.compile(r'Run "(.*)" instead') @for_app('yarn', at_least=1) def match(command): return regex.findall(command.output) def get_new_command(command): return regex.findall(command.output)[0]
91
0
45
dfe645551145fb5cd7dbe96a4a0482262bc7d285
3,369
py
Python
moai/monads/keypoints/visibility.py
tzole1155/moai
d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180
[ "Apache-2.0" ]
10
2021-04-02T11:21:33.000Z
2022-01-18T18:32:32.000Z
moai/monads/keypoints/visibility.py
tzole1155/moai
d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180
[ "Apache-2.0" ]
1
2022-03-22T20:10:55.000Z
2022-03-24T13:11:02.000Z
moai/monads/keypoints/visibility.py
tzole1155/moai
d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180
[ "Apache-2.0" ]
3
2021-05-16T20:47:40.000Z
2021-12-01T21:15:36.000Z
import torch import functools __all__ = [ "VisibilityFOV", "VisibilityHeatmap" ]
40.107143
136
0.511725
import torch import functools __all__ = [ "VisibilityFOV", "VisibilityHeatmap" ] class VisibilityFOV(torch.nn.Module): def __init__(self, width: int=1, height: int=1, coord_type: str='coord', # [B, K, UV] ): super(VisibilityFOV,self).__init__() self.coord_type = coord_type self.width = width self.height = height def forward(self, coords: torch.Tensor ) -> torch.Tensor: _coords = coords.clone().detach() if self.coord_type != 'coord': _coords[..., 0] = (_coords[..., 0] + 1.0) / 2.0 * self.width if self.coord_type == 'ndc' else _coords[..., 0] * self.width _coords[..., 1] = (_coords[..., 1] + 1.0) / 2.0 * self.height if self.coord_type == 'ndc' else _coords[..., 1] * self.height masks = torch.zeros_like(coords) masks[..., 0] = (_coords[..., 0] >= 0) * (_coords[..., 0] < self.width) masks[..., 1] = (_coords[..., 1] >= 0) * (_coords[..., 1] < self.height) return masks class VisibilityHeatmap(torch.nn.Module): def _mask(self, coords: torch.Tensor, # [B, K, UV] heatmaps: torch.Tensor # [B, K, (D), H, W], with its value across the spatial dimensions summing to unity ) -> torch.Tensor: r"""Extracts the visibility mask of keypoint predictions based on heatmap values. . Args: coords: torch.Tensor, [B, K, UV(S) or (S)VU] heatmaps: torch.Tensor [B, K, (D), H, W], with its value across the spatial dimensions summing to unity Returns: The visibility weights of coordinates, [B, K, UV(S) or (S)VU]. """ masks = torch.zeros_like(coords) channels = heatmaps.shape[1] for i in range(channels): heatmap = heatmaps[:, i, ...] for b in range(coords.shape[0]): uv = tuple(coords.flip(-1).long()[b, i]) if uv[0] > -1 and uv[1] > -1 and uv[0] < heatmap.shape[-2] and uv[1] < heatmap.shape[-1]: masks[b, i, ...] = 1.0 if heatmap[b][uv] > self.threshold else 0.0 else: masks[b, i, ...] 
= 0.0 return masks def __init__(self, width: int=1, height: int=1, threshold: float=0.4, coord_type: str='coord', # [B, K, UV] ): super(VisibilityHeatmap,self).__init__() self.coord_type = coord_type self.width = width self.height = height self.threshold = threshold def forward(self, coords: torch.Tensor, # [B, K, UV(S) or (S)VU] heatmaps: torch.Tensor # [B, K, (D), H, W], with its value across the spatial dimensions summing to unity ) -> torch.Tensor: # [B, K] _coords = coords.clone().detach() if self.coord_type != 'coord': _coords[..., 0] = (_coords[..., 0] + 1.0) / 2.0 * self.width if self.coord_type == 'ndc' else _coords[..., 0] * self.width _coords[..., 1] = (_coords[..., 1] + 1.0) / 2.0 * self.height if self.coord_type == 'ndc' else _coords[..., 1] * self.height masks = self._mask(_coords, heatmaps) return masks
1,901
1,276
99
bc46a045fdce114c5d4d985d361b468382c41c86
211
py
Python
mwptoolkit/module/Embedder/__init__.py
ShubhamAnandJain/MWP-CS229
ce86233504fdb37e104a3944fd81d4606fbfa621
[ "MIT" ]
71
2021-03-08T06:06:15.000Z
2022-03-30T11:59:37.000Z
mwptoolkit/module/Embedder/__init__.py
ShubhamAnandJain/MWP-CS229
ce86233504fdb37e104a3944fd81d4606fbfa621
[ "MIT" ]
13
2021-09-07T12:38:23.000Z
2022-03-22T15:08:16.000Z
mwptoolkit/module/Embedder/__init__.py
ShubhamAnandJain/MWP-CS229
ce86233504fdb37e104a3944fd81d4606fbfa621
[ "MIT" ]
21
2021-02-16T07:46:36.000Z
2022-03-23T13:41:33.000Z
from __future__ import absolute_import from __future__ import print_function from __future__ import division from mwptoolkit.module.Embedder import basic_embedder,bert_embedder,position_embedder,roberta_embedder
52.75
102
0.905213
from __future__ import absolute_import from __future__ import print_function from __future__ import division from mwptoolkit.module.Embedder import basic_embedder,bert_embedder,position_embedder,roberta_embedder
0
0
0
e02ae7379b7c5223c244d34509f0049669884897
4,575
py
Python
colour/models/cie_ucs.py
canavandl/colour
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
[ "BSD-3-Clause" ]
1
2019-06-27T11:32:48.000Z
2019-06-27T11:32:48.000Z
colour/models/cie_ucs.py
canavandl/colour
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
[ "BSD-3-Clause" ]
null
null
null
colour/models/cie_ucs.py
canavandl/colour
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ CIE UCS Colourspace =================== Defines the *CIE UCS* colourspace transformations: - :func:`XYZ_to_UCS` - :func:`UCS_to_XYZ` - :func:`UCS_to_uv` - :func:`UCS_uv_to_xy` See Also -------- `CIE UCS Colourspace IPython Notebook <http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/cie_ucs.ipynb>`_ # noqa References ---------- .. [1] http://en.wikipedia.org/wiki/CIE_1960_color_space (Last accessed 24 February 2014) """ from __future__ import division, unicode_literals import numpy as np __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers' __license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = 'colour-science@googlegroups.com' __status__ = 'Production' __all__ = ['XYZ_to_UCS', 'UCS_to_XYZ', 'UCS_to_uv', 'UCS_uv_to_xy'] def XYZ_to_UCS(XYZ): """ Converts from *CIE XYZ* colourspace to *CIE UCS* colourspace. Parameters ---------- XYZ : array_like, (3,) *CIE XYZ* colourspace matrix. Returns ------- ndarray, (3,) *CIE UCS* colourspace matrix. Notes ----- - Input *CIE XYZ* colourspace matrix is in domain [0, 1]. - Output *CIE UCS* colourspace matrix is in domain [0, 1]. References ---------- .. [2] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> XYZ = np.array([0.1180583421, 0.1034, 0.0515089229]) >>> XYZ_to_UCS(XYZ) # doctest: +ELLIPSIS array([ 0.0787055..., 0.1034 , 0.1218252...]) """ X, Y, Z = np.ravel(XYZ) return np.array([2 / 3 * X, Y, 1 / 2 * (-X + 3 * Y + Z)]) def UCS_to_XYZ(UVW): """ Converts from *CIE UCS* colourspace to *CIE XYZ* colourspace. Parameters ---------- UVW : array_like, (3,) *CIE UCS* colourspace matrix. Returns ------- ndarray, (3,) *CIE XYZ* colourspace matrix. Notes ----- - Input *CIE UCS* colourspace matrix is in domain [0, 1]. 
- Output *CIE XYZ* colourspace matrix is in domain [0, 1]. References ---------- .. [3] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> UCS = np.array([0.07870556, 0.1034, 0.12182529]) >>> UCS_to_XYZ(UCS) # doctest: +ELLIPSIS array([ 0.1180583..., 0.1034 , 0.0515089...]) """ U, V, W = np.ravel(UVW) return np.array( [3 / 2 * U, V, 3 / 2 * U - (3 * V) + (2 * W)]) def UCS_to_uv(UVW): """ Returns the *uv* chromaticity coordinates from given *CIE UCS* colourspace matrix. Parameters ---------- UVW : array_like, (3,) *CIE UCS* colourspace matrix. Returns ------- tuple *uv* chromaticity coordinates. Notes ----- - Input *CIE UCS* colourspace matrix is in domain [0, 1]. - Output *uv* chromaticity coordinates are in domain [0, 1]. References ---------- .. [4] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> UCS = np.array([0.1180583421, 0.1034, 0.0515089229]) >>> UCS_to_uv(UCS) # doctest: +ELLIPSIS (0.4324999..., 0.3788000...) """ U, V, W = np.ravel(UVW) return U / (U + V + W), V / (U + V + W) def UCS_uv_to_xy(uv): """ Returns the *xy* chromaticity coordinates from given *CIE UCS* colourspace *uv* chromaticity coordinates. Parameters ---------- uv : array_like *CIE UCS uv* chromaticity coordinates. Returns ------- tuple *xy* chromaticity coordinates. Notes ----- - Input *uv* chromaticity coordinates are in domain [0, 1]. - Output *xy* chromaticity coordinates are in domain [0, 1]. References ---------- .. [5] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> uv = (0.43249999995420696, 0.378800000065942) >>> UCS_uv_to_xy(uv) # doctest: +ELLIPSIS (0.7072386..., 0.4129510...) """ return (3 * uv[0] / (2 * uv[0] - 8 * uv[1] + 4), 2 * uv[1] / (2 * uv[0] - 8 * uv[1] + 4))
24.206349
119
0.567869
#!/usr/bin/env python # -*- coding: utf-8 -*- """ CIE UCS Colourspace =================== Defines the *CIE UCS* colourspace transformations: - :func:`XYZ_to_UCS` - :func:`UCS_to_XYZ` - :func:`UCS_to_uv` - :func:`UCS_uv_to_xy` See Also -------- `CIE UCS Colourspace IPython Notebook <http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/cie_ucs.ipynb>`_ # noqa References ---------- .. [1] http://en.wikipedia.org/wiki/CIE_1960_color_space (Last accessed 24 February 2014) """ from __future__ import division, unicode_literals import numpy as np __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers' __license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = 'colour-science@googlegroups.com' __status__ = 'Production' __all__ = ['XYZ_to_UCS', 'UCS_to_XYZ', 'UCS_to_uv', 'UCS_uv_to_xy'] def XYZ_to_UCS(XYZ): """ Converts from *CIE XYZ* colourspace to *CIE UCS* colourspace. Parameters ---------- XYZ : array_like, (3,) *CIE XYZ* colourspace matrix. Returns ------- ndarray, (3,) *CIE UCS* colourspace matrix. Notes ----- - Input *CIE XYZ* colourspace matrix is in domain [0, 1]. - Output *CIE UCS* colourspace matrix is in domain [0, 1]. References ---------- .. [2] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> XYZ = np.array([0.1180583421, 0.1034, 0.0515089229]) >>> XYZ_to_UCS(XYZ) # doctest: +ELLIPSIS array([ 0.0787055..., 0.1034 , 0.1218252...]) """ X, Y, Z = np.ravel(XYZ) return np.array([2 / 3 * X, Y, 1 / 2 * (-X + 3 * Y + Z)]) def UCS_to_XYZ(UVW): """ Converts from *CIE UCS* colourspace to *CIE XYZ* colourspace. Parameters ---------- UVW : array_like, (3,) *CIE UCS* colourspace matrix. Returns ------- ndarray, (3,) *CIE XYZ* colourspace matrix. Notes ----- - Input *CIE UCS* colourspace matrix is in domain [0, 1]. 
- Output *CIE XYZ* colourspace matrix is in domain [0, 1]. References ---------- .. [3] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> UCS = np.array([0.07870556, 0.1034, 0.12182529]) >>> UCS_to_XYZ(UCS) # doctest: +ELLIPSIS array([ 0.1180583..., 0.1034 , 0.0515089...]) """ U, V, W = np.ravel(UVW) return np.array( [3 / 2 * U, V, 3 / 2 * U - (3 * V) + (2 * W)]) def UCS_to_uv(UVW): """ Returns the *uv* chromaticity coordinates from given *CIE UCS* colourspace matrix. Parameters ---------- UVW : array_like, (3,) *CIE UCS* colourspace matrix. Returns ------- tuple *uv* chromaticity coordinates. Notes ----- - Input *CIE UCS* colourspace matrix is in domain [0, 1]. - Output *uv* chromaticity coordinates are in domain [0, 1]. References ---------- .. [4] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> UCS = np.array([0.1180583421, 0.1034, 0.0515089229]) >>> UCS_to_uv(UCS) # doctest: +ELLIPSIS (0.4324999..., 0.3788000...) """ U, V, W = np.ravel(UVW) return U / (U + V + W), V / (U + V + W) def UCS_uv_to_xy(uv): """ Returns the *xy* chromaticity coordinates from given *CIE UCS* colourspace *uv* chromaticity coordinates. Parameters ---------- uv : array_like *CIE UCS uv* chromaticity coordinates. Returns ------- tuple *xy* chromaticity coordinates. Notes ----- - Input *uv* chromaticity coordinates are in domain [0, 1]. - Output *xy* chromaticity coordinates are in domain [0, 1]. References ---------- .. [5] http://en.wikipedia.org/wiki/CIE_1960_color_space#Relation_to_CIEXYZ # noqa (Last accessed 24 February 2014) Examples -------- >>> uv = (0.43249999995420696, 0.378800000065942) >>> UCS_uv_to_xy(uv) # doctest: +ELLIPSIS (0.7072386..., 0.4129510...) """ return (3 * uv[0] / (2 * uv[0] - 8 * uv[1] + 4), 2 * uv[1] / (2 * uv[0] - 8 * uv[1] + 4))
0
0
0
99757fb8a30f0935fc92d1bb843ac95491dbdfbf
7,485
py
Python
algorithms/POMDP/3-DRQN-Store-State-HeavenHell/model.py
zhihanyang2022/drqn
ac2482e3b42094e6242c042583dbbd9c98e4750b
[ "MIT" ]
5
2021-03-28T14:12:40.000Z
2021-11-19T20:46:10.000Z
algorithms/POMDP/3-DRQN-Store-State-HeavenHell/model.py
zhihanyang2022/drqn
ac2482e3b42094e6242c042583dbbd9c98e4750b
[ "MIT" ]
null
null
null
algorithms/POMDP/3-DRQN-Store-State-HeavenHell/model.py
zhihanyang2022/drqn
ac2482e3b42094e6242c042583dbbd9c98e4750b
[ "MIT" ]
null
null
null
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence # from config import burn_in_length
37.994924
133
0.600134
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence # from config import burn_in_length class DRQN(nn.Module): def __init__(self, num_inputs, num_outputs, use_deeper_net): super(DRQN, self).__init__() self.num_inputs = num_inputs self.num_outputs = num_outputs self.use_deeper_net = use_deeper_net if self.use_deeper_net: self.pre_process_net = nn.Sequential( nn.Linear(num_inputs, 64), nn.LeakyReLU(negative_slope=0.1), nn.Linear(64, 32), nn.LeakyReLU(negative_slope=0.1), nn.Linear(32, 16), nn.LeakyReLU(negative_slope=0.1), ) self.lstm = nn.LSTM(input_size=16, hidden_size=16, batch_first=True) else: # original code self.lstm = nn.LSTM(input_size=self.num_inputs, hidden_size=16, batch_first=True) self.post_process_net = nn.Sequential( nn.Linear(16, 128), nn.ReLU(), nn.Linear(128, num_outputs) ) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) def forward(self, x, hidden=None, inference=True, lengths=None, max_length=None): if self.use_deeper_net: mid = self.pre_process_net(x) if not inference: mid = pack_padded_sequence( mid, lengths=lengths, batch_first=True, enforce_sorted=False ) mid, hidden = self.lstm(mid, hidden) if not inference: mid, _ = pad_packed_sequence(sequence=mid, batch_first=True, total_length=max_length) q_value = self.post_process_net(mid) return q_value, hidden else: # original code out, hidden = self.lstm(x, hidden) if not inference: out, _ = pad_packed_sequence(sequence=out, batch_first=True, total_length=max_length) q_value = self.post_process_net(x) return q_value, hidden @classmethod def train_model(cls, online_net, target_net, optimizer, batch, batch_size, gamma, use_deeper_net, device): # def slice_burn_in(item): # return item[:, burn_in_length:, :] # batch.state is a list of tensors of shape (seq_length, input_dim) # so seq.size()[0] = the length of the sequence lengths = 
np.array([seq.size()[0] for seq in batch.state]) max_length = int(np.max(lengths)) # ===== compute loss mask ===== # for example, if sequence_length == 3, then lower_triangular_matrix = # 1 0 0 # 1 1 0 # 1 1 1 # suppose lengths == np.array([2, 3, 1]), then lengths - 1 == np.array([1, 2, 0]) and # the loss_mask computed from lower_triangular_matrix[lengths-1] is # 1 1 0 # 1 1 1 # 1 0 0 # which corresponds to lengths correctly lower_triangular_matrix = np.tril(np.ones((max_length, max_length))) loss_mask = lower_triangular_matrix[lengths-1] # first convert from 1-based to 0-based indexing loss_mask = torch.tensor(loss_mask) # has shape (bs, seq_len) if use_deeper_net: states = pad_sequence(batch.state, batch_first=True) next_states = pad_sequence(batch.next_state, batch_first=True) else: states = pack_padded_sequence( pad_sequence(batch.state, batch_first=True), lengths=lengths, batch_first=True, enforce_sorted=False ) next_states = pack_padded_sequence( pad_sequence(batch.next_state, batch_first=True), lengths=lengths, batch_first=True, enforce_sorted=False ) # max_length == sequence_length most of the times, but not always actions = pad_sequence(batch.action, batch_first=True).view(batch_size, max_length, -1).long() # has shape (bs, seq_len, 1) rewards = pad_sequence(batch.reward, batch_first=True).view(batch_size, max_length, -1) # has shape (bs, seq_len, 1) masks = pad_sequence(batch.mask, batch_first=True).view(batch_size, max_length, -1) # has shape (bs, seq_len, 1) h0 = torch.stack([rnn_state[0,0,:] for rnn_state in batch.rnn_state]).unsqueeze(0).detach() # has shape (1, bs, hidden_size) c0 = torch.stack([rnn_state[0,1,:] for rnn_state in batch.rnn_state]).unsqueeze(0).detach() # has shape (1, bs, hidden_size) h1 = torch.stack([rnn_state[1,0,:] for rnn_state in batch.rnn_state]).unsqueeze(0).detach() # has shape (1, bs, hidden_size) c1 = torch.stack([rnn_state[1,1,:] for rnn_state in batch.rnn_state]).unsqueeze(0).detach() # has shape (1, bs, hidden_size) # 
states = torch.stack(batch.state).view(batch_size, sequence_length, online_net.num_inputs) # next_states = torch.stack(batch.next_state).view(batch_size, sequence_length, online_net.num_inputs) # actions = torch.stack(batch.action).view(batch_size, sequence_length, -1).long() # rewards = torch.stack(batch.reward).view(batch_size, sequence_length, -1) # masks = torch.stack(batch.mask).view(batch_size, sequence_length, -1) # rnn_state = torch.stack(batch.rnn_state).view(batch_size, sequence_length, 2, -1) # [h0, c0] = rnn_state[:, 0, :, :].transpose(0, 1) # h0 = h0.unsqueeze(0).detach() # c0 = c0.unsqueeze(0).detach() # [h1, c1] = rnn_state[:, 1, :, :].transpose(0, 1) # h1 = h1.unsqueeze(0).detach() # c1 = c1.unsqueeze(0).detach() pred, _ = online_net(states, (h0, c0), inference=False, max_length=max_length, lengths=lengths) next_pred, _ = target_net(next_states, (h1, c1), inference=False, max_length=max_length, lengths=lengths) loss_mask = loss_mask.to(device) actions = actions.to(device) rewards = rewards.to(device) masks = masks.to(device) h0 = h0.to(device) c0 = c0.to(device) h1 = h1.to(device) c1 = c1.to(device) pred = pred.to(device) next_pred = next_pred.to(device) # if burn_in_length > 0: # pred = slice_burn_in(pred) # next_pred = slice_burn_in(next_pred) # actions = slice_burn_in(actions) # rewards = slice_burn_in(rewards) # masks = slice_burn_in(masks) pred = pred.gather(2, actions).squeeze() # has shape (bs, seq_len) target = rewards + masks * gamma * next_pred.max(2, keepdim=True)[0] target = target.squeeze() # has shape (bs, seq_len) loss = torch.mean(((pred - target.detach()) ** 2) * loss_mask.float()) # loss = F.mse_loss(pred, target.detach()) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(online_net.parameters(), 1.0) optimizer.step() return loss def get_action(self, state, hidden): state = state.unsqueeze(0).unsqueeze(0) qvalue, hidden = self.forward(state, hidden) _, action = torch.max(qvalue, 2) return 
action.cpu().numpy()[0][0], hidden
7,124
126
23
be89cd61695c8f64a9da74198f223bd8b27047f3
2,870
py
Python
resource3.py
Ashutosharma08/Resource_Manager_Python
4e5bf7acb00c72b89e0124a5439ae80f28bb7635
[ "MIT" ]
null
null
null
resource3.py
Ashutosharma08/Resource_Manager_Python
4e5bf7acb00c72b89e0124a5439ae80f28bb7635
[ "MIT" ]
null
null
null
resource3.py
Ashutosharma08/Resource_Manager_Python
4e5bf7acb00c72b89e0124a5439ae80f28bb7635
[ "MIT" ]
null
null
null
import psutil import time import GPUtil import csv import pandas as pd # process_name = 'LiDAR360.exe' name = psutil.Process(15500) name_1 = psutil.Process(9996) name_2 = psutil.Process(1476) name_3 = psutil.Process(14976) print(name.pid) # print("Hello") sleep_time = 2 mins = 60 # start = time.time() with open('Resource_Usage3.csv', 'w') as f: header = ['PID','Process Name', 'CPU Usage', 'GPU Usage', 'Memory Usage','IO_Usage','Timestamp'] writer = csv.writer(f) writer.writerow(header) lst_name = [name,name_1,name_2,name_3] for i in range(int(mins*60/sleep_time)): for j in range(len(lst_name)): count = 0 cpu_total = 0 cpu_usage, process_name, memory_usage, io_usage, gpu_usage,pid = res_info(lst_name[j]) cpu_total =+cpu_usage lst_info = [pid,process_name, cpu_usage, gpu_usage, memory_usage, io_usage,time.time()] print("Process Name: " + str(process_name)) print("CPU Usage: " + str(cpu_usage)) print("memory_usage: " + str(memory_usage)) print("IO Usage: " + str(io_usage)) print("GPU Usage: " + str(gpu_usage)) count =+ 1 if count == 4: print("Total CPU: "+str(cpu_total)) print("----------------------------") # with open('Resource_Usage3.csv', 'a', newline='') as d: # writer = csv.writer(d) # writer.writerow(lst_info) # # writer.writerow([]) # if count == 4: # cpu_total = 0 # count = 0 # # del lst_info # time.sleep(sleep_time) # # df = pd.read_csv('Resource_Usage3.csv') # # for i in range(4): # name = 'PID'+str(i) # df[name]='' # # for i in range(4): # if i == 1: # df['PID1'] = df.loc[df['PID']==df['PID'][0],'CPU Usage'].sum # for i in range(int((mins * 60) / sleep_time)): # cpu = name.cpu_percent(interval=2) / psutil.cpu_count() # name1 = name.name() # memory = name.memory_percent() # # print("Name % = " + str(name1)) # abc = GPUtil.showUtilization(useOldCode=True) # print(abc) # # print(GPUtil.showUtilization()) # print("CPU % = " + str(cpu)) # print("Memory % = " + str(memory)) # # print("----------------------") # lst = [name1, cpu, abc, memory] # with 
open('Resource_Usage.csv', 'a', newline='') as d: # writer = csv.writer(d) # writer.writerow(lst) # del lst # for creating list for new row entry # time.sleep(sleep_time)
28.137255
101
0.554355
import psutil import time import GPUtil import csv import pandas as pd # process_name = 'LiDAR360.exe' name = psutil.Process(15500) name_1 = psutil.Process(9996) name_2 = psutil.Process(1476) name_3 = psutil.Process(14976) print(name.pid) # print("Hello") sleep_time = 2 mins = 60 # start = time.time() with open('Resource_Usage3.csv', 'w') as f: header = ['PID','Process Name', 'CPU Usage', 'GPU Usage', 'Memory Usage','IO_Usage','Timestamp'] writer = csv.writer(f) writer.writerow(header) lst_name = [name,name_1,name_2,name_3] def res_info(i): pid = i.pid cpu = i.cpu_percent(interval=2)/ psutil.cpu_count() name1 = i.name() memory = i.memory_percent() io = i.io_counters() gpu = GPUtil.showUtilization(useOldCode=True) return cpu,name1,memory,io,gpu,pid for i in range(int(mins*60/sleep_time)): for j in range(len(lst_name)): count = 0 cpu_total = 0 cpu_usage, process_name, memory_usage, io_usage, gpu_usage,pid = res_info(lst_name[j]) cpu_total =+cpu_usage lst_info = [pid,process_name, cpu_usage, gpu_usage, memory_usage, io_usage,time.time()] print("Process Name: " + str(process_name)) print("CPU Usage: " + str(cpu_usage)) print("memory_usage: " + str(memory_usage)) print("IO Usage: " + str(io_usage)) print("GPU Usage: " + str(gpu_usage)) count =+ 1 if count == 4: print("Total CPU: "+str(cpu_total)) print("----------------------------") # with open('Resource_Usage3.csv', 'a', newline='') as d: # writer = csv.writer(d) # writer.writerow(lst_info) # # writer.writerow([]) # if count == 4: # cpu_total = 0 # count = 0 # # del lst_info # time.sleep(sleep_time) # # df = pd.read_csv('Resource_Usage3.csv') # # for i in range(4): # name = 'PID'+str(i) # df[name]='' # # for i in range(4): # if i == 1: # df['PID1'] = df.loc[df['PID']==df['PID'][0],'CPU Usage'].sum # for i in range(int((mins * 60) / sleep_time)): # cpu = name.cpu_percent(interval=2) / psutil.cpu_count() # name1 = name.name() # memory = name.memory_percent() # # print("Name % = " + str(name1)) # abc = 
GPUtil.showUtilization(useOldCode=True) # print(abc) # # print(GPUtil.showUtilization()) # print("CPU % = " + str(cpu)) # print("Memory % = " + str(memory)) # # print("----------------------") # lst = [name1, cpu, abc, memory] # with open('Resource_Usage.csv', 'a', newline='') as d: # writer = csv.writer(d) # writer.writerow(lst) # del lst # for creating list for new row entry # time.sleep(sleep_time)
243
0
25
9498fd49ed4a4b92604ac1923fd424d00fa30b23
9,171
py
Python
moviedb.py
sre28/Movie-database
bc99cf1ecd712c77d60c24fd82d4a4edfe3eb204
[ "Apache-2.0" ]
null
null
null
moviedb.py
sre28/Movie-database
bc99cf1ecd712c77d60c24fd82d4a4edfe3eb204
[ "Apache-2.0" ]
null
null
null
moviedb.py
sre28/Movie-database
bc99cf1ecd712c77d60c24fd82d4a4edfe3eb204
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START imports] import os import urllib from google.appengine.api import users from google.appengine.ext import ndb import jinja2 import webapp2 JINJA_ENVIRONMENT = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), extensions=['jinja2.ext.autoescape'], autoescape=True) # [END imports] DEFAULT_GENRE = 'action' genrer = "" genre2 = "" # We set a parent key on the 'Greetings' to ensure that they are all # in the same entity group. Queries across the single entity group # will be consistent. However, the write rate should be limited to # ~1/second. def genre_key(genre=DEFAULT_GENRE): """Constructs a Datastore key for a Guestbook entity. We use guestbook_name as the key. 
""" return ndb.Key('Guestbook', genre) # [START Movie] class Author(ndb.Model): """Sub model for representing an author.""" identity = ndb.StringProperty(indexed=False) email = ndb.StringProperty(indexed=False) class Movie(ndb.Model): """A main model for representing an individual movie entry.""" author = ndb.StructuredProperty(Author) name = ndb.StringProperty(indexed=False) director = ndb.StringProperty(indexed=False) actor = ndb.StringProperty(indexed=False) actor2 = ndb.StringProperty(indexed=False) year = ndb.StringProperty(indexed=False) duration = ndb.StringProperty(indexed=False) date = ndb.DateTimeProperty(auto_now_add=True) # [END Movie] # [START main_page] # [END main_page] # [START movieinfo] # [START app] app = webapp2.WSGIApplication([ ('/', MainPager), ('/sign', DisplayPage),('/search', SearchPage),('/enter', EnterPage), ], debug=True) # [END app]
29.488746
144
0.637335
#!/usr/bin/env python # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START imports] import os import urllib from google.appengine.api import users from google.appengine.ext import ndb import jinja2 import webapp2 JINJA_ENVIRONMENT = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), extensions=['jinja2.ext.autoescape'], autoescape=True) # [END imports] DEFAULT_GENRE = 'action' genrer = "" genre2 = "" # We set a parent key on the 'Greetings' to ensure that they are all # in the same entity group. Queries across the single entity group # will be consistent. However, the write rate should be limited to # ~1/second. def genre_key(genre=DEFAULT_GENRE): """Constructs a Datastore key for a Guestbook entity. We use guestbook_name as the key. 
""" return ndb.Key('Guestbook', genre) # [START Movie] class Author(ndb.Model): """Sub model for representing an author.""" identity = ndb.StringProperty(indexed=False) email = ndb.StringProperty(indexed=False) class Movie(ndb.Model): """A main model for representing an individual movie entry.""" author = ndb.StructuredProperty(Author) name = ndb.StringProperty(indexed=False) director = ndb.StringProperty(indexed=False) actor = ndb.StringProperty(indexed=False) actor2 = ndb.StringProperty(indexed=False) year = ndb.StringProperty(indexed=False) duration = ndb.StringProperty(indexed=False) date = ndb.DateTimeProperty(auto_now_add=True) # [END Movie] class DisplayPage(webapp2.RequestHandler): def get(self): genre_name = self.request.get('genre_name', DEFAULT_GENRE).lower() movies_query = Movie.query( ancestor=genre_key(genre_name)).order(-Movie.date) movies = movies_query.fetch(50) user = users.get_current_user() template_values = { 'user': user, 'movies': movies, 'genre_name': urllib.quote_plus(genre_name), } template = JINJA_ENVIRONMENT.get_template('displaymodel.html') self.response.write(template.render(template_values)) class SearchPage(webapp2.RequestHandler): def get(self): global genre2 genre = self.request.get('genre_name').lower() if genre == "": if genre2 == "": genre = self.request.get('genre_name',DEFAULT_GENRE).lower() else: genre = genre2 else: genre2 = genre; template_values = { 'genre_name': urllib.quote_plus(genre), } template = JINJA_ENVIRONMENT.get_template('searchmodel.html') self.response.write(template.render(template_values)) def post(self): #temp variable movies_query1 = Movie.query( ancestor=genre_key("-")).order(-Movie.date) movies1 = movies_query1.fetch(1) tempo = movies1 global genre2 genre = self.request.get('genre_name').lower() flag = -1 flag2 = -1 if genre == "": if genre2 == "": #first time and search genre = self.request.get('genre_name',DEFAULT_GENRE).lower() movies_query = Movie.query( ancestor=genre_key(genre)).order(-Movie.date) 
movies = movies_query.fetch(50) movie = Movie(parent=genre_key(genre)) movie.name = self.request.get('name') movie.actor = self.request.get('actor') movie.director = self.request.get('director') movie.year = self.request.get('year') if movie.year == "" and movie.name =="" and movie.actor == "" and movie.director == "": flag = 0; if flag != 0: for i in movies: if i.name.lower().find(movie.name.lower(), 0, len(i.name)) != -1: if i.actor.lower().find(movie.actor.lower(), 0, len(i.actor)) != -1 or i.actor2.lower().find(movie.actor.lower(), 0, len(i.actor2)) != -1: if i.director.lower().find(movie.director.lower(), 0, len(i.director)) != -1: if i.year == movie.year or movie.year == "": tempo.append(i) if not tempo: flag2 = 0 else: genre = genre2 movies_query = Movie.query( ancestor=genre_key(genre)).order(-Movie.date) movies = movies_query.fetch(50) movie = Movie(parent=genre_key(genre)) movie.name = self.request.get('name') movie.actor = self.request.get('actor') movie.director = self.request.get('director') movie.year = self.request.get('year') if movie.year == "" and movie.name =="" and movie.actor == "" and movie.director == "": flag = 0; if flag != 0: for i in movies: if i.name.lower().find(movie.name.lower(), 0, len(i.name)) != -1: if i.actor.lower().find(movie.actor.lower(), 0, len(i.actor)) != -1 or i.actor2.lower().find(movie.actor.lower(), 0, len(i.actor2)) != -1: if i.director.lower().find(movie.director.lower(), 0, len(i.director)) != -1: if i.year == movie.year or movie.year == "": tempo.append(i) if not tempo: flag2 = 0; else:#switch genre2 = genre; movies_query = Movie.query( ancestor=genre_key("-")).order(-Movie.date) tempo = movies_query.fetch(1) template_values = { 'genre_name': urllib.quote_plus(genre), 'movies': tempo, 'flag': flag, 'flag2':flag2, } template = JINJA_ENVIRONMENT.get_template('searchmodel.html') self.response.write(template.render(template_values)) class EnterPage(webapp2.RequestHandler): def get(self): global genrer genre = 
self.request.get('genre_name').lower() if genre == "": if genrer == "": genre = self.request.get('genre_name',DEFAULT_GENRE).lower() else: genre = genrer else: genrer = genre; template_values = { 'genre_name': urllib.quote_plus(genre), } template = JINJA_ENVIRONMENT.get_template('entermodel.html') self.response.write(template.render(template_values)) def post(self): # We set the same parent key on the 'Greeting' to ensure each # Greeting is in the same entity group. Queries across the # single entity group will be consistent. However, the write # rate to a single entity group should be limited to # ~1/second. global genrer genre = self.request.get('genre_name').lower() flag = -1 if genre == "": if genrer == "": # first time and search genre = self.request.get('genre_name',DEFAULT_GENRE).lower() movie = Movie(parent=genre_key(genre)) movie.name = self.request.get('name') movie.actor = self.request.get('actor') movie.actor2 = self.request.get('actor2') movie.director = self.request.get('director') movie.year = self.request.get('year') movie.duration = self.request.get('duration') if movie.year == "" or movie.name =="" or movie.director == "" or movie.duration == "": flag = 0; else: genre = genrer movie = Movie(parent=genre_key(genre)) movie.name = self.request.get('name') movie.actor = self.request.get('actor') movie.actor2 = self.request.get('actor2') movie.director = self.request.get('director') movie.year = self.request.get('year') movie.duration = self.request.get('duration') if movie.year == "" or movie.name =="" or movie.director == "" or movie.duration == "": flag = 0; else: #switch genrer = genre; movie = Movie(parent=genre_key(genre)) movie.name = self.request.get('name') movie.actor = self.request.get('actor') movie.actor2 = self.request.get('actor2') movie.director = self.request.get('director') movie.year = self.request.get('year') movie.duration = self.request.get('duration') flag = -1 if movie.name != "": if movie.director != "": if movie.year != "": if 
movie.duration != "": movie.put() template_values = { 'genre_name': urllib.quote_plus(genre), 'flag': flag, } #query_params = {'genre_name': genre} #self.redirect('/enter?' + urllib.urlencode(query_params)) template = JINJA_ENVIRONMENT.get_template('entermodel.html') self.response.write(template.render(template_values)) # [START main_page] class MainPager(webapp2.RequestHandler): def get(self): template = JINJA_ENVIRONMENT.get_template('index1.html') self.response.write(template.render()) # [END main_page] # [START movieinfo] # [START app] app = webapp2.WSGIApplication([ ('/', MainPager), ('/sign', DisplayPage),('/search', SearchPage),('/enter', EnterPage), ], debug=True) # [END app]
6,549
79
253
25b8784ade9962cdf9c3155abb0987d48efd8033
2,438
py
Python
haystack_topology/query_shannon_entropy_bolt.py
sourcekris/Haystack
d2c9b24660a5c9d06547ed8f59e94e25dde395cc
[ "BSD-3-Clause" ]
2
2016-09-21T01:34:21.000Z
2020-01-12T12:27:35.000Z
haystack_topology/query_shannon_entropy_bolt.py
sourcekris/Haystack
d2c9b24660a5c9d06547ed8f59e94e25dde395cc
[ "BSD-3-Clause" ]
null
null
null
haystack_topology/query_shannon_entropy_bolt.py
sourcekris/Haystack
d2c9b24660a5c9d06547ed8f59e94e25dde395cc
[ "BSD-3-Clause" ]
null
null
null
# ____ _ _ # | _ \ _ __ ___ (_) ___ ___| |_ # | |_) | '__/ _ \| |/ _ \/ __| __| # | __/| | | (_) | | __/ (__| |_ # |_| _|_| \___// |\___|\___|\__| # | | | | __ _ _|__/ ___| |_ __ _ ___| | __ # | |_| |/ _` | | | / __| __/ _` |/ __| |/ / # | _ | (_| | |_| \__ \ || (_| | (__| < # |_| |_|\__,_|\__, |___/\__\__,_|\___|_|\_\ # |___/ # # QueryShannonEntropyBolt - # # Calculates the shannon entropy of the query minus any known # domain suffix and emits a tuple of the query + shannon entropy # value to the topology as a floating point number. # # from collections import namedtuple import logging import math from pyleus.storm import SimpleBolt from haystack_topology.parse_event_bolt import Record log = logging.getLogger('query_shannon-entropy_bolt') QEntropy = namedtuple("QEntropy", "eventid query qentropy") if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, filename='/var/log/haystack/query_shannon-entropy_bolt.log', format="%(message)s", filemode='a', ) QueryShannonEntropyBolt().run()
29.373494
154
0.554553
# ____ _ _ # | _ \ _ __ ___ (_) ___ ___| |_ # | |_) | '__/ _ \| |/ _ \/ __| __| # | __/| | | (_) | | __/ (__| |_ # |_| _|_| \___// |\___|\___|\__| # | | | | __ _ _|__/ ___| |_ __ _ ___| | __ # | |_| |/ _` | | | / __| __/ _` |/ __| |/ / # | _ | (_| | |_| \__ \ || (_| | (__| < # |_| |_|\__,_|\__, |___/\__\__,_|\___|_|\_\ # |___/ # # QueryShannonEntropyBolt - # # Calculates the shannon entropy of the query minus any known # domain suffix and emits a tuple of the query + shannon entropy # value to the topology as a floating point number. # # from collections import namedtuple import logging import math from pyleus.storm import SimpleBolt from haystack_topology.parse_event_bolt import Record log = logging.getLogger('query_shannon-entropy_bolt') QEntropy = namedtuple("QEntropy", "eventid query qentropy") class QueryShannonEntropyBolt(SimpleBolt): OUTPUT_FIELDS = QEntropy OPTIONS = ["public_suffix_list"] def initialize(self): self.pub_sufs = [x.strip() for x in open(self.options["public_suffix_list"],"r").readlines() if not x.startswith('//') and not x.startswith('\n')] def remove_suffix(self, query): possible_suffixes = [] for suf in self.pub_sufs: if len(suf) < len(query): if query.endswith('.'+suf): possible_suffixes.append(suf) if len(possible_suffixes) > 0: return query.replace('.'+max(possible_suffixes,key=len),'') else: return query def shannon(self, query): entropy = 0.0 length = len(query) occ = {} for c in query: if not c in occ: occ[c] = 0 occ[c] += 1 for (k,v) in occ.iteritems(): p = float( v ) / float(length) entropy -= p * math.log(p, 2) return entropy def process_tuple(self, tup): qdata = Record(*tup.values) qse = self.shannon(self.remove_suffix(qdata.query)) qe = QEntropy(qdata.eventid, qdata.query, qse) log.debug(repr(qe)) self.emit(qe, anchors=[tup]) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, filename='/var/log/haystack/query_shannon-entropy_bolt.log', format="%(message)s", filemode='a', ) QueryShannonEntropyBolt().run()
1,057
197
23
6698cda7bc1b99f2131409192b00fe915fe63454
31,453
py
Python
pywbemtools/pywbemcli/_pywbemcli_operations.py
pywbem/pywbemtools
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
[ "Apache-2.0" ]
8
2017-04-01T13:55:00.000Z
2022-03-15T18:28:47.000Z
pywbemtools/pywbemcli/_pywbemcli_operations.py
pywbem/pywbemtools
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
[ "Apache-2.0" ]
918
2017-03-03T14:29:03.000Z
2022-03-29T15:32:16.000Z
pywbemtools/pywbemcli/_pywbemcli_operations.py
pywbem/pywbemtools
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
[ "Apache-2.0" ]
2
2020-01-17T15:56:46.000Z
2020-02-12T18:49:30.000Z
# (C) Copyright 2017 IBM Corp. # (C) Copyright 2017 Inova Development Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Mixin class that adds methods to WBEMConnection and FakeWBEMConnection for pywbemcli usage This contains only methods that use the iter<...> operations but also execute the complete iterations so that we can use these as common operations for pywbemcli instead of having to execute an algorithm of pull vs non-pull everywhere xa WBEMConnection possible pull operation is called. It also adds a method to FakeWBEMConnection to build the repository. """ from __future__ import absolute_import, print_function import os import io import errno import glob import hashlib import pickle import click import packaging.version import pywbem import pywbem_mock from .config import DEFAULT_MAXPULLCNT from .._utils import ensure_bytes, ensure_unicode, DEFAULT_CONNECTIONS_FILE from . import mockscripts PYWBEM_VERSION = packaging.version.parse(pywbem.__version__) # __all__ = ['PYWBEMCLIConnection', 'PYWBEMCLIFakedConnection'] # pylint: disable=useless-object-inheritance class PYWBEMCLIConnectionMixin(object): """ Mixin class to extend WBEMConnection with a set of methods that use the iter<...> methods as the basis for getting Instances, etc. but add the generator processing to retrieve the instances. These can be used within pywbemcli to allow one method call to ack as either a pull or traditional operation pushing the differences into this mixin. 
These methods do not resolve the core issues between the traditional and pull operations such as the fact that only the pull operations pass the FilterQuery parameter. They are a pywbemcli convience to simplify the individual action processing methods to a single call. """ def PyWbemcliEnumerateInstancePaths(self, ClassName, namespace=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterEnumerateInstancePaths and retrieve the instances. Returns the returned instances. Uses the same parameters as the IterEnumerateInstancePaths method. All exceptions from the underlying command are passed through this method. """ result = self.IterEnumerateInstancePaths( ClassName, namespace=namespace, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliEnumerateInstances(self, ClassName, namespace=None, LocalOnly=None, DeepInheritance=None, IncludeQualifiers=None, IncludeClassOrigin=None, PropertyList=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterEnumerateInstances and retrieve the instances. Returns the returned instances. Uses the same parameters as the IterEnumerateInstances method. All exceptions from the underlying method are passed through this method. 
""" result = self.IterEnumerateInstances( ClassName, namespace=namespace, LocalOnly=LocalOnly, DeepInheritance=DeepInheritance, IncludeQualifiers=IncludeQualifiers, IncludeClassOrigin=IncludeClassOrigin, PropertyList=PropertyList, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliReferenceInstancePaths(self, InstanceName, ResultClass=None, Role=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterReferemceInstancePaths and retrieve the instances. Returns the paths that result from iterating the IterReferenceInstancePaths. Uses the same parameters as the IterReferemceInstancePaths method. All exceptions from the underlying method are passed through this method. """ result = self.IterReferenceInstancePaths( InstanceName, ResultClass=ResultClass, Role=Role, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliReferenceInstances(self, InstanceName, ResultClass=None, Role=None, IncludeQualifiers=None, IncludeClassOrigin=None, PropertyList=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterReferencesInstances and retrieve the instances. Returns the returned instances. Uses the same parameters as the IterReferencesInstances method. All exceptions from the underlying method are passed through this method. 
""" result = self.IterReferenceInstances( InstanceName, ResultClass=ResultClass, Role=Role, IncludeQualifiers=IncludeQualifiers, IncludeClassOrigin=IncludeClassOrigin, PropertyList=PropertyList, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliAssociatorInstancePaths(self, InstanceName, AssocClass=None, ResultClass=None, Role=None, ResultRole=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterAssociatorInstancePaths and retrieve the paths. Returns the paths that result from iterating the IterAssociatorInstancePaths. Uses the same parameters as the IterAssociatorInstancePaths method. All exceptions from the underlying method are passed through this method. """ result = self.IterAssociatorInstancePaths( InstanceName, AssocClass=AssocClass, ResultClass=ResultClass, Role=Role, ResultRole=ResultRole, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliAssociatorInstances(self, InstanceName, AssocClass=None, ResultClass=None, Role=None, ResultRole=None, IncludeQualifiers=None, IncludeClassOrigin=None, PropertyList=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterAssociatorInstances and retrieve the instances. Returns the instances that result from iterating the IterAssociatorInstances. Uses the same parameters as the IterAssociatorInstances method. All exceptions from the underlying method are passed through this method. 
""" result = self.IterAssociatorInstances( InstanceName, AssocClass=AssocClass, ResultClass=ResultClass, Role=Role, ResultRole=ResultRole, IncludeQualifiers=IncludeQualifiers, IncludeClassOrigin=IncludeClassOrigin, PropertyList=PropertyList, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliQueryInstances(self, FilterQueryLanguage, FilterQuery, namespace=None, ReturnQueryResultClass=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterQueryInstances and retrieve the instances. Returns the instances that result from iterating the IterQueryInstances. Uses the same parameters as the IterQueryInstances method. All exceptions from the underlying method are passed through this method. """ result = self.IterQueryInstances( FilterQueryLanguage, FilterQuery, namespace=namespace, ReturnQueryResultClass=ReturnQueryResultClass, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) class BuildMockenvMixin(object): # pylint: disable=too-few-public-methods """ Mixin class for pywbem_mock.FakedWBEMConnection that adds the ability to build the mock environment of a connection from a connection definition in a connections file. """ def build_mockenv(self, server, file_path_list, connections_file, connection_name, verbose): """ Builds the mock environment of the 'self' connection from the input files, or from the mock cache of the connection if it is up to date. If the mock environment was built from the input files, the mock environment of the connection is dumped to its cache. The input files for building the mock environment are: * MOF files with a suffix of '.mof'. These files are compiled into the default namespace of the connection. 
* Python files with a suffix of '.py'. These files are mock scripts that are imported and thereby executed. The mock scripts can be used for any kind of setup of the mock environment, for example for creating namespaces, for defining provider classes and registering providers, or for adding CIM objects either directly through add_cimobjects() or by compiling MOF files. Mock scripts support two approaches for passing the connection and server objects they should operate on: * via a setup() function defined in the mock script. This is the recommended approach, and it supports caching. The setup() function has the following parameters: conn (pywbem_mock.FakedWBEMConnection): The mock connection. server (pywbem.WBEMServer): The server object for the mock connection. verbose (bool): Verbose flag from the command line. * via global variables made available to the mock script. This approach prevents caching. The following global variables are made available: CONN (pywbem_mock.FakedWBEMConnection): The mock connection. SERVER (pywbem.WBEMServer): The server object for the mock connection. VERBOSE (bool): Verbose flag from the command line. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. server (pywbem.WBEMServer): The server object for the mock connection. file_path_list (list of string): The path names of the input files for building the mock environment, from the connection definition. connections_file (string): Path name of the connections file. connection_name (string): The name of the connection definition in the connections file. verbose (bool): Verbose flag from the command line. Raises: MockFileError: Mock file does not exist. MockMOFCompileError: Mock MOF file fails to compile. MockScriptError: Mock script fails to execute. SetupNotSupportedError (py<3.5): New-style setup in mock script not supported. """ # Check that the input files exist. Since we loop through them multiple # times, we check that once. 
for file_path in file_path_list: if not os.path.exists(file_path): raise mockscripts.MockFileError( "Mock file does not exist: {}".format(file_path)) # The connections file is set if a named connection is used, i.e. # when specifying the -n general option. It is not set when the -s or -m # general options were specified. When no connections file is set, no # caching happens because there is no connection definition context # which is required for caching. if connections_file == DEFAULT_CONNECTIONS_FILE: cache_rootdir = mockcache_rootdir() if not os.path.isdir(cache_rootdir): os.mkdir(cache_rootdir) cache_dir = mockcache_cachedir( cache_rootdir, connections_file, connection_name) if not os.path.isdir(cache_dir): os.mkdir(cache_dir) # The mockenv pickle file contains the pickled state of the mock # environment. mockenv_pickle_file = os.path.join(cache_dir, 'mockenv.pkl') # The depreg pickle file contains the provider dependents # registry of the connection. It is used to look up the dependent # files of a mock script. The content of these dependent files is # also taken into account when determining whether the cache is up # to date. This needs to go into a separate pickle file because # it needs to be loaded and examined before the mckenv pickle # file is loaded. depreg_pickle_file = os.path.join(cache_dir, 'depreg.pkl') # The md5 file contains the MD5 hash value of the content of the # input files for the mock environment, and also taken into account # when determining whether the cache is up to date. md5_file = os.path.join(cache_dir, 'mockfiles.md5') # Flag indicating that the mock environment needs to be built # (or re-built). If False, the mock environment cache can be used. need_rebuild = False # Determine whether the mock environment needs to be rebuilt based # on the (non-)existence of the cache files. 
if not os.path.isfile(mockenv_pickle_file) \ or not os.path.isfile(depreg_pickle_file) \ or not os.path.isfile(md5_file): if verbose: click.echo("Mock environment for connection definition " "'{}' will be built because it was not cached.". format(connection_name)) need_rebuild = True try: depreg = self._load_depreg(depreg_pickle_file) except (IOError, OSError) as exc: if exc.errno == errno.ENOENT: depreg = pywbem_mock.ProviderDependentRegistry() else: raise # Calculate the MD5 hash value of the content of the input files md5 = hashlib.md5() for file_path in file_path_list: with io.open(file_path, 'rb') as fp: file_source = fp.read() md5.update(file_source) # For mock scripts, take their dependent files into account if file_path.endswith('.py'): dep_files = depreg.iter_dependents(file_path) for dep_file in dep_files: with io.open(dep_file, 'rb') as fp: file_source = fp.read() md5.update(file_source) # Add the cache dir, so that manual tweaks on the cache files # invalidates the cache. md5.update(ensure_bytes(cache_dir)) new_md5_value = ensure_unicode(md5.hexdigest()) # Determine whether the mock environment needs to be rebuilt based # on the MD5 hash value of the input file content. if not need_rebuild: with io.open(md5_file, 'r', encoding='utf-8') as fp: cached_md5_value = fp.read() if new_md5_value != cached_md5_value: if verbose: click.echo("Mock environment for connection " "definition '{}' is cached but will be " "rebuilt because the mock files have " "changed.".format(connection_name)) need_rebuild = True cache_it = True elif connections_file: # User-specified connections file used. if verbose: click.echo("Mock environment for connection definition '{}' " "will be built because user-specified connections " "files are not cached.".format(connection_name)) need_rebuild = True cache_it = False else: # No connections file context. 
if verbose: click.echo("Mock environment for connection definition '{}' " "will be built because no connections file is " "known.".format(connection_name)) need_rebuild = True cache_it = False if need_rebuild: try: self._build_mockenv(server, file_path_list, verbose) except mockscripts.NotCacheable as exc: if verbose: click.echo("Mock environment for connection definition " "'{}' will be built because it is not " "cacheable: {}.".format(connection_name, exc)) else: if connections_file and cache_it: self._dump_mockenv(mockenv_pickle_file) self._dump_depreg( self.provider_dependent_registry, depreg_pickle_file) with io.open(md5_file, 'w', encoding='utf-8') as fp: fp.write(new_md5_value) if verbose: click.echo("Mock environment for connection " "definition '{}' has been written to " "cache.".format(connection_name)) else: # When no rebuild is needed, there must have been a connections # file set. assert connections_file try: self._load_mockenv(mockenv_pickle_file, file_path_list) if verbose: click.echo("Mock environment for connection definition " "'{}' has been loaded from cache.". format(connection_name)) except mockscripts.NotCacheable as exc: if verbose: click.echo("Mock environment for connection definition " "'{}' will be rebuilt because it is not " "cacheable: {}.".format(connection_name, exc)) self._build_mockenv(server, file_path_list, verbose) def _build_mockenv(self, server, file_path_list, verbose): """ Build the mock environment from the input files. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. server (pywbem.WBEMServer): The server object for the mock connection. file_path_list (list of string): The path names of the input files for building the mock environment, from the connection definition. verbose (bool): Verbose flag from the command line. Raises: NotCacheable (py<3.5): Mock environment is not cacheable. MockMOFCompileError: Mock MOF file fails to compile. MockScriptError: Mock script fails to execute. 
SetupNotSupportedError (py<3.5): New-style setup in mock script not supported. """ for file_path in file_path_list: ext = os.path.splitext(file_path)[1] if ext == '.mof': try: # Displays any MOFParseError already self.compile_mof_file(file_path, verbose=verbose) except pywbem.Error as er: # Abort the entire pywbemcli command because the # MOF compilation might have caused inconsistencies in # the mock repository. if PYWBEM_VERSION.release >= (1, 0, 0): # display just the exception. msg = "MOF compile failed:\n{0}".format(er) else: # display file name. Error text displayed already. if isinstance(er, pywbem.MOFParseError): msg = "MOF compile failed: File: '{0}'" \ "(see above)".format(file_path) else: # not parse error, display exception msg = "MOF compile failed: File: {0} " \ "Error: {1}".format(file_path, er) new_exc = mockscripts.MockMOFCompileError(msg) new_exc.__cause__ = None raise new_exc else: assert ext == '.py' # already checked # May raise various mockscripts.MockError exceptions. # NotCacheable will be handled by the caller by building the # mock env. mockscripts.setup_script(file_path, self, server, verbose) def _dump_mockenv(self, mockenv_pickle_file): """ Dump the mock environment of the connection to the mockenv pickle file. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. mockenv_pickle_file (pywbem.WBEMServer): Path name of the mockenv pickle file. """ # Save the provider registry and the CIM repository # We construct a single object, because the CIM repository is # referenced from each provider, and pickle properly handles # multiple references to the same object. mockenv = dict( cimrepository=self.cimrepository, # pylint: disable=protected-access provider_registry=self._provider_registry, ) with io.open(mockenv_pickle_file, 'wb') as fp: pickle.dump(mockenv, fp) def _load_mockenv(self, mockenv_pickle_file, file_path_list): """ Load the mock environment from the mockenv pickle file. 
This method also imports the Python scripts from the input files in order to re-establish any class definitions that may be needed, for example provider classes. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. mockenv_pickle_file (pywbem.WBEMServer): Path name of the mockenv pickle file. file_path_list (list of string): The path names of the input files for building the mock environment, from the connection definition. Raises: NotCacheable (py<3.5): Mock environment is not cacheable. """ # Restore the provider classes for file_path in file_path_list: ext = os.path.splitext(file_path)[1] if ext == '.py': # May raise mockscripts.NotCacheable which will be handled by # the caller by building the mock env. mockscripts.import_script(file_path) # Restore the provider registry and the CIM repository with io.open(mockenv_pickle_file, 'rb') as fp: mockenv = pickle.load(fp) # Others have references to the self._cimrepository object, so we are # not replacing that object, but are rather replacing the state of # that object. cimrepository = mockenv['cimrepository'] assert isinstance(cimrepository, pywbem_mock.InMemoryRepository) # pylint: disable=protected-access self._cimrepository.load(cimrepository) provider_registry = mockenv['provider_registry'] assert isinstance(provider_registry, pywbem_mock.ProviderRegistry) # pylint: disable=protected-access self._provider_registry.load(provider_registry) @staticmethod def _dump_depreg(depreg, depreg_pickle_file): """ Dump a provider dependent registry to a pickle file. Parameters: depreg (pywbem_mock.ProviderDependentRegistry): Provider dependent registry to be dumped. depreg_pickle_file (string): Path name of the pickle file. """ with io.open(depreg_pickle_file, 'wb') as fp: pickle.dump(depreg, fp) @staticmethod def _load_depreg(depreg_pickle_file): """ Load a provider dependent registry from a pickle file and return it. Parameters: depreg_pickle_file (string): Path name of the pickle file to be loaded. 
Returns: pywbem_mock.ProviderDependentRegistry: Provider dependent registry. """ with io.open(depreg_pickle_file, 'rb') as fp: depreg = pickle.load(fp) return depreg class PYWBEMCLIConnection(pywbem.WBEMConnection, PYWBEMCLIConnectionMixin): """ PyWBEMCLIConnection subclass adds the methods added by PYWBEMCLIConnectionMixin """ def __init__(self, *args, **kwargs): """ ctor passes all input parameters to superclass """ super(PYWBEMCLIConnection, self).__init__(*args, **kwargs) class PYWBEMCLIFakedConnection(BuildMockenvMixin, PYWBEMCLIConnectionMixin, pywbem_mock.FakedWBEMConnection): """ PyWBEMCLIFakedConnection subclass adds the methods added by PYWBEMCLIConnectionMixin """ def __init__(self, *args, **kwargs): """ ctor passes all input parameters to superclass """ super(PYWBEMCLIFakedConnection, self).__init__(*args, **kwargs) def mockcache_rootdir(): """ Return the directory path of the mock cache root directory. """ dir_path = os.path.join(os.path.expanduser('~'), '.pywbemcli_mockcache') return dir_path def mockcache_cachedir(rootdir, connections_file, connection_name): """ Return the directory path of the mock cache directory for a connection. """ # Construct a (reproducible) cache ID from connections file path and # connection definition name. # Example: 6048a3da1a34a3ec605825a1493c7bb5.simple try: connections_file = os.path.relpath( connections_file, os.path.expanduser('~')) except ValueError: # On Windows, os.path.relpath() raises ValueError when the paths # are on different drives pass md5 = hashlib.md5() md5.update(connections_file.encode("utf-8")) cache_id = "{}.{}".format(md5.hexdigest(), connection_name) dir_path = os.path.join(rootdir, cache_id) return dir_path def delete_mock_cache(connections_file, connection_name): """ Delete the mock cache of the connection, if it exists. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. connections_file (string): Path name of the connections file. 
connection_name (string): The name of the connection definition in the connections file. Raises: OSError: Mock cache cannot be deleted. """ cache_dir = mockcache_cachedir( mockcache_rootdir(), connections_file, connection_name) if os.path.isdir(cache_dir): file_list = glob.glob(os.path.join(cache_dir, '*')) for _file in file_list: os.remove(_file) os.rmdir(cache_dir)
40.848052
80
0.601723
# (C) Copyright 2017 IBM Corp. # (C) Copyright 2017 Inova Development Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Mixin class that adds methods to WBEMConnection and FakeWBEMConnection for pywbemcli usage This contains only methods that use the iter<...> operations but also execute the complete iterations so that we can use these as common operations for pywbemcli instead of having to execute an algorithm of pull vs non-pull everywhere xa WBEMConnection possible pull operation is called. It also adds a method to FakeWBEMConnection to build the repository. """ from __future__ import absolute_import, print_function import os import io import errno import glob import hashlib import pickle import click import packaging.version import pywbem import pywbem_mock from .config import DEFAULT_MAXPULLCNT from .._utils import ensure_bytes, ensure_unicode, DEFAULT_CONNECTIONS_FILE from . import mockscripts PYWBEM_VERSION = packaging.version.parse(pywbem.__version__) # __all__ = ['PYWBEMCLIConnection', 'PYWBEMCLIFakedConnection'] # pylint: disable=useless-object-inheritance class PYWBEMCLIConnectionMixin(object): """ Mixin class to extend WBEMConnection with a set of methods that use the iter<...> methods as the basis for getting Instances, etc. but add the generator processing to retrieve the instances. These can be used within pywbemcli to allow one method call to ack as either a pull or traditional operation pushing the differences into this mixin. 
These methods do not resolve the core issues between the traditional and pull operations such as the fact that only the pull operations pass the FilterQuery parameter. They are a pywbemcli convience to simplify the individual action processing methods to a single call. """ def PyWbemcliEnumerateInstancePaths(self, ClassName, namespace=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterEnumerateInstancePaths and retrieve the instances. Returns the returned instances. Uses the same parameters as the IterEnumerateInstancePaths method. All exceptions from the underlying command are passed through this method. """ result = self.IterEnumerateInstancePaths( ClassName, namespace=namespace, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliEnumerateInstances(self, ClassName, namespace=None, LocalOnly=None, DeepInheritance=None, IncludeQualifiers=None, IncludeClassOrigin=None, PropertyList=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterEnumerateInstances and retrieve the instances. Returns the returned instances. Uses the same parameters as the IterEnumerateInstances method. All exceptions from the underlying method are passed through this method. 
""" result = self.IterEnumerateInstances( ClassName, namespace=namespace, LocalOnly=LocalOnly, DeepInheritance=DeepInheritance, IncludeQualifiers=IncludeQualifiers, IncludeClassOrigin=IncludeClassOrigin, PropertyList=PropertyList, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliReferenceInstancePaths(self, InstanceName, ResultClass=None, Role=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterReferemceInstancePaths and retrieve the instances. Returns the paths that result from iterating the IterReferenceInstancePaths. Uses the same parameters as the IterReferemceInstancePaths method. All exceptions from the underlying method are passed through this method. """ result = self.IterReferenceInstancePaths( InstanceName, ResultClass=ResultClass, Role=Role, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliReferenceInstances(self, InstanceName, ResultClass=None, Role=None, IncludeQualifiers=None, IncludeClassOrigin=None, PropertyList=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterReferencesInstances and retrieve the instances. Returns the returned instances. Uses the same parameters as the IterReferencesInstances method. All exceptions from the underlying method are passed through this method. 
""" result = self.IterReferenceInstances( InstanceName, ResultClass=ResultClass, Role=Role, IncludeQualifiers=IncludeQualifiers, IncludeClassOrigin=IncludeClassOrigin, PropertyList=PropertyList, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliAssociatorInstancePaths(self, InstanceName, AssocClass=None, ResultClass=None, Role=None, ResultRole=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterAssociatorInstancePaths and retrieve the paths. Returns the paths that result from iterating the IterAssociatorInstancePaths. Uses the same parameters as the IterAssociatorInstancePaths method. All exceptions from the underlying method are passed through this method. """ result = self.IterAssociatorInstancePaths( InstanceName, AssocClass=AssocClass, ResultClass=ResultClass, Role=Role, ResultRole=ResultRole, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliAssociatorInstances(self, InstanceName, AssocClass=None, ResultClass=None, Role=None, ResultRole=None, IncludeQualifiers=None, IncludeClassOrigin=None, PropertyList=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterAssociatorInstances and retrieve the instances. Returns the instances that result from iterating the IterAssociatorInstances. Uses the same parameters as the IterAssociatorInstances method. All exceptions from the underlying method are passed through this method. 
""" result = self.IterAssociatorInstances( InstanceName, AssocClass=AssocClass, ResultClass=ResultClass, Role=Role, ResultRole=ResultRole, IncludeQualifiers=IncludeQualifiers, IncludeClassOrigin=IncludeClassOrigin, PropertyList=PropertyList, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) def PyWbemcliQueryInstances(self, FilterQueryLanguage, FilterQuery, namespace=None, ReturnQueryResultClass=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_MAXPULLCNT, **extra): # pylint: disable=unused-argument # pylint: disable=invalid-name """ Execute IterQueryInstances and retrieve the instances. Returns the instances that result from iterating the IterQueryInstances. Uses the same parameters as the IterQueryInstances method. All exceptions from the underlying method are passed through this method. """ result = self.IterQueryInstances( FilterQueryLanguage, FilterQuery, namespace=namespace, ReturnQueryResultClass=ReturnQueryResultClass, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount) return list(result) class BuildMockenvMixin(object): # pylint: disable=too-few-public-methods """ Mixin class for pywbem_mock.FakedWBEMConnection that adds the ability to build the mock environment of a connection from a connection definition in a connections file. """ def build_mockenv(self, server, file_path_list, connections_file, connection_name, verbose): """ Builds the mock environment of the 'self' connection from the input files, or from the mock cache of the connection if it is up to date. If the mock environment was built from the input files, the mock environment of the connection is dumped to its cache. The input files for building the mock environment are: * MOF files with a suffix of '.mof'. These files are compiled into the default namespace of the connection. 
* Python files with a suffix of '.py'. These files are mock scripts that are imported and thereby executed. The mock scripts can be used for any kind of setup of the mock environment, for example for creating namespaces, for defining provider classes and registering providers, or for adding CIM objects either directly through add_cimobjects() or by compiling MOF files. Mock scripts support two approaches for passing the connection and server objects they should operate on: * via a setup() function defined in the mock script. This is the recommended approach, and it supports caching. The setup() function has the following parameters: conn (pywbem_mock.FakedWBEMConnection): The mock connection. server (pywbem.WBEMServer): The server object for the mock connection. verbose (bool): Verbose flag from the command line. * via global variables made available to the mock script. This approach prevents caching. The following global variables are made available: CONN (pywbem_mock.FakedWBEMConnection): The mock connection. SERVER (pywbem.WBEMServer): The server object for the mock connection. VERBOSE (bool): Verbose flag from the command line. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. server (pywbem.WBEMServer): The server object for the mock connection. file_path_list (list of string): The path names of the input files for building the mock environment, from the connection definition. connections_file (string): Path name of the connections file. connection_name (string): The name of the connection definition in the connections file. verbose (bool): Verbose flag from the command line. Raises: MockFileError: Mock file does not exist. MockMOFCompileError: Mock MOF file fails to compile. MockScriptError: Mock script fails to execute. SetupNotSupportedError (py<3.5): New-style setup in mock script not supported. """ # Check that the input files exist. Since we loop through them multiple # times, we check that once. 
for file_path in file_path_list: if not os.path.exists(file_path): raise mockscripts.MockFileError( "Mock file does not exist: {}".format(file_path)) # The connections file is set if a named connection is used, i.e. # when specifying the -n general option. It is not set when the -s or -m # general options were specified. When no connections file is set, no # caching happens because there is no connection definition context # which is required for caching. if connections_file == DEFAULT_CONNECTIONS_FILE: cache_rootdir = mockcache_rootdir() if not os.path.isdir(cache_rootdir): os.mkdir(cache_rootdir) cache_dir = mockcache_cachedir( cache_rootdir, connections_file, connection_name) if not os.path.isdir(cache_dir): os.mkdir(cache_dir) # The mockenv pickle file contains the pickled state of the mock # environment. mockenv_pickle_file = os.path.join(cache_dir, 'mockenv.pkl') # The depreg pickle file contains the provider dependents # registry of the connection. It is used to look up the dependent # files of a mock script. The content of these dependent files is # also taken into account when determining whether the cache is up # to date. This needs to go into a separate pickle file because # it needs to be loaded and examined before the mckenv pickle # file is loaded. depreg_pickle_file = os.path.join(cache_dir, 'depreg.pkl') # The md5 file contains the MD5 hash value of the content of the # input files for the mock environment, and also taken into account # when determining whether the cache is up to date. md5_file = os.path.join(cache_dir, 'mockfiles.md5') # Flag indicating that the mock environment needs to be built # (or re-built). If False, the mock environment cache can be used. need_rebuild = False # Determine whether the mock environment needs to be rebuilt based # on the (non-)existence of the cache files. 
if not os.path.isfile(mockenv_pickle_file) \ or not os.path.isfile(depreg_pickle_file) \ or not os.path.isfile(md5_file): if verbose: click.echo("Mock environment for connection definition " "'{}' will be built because it was not cached.". format(connection_name)) need_rebuild = True try: depreg = self._load_depreg(depreg_pickle_file) except (IOError, OSError) as exc: if exc.errno == errno.ENOENT: depreg = pywbem_mock.ProviderDependentRegistry() else: raise # Calculate the MD5 hash value of the content of the input files md5 = hashlib.md5() for file_path in file_path_list: with io.open(file_path, 'rb') as fp: file_source = fp.read() md5.update(file_source) # For mock scripts, take their dependent files into account if file_path.endswith('.py'): dep_files = depreg.iter_dependents(file_path) for dep_file in dep_files: with io.open(dep_file, 'rb') as fp: file_source = fp.read() md5.update(file_source) # Add the cache dir, so that manual tweaks on the cache files # invalidates the cache. md5.update(ensure_bytes(cache_dir)) new_md5_value = ensure_unicode(md5.hexdigest()) # Determine whether the mock environment needs to be rebuilt based # on the MD5 hash value of the input file content. if not need_rebuild: with io.open(md5_file, 'r', encoding='utf-8') as fp: cached_md5_value = fp.read() if new_md5_value != cached_md5_value: if verbose: click.echo("Mock environment for connection " "definition '{}' is cached but will be " "rebuilt because the mock files have " "changed.".format(connection_name)) need_rebuild = True cache_it = True elif connections_file: # User-specified connections file used. if verbose: click.echo("Mock environment for connection definition '{}' " "will be built because user-specified connections " "files are not cached.".format(connection_name)) need_rebuild = True cache_it = False else: # No connections file context. 
if verbose: click.echo("Mock environment for connection definition '{}' " "will be built because no connections file is " "known.".format(connection_name)) need_rebuild = True cache_it = False if need_rebuild: try: self._build_mockenv(server, file_path_list, verbose) except mockscripts.NotCacheable as exc: if verbose: click.echo("Mock environment for connection definition " "'{}' will be built because it is not " "cacheable: {}.".format(connection_name, exc)) else: if connections_file and cache_it: self._dump_mockenv(mockenv_pickle_file) self._dump_depreg( self.provider_dependent_registry, depreg_pickle_file) with io.open(md5_file, 'w', encoding='utf-8') as fp: fp.write(new_md5_value) if verbose: click.echo("Mock environment for connection " "definition '{}' has been written to " "cache.".format(connection_name)) else: # When no rebuild is needed, there must have been a connections # file set. assert connections_file try: self._load_mockenv(mockenv_pickle_file, file_path_list) if verbose: click.echo("Mock environment for connection definition " "'{}' has been loaded from cache.". format(connection_name)) except mockscripts.NotCacheable as exc: if verbose: click.echo("Mock environment for connection definition " "'{}' will be rebuilt because it is not " "cacheable: {}.".format(connection_name, exc)) self._build_mockenv(server, file_path_list, verbose) def _build_mockenv(self, server, file_path_list, verbose): """ Build the mock environment from the input files. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. server (pywbem.WBEMServer): The server object for the mock connection. file_path_list (list of string): The path names of the input files for building the mock environment, from the connection definition. verbose (bool): Verbose flag from the command line. Raises: NotCacheable (py<3.5): Mock environment is not cacheable. MockMOFCompileError: Mock MOF file fails to compile. MockScriptError: Mock script fails to execute. 
SetupNotSupportedError (py<3.5): New-style setup in mock script not supported. """ for file_path in file_path_list: ext = os.path.splitext(file_path)[1] if ext == '.mof': try: # Displays any MOFParseError already self.compile_mof_file(file_path, verbose=verbose) except pywbem.Error as er: # Abort the entire pywbemcli command because the # MOF compilation might have caused inconsistencies in # the mock repository. if PYWBEM_VERSION.release >= (1, 0, 0): # display just the exception. msg = "MOF compile failed:\n{0}".format(er) else: # display file name. Error text displayed already. if isinstance(er, pywbem.MOFParseError): msg = "MOF compile failed: File: '{0}'" \ "(see above)".format(file_path) else: # not parse error, display exception msg = "MOF compile failed: File: {0} " \ "Error: {1}".format(file_path, er) new_exc = mockscripts.MockMOFCompileError(msg) new_exc.__cause__ = None raise new_exc else: assert ext == '.py' # already checked # May raise various mockscripts.MockError exceptions. # NotCacheable will be handled by the caller by building the # mock env. mockscripts.setup_script(file_path, self, server, verbose) def _dump_mockenv(self, mockenv_pickle_file): """ Dump the mock environment of the connection to the mockenv pickle file. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. mockenv_pickle_file (pywbem.WBEMServer): Path name of the mockenv pickle file. """ # Save the provider registry and the CIM repository # We construct a single object, because the CIM repository is # referenced from each provider, and pickle properly handles # multiple references to the same object. mockenv = dict( cimrepository=self.cimrepository, # pylint: disable=protected-access provider_registry=self._provider_registry, ) with io.open(mockenv_pickle_file, 'wb') as fp: pickle.dump(mockenv, fp) def _load_mockenv(self, mockenv_pickle_file, file_path_list): """ Load the mock environment from the mockenv pickle file. 
This method also imports the Python scripts from the input files in order to re-establish any class definitions that may be needed, for example provider classes. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. mockenv_pickle_file (pywbem.WBEMServer): Path name of the mockenv pickle file. file_path_list (list of string): The path names of the input files for building the mock environment, from the connection definition. Raises: NotCacheable (py<3.5): Mock environment is not cacheable. """ # Restore the provider classes for file_path in file_path_list: ext = os.path.splitext(file_path)[1] if ext == '.py': # May raise mockscripts.NotCacheable which will be handled by # the caller by building the mock env. mockscripts.import_script(file_path) # Restore the provider registry and the CIM repository with io.open(mockenv_pickle_file, 'rb') as fp: mockenv = pickle.load(fp) # Others have references to the self._cimrepository object, so we are # not replacing that object, but are rather replacing the state of # that object. cimrepository = mockenv['cimrepository'] assert isinstance(cimrepository, pywbem_mock.InMemoryRepository) # pylint: disable=protected-access self._cimrepository.load(cimrepository) provider_registry = mockenv['provider_registry'] assert isinstance(provider_registry, pywbem_mock.ProviderRegistry) # pylint: disable=protected-access self._provider_registry.load(provider_registry) @staticmethod def _dump_depreg(depreg, depreg_pickle_file): """ Dump a provider dependent registry to a pickle file. Parameters: depreg (pywbem_mock.ProviderDependentRegistry): Provider dependent registry to be dumped. depreg_pickle_file (string): Path name of the pickle file. """ with io.open(depreg_pickle_file, 'wb') as fp: pickle.dump(depreg, fp) @staticmethod def _load_depreg(depreg_pickle_file): """ Load a provider dependent registry from a pickle file and return it. Parameters: depreg_pickle_file (string): Path name of the pickle file to be loaded. 
Returns: pywbem_mock.ProviderDependentRegistry: Provider dependent registry. """ with io.open(depreg_pickle_file, 'rb') as fp: depreg = pickle.load(fp) return depreg class PYWBEMCLIConnection(pywbem.WBEMConnection, PYWBEMCLIConnectionMixin): """ PyWBEMCLIConnection subclass adds the methods added by PYWBEMCLIConnectionMixin """ def __init__(self, *args, **kwargs): """ ctor passes all input parameters to superclass """ super(PYWBEMCLIConnection, self).__init__(*args, **kwargs) class PYWBEMCLIFakedConnection(BuildMockenvMixin, PYWBEMCLIConnectionMixin, pywbem_mock.FakedWBEMConnection): """ PyWBEMCLIFakedConnection subclass adds the methods added by PYWBEMCLIConnectionMixin """ def __init__(self, *args, **kwargs): """ ctor passes all input parameters to superclass """ super(PYWBEMCLIFakedConnection, self).__init__(*args, **kwargs) def mockcache_rootdir(): """ Return the directory path of the mock cache root directory. """ dir_path = os.path.join(os.path.expanduser('~'), '.pywbemcli_mockcache') return dir_path def mockcache_cachedir(rootdir, connections_file, connection_name): """ Return the directory path of the mock cache directory for a connection. """ # Construct a (reproducible) cache ID from connections file path and # connection definition name. # Example: 6048a3da1a34a3ec605825a1493c7bb5.simple try: connections_file = os.path.relpath( connections_file, os.path.expanduser('~')) except ValueError: # On Windows, os.path.relpath() raises ValueError when the paths # are on different drives pass md5 = hashlib.md5() md5.update(connections_file.encode("utf-8")) cache_id = "{}.{}".format(md5.hexdigest(), connection_name) dir_path = os.path.join(rootdir, cache_id) return dir_path def delete_mock_cache(connections_file, connection_name): """ Delete the mock cache of the connection, if it exists. Parameters: self (pywbem_mock.FakedWBEMConnection): The mock connection. connections_file (string): Path name of the connections file. 
connection_name (string): The name of the connection definition in the connections file. Raises: OSError: Mock cache cannot be deleted. """ cache_dir = mockcache_cachedir( mockcache_rootdir(), connections_file, connection_name) if os.path.isdir(cache_dir): file_list = glob.glob(os.path.join(cache_dir, '*')) for _file in file_list: os.remove(_file) os.rmdir(cache_dir)
0
0
0
3ccfaab268fd992dee668ab9c0ed10bb59ba6a5f
449
py
Python
setup.py
vallsv/atop_raw
531e9c6749f199a55dc3138c73310a5105da7b7f
[ "MIT" ]
null
null
null
setup.py
vallsv/atop_raw
531e9c6749f199a55dc3138c73310a5105da7b7f
[ "MIT" ]
null
null
null
setup.py
vallsv/atop_raw
531e9c6749f199a55dc3138c73310a5105da7b7f
[ "MIT" ]
null
null
null
from setuptools import setup packages = ["atop_raw", "atop_raw.headers"] install_requires = ["numpy"] extras_require = {"pycstruct": ["pycstruct >= 0.9"]} package_data = {"atop_raw.headers": ["*.h"]} setup( name="atop_raw", version="2", packages=packages, extras_require=extras_require, package_data=package_data, license="MIT", description="Reader of raw files from atop", install_requires=install_requires, )
20.409091
52
0.688196
from setuptools import setup packages = ["atop_raw", "atop_raw.headers"] install_requires = ["numpy"] extras_require = {"pycstruct": ["pycstruct >= 0.9"]} package_data = {"atop_raw.headers": ["*.h"]} setup( name="atop_raw", version="2", packages=packages, extras_require=extras_require, package_data=package_data, license="MIT", description="Reader of raw files from atop", install_requires=install_requires, )
0
0
0
a37b60dd8042c70ef8a412bcfc9242934135b726
3,461
py
Python
apps/data_taking_scripts/2016-06-jpl-hex-271/sweep_and_stream_sloppy.py
danielflanigan/kid_readout
07202090d468669200cab78297122880c1c03e87
[ "BSD-2-Clause" ]
null
null
null
apps/data_taking_scripts/2016-06-jpl-hex-271/sweep_and_stream_sloppy.py
danielflanigan/kid_readout
07202090d468669200cab78297122880c1c03e87
[ "BSD-2-Clause" ]
null
null
null
apps/data_taking_scripts/2016-06-jpl-hex-271/sweep_and_stream_sloppy.py
danielflanigan/kid_readout
07202090d468669200cab78297122880c1c03e87
[ "BSD-2-Clause" ]
null
null
null
import time import numpy as np from equipment.custom import mmwave_source from equipment.srs import lockin from equipment.hittite import signal_generator from kid_readout.interactive import * from kid_readout.equipment import hardware from kid_readout.measurement import acquire from kid_readout.roach import analog # fg = FunctionGenerator() hittite = signal_generator.Hittite(ipaddr='192.168.0.200') hittite.set_power(0) hittite.on() lockin = lockin.Lockin(LOCKIN_SERIAL_PORT) tic = time.time() print lockin.identification source = mmwave_source.MMWaveSource() source.set_attenuator_turns(3.0,3.0) source.multiplier_input = 'hittite' source.waveguide_twist_angle = 45 source.ttl_modulation_source = 'roach' ifboard = analog.HeterodyneMarkI() setup = hardware.Hardware(source, lockin, ifboard,hittite) setup.hittite.set_freq(148e9/12.) ri = hardware_tools.r2_with_mk1() ri.set_fft_gain(8) #initial_f0s = np.load('/data/readout/resonances/2016-06-18-jpl-hex-271-32-high-qi-lo-1210-resonances.npy') #initial_f0s = initial_f0s/1e6 initial_lo = 1210. bbtones = np.linspace(5,220,256) initial_f0s = np.hstack((initial_lo-bbtones-0.2,initial_lo+bbtones)) initial_f0s.sort() nsamp = 2**15 step = 1 nstep = 24 offset_bins = np.arange(-(nstep + 1), (nstep + 1)) * step offsets = offset_bins * 512.0 / nsamp print (initial_f0s[1]-initial_f0s[0])*1e6, offsets.ptp() for (lo,f0s) in [(initial_lo,initial_f0s)]: ri.set_lo(lo) for dac_atten in [0]: ncf = new_nc_file(suffix='off_on_cw_%d_dB_dac' % dac_atten) ri.set_modulation_output('high') swpa = acquire.run_sweep(ri, tone_banks=f0s[None,:] + offsets[:,None], num_tone_samples=nsamp, length_seconds=0.5, state=setup.state(), verbose=True, description='source off sweep') print "resonance sweep done", (time.time()-tic)/60. ncf.write(swpa) #print "sweep written", (time.time()-tic)/60. 
current_f0s = [] for sidx in range(f0s.shape[0]): swp = swpa.sweep(sidx) res = swp.resonator print res.f_0, res.Q, res.current_result.redchi, (f0s[sidx]*1e6-res.f_0) if np.abs(res.f_0 - f0s[sidx]*1e6) > 0.9*(initial_f0s[1]-initial_f0s[0])*1e6: current_f0s.append(f0s[sidx]*1e6) print "using original frequency for ",f0s[sidx] else: current_f0s.append(res.f_0) print "fits complete", (time.time()-tic)/60. current_f0s = np.array(current_f0s)/1e6 current_f0s.sort() if np.any(np.diff(current_f0s)<0.25): print "problematic resonator collision:",current_f0s print "deltas:",np.diff(current_f0s) ri.set_tone_freqs(current_f0s,nsamp) ri.select_fft_bins(range(f0s.shape[0])) meas = ri.get_measurement(num_seconds=30., state=setup.state(),description='source off stream') ncf.write(meas) ri.set_modulation_output('low') meas = ri.get_measurement(num_seconds=30., state=setup.state(),description='source on stream') ncf.write(meas) ri.set_modulation_output(7) time.sleep(1) # wait for source modulation to stabilize meas = ri.get_measurement(num_seconds=4., state=setup.state(),description='source modulated stream') ncf.write(meas) print "dac_atten %f done in %.1f minutes" % (dac_atten, (time.time()-tic)/60.) ncf.close()
37.215054
108
0.675527
import time import numpy as np from equipment.custom import mmwave_source from equipment.srs import lockin from equipment.hittite import signal_generator from kid_readout.interactive import * from kid_readout.equipment import hardware from kid_readout.measurement import acquire from kid_readout.roach import analog # fg = FunctionGenerator() hittite = signal_generator.Hittite(ipaddr='192.168.0.200') hittite.set_power(0) hittite.on() lockin = lockin.Lockin(LOCKIN_SERIAL_PORT) tic = time.time() print lockin.identification source = mmwave_source.MMWaveSource() source.set_attenuator_turns(3.0,3.0) source.multiplier_input = 'hittite' source.waveguide_twist_angle = 45 source.ttl_modulation_source = 'roach' ifboard = analog.HeterodyneMarkI() setup = hardware.Hardware(source, lockin, ifboard,hittite) setup.hittite.set_freq(148e9/12.) ri = hardware_tools.r2_with_mk1() ri.set_fft_gain(8) #initial_f0s = np.load('/data/readout/resonances/2016-06-18-jpl-hex-271-32-high-qi-lo-1210-resonances.npy') #initial_f0s = initial_f0s/1e6 initial_lo = 1210. bbtones = np.linspace(5,220,256) initial_f0s = np.hstack((initial_lo-bbtones-0.2,initial_lo+bbtones)) initial_f0s.sort() nsamp = 2**15 step = 1 nstep = 24 offset_bins = np.arange(-(nstep + 1), (nstep + 1)) * step offsets = offset_bins * 512.0 / nsamp print (initial_f0s[1]-initial_f0s[0])*1e6, offsets.ptp() for (lo,f0s) in [(initial_lo,initial_f0s)]: ri.set_lo(lo) for dac_atten in [0]: ncf = new_nc_file(suffix='off_on_cw_%d_dB_dac' % dac_atten) ri.set_modulation_output('high') swpa = acquire.run_sweep(ri, tone_banks=f0s[None,:] + offsets[:,None], num_tone_samples=nsamp, length_seconds=0.5, state=setup.state(), verbose=True, description='source off sweep') print "resonance sweep done", (time.time()-tic)/60. ncf.write(swpa) #print "sweep written", (time.time()-tic)/60. 
current_f0s = [] for sidx in range(f0s.shape[0]): swp = swpa.sweep(sidx) res = swp.resonator print res.f_0, res.Q, res.current_result.redchi, (f0s[sidx]*1e6-res.f_0) if np.abs(res.f_0 - f0s[sidx]*1e6) > 0.9*(initial_f0s[1]-initial_f0s[0])*1e6: current_f0s.append(f0s[sidx]*1e6) print "using original frequency for ",f0s[sidx] else: current_f0s.append(res.f_0) print "fits complete", (time.time()-tic)/60. current_f0s = np.array(current_f0s)/1e6 current_f0s.sort() if np.any(np.diff(current_f0s)<0.25): print "problematic resonator collision:",current_f0s print "deltas:",np.diff(current_f0s) ri.set_tone_freqs(current_f0s,nsamp) ri.select_fft_bins(range(f0s.shape[0])) meas = ri.get_measurement(num_seconds=30., state=setup.state(),description='source off stream') ncf.write(meas) ri.set_modulation_output('low') meas = ri.get_measurement(num_seconds=30., state=setup.state(),description='source on stream') ncf.write(meas) ri.set_modulation_output(7) time.sleep(1) # wait for source modulation to stabilize meas = ri.get_measurement(num_seconds=4., state=setup.state(),description='source modulated stream') ncf.write(meas) print "dac_atten %f done in %.1f minutes" % (dac_atten, (time.time()-tic)/60.) ncf.close()
0
0
0
3d98bc5392758f5ff781304dc25462056d321cef
11,674
py
Python
modeemintternet/models.py
modeemi/intternetvelho
f63027d6d845df4ed54516931623a5b7d501297f
[ "BSD-4-Clause" ]
4
2016-03-28T18:09:54.000Z
2018-09-19T16:47:33.000Z
modeemintternet/models.py
modeemi/intternetvelho
f63027d6d845df4ed54516931623a5b7d501297f
[ "BSD-4-Clause" ]
270
2015-04-03T21:42:45.000Z
2019-08-05T17:22:27.000Z
modeemintternet/models.py
modeemi/website
dc1710bbc5495fc525b975c9a3b5026a9c341868
[ "BSD-4-Clause" ]
4
2015-04-03T21:39:58.000Z
2019-05-01T19:30:21.000Z
from logging import getLogger from re import match from time import time from django.conf import settings from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from django.core.validators import MinValueValidator, MaxValueValidator from django.db import models, transaction from django.urls import reverse from django.utils.timezone import now from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt log = getLogger(__name__) # Existing modeemiuserdb models that have been created manually and previously handled by the custom database router. # Feel free to rename the models, but don't rename db_table values or field names. # Do not change model properties unless you know what you are doing, they are used by other programs.
30.401042
117
0.653161
from logging import getLogger from re import match from time import time from django.conf import settings from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from django.core.validators import MinValueValidator, MaxValueValidator from django.db import models, transaction from django.urls import reverse from django.utils.timezone import now from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt log = getLogger(__name__) def validate_username(username): User = get_user_model() if not match(r"^[a-z]+$", username): raise ValidationError("Käyttäjätunnuksen pitää koostua pienistä kirjaimista.") try: if ( Passwd.objects.filter(username__iexact=username).exists() or User.objects.filter(username__iexact=username).exists() ): raise ValidationError("Käyttäjätunnus ei ole saatavilla.") except Exception as e: log.exception("Error in querying passwd objects", exc_info=e) class News(models.Model): title = models.TextField(blank=False) text = models.TextField() location = models.TextField(blank=True) lat = models.FloatField(default=0.0) lon = models.FloatField(default=0.0) starts = models.DateTimeField(blank=True, null=True) ends = models.DateTimeField(blank=True, null=True) posted = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) poster = models.ForeignKey( settings.AUTH_USER_MODEL, editable=False, null=True, on_delete=models.PROTECT ) class Meta: verbose_name = "Uutinen" verbose_name_plural = "Uutiset" def __str__(self): return "{0} (luotu {1} UTC)".format(self.title, self.posted) def get_absolute_url(self): return reverse("news", args=[self.id]) class Soda(models.Model): name = models.CharField(max_length=128) price = models.DecimalField(max_digits=3, decimal_places=2) active = models.BooleanField(default=True) class Meta: verbose_name = "Limu" verbose_name_plural = "Limut" def __str__(self): return "{0}".format(self.name) class MembershipFee(models.Model): year = 
models.PositiveIntegerField( primary_key=True, unique=True, validators=[MinValueValidator(1975), MaxValueValidator(now().year + 1)], verbose_name="Vuosi", ) def __str__(self) -> str: return str(self.year) class Meta: ordering = ["-year"] get_latest_by = ["year"] verbose_name = "Jäsenmaksu" verbose_name_plural = "Jäsenmaksut" class Membership(models.Model): user = models.OneToOneField( settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name="Käyttäjä" ) fee = models.ManyToManyField(MembershipFee, blank=True, verbose_name="Jäsenmaksut") lifetime = models.BooleanField(default=False, verbose_name="Ainaisjäsenyys") municipality = models.CharField( max_length=128, blank=True, default="", verbose_name="Kotipaikka" ) key_engineering = models.BooleanField( default=False, verbose_name="Konehuoneen kova-avain" ) key_physical = models.BooleanField( default=False, verbose_name="Kerhohuoneen kova-avain" ) key_virtual = models.BooleanField( default=False, verbose_name="Kerhohuoneen virtuaaliavain" ) def __str__(self) -> str: return self.user.username def get_fee(self) -> str: if self.lifetime: return "Ainaisjäsen" try: return str(self.fee.latest()) except MembershipFee.DoesNotExist: return "" get_fee.short_description = "Jäsenmaksu" # type: ignore def get_keys(self) -> str: keys = [] if self.key_engineering: keys.append("kovo") if self.key_physical: keys.append("kerhohuone") if self.key_virtual: keys.append("virtuaalinen") return ", ".join(keys).capitalize() get_keys.short_description = "Avaimet" # type: ignore class Meta: ordering = ["user__username"] verbose_name = "Jäsenyys" verbose_name_plural = "Jäsenyydet" class Application(models.Model): class Shell: BASH = "/bin/bash" SH = "/bin/sh" ZSH = "/bin/zsh" TCSH = "/bin/tcsh" FALSE = "/bin/false" CHOICES = ((SH, SH), (BASH, BASH), (ZSH, ZSH), (TCSH, TCSH), (FALSE, FALSE)) # Actual application options first_name = models.CharField(max_length=32) last_name = models.CharField(max_length=32) email = models.EmailField() 
municipality = models.CharField(max_length=64) username = models.CharField(max_length=32, validators=[validate_username]) shell = models.CharField(max_length=32, choices=Shell.CHOICES, default=Shell.BASH) funet_rules_accepted = models.BooleanField(default=False) virtual_key_required = models.BooleanField(default=False) # Timestamps applied = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) # Password hashes sha512_crypt = models.CharField(max_length=128) sha256_crypt = models.CharField(max_length=128) md5_crypt = models.CharField(max_length=128) # Processing status application_accepted = models.BooleanField(default=False) application_rejected = models.BooleanField(default=False) application_processed = models.BooleanField(default=False) class Meta: verbose_name = "Hakemus" verbose_name_plural = "Hakemukset" def __str__(self): return "{0} {1} ({2})".format(self.first_name, self.last_name, self.applied) def generate_password_hashes(self, password): """ Refer to passlib documentation for adding new hashers: https://pythonhosted.org/passlib/lib/passlib.hash.html """ self.sha512_crypt = sha512_crypt.hash(password) self.sha256_crypt = sha256_crypt.hash(password) self.md5_crypt = md5_crypt.hash(password) self.save() @transaction.atomic def accept(self): if self.application_processed: raise ValidationError( "Application {} has already been accepted".format(self.username) ) User = get_user_model() user = User.objects.create( username=self.username, email=self.email, first_name=self.first_name, last_name=self.last_name, ) Membership.objects.create(user=user, municipality=self.municipality) group = UserGroup.objects.get(groupname="modeemi") passwd = Passwd.objects.create( username=self.username, uid=Passwd.get_free_uid(), gid=group.gid, gecos="{} {}".format(self.first_name, self.last_name), home="/home/{}".format(self.username), shell=self.shell, ) UserGroupMember.objects.create(username=passwd, groupname=group) 
Shadow.objects.create(username=passwd, lastchanged=int(time()) // 86400, min=0) for f in Format.objects.all(): h = { "SHA512": self.sha512_crypt, "SHA256": self.sha256_crypt, "MD5": self.md5_crypt, }.get(f.format, None) if h: ShadowFormat.objects.create( username=passwd, format=f, hash=h, last_updated=now() ) if self.virtual_key_required: group = UserGroup.objects.get(groupname="ovi") UserGroupMember.objects.create(username=passwd, groupname=group) self.application_accepted = True self.application_processed = True self.save() @transaction.atomic def reject(self): if self.application_processed: raise ValidationError( "Application {} has already been rejected".format(self.username) ) self.application_rejected = True self.application_processed = True self.save() class Feedback(models.Model): sender = models.CharField(blank=True, max_length=64) email = models.EmailField(blank=True) message = models.TextField(blank=False) sent = models.DateTimeField(auto_now_add=True) class Meta: verbose_name = "Palaute" verbose_name_plural = "Palautteet" def __str__(self): return "{0} ({1})".format( self.message[:25], self.sent # pylint: disable=unsubscriptable-object ) # Existing modeemiuserdb models that have been created manually and previously handled by the custom database router. # Feel free to rename the models, but don't rename db_table values or field names. # Do not change model properties unless you know what you are doing, they are used by other programs. 
class Format(models.Model): format = models.CharField(primary_key=True, max_length=32, db_index=False) description = models.TextField(default="") class Meta: db_table = "format" class Passwd(models.Model): username = models.CharField(primary_key=True, max_length=64, db_index=False) uid = models.IntegerField() gid = models.IntegerField() gecos = models.CharField(max_length=255) home = models.CharField(max_length=255) shell = models.CharField(max_length=255, choices=Application.Shell.CHOICES) @staticmethod def get_free_uid(): return 1 + Passwd.objects.order_by("uid").last().uid class Meta: db_table = "passwd" class Shadow(models.Model): username = models.OneToOneField( Passwd, db_index=False, db_constraint=False, db_column="username", on_delete=models.PROTECT, primary_key=True, ) lastchanged = models.IntegerField() min = models.IntegerField(default=0) max = models.IntegerField(blank=True, null=True) warn = models.IntegerField(blank=True, null=True) inact = models.IntegerField(blank=True, null=True) expire = models.IntegerField(blank=True, null=True) flags = models.IntegerField(blank=True, null=True) class Meta: db_table = "shadow" class ShadowFormat(models.Model): username = models.ForeignKey( Passwd, db_index=False, db_constraint=False, db_column="username", on_delete=models.PROTECT, ) format = models.ForeignKey( Format, db_index=False, db_constraint=False, db_column="format", on_delete=models.PROTECT, ) hash = models.CharField(max_length=1024) last_updated = models.DateTimeField(default=now) class Meta: db_table = "shadowformat" constraints = [ models.UniqueConstraint( fields=["username", "format"], name="shadowformat_username_key" ) ] class UserGroup(models.Model): groupname = models.CharField(primary_key=True, db_index=False, max_length=64) gid = models.IntegerField() class Meta: db_table = "usergroup" constraints = [ models.UniqueConstraint(fields=["gid"], name="usergroup_gid_key") ] class UserGroupMember(models.Model): groupname = models.ForeignKey( UserGroup, 
db_constraint=False, db_index=False, db_column="groupname", on_delete=models.PROTECT, ) username = models.ForeignKey( Passwd, db_constraint=False, db_index=False, db_column="username", on_delete=models.PROTECT, ) class Meta: db_table = "usergroupmember"
3,381
7,216
299
ca5caf08089302776d94055759a08bf41fcee900
1,535
py
Python
pokemon_project_python_201.py
radekwilk/pokemons-python201-project
38fdd7153d15c51da552bf4e0490ec7f881de5ad
[ "MIT" ]
null
null
null
pokemon_project_python_201.py
radekwilk/pokemons-python201-project
38fdd7153d15c51da552bf4e0490ec7f881de5ad
[ "MIT" ]
null
null
null
pokemon_project_python_201.py
radekwilk/pokemons-python201-project
38fdd7153d15c51da552bf4e0490ec7f881de5ad
[ "MIT" ]
null
null
null
# 1. Ask for user input # 2. Create a dynamic URL based on step 1 # 3. Fetch data from using url # 4. Convert json to dictionary # 5. Print Pokemon data import requests while True: # getting Pokemon name from the user user_pokemon_input = input( 'Please type name of pokemon you want to learn about? ') # getting url with a name entered by user pokemon_url = f'https://pokeapi.co/api/v2/pokemon/{user_pokemon_input.lower()}/' req = requests.get(pokemon_url) abilities = {} ability_name = [] # checking if entered Pokemon name exists if req.status_code != 200: print('Incorrect Pokemon name, try again!') continue # if entered name exists, we get data and display some information about that Pokemon else: pokemon_character = req.json() pokemon_name = pokemon_character['name'].capitalize() print("******* DETAILS ABOUT POKEMON ******") print(f"Entered name: {pokemon_name}") print(f"{pokemon_name} weight: {pokemon_character['weight']}") print(f"{pokemon_name} height: {pokemon_character['height']}") # getting list of Pokemin abilities for value in pokemon_character['abilities']: # appending ability_name list ability_name.append(value['ability']['name']) # print(f"List of {pokemon_name} abilities: {ability_name}\n") print(f"List of {pokemon_name} abilities:") for index, ability in enumerate(ability_name): print(f'{index + 1}. {ability}') break
37.439024
89
0.658632
# 1. Ask for user input # 2. Create a dynamic URL based on step 1 # 3. Fetch data from using url # 4. Convert json to dictionary # 5. Print Pokemon data import requests while True: # getting Pokemon name from the user user_pokemon_input = input( 'Please type name of pokemon you want to learn about? ') # getting url with a name entered by user pokemon_url = f'https://pokeapi.co/api/v2/pokemon/{user_pokemon_input.lower()}/' req = requests.get(pokemon_url) abilities = {} ability_name = [] # checking if entered Pokemon name exists if req.status_code != 200: print('Incorrect Pokemon name, try again!') continue # if entered name exists, we get data and display some information about that Pokemon else: pokemon_character = req.json() pokemon_name = pokemon_character['name'].capitalize() print("******* DETAILS ABOUT POKEMON ******") print(f"Entered name: {pokemon_name}") print(f"{pokemon_name} weight: {pokemon_character['weight']}") print(f"{pokemon_name} height: {pokemon_character['height']}") # getting list of Pokemin abilities for value in pokemon_character['abilities']: # appending ability_name list ability_name.append(value['ability']['name']) # print(f"List of {pokemon_name} abilities: {ability_name}\n") print(f"List of {pokemon_name} abilities:") for index, ability in enumerate(ability_name): print(f'{index + 1}. {ability}') break
0
0
0
efd7a81ad423c38bb92b509ca7e1613355d987e7
69
py
Python
notebook/book/python/Learn-OOP-with-Python/Chapter-2/python_init_test/module_1/say.py
JMwill/note
30e931f18c9ba942f5e5040b524047a996cf0c6c
[ "MIT" ]
null
null
null
notebook/book/python/Learn-OOP-with-Python/Chapter-2/python_init_test/module_1/say.py
JMwill/note
30e931f18c9ba942f5e5040b524047a996cf0c6c
[ "MIT" ]
2
2018-11-27T10:45:45.000Z
2018-12-13T14:44:54.000Z
notebook/book/python/Learn-OOP-with-Python/Chapter-2/python_init_test/module_2/say.py
JMwill/note
30e931f18c9ba942f5e5040b524047a996cf0c6c
[ "MIT" ]
null
null
null
if __name__ == '__main__': say()
11.5
26
0.57971
def say(): print(__name__) if __name__ == '__main__': say()
9
0
22
b864beec013df48447b4dba70c53b9227d007099
59
py
Python
gunicorn_config.py
DS4A-Team19-2021/Agustin-Codazzi-Project
1fbfb093c013a2f840d586754fdd17b5bee9ea24
[ "MIT" ]
1
2021-09-22T00:31:27.000Z
2021-09-22T00:31:27.000Z
gunicorn_config.py
DS4A-Team19-2021/Agustin-Codazzi-Project
1fbfb093c013a2f840d586754fdd17b5bee9ea24
[ "MIT" ]
null
null
null
gunicorn_config.py
DS4A-Team19-2021/Agustin-Codazzi-Project
1fbfb093c013a2f840d586754fdd17b5bee9ea24
[ "MIT" ]
null
null
null
bind = "0.0.0.0:8085" workers = 2 threads = 2 timeout = 120
14.75
21
0.644068
bind = "0.0.0.0:8085" workers = 2 threads = 2 timeout = 120
0
0
0
a37feba7f51aa7fb106c5bc7b7baf3ebca747a23
1,994
py
Python
src/common_utils_data/jupyter_functions.py
Mi524/common_utils_data
400baf3be1bc96a06bd1f3d70abaf8dd749cfb85
[ "MIT" ]
null
null
null
src/common_utils_data/jupyter_functions.py
Mi524/common_utils_data
400baf3be1bc96a06bd1f3d70abaf8dd749cfb85
[ "MIT" ]
null
null
null
src/common_utils_data/jupyter_functions.py
Mi524/common_utils_data
400baf3be1bc96a06bd1f3d70abaf8dd749cfb85
[ "MIT" ]
null
null
null
import re import nbformat from nbconvert import PythonExporter import warnings warnings.filterwarnings('ignore') def nbconverter(notebookPath,directionPath=None): """method name is the same as jupyter default converter name : Nbconvert(notebookPath,directionPath) :param notebookPath: source path of the ipynb file you want to convert :param direction Path : direction path of the formatted .py file """ try: with open(notebookPath,'r',encoding='utf-8') as fh: nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT) except FileNotFoundError: if '.ipynb' not in notebookPath: notebookPath += '.ipynb' with open(notebookPath,'r',encoding='utf-8') as fh: nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT) if directionPath == None: directionPath = notebookPath.replace('.ipynb','.py') pattern_input = r'# In\[[\d\s]*\]:' pattern_comment = '^#.+' pattern_variables = r'^[a-zA-Z0-9_]+\[?[ |0-9]*\]? *$' pattern_square_bracket = r'^\[.*\]$' pattern_string = r"^[\'|\"].*[\'|\"] *$" pattern_number = r'^\d+ *[\+|\-|\*|\/]? *\d* *$' exporter = PythonExporter() source, meta = exporter.from_notebook_node(nb) source = source.split('\n')[2:] source = [ t for t in source if t and not re.match(pattern_input,t)] source = [ '\n' + t if re.match(pattern_comment,t) else t for t in source ] source = [re.sub(pattern=pattern_variables,repl=print_match,string=t) for t in source] source = [re.sub(pattern=pattern_square_bracket,repl=print_match,string=t) for t in source] source = [re.sub(pattern=pattern_string,repl=print_match,string=t) for t in source] source = '\n'.join(source) + '\n\n' with open(directionPath, 'w+',encoding='utf-8') as fh: fh.write(source) print('{} has been saved'.format(directionPath)) nbconverter(r"C:\Users\Administrator.DG-11030335\Scripts\voc_alarm\外销意见反馈预警监控\convert2alarm.ipynb")
37.622642
99
0.679539
import re import nbformat from nbconvert import PythonExporter import warnings warnings.filterwarnings('ignore') def nbconverter(notebookPath,directionPath=None): """method name is the same as jupyter default converter name : Nbconvert(notebookPath,directionPath) :param notebookPath: source path of the ipynb file you want to convert :param direction Path : direction path of the formatted .py file """ try: with open(notebookPath,'r',encoding='utf-8') as fh: nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT) except FileNotFoundError: if '.ipynb' not in notebookPath: notebookPath += '.ipynb' with open(notebookPath,'r',encoding='utf-8') as fh: nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT) if directionPath == None: directionPath = notebookPath.replace('.ipynb','.py') pattern_input = r'# In\[[\d\s]*\]:' pattern_comment = '^#.+' pattern_variables = r'^[a-zA-Z0-9_]+\[?[ |0-9]*\]? *$' pattern_square_bracket = r'^\[.*\]$' pattern_string = r"^[\'|\"].*[\'|\"] *$" pattern_number = r'^\d+ *[\+|\-|\*|\/]? *\d* *$' def print_match(matched): return 'print({})'.format(matched.group().strip()) exporter = PythonExporter() source, meta = exporter.from_notebook_node(nb) source = source.split('\n')[2:] source = [ t for t in source if t and not re.match(pattern_input,t)] source = [ '\n' + t if re.match(pattern_comment,t) else t for t in source ] source = [re.sub(pattern=pattern_variables,repl=print_match,string=t) for t in source] source = [re.sub(pattern=pattern_square_bracket,repl=print_match,string=t) for t in source] source = [re.sub(pattern=pattern_string,repl=print_match,string=t) for t in source] source = '\n'.join(source) + '\n\n' with open(directionPath, 'w+',encoding='utf-8') as fh: fh.write(source) print('{} has been saved'.format(directionPath)) nbconverter(r"C:\Users\Administrator.DG-11030335\Scripts\voc_alarm\外销意见反馈预警监控\convert2alarm.ipynb")
58
0
26
01135b803f5beaf4d240c3e7cf8cfe01a0e87dbb
959
py
Python
solution/lc5373.py
sth4nothing/pyleetcode
70ac2dc55b0cbcd243b38103a96dd796538a3c05
[ "MIT" ]
null
null
null
solution/lc5373.py
sth4nothing/pyleetcode
70ac2dc55b0cbcd243b38103a96dd796538a3c05
[ "MIT" ]
null
null
null
solution/lc5373.py
sth4nothing/pyleetcode
70ac2dc55b0cbcd243b38103a96dd796538a3c05
[ "MIT" ]
null
null
null
# encoding: utf-8 import bisect import collections import json import math import queue import heapq from typing import (Any, Callable, Counter, DefaultDict, Dict, Iterable, List, Set, Tuple) data: Dict[str, List[Any]] = json.loads(''' {"inputs":[[7],[10],[19]],"outputs":[2,2,3]} ''') fib = [1, 1] while True: x = fib[-2] + fib[-1] if x > 0x7fffffff: break fib.append(x) s = Solution() for args, eq in zip(data['inputs'], data['outputs']): assert_equal(s.findMinFibonacciNumbers(*args), eq)
20.847826
78
0.558916
# encoding: utf-8 import bisect import collections import json import math import queue import heapq from typing import (Any, Callable, Counter, DefaultDict, Dict, Iterable, List, Set, Tuple) def assert_equal(x: Any, eq: Any): if x == eq: print(f'√\t{x}=={eq}') else: print(f'×\t{x}!={eq}') data: Dict[str, List[Any]] = json.loads(''' {"inputs":[[7],[10],[19]],"outputs":[2,2,3]} ''') fib = [1, 1] while True: x = fib[-2] + fib[-1] if x > 0x7fffffff: break fib.append(x) class Solution: def findMinFibonacciNumbers(self, k: int) -> int: def findClosestFibNum(num): i = bisect.bisect(fib, num) - 1 return fib[i] ans = 0 while k > 0: k -= findClosestFibNum(k) ans += 1 return ans s = Solution() for args, eq in zip(data['inputs'], data['outputs']): assert_equal(s.findMinFibonacciNumbers(*args), eq)
353
-6
72
ebeb78c187a0b89a66d9bee323f6b1f8040a3ccb
405
py
Python
com/LimePencil/Q13305/Main.py
LimePencil/baekjoonProblems
61eeeeb875585d165d9e39ecdb3d905b4ba6aa87
[ "MIT" ]
2
2021-07-17T13:05:42.000Z
2021-09-12T09:14:24.000Z
com/LimePencil/Q13305/Main.py
LimePencil/baekjoonProblems
61eeeeb875585d165d9e39ecdb3d905b4ba6aa87
[ "MIT" ]
null
null
null
com/LimePencil/Q13305/Main.py
LimePencil/baekjoonProblems
61eeeeb875585d165d9e39ecdb3d905b4ba6aa87
[ "MIT" ]
null
null
null
import sys n = int(sys.stdin.readline().rstrip("\n")) distance = list(map(int,sys.stdin.readline().rstrip("\n").split(" "))) price = list(map(int,sys.stdin.readline().rstrip("\n").split(" "))) money = 0 curr_price = price[0] for i in range(len(price)-1): if curr_price > price[i]: curr_price = price[i] else: price[i]= curr_price money += price[i]*distance[i] print(money)
22.5
70
0.619753
import sys n = int(sys.stdin.readline().rstrip("\n")) distance = list(map(int,sys.stdin.readline().rstrip("\n").split(" "))) price = list(map(int,sys.stdin.readline().rstrip("\n").split(" "))) money = 0 curr_price = price[0] for i in range(len(price)-1): if curr_price > price[i]: curr_price = price[i] else: price[i]= curr_price money += price[i]*distance[i] print(money)
0
0
0
bc94ba777ef1705443dc51220cd1597fd1077ef2
1,020
py
Python
cvat/apps/dataset_repo/migrations/0001_initial.py
adasdevops/ADAS_UPDATE_STABLE
306202b4e291b5876e3dd4fdd201c761e9d182f0
[ "Intel", "MIT" ]
null
null
null
cvat/apps/dataset_repo/migrations/0001_initial.py
adasdevops/ADAS_UPDATE_STABLE
306202b4e291b5876e3dd4fdd201c761e9d182f0
[ "Intel", "MIT" ]
null
null
null
cvat/apps/dataset_repo/migrations/0001_initial.py
adasdevops/ADAS_UPDATE_STABLE
306202b4e291b5876e3dd4fdd201c761e9d182f0
[ "Intel", "MIT" ]
1
2022-03-04T09:18:30.000Z
2022-03-04T09:18:30.000Z
# Generated by Django 3.2.12 on 2022-02-14 16:39 import cvat.apps.dataset_repo.models from django.db import migrations, models import django.db.models.deletion
34
145
0.613725
# Generated by Django 3.2.12 on 2022-02-14 16:39 import cvat.apps.dataset_repo.models from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('engine', '0001_initial'), ] operations = [ migrations.CreateModel( name='GitData', fields=[ ('task', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='engine.task')), ('url', models.URLField(max_length=2000)), ('path', models.CharField(max_length=256)), ('format', models.CharField(blank=True, max_length=256)), ('sync_date', models.DateTimeField(auto_now_add=True)), ('status', models.CharField(default=cvat.apps.dataset_repo.models.GitStatusChoice['NON_SYNCED'], max_length=20)), ('lfs', models.BooleanField(default=True)), ], ), ]
0
835
23
c765363d3dac71f961f0d0c83995838836d3df92
2,942
py
Python
tests/utils/test_crypt_util.py
bhavik2018/WALinuxAgent
684120121b44bfbf533dda3414be2fe10757f53a
[ "Apache-2.0" ]
1
2021-01-22T05:44:51.000Z
2021-01-22T05:44:51.000Z
tests/utils/test_crypt_util.py
bhavik2018/WALinuxAgent
684120121b44bfbf533dda3414be2fe10757f53a
[ "Apache-2.0" ]
null
null
null
tests/utils/test_crypt_util.py
bhavik2018/WALinuxAgent
684120121b44bfbf533dda3414be2fe10757f53a
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import base64 import binascii import errno as errno import glob import random import string import subprocess import tempfile import uuid import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.exception import CryptError from azurelinuxagent.common.version import PY_VERSION_MAJOR from tests.tools import * from subprocess import CalledProcessError if __name__ == '__main__': unittest.main()
40.861111
103
0.741672
# Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import base64 import binascii import errno as errno import glob import random import string import subprocess import tempfile import uuid import azurelinuxagent.common.conf as conf import azurelinuxagent.common.utils.shellutil as shellutil from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.exception import CryptError from azurelinuxagent.common.version import PY_VERSION_MAJOR from tests.tools import * from subprocess import CalledProcessError class TestCryptoUtilOperations(AgentTestCase): def test_decrypt_encrypted_text(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/sample.pem")) secret = ']aPPEv}uNg1FPnl?' 
crypto = CryptUtil(conf.get_openssl_cmd()) decrypted_string = crypto.decrypt_secret(encrypted_string, prv_key) self.assertEquals(secret, decrypted_string, "decrypted string does not match expected") def test_decrypt_encrypted_text_missing_private_key(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CalledProcessError, crypto.decrypt_secret, encrypted_string, "abc" + prv_key) def test_decrypt_encrypted_text_wrong_private_key(self): encrypted_string = load_data("wire/encrypted.enc") prv_key = os.path.join(self.tmp_dir, "wrong.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/trans_prv")) crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CalledProcessError, crypto.decrypt_secret, encrypted_string, prv_key) def test_decrypt_encrypted_text_text_not_encrypted(self): encrypted_string = "abc@123" prv_key = os.path.join(self.tmp_dir, "TransportPrivate.pem") with open(prv_key, 'w+') as c: c.write(load_data("wire/sample.pem")) crypto = CryptUtil(conf.get_openssl_cmd()) self.assertRaises(CryptError, crypto.decrypt_secret, encrypted_string, prv_key) if __name__ == '__main__': unittest.main()
1,572
25
134
2ebe99b58d926cf4296236885d782f26416f67ce
4,708
py
Python
scripts/calpha.py
rwk-unil/gqt
9deb46196abc8bebe337d7516f46949d7aeab538
[ "MIT" ]
112
2015-01-04T17:25:04.000Z
2022-02-02T10:39:32.000Z
scripts/calpha.py
rwk-unil/gqt
9deb46196abc8bebe337d7516f46949d7aeab538
[ "MIT" ]
25
2015-03-24T14:29:50.000Z
2021-09-27T10:14:24.000Z
scripts/calpha.py
rwk-unil/gqt
9deb46196abc8bebe337d7516f46949d7aeab538
[ "MIT" ]
31
2015-04-29T20:53:22.000Z
2021-03-29T08:28:33.000Z
#!/usr/bin/env python import sys import math import numpy as np from optparse import OptionParser def choose(n, k): """ A fast way to calculate binomial coefficients by Andrew Dalke (contrib). """ if 0 <= k <= n: ntok = 1 ktok = 1 for t in xrange(1, min(k, n - k) + 1): ntok *= n ktok *= t n -= 1 return ntok // ktok else: return 0 if __name__ == "__main__": sys.exit(main()) (END)
27.057471
76
0.402082
#!/usr/bin/env python import sys import math import numpy as np from optparse import OptionParser def choose(n, k): """ A fast way to calculate binomial coefficients by Andrew Dalke (contrib). """ if 0 <= k <= n: ntok = 1 ktok = 1 for t in xrange(1, min(k, n - k) + 1): ntok *= n ktok *= t n -= 1 return ntok // ktok else: return 0 def Ta(y_i, n_i, p): return (y_i - n_i*p)**2 - (n_i * p * (1 - p)) def C(n_distro, p): c = 0 for n in n_distro: s = 0 for u in range(n): s += (((u - n*p)**2 - n*p*(1-p))**2 ) * \ choose(n, u) * (p**u) * ((1-p)**(n-u)) c += n_distro[n] * s return c def to_map(s): m = {} for k_v in s.split(';'): A = k_v.split('=') if len(A) > 1: m[A[0]] = A[1] else: m[A[0]] = None return m def prog(): parser = OptionParser() parser.add_option("-v", "--vcf", help="VCF output from gqt calpha command", dest="vcf") parser.add_option("-b", "--bed", dest="bed", help="BED file defining regions, genes, groups, etc.") (options, args) = parser.parse_args() if not options.vcf: parser.error('VCF file not given') if not options.bed: parser.error('BED file not given') Regions = [] f = open(options.bed,'r') for l in f: if l[0] != '#': A = l.rstrip().split('\t') Regions.append([A[0], int(A[1]), int(A[2])]) f = open(options.vcf,'r') T_alpha = 0 n_distro = {} curr_Region = -1 in_region = 0 p = -1 for l in f: if l[0] != '#': A = l.rstrip().split('\t') # test to see if we are entering or exiting a region if (in_region == 0): #currently out # see if we enter the next region if (Regions[curr_Region+1][0] == A[0]) and \ (int(A[1]) >= Regions[curr_Region+1][1]) and \ (int(A[1]) < Regions[curr_Region+1][2]): #print 'in', A[0], A[1] in_region = 1 curr_Region += 1 T_alpha = 0 n_distro = {} elif in_region == 1: #currently in # see if we left if (Regions[curr_Region][0] != A[0]) or \ (int(A[1]) >= Regions[curr_Region][2]): print '\t'.join([Regions[curr_Region][0], \ str(Regions[curr_Region][1]), \ str(Regions[curr_Region][2]), \ 'T:' + \ str(T_alpha), \ 'Z:' + \ str(T_alpha / 
np.sqrt(C(n_distro, p)))]) in_region = 0 #else: #print 'still in', A[0], A[1] #just left, see if that was the last region if (in_region == 0) and (curr_Region + 1 == len(Regions)): break #just left, see if we entered the next region if in_region == 0 and \ (Regions[curr_Region+1][0] == A[0]) and \ (int(A[1]) >= Regions[curr_Region+1][1]) and \ (int(A[1]) < Regions[curr_Region+1][2]): T_alpha = 0 n_distro = {} in_region = 1 curr_Region += 1 if in_region == 0: continue m = to_map(A[7]) if p == -1: n_case = int(m['N_CASE']) n_ctrl = int(m['N_CTRL']) p = n_case / float(n_case + n_ctrl) y = int(m['O_CASE']) n = y + int(m['O_CTRL']) T_alpha += Ta(y, n, p) if (n) not in n_distro: n_distro[n] = 1 else: n_distro[n] += 1 if in_region == 1: print '\t'.join([Regions[curr_Region][0], \ str(Regions[curr_Region][1]), \ str(Regions[curr_Region][2]), \ 'T:' + \ str(T_alpha), \ 'Z:' + \ str(T_alpha / np.sqrt(C(n_distro, p)))]) f.close() def main(): try: prog(); except IOError as err: sys.stderr.write("IOError:" + str(err) + '\n'); return; if __name__ == "__main__": sys.exit(main()) (END)
4,104
0
115
a73f1c2e588560e15532744d739b6a087d653e0c
3,486
py
Python
examples/fwd_test.py
Jack-Shen/fwd_python_api
d9fd0dfb0dfb6eb8755058028ebe2a09e4835442
[ "MIT" ]
null
null
null
examples/fwd_test.py
Jack-Shen/fwd_python_api
d9fd0dfb0dfb6eb8755058028ebe2a09e4835442
[ "MIT" ]
null
null
null
examples/fwd_test.py
Jack-Shen/fwd_python_api
d9fd0dfb0dfb6eb8755058028ebe2a09e4835442
[ "MIT" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 # ### 1 - SET UP THE API # In[1]: import requests import sys import os sys.path.insert(0, "/home/admin/ansible/fwd_python_api") sys.path.append("..") import fwd_json from fwd_json import fwdApi username = os.environ['fwd_saas_user'] token = os.environ['fwd_saas_token'] network = 137407 fwd = fwdApi("https://fwd.app/api", username, token,network, {}, verify=True) nqeUrl = "https://fwd.app/api/nqe?networkId={}".format(network) # In[2]: print(fwd.get_all_networks) #start collection fwd.start_collection(network).text # In[3]: #get the latest snapshot id latest_snap = fwd.get_snapshot_latest(network).json()['id'] print(latest_snap) # In[4]: #basic NQE to get a report of the network query = ''' foreach d in network.devices select { name: d.name, mgmtIP: d.platform.managementIps, model: d.platform.model, osType: d.platform.os, osVersion: d.platform.osVersion, serial: (foreach c in d.platform.components where isPresent(c.serialNumber) select c.serialNumber) }''' # In[5]: fwd.post_nqe_check(query) # ### 2 - METHOD 1 OF RUNNING NQE: define the query as a string # In[20]: #define a block blockConfig=''' block=``` ip access-list standard BASELINE_ACL 10 permit 192.168.252.94/31 20 {"permit" | "deny"} host 192.168.252.220 ```; foreach d in network.devices where isPresent(d.platform.osVersion) where d.platform.os == OS.ARISTA_EOS let diff = blockDiff_alpha1(d.files.config, block) //where diff.diffCount != 0 select { name: d.name, model: d.platform.model, missing_config: diff.blocks } ''' # In[21]: fwd.post_nqe_check(blockConfig) # ### 3 - METHOD 2 OF RUNNING NQE: define the API within NQE # In[22]: #check baseline CONFIG for all devices config = open('baseline_acl.txt', 'r').read() config = "test" queryId = "Q_e6ec1965d99271ce3e3a7223897469efc253468f" payload = {"config": config} response = fwd.post_nqe_para_check(queryId, payload) missingConfig = response print(missingConfig) # In[23]: #check BASELINE CONFIG for subset of devices config = 
open('baseline_acl.txt', 'r').read() config = "test" queryId = "Q_b7ed24370895b73d6ddbef0b81cffe04d22ae6f5" #define which device to check inputDevice = ["leaf1"] payload = {"config": config, "deviceList": inputDevice} response = fwd.post_nqe_para_check(queryId, payload) missingConfig = response print(missingConfig) # In[26]: #parameterized NQE for BGP neighbor queryId = "Q_8178355cfc658ab46cac0f07f2b033f68fa92c80" payload = {"deviceList": ["leaf4", "leaf2"]} response = fwd.post_nqe_para_check(queryId, payload) print(response) # In[27]: #parameterized NQE for interfaces that are down queryId = "Q_37cac69e9e54556d97b175d8392fa307d7a7afc8" payload = {"deviceList": ["leaf4"]} response = fwd.post_nqe_para_check(queryId, payload) print(response) # ### 4 - PATH SEARCH API # In[28]: #simple path search api srcIP = "192.168.100.1" dstIP = "192.168.100.4" fwd.get_path_search(latest_snap,srcIP, dstIP).json() # In[29]: #advanced use - define a path search and add as "Existential" intent check sourceIp = fwd_json.gen_location(SubnetLocationFilter="192.168.100.1/32") destIp = fwd_json.gen_location(SubnetLocationFilter="192.168.100.4/32") fwd.post_existance_check(snapshotID=latest_snap, FROM=(sourceIp), TO=(destIp)) # In[30]: #get results for all "Existential" intent check result = fwd.get_intent_checks(latest_snap, "Existential").json() print(result) # In[ ]:
19.47486
78
0.722892
#!/usr/bin/env python # coding: utf-8 # ### 1 - SET UP THE API # In[1]: import requests import sys import os sys.path.insert(0, "/home/admin/ansible/fwd_python_api") sys.path.append("..") import fwd_json from fwd_json import fwdApi username = os.environ['fwd_saas_user'] token = os.environ['fwd_saas_token'] network = 137407 fwd = fwdApi("https://fwd.app/api", username, token,network, {}, verify=True) nqeUrl = "https://fwd.app/api/nqe?networkId={}".format(network) # In[2]: print(fwd.get_all_networks) #start collection fwd.start_collection(network).text # In[3]: #get the latest snapshot id latest_snap = fwd.get_snapshot_latest(network).json()['id'] print(latest_snap) # In[4]: #basic NQE to get a report of the network query = ''' foreach d in network.devices select { name: d.name, mgmtIP: d.platform.managementIps, model: d.platform.model, osType: d.platform.os, osVersion: d.platform.osVersion, serial: (foreach c in d.platform.components where isPresent(c.serialNumber) select c.serialNumber) }''' # In[5]: fwd.post_nqe_check(query) # ### 2 - METHOD 1 OF RUNNING NQE: define the query as a string # In[20]: #define a block blockConfig=''' block=``` ip access-list standard BASELINE_ACL 10 permit 192.168.252.94/31 20 {"permit" | "deny"} host 192.168.252.220 ```; foreach d in network.devices where isPresent(d.platform.osVersion) where d.platform.os == OS.ARISTA_EOS let diff = blockDiff_alpha1(d.files.config, block) //where diff.diffCount != 0 select { name: d.name, model: d.platform.model, missing_config: diff.blocks } ''' # In[21]: fwd.post_nqe_check(blockConfig) # ### 3 - METHOD 2 OF RUNNING NQE: define the API within NQE # In[22]: #check baseline CONFIG for all devices config = open('baseline_acl.txt', 'r').read() config = "test" queryId = "Q_e6ec1965d99271ce3e3a7223897469efc253468f" payload = {"config": config} response = fwd.post_nqe_para_check(queryId, payload) missingConfig = response print(missingConfig) # In[23]: #check BASELINE CONFIG for subset of devices config = 
open('baseline_acl.txt', 'r').read() config = "test" queryId = "Q_b7ed24370895b73d6ddbef0b81cffe04d22ae6f5" #define which device to check inputDevice = ["leaf1"] payload = {"config": config, "deviceList": inputDevice} response = fwd.post_nqe_para_check(queryId, payload) missingConfig = response print(missingConfig) # In[26]: #parameterized NQE for BGP neighbor queryId = "Q_8178355cfc658ab46cac0f07f2b033f68fa92c80" payload = {"deviceList": ["leaf4", "leaf2"]} response = fwd.post_nqe_para_check(queryId, payload) print(response) # In[27]: #parameterized NQE for interfaces that are down queryId = "Q_37cac69e9e54556d97b175d8392fa307d7a7afc8" payload = {"deviceList": ["leaf4"]} response = fwd.post_nqe_para_check(queryId, payload) print(response) # ### 4 - PATH SEARCH API # In[28]: #simple path search api srcIP = "192.168.100.1" dstIP = "192.168.100.4" fwd.get_path_search(latest_snap,srcIP, dstIP).json() # In[29]: #advanced use - define a path search and add as "Existential" intent check sourceIp = fwd_json.gen_location(SubnetLocationFilter="192.168.100.1/32") destIp = fwd_json.gen_location(SubnetLocationFilter="192.168.100.4/32") fwd.post_existance_check(snapshotID=latest_snap, FROM=(sourceIp), TO=(destIp)) # In[30]: #get results for all "Existential" intent check result = fwd.get_intent_checks(latest_snap, "Existential").json() print(result) # In[ ]:
0
0
0
b66e07a40f973e481532ee8a3497d7bb086a3065
37
py
Python
SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-PyQt4.Qt3Support.py
alexsigaras/SWIM
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
[ "MIT" ]
47
2020-03-08T08:43:28.000Z
2022-03-18T18:51:55.000Z
SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-PyQt4.Qt3Support.py
alexsigaras/SWIM
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
[ "MIT" ]
null
null
null
SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-PyQt4.Qt3Support.py
alexsigaras/SWIM
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
[ "MIT" ]
16
2020-03-08T08:43:30.000Z
2022-01-10T22:05:57.000Z
hiddenimports = ['sip', 'PyQt4._qt']
18.5
36
0.648649
hiddenimports = ['sip', 'PyQt4._qt']
0
0
0
4c2b38e42ec870c19d3d951346e64ac61ac3bfc2
11,373
py
Python
packages/snet_cli/snet_cli/config.py
anandrgitnirman/snet-cli
6c8a60fe9cc1c1737f6f493369b7fbec5340fe69
[ "MIT" ]
67
2018-05-21T22:15:38.000Z
2022-03-05T15:53:33.000Z
packages/snet_cli/snet_cli/config.py
anandrgitnirman/snet-cli
6c8a60fe9cc1c1737f6f493369b7fbec5340fe69
[ "MIT" ]
295
2018-05-27T23:43:36.000Z
2021-10-06T14:33:40.000Z
packages/snet_cli/snet_cli/config.py
anandrgitnirman/snet-cli
6c8a60fe9cc1c1737f6f493369b7fbec5340fe69
[ "MIT" ]
48
2018-05-22T21:15:28.000Z
2022-02-10T04:11:56.000Z
from configparser import ConfigParser, ExtendedInterpolation from pathlib import Path default_snet_folder = Path("~").expanduser().joinpath(".snet")
47.585774
151
0.660248
from configparser import ConfigParser, ExtendedInterpolation from pathlib import Path default_snet_folder = Path("~").expanduser().joinpath(".snet") class Config(ConfigParser): def __init__(self, _snet_folder=default_snet_folder): super(Config, self).__init__(interpolation=ExtendedInterpolation(), delimiters=("=",)) self._config_file = _snet_folder.joinpath("config") if (self._config_file.exists()): with open(self._config_file) as f: self.read_file(f) else: self.create_default_config() def get_session_network_name(self): session_network = self["session"]["network"] self._check_section("network.%s" % session_network) return session_network def safe_get_session_identity_network_names(self): if ("identity" not in self["session"]): first_identity_message_and_exit() session_identity = self["session"]["identity"] self._check_section("identity.%s" % session_identity) session_network = self.get_session_network_name() network = self._get_identity_section(session_identity).get("network") if (network and network != session_network): raise Exception("Your session identity '%s' is bind to network '%s', which is different from your" " session network '%s', please switch identity or network" % ( session_identity, network, session_network)) return session_identity, session_network def set_session_network(self, network, out_f): self._set_session_network(network, out_f) if ("identity" in self["session"]): session_identity = self["session"]["identity"] identity_network = self._get_identity_section(session_identity).get("network") if (identity_network and identity_network != network): print("Your new session network '%s' is incompatible with your current session identity '%s' " "(which is bind to network '%s'), please switch your identity" % ( network, session_identity, identity_network), file=out_f); def _set_session_network(self, network, out_f): if (network not in self.get_all_networks_names()): raise Exception("Network %s is not in config" % network) print("Switch to network: %s" 
% network, file=out_f) self["session"]["network"] = network self._persist() def set_session_identity(self, identity, out_f): if (identity not in self.get_all_identities_names()): raise Exception('Identity "%s" is not in config' % identity) network = self._get_identity_section(identity).get("network") if (network): print('Identity "%s" is bind to network "%s"' % (identity, network), file=out_f) self._set_session_network(network, out_f) else: print( 'Identity "%s" is not bind to any network. You should switch network manually if you need.' % identity, file=out_f) print("Switch to identity: %s" % (identity), file=out_f) self["session"]["identity"] = identity self._persist() # session is the union of session.identity + session.network + default_ipfs_endpoint # if value is presented in both session.identity and session.network we get it from session.identity (can happen only for default_eth_rpc_endpoint) def get_session_field(self, key, exception_if_not_found=True): session_identity, session_network = self.safe_get_session_identity_network_names() rez_identity = self._get_identity_section(session_identity).get(key) rez_network = self._get_network_section(session_network).get(key) rez_ipfs = None if (key == "default_ipfs_endpoint"): rez_ipfs = self.get_ipfs_endpoint() rez = rez_identity or rez_network or rez_ipfs if (not rez and exception_if_not_found): raise Exception("Cannot find %s in the session.identity and in the session.network" % key) return rez def set_session_field(self, key, value, out_f): if (key == "default_ipfs_endpoint"): self.set_ipfs_endpoint(value) print("set default_ipfs_endpoint=%s" % value, file=out_f) elif (key in get_session_network_keys()): session_network = self.get_session_network_name(); self.set_network_field(session_network, key, value) print("set {}={} for network={}".format(key, value, session_network), file=out_f) elif (key in get_session_identity_keys()): session_identity, _ = self.safe_get_session_identity_network_names() 
self.set_identity_field(session_identity, key, value) print("set {}={} for identity={}".format(key, value, session_identity), file=out_f) else: all_keys = get_session_network_keys() + get_session_identity_keys() + ["default_ipfs_endpoint"] raise Exception("key {} not in {}".format(key, all_keys)) def unset_session_field(self, key, out_f): if (key in get_session_network_keys_removable()): print("unset %s from network %s" % (key, self["session"]["network"]), file=out_f) del self._get_network_section(self["session"]["network"])[key] self._persist() def session_to_dict(self): session_identity, session_network = self.safe_get_session_identity_network_names() show = {"session", "network.%s" % session_network, "identity.%s" % session_identity, "ipfs"} rez = {f: dict(self[f]) for f in show} return rez def add_network(self, network, rpc_endpoint, default_gas_price): network_section = "network.%s" % network if (network_section in self): raise Exception("Network section %s already exists in config" % network) self[network_section] = {} self[network_section]["default_eth_rpc_endpoint"] = str(rpc_endpoint) self[network_section]["default_gas_price"] = str(default_gas_price) self._persist() def set_network_field(self, network, key, value): self._get_network_section(network)[key] = str(value) self._persist() def add_identity(self, identity_name, identity, out_f): identity_section = "identity.%s" % identity_name if (identity_section in self): raise Exception("Identity section %s already exists in config" % identity_section) if ("network" in identity and identity["network"] not in self.get_all_networks_names()): raise Exception("Network %s is not in config" % identity["network"]) self[identity_section] = identity self._persist() # switch to it, if it was the first identity if (len(self.get_all_identities_names()) == 1): print("You've just added your first identity %s. We will automatically switch to it!" 
% identity_name) self.set_session_identity(identity_name, out_f) def set_identity_field(self, identity, key, value): self._get_identity_section(identity)[key] = str(value) self._persist() def _get_network_section(self, network): """ return section for network or identity """ return self["network.%s" % network] def _get_identity_section(self, identity): """ return section for the specific identity """ return self["identity.%s" % identity] def get_ipfs_endpoint(self): return self["ipfs"]["default_ipfs_endpoint"] def set_ipfs_endpoint(self, ipfs_endpoint): self["ipfs"]["default_ipfs_endpoint"] = ipfs_endpoint self._persist() def get_all_identities_names(self): return [x[len("identity."):] for x in self.sections() if x.startswith("identity.")] def get_all_networks_names(self): return [x[len("network."):] for x in self.sections() if x.startswith("network.")] def delete_identity(self, identity_name): if (identity_name not in self.get_all_identities_names()): raise Exception("identity_name {} does not exist".format(identity_name)) session_identity, _ = self.safe_get_session_identity_network_names() if (identity_name == session_identity): raise Exception("identity_name {} is in use".format(identity_name)) self.remove_section("identity.{}".format(identity_name)) self._persist() def create_default_config(self): """ Create default configuration if config file does not exist """ # make config directory with the minimal possible permission self._config_file.parent.mkdir(mode=0o700, exist_ok=True) self["network.kovan"] = { "default_eth_rpc_endpoint": "https://kovan.infura.io/v3/09027f4a13e841d48dbfefc67e7685d5", "default_gas_price": "medium"} self["network.mainnet"] = { "default_eth_rpc_endpoint": "https://mainnet.infura.io/v3/09027f4a13e841d48dbfefc67e7685d5", "default_gas_price": "medium"} self["network.ropsten"] = { "default_eth_rpc_endpoint": "https://ropsten.infura.io/v3/09027f4a13e841d48dbfefc67e7685d5", "default_gas_price": "medium"} self["network.rinkeby"] = { 
"default_eth_rpc_endpoint": "https://rinkeby.infura.io/v3/09027f4a13e841d48dbfefc67e7685d5", "default_gas_price": "medium"} self["ipfs"] = {"default_ipfs_endpoint": "http://ipfs.singularitynet.io:80"} self["session"] = {"network": "ropsten"} self._persist() print("We've created configuration file with default values in: %s\n" % str(self._config_file)) def _check_section(self, s): if (s not in self): raise Exception("Config error, section %s is absent" % s) def _persist(self): with open(self._config_file, "w") as f: self.write(f) self._config_file.chmod(0o600) def first_identity_message_and_exit(): print("\nPlease create your first identity by running 'snet identity create'.\n\n" "The available identity types are:\n" " - 'rpc' (yields to a required ethereum json-rpc endpoint for signing using a given wallet\n" " index)\n" " - 'mnemonic' (uses a required bip39 mnemonic for HDWallet/account derivation and signing\n" " using a given wallet index)\n" " - 'key' (uses a required hex-encoded private key for signing)\n" " - 'ledger' (yields to a required ledger nano s device for signing using a given wallet\n" " index)\n" " - 'trezor' (yields to a required trezor device for signing using a given wallet index)\n" "\n") exit(1) def get_session_identity_keys(): return ["default_wallet_index"] def get_session_network_keys(): return ["default_gas_price", "current_registry_at", "current_multipartyescrow_at", "current_singularitynettoken_at", "default_eth_rpc_endpoint"] def get_session_network_keys_removable(): return ["default_gas_price", "current_registry_at", "current_multipartyescrow_at", "current_singularitynettoken_at"] def get_session_keys(): return get_session_network_keys() + get_session_identity_keys() + ["default_ipfs_endpoint"]
8,728
2,351
138
0d68e81ea58e5535999a29ec68b1af0b6ea9df2f
23,395
py
Python
vistrails/packages/tabledata/convert/convert_dates.py
remram44/VisTrails-mybinder
ee7477b471920d738f3ac430932f01901b56ed44
[ "BSD-3-Clause" ]
1
2020-07-13T11:33:45.000Z
2020-07-13T11:33:45.000Z
vistrails/packages/tabledata/convert/convert_dates.py
remram44/VisTrails-mybinder
ee7477b471920d738f3ac430932f01901b56ed44
[ "BSD-3-Clause" ]
null
null
null
vistrails/packages/tabledata/convert/convert_dates.py
remram44/VisTrails-mybinder
ee7477b471920d738f3ac430932f01901b56ed44
[ "BSD-3-Clause" ]
null
null
null
############################################################################### ## ## Copyright (C) 2014-2016, New York University. ## Copyright (C) 2013-2014, NYU-Poly. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## ############################################################################### from __future__ import division import datetime from distutils.version import LooseVersion import re import time import warnings from vistrails.core.modules.vistrails_module import Module, ModuleError from vistrails.core.bundles import py_import from vistrails.core.utils import VistrailsWarning PYTZ_MIN_VER = LooseVersion('2012') utc = UTC() _decimal_fmt = re.compile( r'^' '([-+]?)' # + means we are advancing compared to UTC '([0-9]?[0-9])' # hours '([0-9][0-9])?$') # minutes class TimestampsToDates(Module): """ Converts a List or numpy array of timestamps into datetime objects. A UNIX timestamp represents the number of seconds since Jan 1 1970 0:00, UTC. It represents a specific point in time that is not dependent on a timezone. The returned datetime objects are in the UTC timezone. """ _input_ports = [ ('timestamps', '(org.vistrails.vistrails.basic:List)')] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod class StringsToDates(Module): """ Converts a List of dates (as strings) into datetime objects. If no format is given, the dateutil.parser module will be used to guess what each string refers to; else, it is passed to strptime() to read each date. For example: '%Y-%m-%d %H:%M:%S'. The 'timezone' parameter indicates which timezone these dates are expressed in. It can be either: * 'local', in which case each date is interpreted as being in whatever timezone the system is set to use; * an offset in hours/minutes from UTC, for instance '-0400' for DST (eastern America time, when daylight saving is in effect). Note that in this case, the same offset is used for every date, without regard for daylight saving (if a date falls in winter, '-0500' will not be used instead). * if pytz is available, anything else will be passed to pytz.timezone(), so you would be able to use strings such as 'US/Eastern' or 'Europe/Amsterdam'. 
""" _input_ports = [ ('strings', '(org.vistrails.vistrails.basic:List)'), ('format', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"}), ('timezone', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"})] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod class DatesToMatplotlib(Module): """ Converts a List of Python's datetime objects to an array for matplotlib. """ _input_ports = [('datetimes', '(org.vistrails.vistrails.basic:List)')] _output_ports = [('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod class TimestampsToMatplotlib(Module): """ Converts a List or numpy array of timestamps into an array for matplotlib. """ _input_ports = [ ('timestamps', '(org.vistrails.vistrails.basic:List)')] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod class StringsToMatplotlib(Module): """ Converts a List of dates (as strings) to an array accepted by matplotlib. """ _input_ports = [ ('strings', '(org.vistrails.vistrails.basic:List)'), ('format', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"}), ('timezone', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"})] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod _modules = {'dates': [ TimestampsToDates, StringsToDates, DatesToMatplotlib, TimestampsToMatplotlib, StringsToMatplotlib]} ############################################################################### import unittest from vistrails.tests.utils import execute, intercept_result from ..identifiers import identifier
37.491987
79
0.554178
############################################################################### ## ## Copyright (C) 2014-2016, New York University. ## Copyright (C) 2013-2014, NYU-Poly. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## ############################################################################### from __future__ import division import datetime from distutils.version import LooseVersion import re import time import warnings from vistrails.core.modules.vistrails_module import Module, ModuleError from vistrails.core.bundles import py_import from vistrails.core.utils import VistrailsWarning PYTZ_MIN_VER = LooseVersion('2012') class UTC(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(0) def tzname(self, dt): return "UTC" def dst(self, dt): return datetime.timedelta(0) utc = UTC() class FixedOffset(datetime.tzinfo): def __init__(self, offset, name): self.offset = offset self.name = name def utcoffset(self, dt): return self.offset def tzname(self, dt): return self.name def dst(self, dt): return datetime.timedelta(0) class LocalTimezone(datetime.tzinfo): STDOFFSET = datetime.timedelta(seconds=time.timezone) if time.daylight: DSTOFFSET = datetime.timedelta(seconds=-time.altzone) else: DSTOFFSET = STDOFFSET DSTDIFF = DSTOFFSET - STDOFFSET def utcoffset(self, dt): if self._isdst(dt): return self.DSTOFFSET else: return self.STDOFFSET def dst(self, dt): if self._isdst(dt): return self.DSTDIFF else: return datetime.timedelta(0) def tzname(self, dt): return time.tzname[self._isdst(dt)] def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, 0) timestamp = time.mktime(tt) tt = time.localtime(timestamp) return tt.tm_isdst > 0 _decimal_fmt = re.compile( r'^' '([-+]?)' # + means we are advancing compared to UTC '([0-9]?[0-9])' # hours '([0-9][0-9])?$') # minutes def make_timezone(s): if s == 'local': return LocalTimezone() match = _decimal_fmt.match(s) if match is not None: sign, hours, minutes = match.groups('') sign = -1 if sign == '-' else 1 hours = int(hours) minutes = int(minutes) if minutes else 0 offset = datetime.timedelta( minutes=sign * (hours*60 + minutes)) name = '%s%02d%02d' % ( '-' if sign == -1 else '+', hours, 
minutes) return FixedOffset(offset, name) try: pytz = py_import('pytz', { 'pip': 'pytz', 'linux-debian': 'python-tz', 'linux-ubuntu': 'python-tz', 'linux-fedora': 'pytz'}) except ImportError: # pragma: no cover raise ValueError("can't understand timezone %r (maybe you should " "install pytz?)" % s) else: ver = LooseVersion(pytz.__version__) if ver < PYTZ_MIN_VER: # pragma: no cover warnings.warn( "You are using an old version of pytz (%s). You might " "run into some issues with daylight saving handling." % pytz.__version__, category=VistrailsWarning) try: return pytz.timezone(s) except KeyError: raise ValueError("can't understand timezone %r (defaulted to " "pytz, which gave no answer)" % s) class TimestampsToDates(Module): """ Converts a List or numpy array of timestamps into datetime objects. A UNIX timestamp represents the number of seconds since Jan 1 1970 0:00, UTC. It represents a specific point in time that is not dependent on a timezone. The returned datetime objects are in the UTC timezone. """ _input_ports = [ ('timestamps', '(org.vistrails.vistrails.basic:List)')] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod def convert(timestamps): return [datetime.datetime.fromtimestamp(t, utc) for t in timestamps] def compute(self): timestamps = self.get_input('timestamps') result = self.convert(timestamps) self.set_output('dates', result) class StringsToDates(Module): """ Converts a List of dates (as strings) into datetime objects. If no format is given, the dateutil.parser module will be used to guess what each string refers to; else, it is passed to strptime() to read each date. For example: '%Y-%m-%d %H:%M:%S'. The 'timezone' parameter indicates which timezone these dates are expressed in. 
It can be either: * 'local', in which case each date is interpreted as being in whatever timezone the system is set to use; * an offset in hours/minutes from UTC, for instance '-0400' for DST (eastern America time, when daylight saving is in effect). Note that in this case, the same offset is used for every date, without regard for daylight saving (if a date falls in winter, '-0500' will not be used instead). * if pytz is available, anything else will be passed to pytz.timezone(), so you would be able to use strings such as 'US/Eastern' or 'Europe/Amsterdam'. """ _input_ports = [ ('strings', '(org.vistrails.vistrails.basic:List)'), ('format', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"}), ('timezone', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"})] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod def convert(strings, fmt, tz): if tz: tz = make_timezone(tz) # Might raise ValueError else: tz = None if not fmt: try: py_import('dateutil', { 'pip': 'python-dateutil', 'linux-debian': 'python-dateutil', 'linux-ubuntu': 'python-dateutil', 'linux-fedora': 'python-dateutil'}) except ImportError: # pragma: no cover raise ValueError("can't read dates without a format without " "the dateutil package") from dateutil import parser result = [parser.parse(s, ignoretz=tz is not None) for s in strings] else: result = [datetime.datetime.strptime(s, fmt) for s in strings] if tz is None: result = [dt.replace(tzinfo=None) for dt in result] elif hasattr(tz, 'localize'): result = [tz.localize(dt) for dt in result] else: # Compute the time without daylight saving result = [dt.replace(tzinfo=tz) for dt in result] # Check if it is in fact during daylight saving if hasattr(tz, 'normalize'): for i in xrange(len(result)): dt = result[i] dst = tz.dst(dt.replace(tzinfo=None)) if dst: result[i] = tz.normalize(dt) - dst # This is close enough but the way this work is by # essence ambiguous # If 
the given datetime falls right during the time # change period (one hour, two times a year): # For standard -> dst (spring): the time will be in # dst, although it doesn't actually exist (we stepped # over it by changing the clock) # For dst -> standard (fall): the time will be in dst, # although it could also have been standard (there is # noway to know which one was meant) return result def compute(self): tz = self.get_input('timezone') strings = self.get_input('strings') fmt = self.get_input('format') try: result = self.convert(strings, fmt, tz) except ValueError, e: raise ModuleError(self, e.message) self.set_output('dates', result) class DatesToMatplotlib(Module): """ Converts a List of Python's datetime objects to an array for matplotlib. """ _input_ports = [('datetimes', '(org.vistrails.vistrails.basic:List)')] _output_ports = [('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod def convert(datetimes): from matplotlib.dates import date2num return date2num(datetimes) def compute(self): try: py_import('matplotlib', { 'pip': 'matplotlib', 'linux-debian': 'python-matplotlib', 'linux-ubuntu': 'python-matplotlib', 'linux-fedora': 'python-matplotlib'}) except ImportError: # pragma: no cover raise ModuleError(self, "matplotlib is not available") datetimes = self.get_input('datetimes') result = self.convert(datetimes) self.set_output('dates', result) class TimestampsToMatplotlib(Module): """ Converts a List or numpy array of timestamps into an array for matplotlib. 
""" _input_ports = [ ('timestamps', '(org.vistrails.vistrails.basic:List)')] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod def convert(timestamps): from matplotlib.dates import date2num result = TimestampsToDates.convert(timestamps) return date2num(result) def compute(self): try: py_import('matplotlib', { 'pip': 'matplotlib', 'linux-debian': 'python-matplotlib', 'linux-ubuntu': 'python-matplotlib', 'linux-fedora': 'python-matplotlib'}) except ImportError: # pragma: no cover raise ModuleError(self, "matplotlib is not available") timestamps = self.get_input('timestamps') result = self.convert(timestamps) self.set_output('dates', result) class StringsToMatplotlib(Module): """ Converts a List of dates (as strings) to an array accepted by matplotlib. """ _input_ports = [ ('strings', '(org.vistrails.vistrails.basic:List)'), ('format', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"}), ('timezone', '(org.vistrails.vistrails.basic:String)', {'optional': True, 'defaults': "['']"})] _output_ports = [ ('dates', '(org.vistrails.vistrails.basic:List)')] @staticmethod def convert(strings, fmt, tz): from matplotlib.dates import date2num datetimes = StringsToDates.convert(strings, fmt, tz) return date2num(datetimes) def compute(self): try: py_import('matplotlib', { 'pip': 'matplotlib', 'linux-debian': 'python-matplotlib', 'linux-ubuntu': 'python-matplotlib', 'linux-fedora': 'python-matplotlib'}) except ImportError: # pragma: no cover raise ModuleError(self, "matplotlib is not available") tz = self.get_input('timezone') strings = self.get_input('strings') fmt = self.get_input('format') try: result = self.convert(strings, fmt, tz) except ValueError, e: raise ModuleError(self, e.message) self.set_output('dates', result) _modules = {'dates': [ TimestampsToDates, StringsToDates, DatesToMatplotlib, TimestampsToMatplotlib, StringsToMatplotlib]} 
############################################################################### import unittest from vistrails.tests.utils import execute, intercept_result from ..identifiers import identifier class TestTimestampToDates(unittest.TestCase): def test_timestamps(self): """Test conversion to datetime objects. """ timestamps = [1369041900, 1369042260, 1357153500] with intercept_result(TimestampsToDates, 'dates') as results: self.assertFalse(execute([ ('convert|dates|TimestampsToDates', identifier, [ ('timestamps', [('List', repr(timestamps))]), ]), ])) self.assertEqual(len(results), 1) results = results[0] self.assertTrue(all(d.tzinfo is utc for d in results)) fmt = '%Y-%m-%d %H:%M:%S %Z %z' self.assertEqual( [d.strftime(fmt) for d in results], ['2013-05-20 09:25:00 UTC +0000', '2013-05-20 09:31:00 UTC +0000', '2013-01-02 19:05:00 UTC +0000']) try: import pytz except ImportError: # pragma: no cover pass else: self.assertEqual( [d.astimezone(pytz.timezone('US/Eastern')).strftime(fmt) for d in results], ['2013-05-20 05:25:00 EDT -0400', '2013-05-20 05:31:00 EDT -0400', '2013-01-02 14:05:00 EST -0500']) class TestStringsToDates(unittest.TestCase): def test_naive(self): """Test reading non-timezone-aware dates. """ dates = ['2013-05-20 9:25', '2013-05-20 09:31', '2013-01-02 19:05'] in_fmt = '%Y-%m-%d %H:%M' with intercept_result(StringsToDates, 'dates') as results: self.assertFalse(execute([ ('convert|dates|StringsToDates', identifier, [ ('strings', [('List', repr(dates))]), ('format', [('String', in_fmt)]), ]), ])) self.assertEqual(len(results), 1) results = results[0] self.assertTrue(all(d.tzinfo is None for d in results)) fmt = '%Y-%m-%d %H:%M:%S %Z %z' self.assertEqual( [d.strftime(fmt) for d in results], ['2013-05-20 09:25:00 ', '2013-05-20 09:31:00 ', '2013-01-02 19:05:00 ']) def test_dateutil(self): """Test reading non-timezone-aware dates without providing the format. dateutil is required for this one. 
""" try: import dateutil except ImportError: # pragma: no cover self.skipTest("dateutil is not available") dates = ['2013-05-20 9:25', 'Thu Sep 25 10:36:26 2003', '2003 10:36:28 CET 25 Sep Thu'] # Timezone will be ignored with intercept_result(StringsToDates, 'dates') as results: self.assertFalse(execute([ ('convert|dates|StringsToDates', identifier, [ ('strings', [('List', repr(dates))]), ]), ])) self.assertEqual(len(results), 1) results = results[0] fmt = '%Y-%m-%d %H:%M:%S %Z %z' self.assertEqual( [d.strftime(fmt) for d in results], ['2013-05-20 09:25:00 ', '2003-09-25 10:36:26 ', '2003-09-25 10:36:28 ']) def test_timezone(self): """Test reading timezone-aware dates by supplying an offset. """ dates = ['2013-05-20 9:25', '2013-05-20 09:31', '2013-01-02 19:05'] in_fmt = '%Y-%m-%d %H:%M' with intercept_result(StringsToDates, 'dates') as results: self.assertFalse(execute([ ('convert|dates|StringsToDates', identifier, [ ('strings', [('List', repr(dates))]), ('format', [('String', in_fmt)]), ('timezone', [('String', '-0500')]) ]), ])) self.assertEqual(len(results), 1) results = results[0] self.assertTrue(all(d.tzinfo is not None for d in results)) fmt = '%Y-%m-%d %H:%M:%S %z' self.assertEqual( [d.strftime(fmt) for d in results], ['2013-05-20 09:25:00 -0500', '2013-05-20 09:31:00 -0500', '2013-01-02 19:05:00 -0500']) def test_timezone_pytz(self): """Test reading timezone-aware dates through pytz. 
""" try: import pytz except ImportError: # pragma: no cover self.skipTest("pytz is not available") if LooseVersion(pytz.__version__) < PYTZ_MIN_VER: # pragma: no cover self.skipTest("pytz version is known to cause issues (%s)" % pytz.__version__) dates = ['2013-01-20 9:25', '2013-01-20 09:31', '2013-06-02 19:05'] in_fmt = '%Y-%m-%d %H:%M' with intercept_result(StringsToDates, 'dates') as results: self.assertFalse(execute([ ('convert|dates|StringsToDates', identifier, [ ('strings', [('List', repr(dates))]), ('format', [('String', in_fmt)]), ('timezone', [('String', 'America/New_York')]) ]), ])) self.assertEqual(len(results), 1) results = results[0] self.assertTrue(all(d.tzinfo is not None for d in results)) fmt = '%Y-%m-%d %H:%M:%S %Z %z' self.assertEqual( [d.strftime(fmt) for d in results], ['2013-01-20 09:25:00 EST -0500', '2013-01-20 09:31:00 EST -0500', '2013-06-02 19:05:00 EDT -0400']) class TestDatesToMatplotlib(unittest.TestCase): def test_simple(self): """Test converting datetime objects into matplotlib's format. This uses a PythonSource module to emit the datetime objects. 
""" try: import matplotlib except ImportError: # pragma: no cover self.skipTest("matplotlib is not available") from matplotlib.dates import date2num import urllib2 source = ("" "import datetime\n" "from vistrails.packages.tabledata.convert.convert_dates import \\\n" " make_timezone\n" "datetimes = [\n" " datetime.datetime(2013, 5, 29, 11, 18, 33),\n" " datetime.datetime(2013, 5, 29, 8, 11, 47,\n" " tzinfo=make_timezone('-0700'))]\n") source = urllib2.quote(source) with intercept_result(DatesToMatplotlib, 'dates') as results: self.assertFalse(execute([ ('PythonSource', 'org.vistrails.vistrails.basic', [ ('source', [('String', source)]), ]), ('convert|dates|DatesToMatplotlib', identifier, []), ], [ (0, 'datetimes', 1, 'datetimes') ], add_port_specs=[ (0, 'output', 'datetimes', 'org.vistrails.vistrails.basic:List'), ])) self.assertEqual(len(results), 1) results = results[0] self.assertEqual(list(results), list(date2num([ datetime.datetime(2013, 5, 29, 11, 18, 33), datetime.datetime(2013, 5, 29, 15, 11, 47)]))) class TestTimestampsToMatplotlib(unittest.TestCase): def test_simple(self): """Test converting timestamps into matplotlib's format. """ try: import matplotlib except ImportError: # pragma: no cover self.skipTest("matplotlib is not available") from matplotlib.dates import date2num with intercept_result(TimestampsToMatplotlib, 'dates') as results: self.assertFalse(execute([ ('convert|dates|TimestampsToMatplotlib', identifier, [ ('timestamps', [('List', '[1324842375, 1369842877]')]), ]), ])) self.assertEqual(len(results), 1) results = results[0] self.assertEqual(list(results), list(date2num([ datetime.datetime.utcfromtimestamp(1324842375), datetime.datetime.utcfromtimestamp(1369842877)]))) class TestStringsToMatplotlib(unittest.TestCase): def test_timezone(self): """Test reading timezone-aware dates by supplying an offset. 
""" try: import matplotlib except ImportError: # pragma: no cover self.skipTest("matplotlib is not available") from matplotlib.dates import date2num dates = ['2013-05-20 9:25', '2013-05-20 09:31', '2013-01-02 18:05'] in_fmt = '%Y-%m-%d %H:%M' with intercept_result(StringsToMatplotlib, 'dates') as results: self.assertFalse(execute([ ('convert|dates|StringsToMatplotlib', identifier, [ ('strings', [('List', repr(dates))]), ('format', [('String', in_fmt)]), ('timezone', [('String', '-0500')]) ]), ])) self.assertEqual(len(results), 1) results = results[0] self.assertEqual(list(results), list(date2num([ datetime.datetime(2013, 5, 20, 14, 25, 0), datetime.datetime(2013, 5, 20, 14, 31, 0), datetime.datetime(2013, 1, 2, 23, 5, 0)])))
7,010
9,784
659
2d0627e9080608c23776616f34c3e2904bb98bcb
14,076
py
Python
src/04_visualization/plotly_visualization.py
saraghsm/sound-of-failure
4c71ea1c4a0891f9149a2f09af2246dc1c5b9e10
[ "MIT" ]
4
2021-03-24T10:03:24.000Z
2021-09-11T21:50:20.000Z
src/04_visualization/plotly_visualization.py
wrijupan/sound-of-failure
a235a9041de15d9b33364e5e3f6d2811bbafe636
[ "MIT" ]
13
2021-03-15T21:51:58.000Z
2021-04-04T11:54:46.000Z
src/04_visualization/plotly_visualization.py
wrijupan/sound-of-failure
a235a9041de15d9b33364e5e3f6d2811bbafe636
[ "MIT" ]
2
2021-04-04T10:49:57.000Z
2021-04-12T08:14:40.000Z
import sys import plotly.graph_objects as go import numpy as np from plotly.subplots import make_subplots from scipy import interpolate sys.path += ['src/01_data_processing', 'src/02_modelling', 'src/03_modell_evaluation', 'src/00_utils'] import spectrogram as spec import train_test_split as splt import train_model_autoencoder as train import naming import eval_model_autoencoder as eval def make_mel_trace(mel, colorbar_len=0.3, colorbar_y=1.01): """ Make heatmap trace of mel spectrogram """ mel_trace = dict(visible=True, type='heatmap', x=np.array(range(mel.shape[0])), y=np.array(range(mel.shape[1])), z=mel, colorscale='inferno', colorbar=dict(len=colorbar_len, y=colorbar_y, yanchor='top', thickness=10)) return mel_trace def make_invisible_error_traces(timewise_recon_error, times, thresh): """ Make invisible traces showing error over time and threshold. """ above_thresh = timewise_recon_error.copy() below_thresh = timewise_recon_error.copy() above_thresh[above_thresh < thresh] = np.nan below_thresh[below_thresh > thresh] = np.nan thresh_trace = dict(visible=False, type='scatter', x=[0, 10], y=[thresh, thresh], marker=dict(color='black'), mode='lines', showlegend=False) above_trace = dict(visible=False, type='scatter', x=times, y=above_thresh, marker=dict(color='red'), mode='lines', showlegend=False) below_trace = dict(visible=False, type='scatter', x=times, y=below_thresh, marker=dict(color='green'), mode='lines', showlegend=False) return above_trace, below_trace, thresh_trace def make_ref_thresh_trace_error(ref_thresh, thresh_range): """ Make dashed horizontal line for reference threshold """ assert thresh_range[0] <= ref_thresh <= thresh_range[-1], 'reference threshold outside of threshold range' ref_step = np.abs(ref_thresh - thresh_range).argmin() ref_thresh = thresh_range[ref_step] ref_thresh_trace = dict(visible=True, type='scatter', x=[0, 10], y=[ref_thresh, ref_thresh], marker=dict(color='black'), mode='lines', line=dict(color='black', dash='dash', 
width=1), showlegend=False) return ref_thresh_trace, ref_step def make_mean_error_trace(mean_recon_error): """ Sert marker for mean error of sample file """ mean_error_trace = dict(visible=True, type='scatter', x=[mean_recon_error], y=[5], mode='markers', marker_symbol='x-thin', marker=dict(size=8, color='black', line=dict(width=2, color='black')), name='mean error of<br>sample + percentile', showlegend=True) return mean_error_trace def make_hist_traces(reco_loss_train, ref_thresh, thresh_range): """ Make visible traces for histogram """ # Histogram trace hist_trace = dict(visible=True, type='histogram', x=reco_loss_train, marker=dict(color='green'), histnorm='probability density', opacity=0.3, showlegend=False) # Probability distribution trace from sklearn.neighbors import KernelDensity kde = KernelDensity(kernel='gaussian', bandwidth=0.001).fit(reco_loss_train.reshape(-1, 1)) errors = np.arange(0, 0.1, 0.0001) prob_density = np.exp(kde.score_samples(errors.reshape(-1, 1))) dist_trace = dict(visible=True, type='scatter', x=errors, y=prob_density, mode='lines', line=dict(color='green', width=1), showlegend=False) # Ref threshold trace assert thresh_range[0] <= ref_thresh <= thresh_range[-1], 'reference threshold outside of threshold range' ref_step = np.abs(ref_thresh - thresh_range).argmin() ref_thresh = thresh_range[ref_step] ref_thresh_trace = dict(visible=True, type='scatter', x=[ref_thresh, ref_thresh], y=[0, 100], mode='lines', line=dict(color='black', dash='dash', width=1), name='recommended<br>threshold', showlegend=True) return hist_trace, dist_trace, ref_thresh_trace def make_sliders(thresh_range, active_step, num_visible, num_invisible): """ Make slider to select error trace based on threshold. 
""" steps = [] imgs_per_step = int(num_invisible / len(thresh_range)) for i, thr in enumerate(thresh_range): # import pdb; pdb.set_trace() step = dict(label=round(thr, 2), method="update", args=[{"visible": [True] * num_visible + [False] * num_invisible}]) for j in range(imgs_per_step): step["args"][0]["visible"][num_visible + i * imgs_per_step + j] = True steps.append(step) sliders = [dict(currentvalue=dict(visible=False), active=active_step, steps=steps)] return sliders def make_figure_layout(fig, sliders, mel, thresh_range, width=600, height=1000): """ Make layout for figure with three subplots for mel spectrogram, error over time and training error distribution. """ fig.update_layout( height=height, width=width, xaxis1=dict( tickmode='array', tickvals=np.linspace(0, mel.shape[1] - 1, 6), ticktext=[0, 2, 4, 6, 8, 10]), yaxis1=dict( tickmode='array', tickvals=np.linspace(0, mel.shape[0], 6), ticktext=[0, 512, 1024, 2048, 4096, 8000], title='Hz'), yaxis2=dict(range=[0, 0.2]), xaxis3=dict(range=[thresh_range[0], thresh_range[-1]]), yaxis3=dict(range=[0, 100]), sliders=sliders, legend=dict( traceorder='reversed', font=dict(size=10), yanchor="top", y=0.275, xanchor="right", x=0.99)) def make_eval_visualisation(mel_file, model, scaler, reco_loss_train, dim, step, thresh_range, ref_thresh, width=600, height=1000, status_bar_width=0.025, as_images=True): """ Call functions in this module to make a visualization for a given mel spectrogram file. 
""" times, timewise_recon_error = eval.reco_loss_over_time(model=model, scaler=scaler, mel_file=mel_file, dim=dim, step=step, as_images=as_images) mean_recon_error = timewise_recon_error.mean() # Interpolate linearly between point for plotting f = interpolate.interp1d(times, timewise_recon_error) times = np.arange(times[0], times[-1], 0.005) timewise_recon_error = f(times) # Generate figure with two subplots fig = make_subplots(rows=3, cols=1, vertical_spacing=0.05, subplot_titles=( 'spectrogram', 'reconstruction error over time', 'mean error distribution training'), shared_xaxes=False) ######################## # VISIBLE TRACES FIRST # ######################## num_visible = 0 # First row: Spectrogram mel = np.load(mel_file) mel_trace = make_mel_trace(mel) fig.add_trace(mel_trace, row=1, col=1) num_visible += 1 # Second row: Reference threshold ref_thresh_trace, ref_step = make_ref_thresh_trace_error(ref_thresh, thresh_range) fig.add_trace(ref_thresh_trace, row=2, col=1) active_step = ref_step num_visible += 1 # Third row: mean error mean_error_trace = make_mean_error_trace(mean_recon_error) fig.add_trace(mean_error_trace, row=3, col=1) num_visible += 1 # Third row: histogram and distribution hist_traces = make_hist_traces(reco_loss_train, ref_thresh, thresh_range) for trace in hist_traces: fig.add_trace(trace, row=3, col=1) num_visible += 1 # Third row: percentile label x_range = thresh_range[-1] - thresh_range[0] xlo = mean_recon_error - 0.055*x_range xhi = mean_recon_error + 0.055*x_range ylo = 9 yhi = 15 percentage_box_trace = dict(visible=True, showlegend=False, type='scatter', mode='lines', x=[xlo, xlo, xhi, xhi, xlo], y=[ylo, yhi, yhi, ylo, ylo], fill='toself', fillcolor='white', line=dict(width=0)) percentage = str(round(sum(sorted(reco_loss_train) < mean_recon_error) / len(reco_loss_train) * 100, 2)) percentage_text_trace = dict(visible=True, type='scatter', x=[(xhi + xlo) / 2], y=[(yhi + ylo) / 2], mode='text', text=percentage + '%', textposition='middle 
center', showlegend=False) fig.add_trace(percentage_box_trace, row=3, col=1) fig.add_trace(percentage_text_trace, row=3, col=1) num_visible += 1 # Second row: status box xlo = 7.5 xhi = 9.5 yhi = 0.185 ylo = yhi-status_bar_width status_box_trace = dict(visible=True, showlegend=False, type='scatter', mode='lines', x=[xlo, xlo, xhi, xhi, xlo], y=[ylo, yhi, yhi, ylo, ylo], fill='toself', fillcolor='white', line=dict(width=0)) fig.add_trace(status_box_trace, row=2, col=1) num_visible += 1 status_text_trace = dict(visible=True, type='scatter', x=[xlo + 0.1 * (xhi - xlo)], y=[ylo + 0.5 * (yhi - ylo)], mode='text', text='Status', textposition='middle right', showlegend=False) fig.add_trace(status_text_trace, row=2, col=1) num_visible += 2 ######################### # INVISIBLE TRACES LAST # ######################### num_invisible = 0 # Add invsisible error traces to the second row for thresh in thresh_range: # Second row: colored error traces and horizontal threshold line invisible_error_traces = make_invisible_error_traces(timewise_recon_error, times, thresh) for trace in invisible_error_traces: fig.add_trace(trace, row=2, col=1) num_invisible += 1 # Second row: status if mean_recon_error > thresh: color = 'red' else: color = 'green' status_trace = dict(visible=False, type='scatter', x=[xlo + 0.8 * (xhi - xlo)], y=[ylo + 0.5 * (yhi - ylo)], mode='markers', marker=dict(size=18, color=color, line=dict(width=0)), showlegend=False) fig.add_trace(status_trace, row=2, col=1) num_invisible += 1 # Third row: vertical threshold line invisible_hist_trace = dict(visible=False, type='scatter', x=[thresh, thresh], y=[0, 100], mode='lines', line=dict(color='black', width=2), name='threshold', showlegend=True) fig.add_trace(invisible_hist_trace, row=3, col=1) num_invisible += 1 ############################ # MAKE ACTIVE STEP VISIBLE # ############################ imgs_per_step = int(num_invisible / len(thresh_range)) for data in fig.data[num_visible + imgs_per_step * active_step: 
num_visible + imgs_per_step * (active_step + 1)]: data.visible = True ################################# # SLIDERS TO CONTROL VISIBILITY # ################################# sliders = make_sliders(thresh_range, active_step, num_visible, num_invisible) # Make figure layout and show make_figure_layout(fig, sliders, mel, thresh_range, width, height) return fig
38.67033
117
0.50135
import sys import plotly.graph_objects as go import numpy as np from plotly.subplots import make_subplots from scipy import interpolate sys.path += ['src/01_data_processing', 'src/02_modelling', 'src/03_modell_evaluation', 'src/00_utils'] import spectrogram as spec import train_test_split as splt import train_model_autoencoder as train import naming import eval_model_autoencoder as eval def make_mel_trace(mel, colorbar_len=0.3, colorbar_y=1.01): """ Make heatmap trace of mel spectrogram """ mel_trace = dict(visible=True, type='heatmap', x=np.array(range(mel.shape[0])), y=np.array(range(mel.shape[1])), z=mel, colorscale='inferno', colorbar=dict(len=colorbar_len, y=colorbar_y, yanchor='top', thickness=10)) return mel_trace def make_invisible_error_traces(timewise_recon_error, times, thresh): """ Make invisible traces showing error over time and threshold. """ above_thresh = timewise_recon_error.copy() below_thresh = timewise_recon_error.copy() above_thresh[above_thresh < thresh] = np.nan below_thresh[below_thresh > thresh] = np.nan thresh_trace = dict(visible=False, type='scatter', x=[0, 10], y=[thresh, thresh], marker=dict(color='black'), mode='lines', showlegend=False) above_trace = dict(visible=False, type='scatter', x=times, y=above_thresh, marker=dict(color='red'), mode='lines', showlegend=False) below_trace = dict(visible=False, type='scatter', x=times, y=below_thresh, marker=dict(color='green'), mode='lines', showlegend=False) return above_trace, below_trace, thresh_trace def make_ref_thresh_trace_error(ref_thresh, thresh_range): """ Make dashed horizontal line for reference threshold """ assert thresh_range[0] <= ref_thresh <= thresh_range[-1], 'reference threshold outside of threshold range' ref_step = np.abs(ref_thresh - thresh_range).argmin() ref_thresh = thresh_range[ref_step] ref_thresh_trace = dict(visible=True, type='scatter', x=[0, 10], y=[ref_thresh, ref_thresh], marker=dict(color='black'), mode='lines', line=dict(color='black', dash='dash', 
width=1), showlegend=False) return ref_thresh_trace, ref_step def make_mean_error_trace(mean_recon_error): """ Sert marker for mean error of sample file """ mean_error_trace = dict(visible=True, type='scatter', x=[mean_recon_error], y=[5], mode='markers', marker_symbol='x-thin', marker=dict(size=8, color='black', line=dict(width=2, color='black')), name='mean error of<br>sample + percentile', showlegend=True) return mean_error_trace def make_hist_traces(reco_loss_train, ref_thresh, thresh_range): """ Make visible traces for histogram """ # Histogram trace hist_trace = dict(visible=True, type='histogram', x=reco_loss_train, marker=dict(color='green'), histnorm='probability density', opacity=0.3, showlegend=False) # Probability distribution trace from sklearn.neighbors import KernelDensity kde = KernelDensity(kernel='gaussian', bandwidth=0.001).fit(reco_loss_train.reshape(-1, 1)) errors = np.arange(0, 0.1, 0.0001) prob_density = np.exp(kde.score_samples(errors.reshape(-1, 1))) dist_trace = dict(visible=True, type='scatter', x=errors, y=prob_density, mode='lines', line=dict(color='green', width=1), showlegend=False) # Ref threshold trace assert thresh_range[0] <= ref_thresh <= thresh_range[-1], 'reference threshold outside of threshold range' ref_step = np.abs(ref_thresh - thresh_range).argmin() ref_thresh = thresh_range[ref_step] ref_thresh_trace = dict(visible=True, type='scatter', x=[ref_thresh, ref_thresh], y=[0, 100], mode='lines', line=dict(color='black', dash='dash', width=1), name='recommended<br>threshold', showlegend=True) return hist_trace, dist_trace, ref_thresh_trace def make_sliders(thresh_range, active_step, num_visible, num_invisible): """ Make slider to select error trace based on threshold. 
""" steps = [] imgs_per_step = int(num_invisible / len(thresh_range)) for i, thr in enumerate(thresh_range): # import pdb; pdb.set_trace() step = dict(label=round(thr, 2), method="update", args=[{"visible": [True] * num_visible + [False] * num_invisible}]) for j in range(imgs_per_step): step["args"][0]["visible"][num_visible + i * imgs_per_step + j] = True steps.append(step) sliders = [dict(currentvalue=dict(visible=False), active=active_step, steps=steps)] return sliders def make_figure_layout(fig, sliders, mel, thresh_range, width=600, height=1000): """ Make layout for figure with three subplots for mel spectrogram, error over time and training error distribution. """ fig.update_layout( height=height, width=width, xaxis1=dict( tickmode='array', tickvals=np.linspace(0, mel.shape[1] - 1, 6), ticktext=[0, 2, 4, 6, 8, 10]), yaxis1=dict( tickmode='array', tickvals=np.linspace(0, mel.shape[0], 6), ticktext=[0, 512, 1024, 2048, 4096, 8000], title='Hz'), yaxis2=dict(range=[0, 0.2]), xaxis3=dict(range=[thresh_range[0], thresh_range[-1]]), yaxis3=dict(range=[0, 100]), sliders=sliders, legend=dict( traceorder='reversed', font=dict(size=10), yanchor="top", y=0.275, xanchor="right", x=0.99)) def make_eval_visualisation(mel_file, model, scaler, reco_loss_train, dim, step, thresh_range, ref_thresh, width=600, height=1000, status_bar_width=0.025, as_images=True): """ Call functions in this module to make a visualization for a given mel spectrogram file. 
""" times, timewise_recon_error = eval.reco_loss_over_time(model=model, scaler=scaler, mel_file=mel_file, dim=dim, step=step, as_images=as_images) mean_recon_error = timewise_recon_error.mean() # Interpolate linearly between point for plotting f = interpolate.interp1d(times, timewise_recon_error) times = np.arange(times[0], times[-1], 0.005) timewise_recon_error = f(times) # Generate figure with two subplots fig = make_subplots(rows=3, cols=1, vertical_spacing=0.05, subplot_titles=( 'spectrogram', 'reconstruction error over time', 'mean error distribution training'), shared_xaxes=False) ######################## # VISIBLE TRACES FIRST # ######################## num_visible = 0 # First row: Spectrogram mel = np.load(mel_file) mel_trace = make_mel_trace(mel) fig.add_trace(mel_trace, row=1, col=1) num_visible += 1 # Second row: Reference threshold ref_thresh_trace, ref_step = make_ref_thresh_trace_error(ref_thresh, thresh_range) fig.add_trace(ref_thresh_trace, row=2, col=1) active_step = ref_step num_visible += 1 # Third row: mean error mean_error_trace = make_mean_error_trace(mean_recon_error) fig.add_trace(mean_error_trace, row=3, col=1) num_visible += 1 # Third row: histogram and distribution hist_traces = make_hist_traces(reco_loss_train, ref_thresh, thresh_range) for trace in hist_traces: fig.add_trace(trace, row=3, col=1) num_visible += 1 # Third row: percentile label x_range = thresh_range[-1] - thresh_range[0] xlo = mean_recon_error - 0.055*x_range xhi = mean_recon_error + 0.055*x_range ylo = 9 yhi = 15 percentage_box_trace = dict(visible=True, showlegend=False, type='scatter', mode='lines', x=[xlo, xlo, xhi, xhi, xlo], y=[ylo, yhi, yhi, ylo, ylo], fill='toself', fillcolor='white', line=dict(width=0)) percentage = str(round(sum(sorted(reco_loss_train) < mean_recon_error) / len(reco_loss_train) * 100, 2)) percentage_text_trace = dict(visible=True, type='scatter', x=[(xhi + xlo) / 2], y=[(yhi + ylo) / 2], mode='text', text=percentage + '%', textposition='middle 
center', showlegend=False) fig.add_trace(percentage_box_trace, row=3, col=1) fig.add_trace(percentage_text_trace, row=3, col=1) num_visible += 1 # Second row: status box xlo = 7.5 xhi = 9.5 yhi = 0.185 ylo = yhi-status_bar_width status_box_trace = dict(visible=True, showlegend=False, type='scatter', mode='lines', x=[xlo, xlo, xhi, xhi, xlo], y=[ylo, yhi, yhi, ylo, ylo], fill='toself', fillcolor='white', line=dict(width=0)) fig.add_trace(status_box_trace, row=2, col=1) num_visible += 1 status_text_trace = dict(visible=True, type='scatter', x=[xlo + 0.1 * (xhi - xlo)], y=[ylo + 0.5 * (yhi - ylo)], mode='text', text='Status', textposition='middle right', showlegend=False) fig.add_trace(status_text_trace, row=2, col=1) num_visible += 2 ######################### # INVISIBLE TRACES LAST # ######################### num_invisible = 0 # Add invsisible error traces to the second row for thresh in thresh_range: # Second row: colored error traces and horizontal threshold line invisible_error_traces = make_invisible_error_traces(timewise_recon_error, times, thresh) for trace in invisible_error_traces: fig.add_trace(trace, row=2, col=1) num_invisible += 1 # Second row: status if mean_recon_error > thresh: color = 'red' else: color = 'green' status_trace = dict(visible=False, type='scatter', x=[xlo + 0.8 * (xhi - xlo)], y=[ylo + 0.5 * (yhi - ylo)], mode='markers', marker=dict(size=18, color=color, line=dict(width=0)), showlegend=False) fig.add_trace(status_trace, row=2, col=1) num_invisible += 1 # Third row: vertical threshold line invisible_hist_trace = dict(visible=False, type='scatter', x=[thresh, thresh], y=[0, 100], mode='lines', line=dict(color='black', width=2), name='threshold', showlegend=True) fig.add_trace(invisible_hist_trace, row=3, col=1) num_invisible += 1 ############################ # MAKE ACTIVE STEP VISIBLE # ############################ imgs_per_step = int(num_invisible / len(thresh_range)) for data in fig.data[num_visible + imgs_per_step * active_step: 
num_visible + imgs_per_step * (active_step + 1)]: data.visible = True ################################# # SLIDERS TO CONTROL VISIBILITY # ################################# sliders = make_sliders(thresh_range, active_step, num_visible, num_invisible) # Make figure layout and show make_figure_layout(fig, sliders, mel, thresh_range, width, height) return fig
0
0
0
007082f4b7ad2ef310f02199ed70e651282b80b5
3,361
py
Python
SNOMEDCTToOWL/RF2Files/Relationship.py
hsolbrig/SNOMEDToOWL
b9994e842822e98f6d01561d4b54902e4da5e79a
[ "Apache-2.0" ]
9
2016-11-22T14:54:10.000Z
2021-02-16T01:49:54.000Z
SNOMEDCTToOWL/RF2Files/Relationship.py
hsolbrig/SNOMEDToOWL
b9994e842822e98f6d01561d4b54902e4da5e79a
[ "Apache-2.0" ]
24
2016-12-20T17:55:05.000Z
2021-08-17T14:07:08.000Z
SNOMEDCTToOWL/RF2Files/Relationship.py
hsolbrig/SNOMEDToOWL
b9994e842822e98f6d01561d4b54902e4da5e79a
[ "Apache-2.0" ]
1
2017-01-09T16:47:14.000Z
2017-01-09T16:47:14.000Z
from typing import Dict, Set from .RF2File import RF2File from .Transitive import Transitive from SNOMEDCTToOWL.TransformationContext import TransformationContext from SNOMEDCTToOWL.SNOMEDToOWLConstants import * class Relationship: """ A RF2 stated relationship or relationship entry Properties: * sourceId -- concept identifier of the subject * typeId -- concept identifier of the predicate (if not IS a) * destinationId -- concept identifier of the target * relationshipGroup -- group the assertion belongs to Filters: * active -- only active relationships (active=='1') are included * characteristicTypeId -- only descendants of 900000000000006009 |Defining relationship| (stated, inferred) are included in the transformation. * moduleId -- NOT used as a filter because fully defined definitions have to be complete to be valid * modifierId -- only the existential modifier, 900000000000451002 |Some|, is included in the transformation """
43.089744
115
0.65903
from typing import Dict, Set from .RF2File import RF2File from .Transitive import Transitive from SNOMEDCTToOWL.TransformationContext import TransformationContext from SNOMEDCTToOWL.SNOMEDToOWLConstants import * class Relationship: """ A RF2 stated relationship or relationship entry Properties: * sourceId -- concept identifier of the subject * typeId -- concept identifier of the predicate (if not IS a) * destinationId -- concept identifier of the target * relationshipGroup -- group the assertion belongs to Filters: * active -- only active relationships (active=='1') are included * characteristicTypeId -- only descendants of 900000000000006009 |Defining relationship| (stated, inferred) are included in the transformation. * moduleId -- NOT used as a filter because fully defined definitions have to be complete to be valid * modifierId -- only the existential modifier, 900000000000451002 |Some|, is included in the transformation """ def __init__(self, row): self.sourceId = int(row["sourceId"]) self.destinationId = int(row["destinationId"]) self.typeId = int(row["typeId"]) self.relationshipGroup = int(row["relationshipGroup"]) class Relationships(RF2File): relationship_prefix = RelationshipFilePrefix def __init__(self): self._parents = {} # Dict[sourceId, Set[destinationId] self._entries = {} # Dict[sourceId, Dict[relationshipGroup, Set[Relationship]] @classmethod def filtr(cls, fname: str, context: TransformationContext) -> bool: return fname.startswith(Relationships.relationship_prefix) def add(self, row: Dict, _: TransformationContext, transitive: Transitive) -> None: """ Add an RF2 relationship or statedrelationship row :param row: row to add -- already tested for active :param _: unused :param transitive: Transitive relationship closure """ sourceid = int(row['sourceId']) relid = int(row['characteristicTypeId']) if not transitive.is_descendant_of(relid, Additional_relationship_sctid) and \ not transitive.is_descendant_of(relid, Qualifying_relationship_scitd) and \ 
int(row['modifierId']) == Some_sctid: if int(row['typeId']) == Is_a_sctid: self._parents.setdefault(sourceid, set()).add(int(row['destinationId'])) else: self._entries.setdefault(sourceid, {})\ .setdefault(int(row['relationshipGroup']), set())\ .add(Relationship(row)) def parents(self, concept: SCTID) -> Set[SCTID]: """ Return the direct parents of the concept. Only assertions in the relationship file are returned :param concept: child concept :return: set of parents """ return self._parents.get(concept, set()) def groups(self, concept: SCTID) -> Dict[int, Set[Relationship]]: """ Return a dictionary that maps relationship group identifier to a set of Relationship entries :param concept: source councept identifier :return: dictionary, key = relationship group (int), data = Set(Relationship) """ return self._entries.get(concept, {})
486
1,763
50
763eba53c8b3ff1b00fb89db187325b7c17da143
7,317
py
Python
vulnerable_image_check/lib/report.py
cloudpassage-community/vulnerable_image_check
5ba1133753b366efe5276bf0879bc5c3a74b3263
[ "BSD-2-Clause" ]
2
2018-02-05T20:09:21.000Z
2018-02-20T16:11:09.000Z
vulnerable_image_check/lib/report.py
cloudpassage-community/vulnerable_image_check
5ba1133753b366efe5276bf0879bc5c3a74b3263
[ "BSD-2-Clause" ]
11
2018-02-07T01:53:09.000Z
2018-03-15T00:49:04.000Z
vulnerable_image_check/lib/report.py
cloudpassage-community/vulnerable_image_check
5ba1133753b366efe5276bf0879bc5c3a74b3263
[ "BSD-2-Clause" ]
null
null
null
import base64 import csv import io from utility import Utility class Report(object): """ We use this class to generate text reports. Class based on awilson@cloudpassage.com's Report.py class and modified for this purpose """ @classmethod def create_csv_report(cls, vulnerable_image_check_data): """ Expect a dictionary object, produce text in CSV format. Args: - cls - reference to the current instance of hte class - vulnerable_image_check_data (dict) - dictionary of vulnerability data Return: - result (str) - base64 encoded vulnerability report """ # initialize the list as we will have a list of dicts rows = [] # let's build the output for all sets in the dataset # dataset is vulnerability info for all images in request for set in vulnerable_image_check_data["image_issues"]: # format the data for a cvs report row = cls.format_vulnerable_image_data_csv(set) # append each returned data set to the whole rows.append(row) # the fieldnames for the csv - DictWriter will order by these fieldnames = \ ["registry", "repository", "tag", "package", "version", "image_digest"] # get a stream io object ephemeral_obj = io.BytesIO() # write the csv data csv_writer = csv.DictWriter(ephemeral_obj, fieldnames=fieldnames) csv_writer.writeheader() csv_writer.writerows(rows) # encode to base64 result = base64.b64encode(ephemeral_obj.getvalue()) # clean up ephemeral_obj.close() # return report data return result @classmethod def create_stdout_report(cls, vulnerable_image_check_data): """ Expect a dictionary object, produce text appropriate for stdout. 
Args: - cls - reference to the current instance of the class - vulnerable_image_check_data (dict) - dictionary of vulnerability data Return: - result (str) - base64 encoded vulnerability report Format of encoded data: Registry: DPR Repository: bkumar89/centos Tag: 7.1.1503 Vulnerabilities: Package: binutils Package Version: 2.23.52.0.1-30.el7 | CVE List: cve-2014-8484 cve-2014-8485 # NOQA """ result = "" # for each data set in all the data for set in vulnerable_image_check_data["image_issues"]: # format data as noted above pieces = cls.format_vulnerable_image_data(set) pieces = pieces.split('\n') pieces = "\n".join(pieces) # build full dataset result += pieces # encode data result = base64.b64encode(result) # return report data return result @classmethod def create_slack_reports(cls, channel_reference, default_channel, routing_rules, instances): """Create a plaintext report for Slack. Args: channel_reference(dict): Keys are channel names, values are channel IDs. default_channel(str): Name of default Slack channel. routing_rules(dict): Rules for routing messages to different Slack channels. Formatted like {"metadata_field_name": {"metadata_field_value_to_match": "slack_channel_name"}} instances(dict): Instance metadata. Returns: dict: {"channel": "report"} where "channel" is the Slack channel ID and "report" is the text of the report. """ organized = {} # Group by target Slack channel. for instance in instances: channel = Utility.get_channel_for_message(channel_reference, instance, routing_rules, default_channel) if channel not in organized: organized[channel] = [] organized[channel].append(instance) # Build report per channel, each sorted by instance ID. report = {} for target, content in organized.items(): x_content = {c.keys()[0]: c.values()[0] for c in content} report[target] = cls.create_stdout_report(x_content) return report @classmethod def format_vulnerable_image_data(cls, vic_data): """Format vulnerability data for reporting. 
Args: - cls - reference to the current instance of the class - vic_data (dict): Formatted like this: Registry: DPR Repository: bkumar89/centos Tag: 7.1.1503 Vulnerabilities: Package: binutils Package Version: 2.23.52.0.1-30.el7 | CVE List: cve-2014-8484 cve-2014-8485 # NOQA """ registry = \ "\n\nRegistry: {registry}" \ "".format(registry=vic_data["image"]["registry"]["name"]) repository = \ " Repository: {repository}" \ "".format(repository=vic_data["image"]["repository"]["name"]) tags = "" for tag in vic_data["image"]["tags"]: tags += tag tags += " " tag_list = \ " Tag(s): {tag_list}".format(tag_list=tags) vulnerabilities = " Vulnerabilities:" # NOQA package = " Package: {package}".format(package=vic_data["name"]) # build package, package version and cve's into one line package_version = \ " Package Version: {package_version}" \ "".format(package_version=vic_data["version"]) package += package_version cves = "" for cve in vic_data["cves"]: cves += cve["name"] cves += " " cve_list = " | CVE List: {cve_list}".format(cve_list=cves) package += cve_list # order the fields and separate them by a newline ordered_fields = [registry, repository, tag_list, vulnerabilities, package] # return formatted report data return "\n".join(ordered_fields) @classmethod def format_vulnerable_image_data_csv(cls, vic_data): """ Format vulnerability data for reporting in CSV format. Args: vic_data (dict) - vulnerability data Returns: result - (dict) - vulnerability report data """ number_tags = len(vic_data["image"]["tags"]) counter = 0 tags = "" increment = 1 while counter < number_tags: tags += vic_data["image"]["tags"][counter] tags += " " counter = counter + increment result = {"registry": vic_data["image"]["registry"]["name"], "repository": vic_data["image"]["repository"]["name"], "tag": tags, "package": vic_data["name"], "image_digest": vic_data["image"]["image_sha"], "version": vic_data["version"]} return result
33.410959
120
0.563619
import base64 import csv import io from utility import Utility class Report(object): """ We use this class to generate text reports. Class based on awilson@cloudpassage.com's Report.py class and modified for this purpose """ @classmethod def create_csv_report(cls, vulnerable_image_check_data): """ Expect a dictionary object, produce text in CSV format. Args: - cls - reference to the current instance of hte class - vulnerable_image_check_data (dict) - dictionary of vulnerability data Return: - result (str) - base64 encoded vulnerability report """ # initialize the list as we will have a list of dicts rows = [] # let's build the output for all sets in the dataset # dataset is vulnerability info for all images in request for set in vulnerable_image_check_data["image_issues"]: # format the data for a cvs report row = cls.format_vulnerable_image_data_csv(set) # append each returned data set to the whole rows.append(row) # the fieldnames for the csv - DictWriter will order by these fieldnames = \ ["registry", "repository", "tag", "package", "version", "image_digest"] # get a stream io object ephemeral_obj = io.BytesIO() # write the csv data csv_writer = csv.DictWriter(ephemeral_obj, fieldnames=fieldnames) csv_writer.writeheader() csv_writer.writerows(rows) # encode to base64 result = base64.b64encode(ephemeral_obj.getvalue()) # clean up ephemeral_obj.close() # return report data return result @classmethod def create_stdout_report(cls, vulnerable_image_check_data): """ Expect a dictionary object, produce text appropriate for stdout. 
Args: - cls - reference to the current instance of the class - vulnerable_image_check_data (dict) - dictionary of vulnerability data Return: - result (str) - base64 encoded vulnerability report Format of encoded data: Registry: DPR Repository: bkumar89/centos Tag: 7.1.1503 Vulnerabilities: Package: binutils Package Version: 2.23.52.0.1-30.el7 | CVE List: cve-2014-8484 cve-2014-8485 # NOQA """ result = "" # for each data set in all the data for set in vulnerable_image_check_data["image_issues"]: # format data as noted above pieces = cls.format_vulnerable_image_data(set) pieces = pieces.split('\n') pieces = "\n".join(pieces) # build full dataset result += pieces # encode data result = base64.b64encode(result) # return report data return result @classmethod def create_slack_reports(cls, channel_reference, default_channel, routing_rules, instances): """Create a plaintext report for Slack. Args: channel_reference(dict): Keys are channel names, values are channel IDs. default_channel(str): Name of default Slack channel. routing_rules(dict): Rules for routing messages to different Slack channels. Formatted like {"metadata_field_name": {"metadata_field_value_to_match": "slack_channel_name"}} instances(dict): Instance metadata. Returns: dict: {"channel": "report"} where "channel" is the Slack channel ID and "report" is the text of the report. """ organized = {} # Group by target Slack channel. for instance in instances: channel = Utility.get_channel_for_message(channel_reference, instance, routing_rules, default_channel) if channel not in organized: organized[channel] = [] organized[channel].append(instance) # Build report per channel, each sorted by instance ID. report = {} for target, content in organized.items(): x_content = {c.keys()[0]: c.values()[0] for c in content} report[target] = cls.create_stdout_report(x_content) return report @classmethod def format_vulnerable_image_data(cls, vic_data): """Format vulnerability data for reporting. 
Args: - cls - reference to the current instance of the class - vic_data (dict): Formatted like this: Registry: DPR Repository: bkumar89/centos Tag: 7.1.1503 Vulnerabilities: Package: binutils Package Version: 2.23.52.0.1-30.el7 | CVE List: cve-2014-8484 cve-2014-8485 # NOQA """ registry = \ "\n\nRegistry: {registry}" \ "".format(registry=vic_data["image"]["registry"]["name"]) repository = \ " Repository: {repository}" \ "".format(repository=vic_data["image"]["repository"]["name"]) tags = "" for tag in vic_data["image"]["tags"]: tags += tag tags += " " tag_list = \ " Tag(s): {tag_list}".format(tag_list=tags) vulnerabilities = " Vulnerabilities:" # NOQA package = " Package: {package}".format(package=vic_data["name"]) # build package, package version and cve's into one line package_version = \ " Package Version: {package_version}" \ "".format(package_version=vic_data["version"]) package += package_version cves = "" for cve in vic_data["cves"]: cves += cve["name"] cves += " " cve_list = " | CVE List: {cve_list}".format(cve_list=cves) package += cve_list # order the fields and separate them by a newline ordered_fields = [registry, repository, tag_list, vulnerabilities, package] # return formatted report data return "\n".join(ordered_fields) @classmethod def format_vulnerable_image_data_csv(cls, vic_data): """ Format vulnerability data for reporting in CSV format. Args: vic_data (dict) - vulnerability data Returns: result - (dict) - vulnerability report data """ number_tags = len(vic_data["image"]["tags"]) counter = 0 tags = "" increment = 1 while counter < number_tags: tags += vic_data["image"]["tags"][counter] tags += " " counter = counter + increment result = {"registry": vic_data["image"]["registry"]["name"], "repository": vic_data["image"]["repository"]["name"], "tag": tags, "package": vic_data["name"], "image_digest": vic_data["image"]["image_sha"], "version": vic_data["version"]} return result
0
0
0
b6673d7bf9274ebe5763cc02700041918991fdc3
248
py
Python
webserver/configure.py
cuauv/software
5ad4d52d603f81a7f254f365d9b0fe636d03a260
[ "BSD-3-Clause" ]
70
2015-11-16T18:04:01.000Z
2022-03-05T09:04:02.000Z
webserver/configure.py
cuauv/software
5ad4d52d603f81a7f254f365d9b0fe636d03a260
[ "BSD-3-Clause" ]
1
2016-08-03T05:13:19.000Z
2016-08-03T06:19:39.000Z
webserver/configure.py
cuauv/software
5ad4d52d603f81a7f254f365d9b0fe636d03a260
[ "BSD-3-Clause" ]
34
2015-12-15T17:29:23.000Z
2021-11-18T14:15:12.000Z
#!/usr/bin/env python3 from build import ninja_common build = ninja_common.Build('webserver') build.webpack('static/bundle.js', 'webpack.config.js', 'src', 'webserver/package.json') build.install('auv-webserver', f='webserver/auv-webserver.py')
27.555556
87
0.75
#!/usr/bin/env python3 from build import ninja_common build = ninja_common.Build('webserver') build.webpack('static/bundle.js', 'webpack.config.js', 'src', 'webserver/package.json') build.install('auv-webserver', f='webserver/auv-webserver.py')
0
0
0
f2fdc059f1c387105beb7cef04d51ec6284369dc
1,608
py
Python
N10037/main.py
carmocca/UVA
c02bf55fc444309c94d938618911f22b0d9a14e1
[ "MIT" ]
3
2019-05-05T06:00:06.000Z
2021-02-25T19:03:32.000Z
N10037/main.py
carmocca/UVA
c02bf55fc444309c94d938618911f22b0d9a14e1
[ "MIT" ]
null
null
null
N10037/main.py
carmocca/UVA
c02bf55fc444309c94d938618911f22b0d9a14e1
[ "MIT" ]
3
2019-10-16T15:42:58.000Z
2021-04-11T16:50:20.000Z
import sys if __name__ == '__main__': print(''.join(main(sys.stdin)), end='')
27.724138
81
0.518657
import sys def get_movements(waiting): people = len(waiting) if people == 1: return [[waiting.pop()]] elif people == 2: return [[waiting.pop(), waiting.pop()]] elif people == 3: fastest = waiting.pop(0) return [[fastest, waiting.pop()], [fastest], [fastest, waiting.pop()]] else: slowest = waiting.pop() snd_slowest = waiting.pop() if 2 * waiting[1] < waiting[0] + snd_slowest: return [[waiting[0], waiting[1]], [waiting[0]], [snd_slowest, slowest], [waiting[1]]] else: return [[waiting[0], snd_slowest], [waiting[0]], [waiting[0], slowest], [waiting[0]]] def solve(crossing_times): total = 0 movements = [] waiting = sorted(crossing_times) while len(waiting): moves = get_movements(waiting) total += sum(max(m) for m in moves) movements.extend(moves) return total, movements def main(file): res = [] cases = int(file.readline()) file.readline() for _ in range(cases): n = int(file.readline()) crossing_times = [int(file.readline()) for _ in range(n)] total, movements = solve(crossing_times) res.append('{}\n'.format(total)) res.extend('{}\n'.format(' '.join(str(p) for p in m)) for m in movements) res.append('\n') file.readline() return res[:-1] if __name__ == '__main__': print(''.join(main(sys.stdin)), end='')
1,452
0
69
f12aafcc0f274588085b0864de694cadc3f97e94
3,409
py
Python
tests/feature_tests/test_avg_size_of_hole.py
smogork/TAiO_ImageClassification
14b2f6e707475b45e936a8ddd4345309aaef78f2
[ "MIT" ]
null
null
null
tests/feature_tests/test_avg_size_of_hole.py
smogork/TAiO_ImageClassification
14b2f6e707475b45e936a8ddd4345309aaef78f2
[ "MIT" ]
2
2021-10-12T17:45:49.000Z
2021-12-21T19:23:30.000Z
tests/feature_tests/test_avg_size_of_hole.py
smogork/TAiO_ImageClassification
14b2f6e707475b45e936a8ddd4345309aaef78f2
[ "MIT" ]
null
null
null
#! /usr/bin/env python """ Modul zawiera testy klasy AvgSizeOfHoleFeature """ import unittest from bitmap.bitmap_grayscale import BitmapGrayscale from feature.simple_features.avg_size_of_hole_feature import AvgSizeOfHoleFeature from tests.bitmap_generator import BitmapGenerator class TestAvgSizeOfHoleFeature(unittest.TestCase): """ Klasa testująca klase AvgSizeOfHoleFeature """ def count_feature(self, bitmap: BitmapGrayscale) -> float: """ Prawidłowo wylicza wartośc feature :param bitmap: Bitmapa, dla której wyliczamy feature :return: Wyliczony feature """ self.feature.prepare(bitmap) return self.feature.calculate() def test_reorder_calculate_prepare(self): """ Test sprawdza, czy wywołanie w złej kolejności metody prepare i calculate zgłaszaja wyjątek. Oczekujemy zgłoszenia wyjątku AttributeError. :return: """ with self.assertRaises(AttributeError): self.feature.calculate() def test_white_plain(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem. Oczekujemy Liczby -1 jako informacji o braku dziur. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) res = self.count_feature(bitmap) self.assertIs(-1, res) def test_white_plain_one_hole_of_size_1(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza jednym pikselem. Oczekujemy Liczby 1 jako informacji o jednej dziurze o rozmiarze 1. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(1.0, res) def test_white_plain_one_hole_of_size_2(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza dwoma czarnymi pikselami. Oczekujemy Liczby 2 jako informacji o jednej dziurze o rozmiarze 2. 
:return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) bitmap.set_cell_value(1, 2, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(2.0, res) def test_white_plain_two_holes_of_size_1(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza jednym pikselem. Oczekujemy Liczby 1 jako informacji o dwórch dziurach o rozmiarze 1. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) bitmap.set_cell_value(3, 3, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(1.0, res) def test_white_plain_two_holes_of_size_1_and_2(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza dwoma dziurami o rozmiarach 1 i 2. Oczekujemy Liczby 1.5. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) bitmap.set_cell_value(3, 3, 0.0) bitmap.set_cell_value(3, 4, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(1.5, res)
28.889831
100
0.652684
#! /usr/bin/env python """ Modul zawiera testy klasy AvgSizeOfHoleFeature """ import unittest from bitmap.bitmap_grayscale import BitmapGrayscale from feature.simple_features.avg_size_of_hole_feature import AvgSizeOfHoleFeature from tests.bitmap_generator import BitmapGenerator class TestAvgSizeOfHoleFeature(unittest.TestCase): """ Klasa testująca klase AvgSizeOfHoleFeature """ def setUp(self): self.feature = AvgSizeOfHoleFeature(0.5) def count_feature(self, bitmap: BitmapGrayscale) -> float: """ Prawidłowo wylicza wartośc feature :param bitmap: Bitmapa, dla której wyliczamy feature :return: Wyliczony feature """ self.feature.prepare(bitmap) return self.feature.calculate() def test_reorder_calculate_prepare(self): """ Test sprawdza, czy wywołanie w złej kolejności metody prepare i calculate zgłaszaja wyjątek. Oczekujemy zgłoszenia wyjątku AttributeError. :return: """ with self.assertRaises(AttributeError): self.feature.calculate() def test_white_plain(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem. Oczekujemy Liczby -1 jako informacji o braku dziur. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) res = self.count_feature(bitmap) self.assertIs(-1, res) def test_white_plain_one_hole_of_size_1(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza jednym pikselem. Oczekujemy Liczby 1 jako informacji o jednej dziurze o rozmiarze 1. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(1.0, res) def test_white_plain_one_hole_of_size_2(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza dwoma czarnymi pikselami. Oczekujemy Liczby 2 jako informacji o jednej dziurze o rozmiarze 2. 
:return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) bitmap.set_cell_value(1, 2, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(2.0, res) def test_white_plain_two_holes_of_size_1(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza jednym pikselem. Oczekujemy Liczby 1 jako informacji o dwórch dziurach o rozmiarze 1. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) bitmap.set_cell_value(3, 3, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(1.0, res) def test_white_plain_two_holes_of_size_1_and_2(self): """ Dostarczamy bitmapę wypełniona tylko białym kolorem, poza dwoma dziurami o rozmiarach 1 i 2. Oczekujemy Liczby 1.5. :return: """ size = 5 bitmap = BitmapGenerator.plain_white(size, size) bitmap.set_cell_value(1, 1, 0.0) bitmap.set_cell_value(3, 3, 0.0) bitmap.set_cell_value(3, 4, 0.0) res = self.count_feature(bitmap) self.assertAlmostEqual(1.5, res)
44
0
26
2f9d999e06154f8a14aebf5627c5c8382781dca7
1,126
py
Python
utility/read_input.py
ParsaHejabi/USC-CSCI570-AnalysisOfAlgorithms
0c565c8ddc799d6ca379b3f807fda5579f8e1607
[ "MIT" ]
null
null
null
utility/read_input.py
ParsaHejabi/USC-CSCI570-AnalysisOfAlgorithms
0c565c8ddc799d6ca379b3f807fda5579f8e1607
[ "MIT" ]
null
null
null
utility/read_input.py
ParsaHejabi/USC-CSCI570-AnalysisOfAlgorithms
0c565c8ddc799d6ca379b3f807fda5579f8e1607
[ "MIT" ]
null
null
null
def read_input_file(input_file_address) -> dict: """ Takes a string as input file address and returns the base strings along indices for each base string """ res = dict() res['first_base_string'] = '' res['first_base_string_indices'] = [] res['second_base_string'] = '' res['second_base_string_indices'] = [] with open(input_file_address, 'r') as f: input_file_lines = [line.strip() for line in f.readlines()] is_first_base_string = True for line in input_file_lines: # isnumeric complexity if not line.isnumeric() and res['first_base_string'] == '': res['first_base_string'] = line elif line.isnumeric() and is_first_base_string: res['first_base_string_indices'].append(int(line)) elif not line.isnumeric() and res['first_base_string'] != '': is_first_base_string = False res['second_base_string'] = line elif line.isnumeric() and not is_first_base_string: res['second_base_string_indices'].append(int(line)) return res
43.307692
104
0.624334
def read_input_file(input_file_address) -> dict: """ Takes a string as input file address and returns the base strings along indices for each base string """ res = dict() res['first_base_string'] = '' res['first_base_string_indices'] = [] res['second_base_string'] = '' res['second_base_string_indices'] = [] with open(input_file_address, 'r') as f: input_file_lines = [line.strip() for line in f.readlines()] is_first_base_string = True for line in input_file_lines: # isnumeric complexity if not line.isnumeric() and res['first_base_string'] == '': res['first_base_string'] = line elif line.isnumeric() and is_first_base_string: res['first_base_string_indices'].append(int(line)) elif not line.isnumeric() and res['first_base_string'] != '': is_first_base_string = False res['second_base_string'] = line elif line.isnumeric() and not is_first_base_string: res['second_base_string_indices'].append(int(line)) return res
0
0
0
867c88608ae0b2f7eae8b9bf37ebf6d007238668
7,479
py
Python
applications/link/modules/wrapper/db.py
link-money-dev/link-api-web-service
3da226c7115ee4267f8346620029b710b9987e74
[ "BSD-3-Clause" ]
null
null
null
applications/link/modules/wrapper/db.py
link-money-dev/link-api-web-service
3da226c7115ee4267f8346620029b710b9987e74
[ "BSD-3-Clause" ]
1
2021-06-01T22:32:25.000Z
2021-06-01T22:32:25.000Z
applications/watcher/modules/wrapper/db.py
link-money-dev/link-api-web-service
3da226c7115ee4267f8346620029b710b9987e74
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: UTF-8 -*- # 基于 sqlite4dummy 的基本架构重新写了一个超简易版本的 sqlite 数据库管理库 # 本程序提供必要的sqlite操作方法,并对一个sqlite数据库进行初始化 # 本程序从自己启动,外部只能调用其方法 # 本程序的主入口程序为 main() # 请填写下面几个参数: # 第一个参数为数据库名称 DB_NAME='' import os import sqlite3 import sqlite4dummy import psycopg2 class Column(): """Represent a Column in a :class:`Table`. Construct a Column object:: >>> from sqlite4dummy import * >>> c = Column("employee_id", dtype.TEXT, primary_key=True) >>> c Column('employee_id', dtype.TEXT, nullable=True, default=None, primary_key=True) :param column_name: the column name, alpha, digit and understore only. Can't start with digit. :type column_name: string :param data_type: Data type object. :param nullable: (default True) whether it is allow None value. :type nullable: boolean :param default: (default None) default value. :type default: any Python types :param primary_key: (default False) whether it is a primary_key. :type primary_key: boolean For usage example, go :mod:`unittest page<sqlite4dummy.tests.test_Column>` and read the testcase source code. """ class Table(object): """Represent a table in a database. Define a Table:: >>> from sqlite4dummy import * >>> metadata = MetaData() >>> mytable = Table("mytable", metadata, Column("mytable_id", dtype.INTEGER, primary_key=True), Column("value", dtype.TEXT), ) columns can be accessed by table.c.column_name:: >>> mytable.c.mytable_id # return a Column object _id :param table_name: the table name, alpha, digit and understore only. Can't start with digit. :type table_name: string :param metadata: Data type object. :type metadata: :class:`MetaData` :param args: list of Column object :type args: :class:`Column` For usage example, go :mod:`unittest page<sqlite4dummy.tests.test_Table>` and read the testcase source code. 
**中文文档** :class:`sqlite4dummy.schema.Table` 是抽象数据表对象类。 定义Table的方法如下:: >>> from sqlite4dummy import * >>> metadata = MetaData() # 定义metadata >>> mytable = Table("mytable", metadata, # 定义表名, metadata和列 Column("mytable_id", dtype.INTEGER, primary_key=True), Column("value", dtype.TEXT), ) 从Table中获得Column对象有如下两种方法:: >>> mytable.c._id _id >>> mytable.get_column("_id") _id """ class CreateTable(object): """Generate 'CREATE TABLE' SQL statement. Example:: CREATE TABLE table_name ( column_name1 dtype1 CONSTRAINS, column_name2 dtype2 CONSTRAINS, PRIMARY KEY (column, ...), FOREIGN KEY (table_column, ...) ) **中文文档** 创建Table的抽象类, 用于根据Schema生成CREATE TABLE ...的SQL语句。目前不支持 FOREIGN KEY语法。 """
28.876448
110
0.598476
# -*- coding: UTF-8 -*- # 基于 sqlite4dummy 的基本架构重新写了一个超简易版本的 sqlite 数据库管理库 # 本程序提供必要的sqlite操作方法,并对一个sqlite数据库进行初始化 # 本程序从自己启动,外部只能调用其方法 # 本程序的主入口程序为 main() # 请填写下面几个参数: # 第一个参数为数据库名称 DB_NAME='' import os import sqlite3 import sqlite4dummy import psycopg2 class Column(): """Represent a Column in a :class:`Table`. Construct a Column object:: >>> from sqlite4dummy import * >>> c = Column("employee_id", dtype.TEXT, primary_key=True) >>> c Column('employee_id', dtype.TEXT, nullable=True, default=None, primary_key=True) :param column_name: the column name, alpha, digit and understore only. Can't start with digit. :type column_name: string :param data_type: Data type object. :param nullable: (default True) whether it is allow None value. :type nullable: boolean :param default: (default None) default value. :type default: any Python types :param primary_key: (default False) whether it is a primary_key. :type primary_key: boolean For usage example, go :mod:`unittest page<sqlite4dummy.tests.test_Column>` and read the testcase source code. """ def __init__(self, column_name, data_type, nullable=True, default=None, primary_key=False, auto_increment=False): self.column_name = column_name self.full_name = column_name self.table_name = None self.data_type = data_type self.nullable = nullable self.default = default self.primary_key = primary_key if primary_key == True: self.auto_increment=True else: self.auto_increment=False class Table(object): """Represent a table in a database. Define a Table:: >>> from sqlite4dummy import * >>> metadata = MetaData() >>> mytable = Table("mytable", metadata, Column("mytable_id", dtype.INTEGER, primary_key=True), Column("value", dtype.TEXT), ) columns can be accessed by table.c.column_name:: >>> mytable.c.mytable_id # return a Column object _id :param table_name: the table name, alpha, digit and understore only. Can't start with digit. :type table_name: string :param metadata: Data type object. 
:type metadata: :class:`MetaData` :param args: list of Column object :type args: :class:`Column` For usage example, go :mod:`unittest page<sqlite4dummy.tests.test_Table>` and read the testcase source code. **中文文档** :class:`sqlite4dummy.schema.Table` 是抽象数据表对象类。 定义Table的方法如下:: >>> from sqlite4dummy import * >>> metadata = MetaData() # 定义metadata >>> mytable = Table("mytable", metadata, # 定义表名, metadata和列 Column("mytable_id", dtype.INTEGER, primary_key=True), Column("value", dtype.TEXT), ) 从Table中获得Column对象有如下两种方法:: >>> mytable.c._id _id >>> mytable.get_column("_id") _id """ def __init__(self, table_name, columns, auto_ID=True): self.table_name = table_name self.all = list() self.column_names = list() self.primary_key_columns = list() self.pickletype_columns = list() self.columns=[] if auto_ID==True: ID_column=Column(column_name="ID",data_type="INTEGER",nullable=False,default=0,primary_key=True) self.columns.append(ID_column) self.columns.extend(columns) for column in columns: self.column_names.append(column.column_name) if column.primary_key: # 定位PRIMARY KEY的列 self.primary_key_columns.append(column.column_name) self.create_table_sql = CreateTable(self).sql class CreateTable(object): """Generate 'CREATE TABLE' SQL statement. Example:: CREATE TABLE table_name ( column_name1 dtype1 CONSTRAINS, column_name2 dtype2 CONSTRAINS, PRIMARY KEY (column, ...), FOREIGN KEY (table_column, ...) 
) **中文文档** 创建Table的抽象类, 用于根据Schema生成CREATE TABLE ...的SQL语句。目前不支持 FOREIGN KEY语法。 """ def __init__(self, table): clause_CREATE_TABLE = "CREATE TABLE {} ".format(table.table_name) field_list = [] for column in table.columns: field_list.append(column.column_name+"\t"+column.data_type+"\t"+ ("NOT NULL" if column.nullable==False else "") +"\t" + ("PRIMARY KEY\tAUTOINCREMENT" if column.primary_key==True else "")) clause_DATATYPE=",".join(field_list) self.sql="%s\n(\n%s\n)" % ( clause_CREATE_TABLE, clause_DATATYPE) # query_string += ",".join(field_list) # query_string += ");" # clause_CREATE_TABLE = "CREATE TABLE %s" % table.table_name # # clause_DATATYPE = "\t" + ",\n\t".join( # [self._column_param(column) for column in table] # ) # # if len(table.primary_key_columns) == 0: # clause_PRIMARY_KEY = "" # else: # clause_PRIMARY_KEY = ",\n\tPRIMARY KEY (%s)" % ", ".join( # table.primary_key_columns) # # self.sql = "%s\n(\n%s%s\n)" % ( # clause_CREATE_TABLE, clause_DATATYPE, clause_PRIMARY_KEY) class SQLManager(): def __init__(self, db_name): self.db_name=db_name # 创建一个空文件,如果存在,则跳过 if os.path.exists(db_name): print("File already exists! Try to connect to this file!") else: f=open(db_name,'w') f.close() # 连接到该文件 self.conn=sqlite3.connect(db_name) def reconnet(self): self.conn.close() self.conn=sqlite3.connect(self.db_name) def close(self): self.conn.close() def insert_into_table(self, table_name, data): # 判断table是否存在? 
# SELECT count(*) FROM sqlite_master WHERE type='table' AND name='要查询的表名'; c=self.conn.cursor() cursor = c.execute("SELECT count(*) FROM sqlite_master WHERE type='table' AND name='%s'" % table_name) a=1 pass def select(self): pass def execute(self, sql): try: c = self.conn.cursor() result=c.execute(sql) self.conn.commit() return result.fetchall() except Exception as e: print(e) def execute_many(self,sql,args): self.conn.executemany(sql,args) self.conn.commit() class PGManager(): def __init__(self, database='', user='', pw='', host='', port=''): self.conn_args={'database':database, 'user':user, 'password':pw, 'host':host, 'port':port} def execute(self,sql): self.conn = psycopg2.connect(**self.conn_args) cursor=self.conn.cursor() cursor.execute(sql) self.conn.commit() self.conn.close() def select(self, sql): self.conn = psycopg2.connect(**self.conn_args) cursor = self.conn.cursor() cursor.execute(sql) rows = cursor.fetchall() self.conn.close() return rows def execute_many(self,query, sql_sequence): self.conn = psycopg2.connect(**self.conn_args) cursor=self.conn.cursor() cursor.executemany(query, sql_sequence) self.conn.commit() self.conn.close() def close(self): self.conn.close()
4,171
-5
449
deeecbe0ef1f614800c14507ab55e8e7531582f4
255
py
Python
src/gedml/launcher/creators/factories/selectors_creator.py
wangck20/GeDML
1f76ac2094d7b88be7fd4eb6145e5586e547b9ca
[ "MIT" ]
25
2021-09-06T13:26:02.000Z
2022-01-06T13:25:24.000Z
src/gedml/launcher/creators/factories/selectors_creator.py
wangck20/GeDML
1f76ac2094d7b88be7fd4eb6145e5586e547b9ca
[ "MIT" ]
1
2021-09-09T08:29:29.000Z
2021-09-13T15:05:59.000Z
src/gedml/launcher/creators/factories/selectors_creator.py
wangck20/GeDML
1f76ac2094d7b88be7fd4eb6145e5586e547b9ca
[ "MIT" ]
2
2021-09-07T08:44:41.000Z
2021-09-09T08:31:55.000Z
from .base_creator import BaseCreator
28.333333
38
0.670588
from .base_creator import BaseCreator class selectorsCreator(BaseCreator): def __init__(self, **kwargs): super().__init__(**kwargs) def prepare_packages(self): from ....core import selectors self.package = [selectors]
123
15
80
729dea7bac9475aa81c26b745205c007344b419a
2,773
py
Python
src/AuShadha/immunisation/urls.py
GosthMan/AuShadha
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
[ "PostgreSQL" ]
46
2015-03-04T14:19:47.000Z
2021-12-09T02:58:46.000Z
src/AuShadha/immunisation/urls.py
aytida23/AuShadha
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
[ "PostgreSQL" ]
2
2015-06-05T10:29:04.000Z
2015-12-06T16:54:10.000Z
src/AuShadha/immunisation/urls.py
aytida23/AuShadha
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
[ "PostgreSQL" ]
24
2015-03-23T01:38:11.000Z
2022-01-24T16:23:42.000Z
################################################################################ # Project : AuShadha # Description : URLS for Immunisation History # Author : Dr. Easwar T.R # Date : 21-09-2013 # License : GNU-GPL Version 3, see AuShadha/LICENSE.txt ################################################################################ from django.conf.urls import * from django.contrib import admin import AuShadha.settings from immunisation.views import * from .dijit_widgets.pane import render_immunisation_pane admin.autodiscover() urlpatterns = patterns('', url(r'json/(?P<patient_id>\d+)/$', 'immunisation.views.immunisation_json', name='immunisation_json' ), url(r'json/$', 'immunisation.views.immunisation_json', name='immunisation_json_without_id' ), url(r'pane/(?P<patient_id>\d+)/$', render_immunisation_pane, name='render_immunisation_pane_with_id' ), url(r'pane/$', render_immunisation_pane, name='render_immunisation_pane_without_id' ), # url(r'list/(?P<patient_id>\d+)/$', #'immunisation.views.immunisation_list', #name = 'immunisation_list' #), # url(r'list/$', #'immunisation.views.immunisation_list', #name = 'immunisation_list_without_id' #), url(r'add/(?P<patient_id>\d+)/$', 'immunisation.views.immunisation_add', name='immunisation_add' ), url(r'add/$', 'immunisation.views.immunisation_add', name='immunisation_add_without_id' ), url(r'edit/(?P<immunisation_id>\d+)/$', 'immunisation.views.immunisation_edit', name='immunisation_edit' ), url(r'edit/$', 'immunisation.views.immunisation_edit', name='immunisation_edit_without_id' ), url(r'del/(?P<immunisation_id>\d+)/$', 'immunisation.views.immunisation_del', name='immunisation_del' ), url(r'del/$', 'immunisation.views.immunisation_del', name='immunisation_del_without_id' ), )
34.6625
80
0.434187
################################################################################ # Project : AuShadha # Description : URLS for Immunisation History # Author : Dr. Easwar T.R # Date : 21-09-2013 # License : GNU-GPL Version 3, see AuShadha/LICENSE.txt ################################################################################ from django.conf.urls import * from django.contrib import admin import AuShadha.settings from immunisation.views import * from .dijit_widgets.pane import render_immunisation_pane admin.autodiscover() urlpatterns = patterns('', url(r'json/(?P<patient_id>\d+)/$', 'immunisation.views.immunisation_json', name='immunisation_json' ), url(r'json/$', 'immunisation.views.immunisation_json', name='immunisation_json_without_id' ), url(r'pane/(?P<patient_id>\d+)/$', render_immunisation_pane, name='render_immunisation_pane_with_id' ), url(r'pane/$', render_immunisation_pane, name='render_immunisation_pane_without_id' ), # url(r'list/(?P<patient_id>\d+)/$', #'immunisation.views.immunisation_list', #name = 'immunisation_list' #), # url(r'list/$', #'immunisation.views.immunisation_list', #name = 'immunisation_list_without_id' #), url(r'add/(?P<patient_id>\d+)/$', 'immunisation.views.immunisation_add', name='immunisation_add' ), url(r'add/$', 'immunisation.views.immunisation_add', name='immunisation_add_without_id' ), url(r'edit/(?P<immunisation_id>\d+)/$', 'immunisation.views.immunisation_edit', name='immunisation_edit' ), url(r'edit/$', 'immunisation.views.immunisation_edit', name='immunisation_edit_without_id' ), url(r'del/(?P<immunisation_id>\d+)/$', 'immunisation.views.immunisation_del', name='immunisation_del' ), url(r'del/$', 'immunisation.views.immunisation_del', name='immunisation_del_without_id' ), )
0
0
0
d255b9bc53c5841e75c0734453756785611dcc3d
694
py
Python
examples/collection.py
tushkanin/jsondataclass
a96d749de9a1c46b1f0bbf380d61dc40e72c51d9
[ "MIT" ]
null
null
null
examples/collection.py
tushkanin/jsondataclass
a96d749de9a1c46b1f0bbf380d61dc40e72c51d9
[ "MIT" ]
1
2021-11-15T17:49:32.000Z
2021-11-15T17:49:32.000Z
examples/collection.py
tushkanin/jsondataclass
a96d749de9a1c46b1f0bbf380d61dc40e72c51d9
[ "MIT" ]
null
null
null
from dataclasses import dataclass from typing import List, Tuple, Dict from jsondataclass import from_json, to_json @dataclass movie = Movie(["comedy", "crime"], (5.6, 100), {"en": "WALL-E", "de": "WALL-E"}) json_str = to_json(movie) print(json_str) # {"genres": ["comedy", "crime"], "rating": [5.6, 100], "name": {"en": "WALL-E", "de": "WALL-E"}} json_str = '{"genres": ["comedy", "crime"], "rating": [5.6, 100], "name": {"en": "WALL-E", "de": "WALL-E"}}' movie = from_json(json_str, Movie) print(movie) # Movie(genres=['comedy', 'crime'], rating=(5.6, 100), name={'en': 'WALL-E', 'de': 'WALL-E'})
31.545455
108
0.610951
from dataclasses import dataclass from typing import List, Tuple, Dict from jsondataclass import from_json, to_json @dataclass class Movie: genres: List[str] rating: Tuple[float, int] name: Dict[str, str] movie = Movie(["comedy", "crime"], (5.6, 100), {"en": "WALL-E", "de": "WALL-E"}) json_str = to_json(movie) print(json_str) # {"genres": ["comedy", "crime"], "rating": [5.6, 100], "name": {"en": "WALL-E", "de": "WALL-E"}} json_str = '{"genres": ["comedy", "crime"], "rating": [5.6, 100], "name": {"en": "WALL-E", "de": "WALL-E"}}' movie = from_json(json_str, Movie) print(movie) # Movie(genres=['comedy', 'crime'], rating=(5.6, 100), name={'en': 'WALL-E', 'de': 'WALL-E'})
0
68
22
2204dc414b6912eb03b9d5e8e2dbe8e4c2b009aa
2,865
py
Python
utils/metrics.py
PPGod95/FIDTM
b5582c5cc485496d85af2043ffd6e4266f354f3b
[ "MIT" ]
null
null
null
utils/metrics.py
PPGod95/FIDTM
b5582c5cc485496d85af2043ffd6e4266f354f3b
[ "MIT" ]
null
null
null
utils/metrics.py
PPGod95/FIDTM
b5582c5cc485496d85af2043ffd6e4266f354f3b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ @Project : @FileName: @Author :penghr @Time :202x/xx/xx xx:xx @Desc : """ import math import cv2 import numpy as np import scipy.spatial import torch import torch.nn as nn import torch.nn.functional as F def generate_point_map(kpoint, f_loc, rate=1): '''obtain the location coordinates''' pred_coord = np.nonzero(kpoint) point_map = np.zeros((int(kpoint.shape[0] * rate), int(kpoint.shape[1] * rate), 3), dtype="uint8") + 255 # 22 # count = len(pred_coor[0]) coord_list = [] for i in range(0, len(pred_coord[0])): h = int(pred_coord[0][i] * rate) w = int(pred_coord[1][i] * rate) coord_list.append([w, h]) cv2.circle(point_map, (w, h), 2, (0, 0, 0), -1) for data in coord_list: f_loc.write('{} {} '.format(math.floor(data[0]), math.floor(data[1]))) f_loc.write('\n') return point_map def generate_bounding_boxes(kpoint, fname, resize): '''change the data path''' Img_data = cv2.imread(fname) # ori_Img_data = Img_data.copy() Img_data = cv2.resize(Img_data, resize) '''generate sigma''' pts = np.array(list(zip(np.nonzero(kpoint)[1], np.nonzero(kpoint)[0]))) leafsize = 2048 # build kdtree tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize) distances, locations = tree.query(pts, k=4) for index, pt in enumerate(pts): pt2d = np.zeros(kpoint.shape, dtype=np.float32) pt2d[pt[1], pt[0]] = 1. if np.sum(kpoint) > 1: sigma = (distances[index][1] + distances[index][2] + distances[index][3]) * 0.1 else: sigma = np.average(np.array(kpoint.shape)) / 2. / 2. # case: 1 point sigma = min(sigma, min(Img_data.shape[0], Img_data.shape[1]) * 0.05) if sigma < 6: t = 2 else: t = 2 Img_data = cv2.rectangle(Img_data, ( int((pt[0] * Img_data.shape[1] / resize[0] - sigma)), int((pt[1] * Img_data.shape[0] / resize[1] - sigma))), (int((pt[0] * Img_data.shape[1] / resize[0] + sigma)), int((pt[1] * Img_data.shape[0] / resize[1] + sigma))), (0, 255, 0), t) return Img_data
30.478723
120
0.575567
# -*- coding: utf-8 -*- """ @Project : @FileName: @Author :penghr @Time :202x/xx/xx xx:xx @Desc : """ import math import cv2 import numpy as np import scipy.spatial import torch import torch.nn as nn import torch.nn.functional as F def LMDS_counting(fmap, img_name, f_loc): input_max = torch.max(fmap).item() ''' find local maxima''' keep = nn.functional.max_pool2d(fmap, (3, 3), stride=1, padding=1) keep = (keep == fmap).float() fmap = keep * fmap '''set the pixel valur of local maxima as 1 for counting''' fmap[fmap < 100.0 / 255.0 * input_max] = 0 fmap[fmap > 0] = 1 ''' negative sample''' if input_max < 0.1: fmap = fmap * 0 count = int(torch.sum(fmap).item()) kpoint = fmap.data.squeeze(0).squeeze(0).cpu().numpy() f_loc.write('{} {} '.format(img_name, count)) return count, kpoint, f_loc def generate_point_map(kpoint, f_loc, rate=1): '''obtain the location coordinates''' pred_coord = np.nonzero(kpoint) point_map = np.zeros((int(kpoint.shape[0] * rate), int(kpoint.shape[1] * rate), 3), dtype="uint8") + 255 # 22 # count = len(pred_coor[0]) coord_list = [] for i in range(0, len(pred_coord[0])): h = int(pred_coord[0][i] * rate) w = int(pred_coord[1][i] * rate) coord_list.append([w, h]) cv2.circle(point_map, (w, h), 2, (0, 0, 0), -1) for data in coord_list: f_loc.write('{} {} '.format(math.floor(data[0]), math.floor(data[1]))) f_loc.write('\n') return point_map def generate_bounding_boxes(kpoint, fname, resize): '''change the data path''' Img_data = cv2.imread(fname) # ori_Img_data = Img_data.copy() Img_data = cv2.resize(Img_data, resize) '''generate sigma''' pts = np.array(list(zip(np.nonzero(kpoint)[1], np.nonzero(kpoint)[0]))) leafsize = 2048 # build kdtree tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize) distances, locations = tree.query(pts, k=4) for index, pt in enumerate(pts): pt2d = np.zeros(kpoint.shape, dtype=np.float32) pt2d[pt[1], pt[0]] = 1. 
if np.sum(kpoint) > 1: sigma = (distances[index][1] + distances[index][2] + distances[index][3]) * 0.1 else: sigma = np.average(np.array(kpoint.shape)) / 2. / 2. # case: 1 point sigma = min(sigma, min(Img_data.shape[0], Img_data.shape[1]) * 0.05) if sigma < 6: t = 2 else: t = 2 Img_data = cv2.rectangle(Img_data, ( int((pt[0] * Img_data.shape[1] / resize[0] - sigma)), int((pt[1] * Img_data.shape[0] / resize[1] - sigma))), (int((pt[0] * Img_data.shape[1] / resize[0] + sigma)), int((pt[1] * Img_data.shape[0] / resize[1] + sigma))), (0, 255, 0), t) return Img_data
612
0
23
2935d7d8b452877c5c148013fc96811bccebe1ad
33,897
py
Python
examples/rsfmri_preprocessing.py
dmordom/nipype
e815741ad68d63b7134b6db6225aabb0c38511ac
[ "BSD-3-Clause" ]
1
2018-04-18T12:13:37.000Z
2018-04-18T12:13:37.000Z
examples/rsfmri_preprocessing.py
ito-takuya/nipype
9099a5809487b55868cdec82a719030419cbd6ba
[ "BSD-3-Clause" ]
null
null
null
examples/rsfmri_preprocessing.py
ito-takuya/nipype
9099a5809487b55868cdec82a719030419cbd6ba
[ "BSD-3-Clause" ]
1
2021-09-08T14:31:47.000Z
2021-09-08T14:31:47.000Z
#!/usr/bin/env python """ ================================================================ rsfMRI: AFNI, ANTS, DicomStack, FreeSurfer, FSL, Nipy, aCompCorr ================================================================ A preprocessing workflow for Siemens resting state data. This workflow makes use of: - AFNI - ANTS - C3D_Affine_Tool - DicomStack - FreeSurfer - FSL - NiPy For example:: python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii -s subj001 -n 2 --despike -o output -p PBS --plugin_args "dict(qsub_args='-q many')" This workflow takes resting timeseries and a Siemens dicom file corresponding to it and preprocesses it to produce timeseries coordinates or grayordinates. This workflow also requires 2mm subcortical atlas and templates that are available from: http://mindboggle.info/data.html specifically the 2mm versions of: - `Joint Fusion Atlas <http://mindboggle.info/data/atlases/jointfusion/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm.nii.gz>`_ - `MNI template <http://mindboggle.info/data/templates/ants/OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz>`_ The 2mm version was generated with:: >>> from nipype import freesurfer as fs >>> rs = fs.Resample() >>> rs.inputs.in_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152.nii.gz' >>> rs.inputs.resampled_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm.nii.gz' >>> rs.inputs.voxel_size = (2., 2., 2.) 
>>> rs.inputs.args = '-rt nearest -ns 1' >>> res = rs.run() """ import os from nipype.interfaces.base import CommandLine CommandLine.set_default_terminal_output('file') from nipype import config config.enable_provenance() from nipype import (ants, afni, fsl, freesurfer, nipy, Function, DataSink) from nipype import Workflow, Node, MapNode from nipype.algorithms.rapidart import ArtifactDetect from nipype.algorithms.misc import TSNR from nipype.interfaces.fsl import EPIDeWarp from nipype.interfaces.io import FreeSurferSource from nipype.interfaces.c3 import C3dAffineTool from nipype.interfaces.utility import Merge, IdentityInterface from nipype.utils.filemanip import filename_to_list import numpy as np import scipy as sp import nibabel as nb from dcmstack.extract import default_extractor from dicom import read_file imports = ['import os', 'import nibabel as nb', 'import numpy as np', 'import scipy as sp', 'from nipype.utils.filemanip import filename_to_list' ] def get_info(dicom_files): """Given a Siemens dicom file return metadata Returns ------- RepetitionTime Slice Acquisition Times Spacing between slices """ meta = default_extractor(read_file(filename_to_list(dicom_files)[0], stop_before_pixels=True, force=True)) return (meta['RepetitionTime']/1000., meta['CsaImage.MosaicRefAcqTimes'], meta['SpacingBetweenSlices']) def median(in_files): """Computes an average of the median of each realigned timeseries Parameters ---------- in_files: one or more realigned Nifti 4D time series Returns ------- out_file: a 3D Nifti file """ average = None for idx, filename in enumerate(filename_to_list(in_files)): img = nb.load(filename) data = np.median(img.get_data(), axis=3) if not average: average = data else: average = average + data median_img = nb.Nifti1Image(average/float(idx + 1), img.get_affine(), img.get_header()) filename = os.path.join(os.getcwd(), 'median.nii.gz') median_img.to_filename(filename) return filename def get_aparc_aseg(files): """Return the aparc+aseg.mgz 
file""" for name in files: if 'aparc+aseg.mgz' in name: return name raise ValueError('aparc+aseg.mgz not found') def motion_regressors(motion_params, order=2, derivatives=2): """Compute motion regressors upto given order and derivative motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) """ out_files = [] for idx, filename in enumerate(filename_to_list(motion_params)): params = np.genfromtxt(filename) out_params = params for d in range(1, derivatives + 1): cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0), params)) out_params = np.hstack((out_params, np.diff(cparams, d, axis=0))) out_params2 = out_params for i in range(2, order + 1): out_params2 = np.hstack((out_params2, np.power(out_params, i))) filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx) np.savetxt(filename, out_params2, fmt="%.10f") out_files.append(filename) return out_files def build_filter1(motion_params, comp_norm, outliers): """Builds a regressor set comparison motion parameters, composite norm and outliers The outliers are added as a single time point column for each outlier Parameters ---------- motion_params: a text file containing motion parameters and its derivatives comp_norm: a text file containing the composite norm outliers: a text file containing 0-based outlier indices Returns ------- components_file: a text file containing all the regressors """ out_files = [] for idx, filename in enumerate(filename_to_list(motion_params)): params = np.genfromtxt(filename) norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx]) out_params = np.hstack((params, norm_val[:, None])) try: outlier_val = np.genfromtxt(filename_to_list(outliers)[idx]) except IOError: outlier_val = np.empty((0)) for index in np.atleast_1d(outlier_val): outlier_vector = np.zeros((out_params.shape[0], 1)) outlier_vector[index] = 1 out_params = np.hstack((out_params, outlier_vector)) filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx) np.savetxt(filename, out_params, 
fmt="%.10f") out_files.append(filename) return out_files def extract_noise_components(realigned_file, mask_file, num_components=6): """Derive components most reflective of physiological noise Parameters ---------- realigned_file: a 4D Nifti file containing realigned volumes mask_file: a 3D Nifti file containing white matter + ventricular masks num_components: number of components to use for noise decomposition Returns ------- components_file: a text file containing the noise components """ imgseries = nb.load(realigned_file) noise_mask = nb.load(mask_file) voxel_timecourses = imgseries.get_data()[np.nonzero(noise_mask.get_data())] voxel_timecourses = voxel_timecourses.byteswap().newbyteorder() voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0 _, _, v = sp.linalg.svd(voxel_timecourses, full_matrices=False) components_file = os.path.join(os.getcwd(), 'noise_components.txt') np.savetxt(components_file, v[:num_components, :].T) return components_file def extract_subrois(timeseries_file, label_file, indices): """Extract voxel time courses for each subcortical roi index Parameters ---------- timeseries_file: a 4D Nifti file label_file: a 3D file containing rois in the same space/size of the 4D file indices: a list of indices for ROIs to extract. 
Returns ------- out_file: a text file containing time courses for each voxel of each roi The first four columns are: freesurfer index, i, j, k positions in the label file """ img = nb.load(timeseries_file) data = img.get_data() roiimg = nb.load(label_file) rois = roiimg.get_data() out_ts_file = os.path.join(os.getcwd(), 'subcortical_timeseries.txt') with open(out_ts_file, 'wt') as fp: for fsindex in indices: ijk = np.nonzero(rois == fsindex) ts = data[ijk] for i0, row in enumerate(ts): fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0], ijk[1][i0], ijk[2][i0]) + ','.join(['%.10f' % val for val in row]) + '\n') return out_ts_file def combine_hemi(left, right): """Combine left and right hemisphere time series into a single text file """ lh_data = nb.load(left).get_data() rh_data = nb.load(right).get_data() indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None], 2000000 + np.arange(0, rh_data.shape[0])[:, None])) all_data = np.hstack((indices, np.vstack((lh_data.squeeze(), rh_data.squeeze())))) filename = 'combined_surf.txt' np.savetxt(filename, all_data, fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))) return os.path.abspath(filename) """ Creates the main preprocessing workflow """ """ Creates the full workflow including getting information from dicom files """ if __name__ == "__main__": from argparse import ArgumentParser parser = ArgumentParser(description=__doc__) parser.add_argument("-d", "--dicom_file", dest="dicom_file", help="an example dicom file from the resting series") parser.add_argument("-f", "--files", dest="files", nargs="+", help="4d nifti files for resting state", required=True) parser.add_argument("-s", "--subject_id", dest="subject_id", help="FreeSurfer subject id", required=True) parser.add_argument("-n", "--n_vol", dest="n_vol", default=0, type=int, help="Volumes to skip at the beginning") parser.add_argument("--despike", dest="despike", default=False, action="store_true", help="Use despiked data") 
parser.add_argument("--TR", dest="TR", default=None, help="TR if dicom not provided in seconds") parser.add_argument("--slice_times", dest="slice_times", nargs="+", type=float, help="Slice times in seconds") parser.add_argument("-l", "--lowpass_freq", dest="lowpass_freq", default=-1, help="Low pass frequency (Hz)") parser.add_argument("-u", "--highpass_freq", dest="highpass_freq", default=-1, help="High pass frequency (Hz)") parser.add_argument("-o", "--output_dir", dest="sink", help="Output directory base") parser.add_argument("-w", "--work_dir", dest="work_dir", help="Output directory base") parser.add_argument("-p", "--plugin", dest="plugin", default='Linear', help="Plugin to use") parser.add_argument("--plugin_args", dest="plugin_args", help="Plugin arguments") parser.add_argument("--field_maps", dest="field_maps", nargs="+", help="field map niftis") parser.add_argument("--fm_echospacing", dest="echo_spacing", type=float, help="field map echo spacing") parser.add_argument("--fm_TE_diff", dest='TE_diff', type=float, help="field map echo time difference") parser.add_argument("--fm_sigma", dest='sigma', type=float, help="field map sigma value") args = parser.parse_args() wf = create_resting_workflow(args) if args.work_dir: work_dir = os.path.abspath(args.work_dir) else: work_dir = os.getcwd() wf.base_dir = work_dir if args.plugin_args: wf.run(args.plugin, plugin_args=eval(args.plugin_args)) else: wf.run(args.plugin)
42.85335
136
0.620468
#!/usr/bin/env python """ ================================================================ rsfMRI: AFNI, ANTS, DicomStack, FreeSurfer, FSL, Nipy, aCompCorr ================================================================ A preprocessing workflow for Siemens resting state data. This workflow makes use of: - AFNI - ANTS - C3D_Affine_Tool - DicomStack - FreeSurfer - FSL - NiPy For example:: python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii -s subj001 -n 2 --despike -o output -p PBS --plugin_args "dict(qsub_args='-q many')" This workflow takes resting timeseries and a Siemens dicom file corresponding to it and preprocesses it to produce timeseries coordinates or grayordinates. This workflow also requires 2mm subcortical atlas and templates that are available from: http://mindboggle.info/data.html specifically the 2mm versions of: - `Joint Fusion Atlas <http://mindboggle.info/data/atlases/jointfusion/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm.nii.gz>`_ - `MNI template <http://mindboggle.info/data/templates/ants/OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz>`_ The 2mm version was generated with:: >>> from nipype import freesurfer as fs >>> rs = fs.Resample() >>> rs.inputs.in_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152.nii.gz' >>> rs.inputs.resampled_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm.nii.gz' >>> rs.inputs.voxel_size = (2., 2., 2.) 
>>> rs.inputs.args = '-rt nearest -ns 1' >>> res = rs.run() """ import os from nipype.interfaces.base import CommandLine CommandLine.set_default_terminal_output('file') from nipype import config config.enable_provenance() from nipype import (ants, afni, fsl, freesurfer, nipy, Function, DataSink) from nipype import Workflow, Node, MapNode from nipype.algorithms.rapidart import ArtifactDetect from nipype.algorithms.misc import TSNR from nipype.interfaces.fsl import EPIDeWarp from nipype.interfaces.io import FreeSurferSource from nipype.interfaces.c3 import C3dAffineTool from nipype.interfaces.utility import Merge, IdentityInterface from nipype.utils.filemanip import filename_to_list import numpy as np import scipy as sp import nibabel as nb from dcmstack.extract import default_extractor from dicom import read_file imports = ['import os', 'import nibabel as nb', 'import numpy as np', 'import scipy as sp', 'from nipype.utils.filemanip import filename_to_list' ] def get_info(dicom_files): """Given a Siemens dicom file return metadata Returns ------- RepetitionTime Slice Acquisition Times Spacing between slices """ meta = default_extractor(read_file(filename_to_list(dicom_files)[0], stop_before_pixels=True, force=True)) return (meta['RepetitionTime']/1000., meta['CsaImage.MosaicRefAcqTimes'], meta['SpacingBetweenSlices']) def median(in_files): """Computes an average of the median of each realigned timeseries Parameters ---------- in_files: one or more realigned Nifti 4D time series Returns ------- out_file: a 3D Nifti file """ average = None for idx, filename in enumerate(filename_to_list(in_files)): img = nb.load(filename) data = np.median(img.get_data(), axis=3) if not average: average = data else: average = average + data median_img = nb.Nifti1Image(average/float(idx + 1), img.get_affine(), img.get_header()) filename = os.path.join(os.getcwd(), 'median.nii.gz') median_img.to_filename(filename) return filename def get_aparc_aseg(files): """Return the aparc+aseg.mgz 
file""" for name in files: if 'aparc+aseg.mgz' in name: return name raise ValueError('aparc+aseg.mgz not found') def motion_regressors(motion_params, order=2, derivatives=2): """Compute motion regressors upto given order and derivative motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) """ out_files = [] for idx, filename in enumerate(filename_to_list(motion_params)): params = np.genfromtxt(filename) out_params = params for d in range(1, derivatives + 1): cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0), params)) out_params = np.hstack((out_params, np.diff(cparams, d, axis=0))) out_params2 = out_params for i in range(2, order + 1): out_params2 = np.hstack((out_params2, np.power(out_params, i))) filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx) np.savetxt(filename, out_params2, fmt="%.10f") out_files.append(filename) return out_files def build_filter1(motion_params, comp_norm, outliers): """Builds a regressor set comparison motion parameters, composite norm and outliers The outliers are added as a single time point column for each outlier Parameters ---------- motion_params: a text file containing motion parameters and its derivatives comp_norm: a text file containing the composite norm outliers: a text file containing 0-based outlier indices Returns ------- components_file: a text file containing all the regressors """ out_files = [] for idx, filename in enumerate(filename_to_list(motion_params)): params = np.genfromtxt(filename) norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx]) out_params = np.hstack((params, norm_val[:, None])) try: outlier_val = np.genfromtxt(filename_to_list(outliers)[idx]) except IOError: outlier_val = np.empty((0)) for index in np.atleast_1d(outlier_val): outlier_vector = np.zeros((out_params.shape[0], 1)) outlier_vector[index] = 1 out_params = np.hstack((out_params, outlier_vector)) filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx) np.savetxt(filename, out_params, 
fmt="%.10f") out_files.append(filename) return out_files def extract_noise_components(realigned_file, mask_file, num_components=6): """Derive components most reflective of physiological noise Parameters ---------- realigned_file: a 4D Nifti file containing realigned volumes mask_file: a 3D Nifti file containing white matter + ventricular masks num_components: number of components to use for noise decomposition Returns ------- components_file: a text file containing the noise components """ imgseries = nb.load(realigned_file) noise_mask = nb.load(mask_file) voxel_timecourses = imgseries.get_data()[np.nonzero(noise_mask.get_data())] voxel_timecourses = voxel_timecourses.byteswap().newbyteorder() voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0 _, _, v = sp.linalg.svd(voxel_timecourses, full_matrices=False) components_file = os.path.join(os.getcwd(), 'noise_components.txt') np.savetxt(components_file, v[:num_components, :].T) return components_file def extract_subrois(timeseries_file, label_file, indices): """Extract voxel time courses for each subcortical roi index Parameters ---------- timeseries_file: a 4D Nifti file label_file: a 3D file containing rois in the same space/size of the 4D file indices: a list of indices for ROIs to extract. 
Returns ------- out_file: a text file containing time courses for each voxel of each roi The first four columns are: freesurfer index, i, j, k positions in the label file """ img = nb.load(timeseries_file) data = img.get_data() roiimg = nb.load(label_file) rois = roiimg.get_data() out_ts_file = os.path.join(os.getcwd(), 'subcortical_timeseries.txt') with open(out_ts_file, 'wt') as fp: for fsindex in indices: ijk = np.nonzero(rois == fsindex) ts = data[ijk] for i0, row in enumerate(ts): fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0], ijk[1][i0], ijk[2][i0]) + ','.join(['%.10f' % val for val in row]) + '\n') return out_ts_file def combine_hemi(left, right): """Combine left and right hemisphere time series into a single text file """ lh_data = nb.load(left).get_data() rh_data = nb.load(right).get_data() indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None], 2000000 + np.arange(0, rh_data.shape[0])[:, None])) all_data = np.hstack((indices, np.vstack((lh_data.squeeze(), rh_data.squeeze())))) filename = 'combined_surf.txt' np.savetxt(filename, all_data, fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))) return os.path.abspath(filename) """ Creates the main preprocessing workflow """ def create_workflow(files, subject_id, n_vol=0, despike=True, TR=None, slice_times=None, slice_thickness=None, fieldmap_images=[], norm_threshold=1, num_components=6, vol_fwhm=None, surf_fwhm=None, lowpass_freq=-1, highpass_freq=-1, sink_directory=os.getcwd(), FM_TEdiff=2.46, FM_sigma=2, FM_echo_spacing=.7, target_subject=['fsaverage3', 'fsaverage4'], name='resting'): wf = Workflow(name=name) # Skip starting volumes remove_vol = MapNode(fsl.ExtractROI(t_min=n_vol, t_size=-1), iterfield=['in_file'], name="remove_volumes") remove_vol.inputs.in_file = files # Run AFNI's despike. 
This is always run, however, whether this is fed to # realign depends on the input configuration despiker = MapNode(afni.Despike(outputtype='NIFTI_GZ'), iterfield=['in_file'], name='despike') #despiker.plugin_args = {'qsub_args': '-l nodes=1:ppn='} wf.connect(remove_vol, 'roi_file', despiker, 'in_file') # Run Nipy joint slice timing and realignment algorithm realign = Node(nipy.SpaceTimeRealigner(), name='realign') realign.inputs.tr = TR realign.inputs.slice_times = slice_times realign.inputs.slice_info = 2 if despike: wf.connect(despiker, 'out_file', realign, 'in_file') else: wf.connect(remove_vol, 'roi_file', realign, 'in_file') # Comute TSNR on realigned data regressing polynomials upto order 2 tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr') wf.connect(realign, 'out_file', tsnr, 'in_file') # Compute the median image across runs calc_median = Node(Function(input_names=['in_files'], output_names=['median_file'], function=median, imports=imports), name='median') wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') # Coregister the median to the surface register = Node(freesurfer.BBRegister(), name='bbregister') register.inputs.subject_id = subject_id register.inputs.init = 'fsl' register.inputs.contrast_type = 't2' register.inputs.out_fsl_file = True register.inputs.epi_mask = True # Compute fieldmaps and unwarp using them if fieldmap_images: fieldmap = Node(interface=EPIDeWarp(), name='fieldmap_unwarp') fieldmap.inputs.tediff = FM_TEdiff fieldmap.inputs.esp = FM_echo_spacing fieldmap.inputs.sigma = FM_sigma fieldmap.inputs.mag_file = fieldmap_images[0] fieldmap.inputs.dph_file = fieldmap_images[1] wf.connect(calc_median, 'median_file', fieldmap, 'exf_file') dewarper = MapNode(interface=fsl.FUGUE(), iterfield=['in_file'], name='dewarper') wf.connect(tsnr, 'detrended_file', dewarper, 'in_file') wf.connect(fieldmap, 'exf_mask', dewarper, 'mask_file') wf.connect(fieldmap, 'vsm_file', dewarper, 'shift_in_file') wf.connect(fieldmap, 
'exfdw', register, 'source_file') else: wf.connect(calc_median, 'median_file', register, 'source_file') # Get the subject's freesurfer source directory fssource = Node(FreeSurferSource(), name='fssource') fssource.inputs.subject_id = subject_id fssource.inputs.subjects_dir = os.environ['SUBJECTS_DIR'] # Extract wm+csf, brain masks by eroding freesurfer labels and then # transform the masks into the space of the median wmcsf = Node(freesurfer.Binarize(), name='wmcsfmask') mask = wmcsf.clone('anatmask') wmcsftransform = Node(freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), name='wmcsftransform') wmcsftransform.inputs.subjects_dir = os.environ['SUBJECTS_DIR'] wmcsf.inputs.wm_ven_csf = True wmcsf.inputs.match = [4, 5, 14, 15, 24, 31, 43, 44, 63] wmcsf.inputs.binary_file = 'wmcsf.nii.gz' wmcsf.inputs.erode = int(np.ceil(slice_thickness)) wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), wmcsf, 'in_file') if fieldmap_images: wf.connect(fieldmap, 'exf_mask', wmcsftransform, 'source_file') else: wf.connect(calc_median, 'median_file', wmcsftransform, 'source_file') wf.connect(register, 'out_reg_file', wmcsftransform, 'reg_file') wf.connect(wmcsf, 'binary_file', wmcsftransform, 'target_file') mask.inputs.binary_file = 'mask.nii.gz' mask.inputs.dilate = int(np.ceil(slice_thickness)) + 1 mask.inputs.erode = int(np.ceil(slice_thickness)) mask.inputs.min = 0.5 wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), mask, 'in_file') masktransform = wmcsftransform.clone("masktransform") if fieldmap_images: wf.connect(fieldmap, 'exf_mask', masktransform, 'source_file') else: wf.connect(calc_median, 'median_file', masktransform, 'source_file') wf.connect(register, 'out_reg_file', masktransform, 'reg_file') wf.connect(mask, 'binary_file', masktransform, 'target_file') # Compute Art outliers art = Node(interface=ArtifactDetect(use_differences=[True, False], use_norm=True, norm_threshold=norm_threshold, zintensity_threshold=3, parameter_source='NiPy', 
bound_by_brainmask=True, save_plot=False, mask_type='file'), name="art") if fieldmap_images: wf.connect(dewarper, 'unwarped_file', art, 'realigned_files') else: wf.connect(tsnr, 'detrended_file', art, 'realigned_files') wf.connect(realign, 'par_file', art, 'realignment_parameters') wf.connect(masktransform, 'transformed_file', art, 'mask_file') # Compute motion regressors motreg = Node(Function(input_names=['motion_params', 'order', 'derivatives'], output_names=['out_files'], function=motion_regressors, imports=imports), name='getmotionregress') wf.connect(realign, 'par_file', motreg, 'motion_params') # Create a filter to remove motion and art confounds createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm', 'outliers'], output_names=['out_files'], function=build_filter1, imports=imports), name='makemotionbasedfilter') wf.connect(motreg, 'out_files', createfilter1, 'motion_params') wf.connect(art, 'norm_files', createfilter1, 'comp_norm') wf.connect(art, 'outlier_files', createfilter1, 'outliers') # Filter the motion and art confounds filter1 = MapNode(fsl.GLM(out_res_name='timeseries.nii.gz', demean=True), iterfield=['in_file', 'design'], name='filtermotion') if fieldmap_images: wf.connect(dewarper, 'unwarped_file', filter1, 'in_file') else: wf.connect(tsnr, 'detrended_file', filter1, 'in_file') wf.connect(createfilter1, 'out_files', filter1, 'design') wf.connect(masktransform, 'transformed_file', filter1, 'mask') # Create a filter to remove noise components based on white matter and CSF createfilter2 = MapNode(Function(input_names=['realigned_file', 'mask_file', 'num_components'], output_names=['out_files'], function=extract_noise_components, imports=imports), iterfield=['realigned_file'], name='makecompcorrfilter') createfilter2.inputs.num_components = num_components wf.connect(filter1, 'out_res', createfilter2, 'realigned_file') wf.connect(masktransform, 'transformed_file', createfilter2, 'mask_file') # Filter noise components filter2 = 
MapNode(fsl.GLM(out_res_name='timeseries_cleaned.nii.gz', demean=True), iterfield=['in_file', 'design'], name='filtercompcorr') wf.connect(filter1, 'out_res', filter2, 'in_file') wf.connect(createfilter2, 'out_files', filter2, 'design') wf.connect(masktransform, 'transformed_file', filter2, 'mask') # Smoothing using surface and volume smoothing smooth = MapNode(freesurfer.Smooth(), iterfield=['in_file'], name='smooth') smooth.inputs.proj_frac_avg = (0.1, 0.9, 0.1) if surf_fwhm is None: surf_fwhm = 5 * slice_thickness smooth.inputs.surface_fwhm = surf_fwhm if vol_fwhm is None: vol_fwhm = 2 * slice_thickness smooth.inputs.vol_fwhm = vol_fwhm wf.connect(filter2, 'out_res', smooth, 'in_file') wf.connect(register, 'out_reg_file', smooth, 'reg_file') # Bandpass filter the data bandpass = MapNode(fsl.TemporalFilter(), iterfield=['in_file'], name='bandpassfilter') if highpass_freq < 0: bandpass.inputs.highpass_sigma = -1 else: bandpass.inputs.highpass_sigma = 1. / (2 * TR * highpass_freq) if lowpass_freq < 0: bandpass.inputs.lowpass_sigma = -1 else: bandpass.inputs.lowpass_sigma = 1. 
/ (2 * TR * lowpass_freq) wf.connect(smooth, 'smoothed_file', bandpass, 'in_file') # Convert aparc to subject functional space aparctransform = wmcsftransform.clone("aparctransform") if fieldmap_images: wf.connect(fieldmap, 'exf_mask', aparctransform, 'source_file') else: wf.connect(calc_median, 'median_file', aparctransform, 'source_file') wf.connect(register, 'out_reg_file', aparctransform, 'reg_file') wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparctransform, 'target_file') # Sample the average time series in aparc ROIs sampleaparc = MapNode(freesurfer.SegStats(avgwf_txt_file=True, default_color_table=True), iterfield=['in_file'], name='aparc_ts') sampleaparc.inputs.segment_id = ([8] + range(10, 14) + [17, 18, 26, 47] + range(49, 55) + [58] + range(1001, 1036) + range(2001, 2036)) wf.connect(aparctransform, 'transformed_file', sampleaparc, 'segmentation_file') wf.connect(bandpass, 'out_file', sampleaparc, 'in_file') # Sample the time series onto the surface of the target surface. 
Performs # sampling into left and right hemisphere target = Node(IdentityInterface(fields=['target_subject']), name='target') target.iterables = ('target_subject', filename_to_list(target_subject)) samplerlh = MapNode(freesurfer.SampleToSurface(), iterfield=['source_file'], name='sampler_lh') samplerlh.inputs.sampling_method = "average" samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1) samplerlh.inputs.sampling_units = "frac" samplerlh.inputs.interp_method = "trilinear" #samplerlh.inputs.cortex_mask = True samplerlh.inputs.out_type = 'niigz' samplerlh.inputs.subjects_dir = os.environ['SUBJECTS_DIR'] samplerrh = samplerlh.clone('sampler_rh') samplerlh.inputs.hemi = 'lh' wf.connect(bandpass, 'out_file', samplerlh, 'source_file') wf.connect(register, 'out_reg_file', samplerlh, 'reg_file') wf.connect(target, 'target_subject', samplerlh, 'target_subject') samplerrh.set_input('hemi', 'rh') wf.connect(bandpass, 'out_file', samplerrh, 'source_file') wf.connect(register, 'out_reg_file', samplerrh, 'reg_file') wf.connect(target, 'target_subject', samplerrh, 'target_subject') # Combine left and right hemisphere to text file combiner = MapNode(Function(input_names=['left', 'right'], output_names=['out_file'], function=combine_hemi, imports=imports), iterfield=['left', 'right'], name="combiner") wf.connect(samplerlh, 'out_file', combiner, 'left') wf.connect(samplerrh, 'out_file', combiner, 'right') # Compute registration between the subject's structural and MNI template # This is currently set to perform a very quick registration. 
However, the # registration can be made significantly more accurate for cortical # structures by increasing the number of iterations # All parameters are set using the example from: # https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh reg = Node(ants.Registration(), name='antsRegister') reg.inputs.output_transform_prefix = "output_" reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN'] reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)] # reg.inputs.number_of_iterations = ([[10000, 111110, 11110]]*3 + # [[100, 50, 30]]) reg.inputs.number_of_iterations = [[100, 100, 100]] * 3 + [[100, 20, 10]] reg.inputs.dimension = 3 reg.inputs.write_composite_transform = True reg.inputs.collapse_output_transforms = False reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']] reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]] reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]] reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]] reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]] reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01] reg.inputs.convergence_window_size = [20] * 3 + [5] reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]] reg.inputs.sigma_units = ['vox'] * 4 reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]]*2 + [[4, 2, 1]] reg.inputs.use_estimate_learning_rate_once = [True] * 4 reg.inputs.use_histogram_matching = [False] * 3 + [True] reg.inputs.output_warped_image = 'output_warped_image.nii.gz' reg.inputs.fixed_image = \ os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz') reg.inputs.num_threads = 4 reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'} # Convert T1.mgz to nifti for using with ANTS convert = Node(freesurfer.MRIConvert(out_type='niigz'), name='convert2nii') wf.connect(fssource, 'T1', convert, 'in_file') # Mask the T1.mgz file with the brain mask computed earlier maskT1 = Node(fsl.BinaryMaths(operation='mul'), name='maskT1') wf.connect(mask, 
'binary_file', maskT1, 'operand_file') wf.connect(convert, 'out_file', maskT1, 'in_file') wf.connect(maskT1, 'out_file', reg, 'moving_image') # Convert the BBRegister transformation to ANTS ITK format convert2itk = MapNode(C3dAffineTool(), iterfield=['transform_file', 'source_file'], name='convert2itk') convert2itk.inputs.fsl2ras = True convert2itk.inputs.itk_transform = True wf.connect(register, 'out_fsl_file', convert2itk, 'transform_file') if fieldmap_images: wf.connect(fieldmap, 'exf_mask', convert2itk, 'source_file') else: wf.connect(calc_median, 'median_file', convert2itk, 'source_file') wf.connect(convert, 'out_file', convert2itk, 'reference_file') # Concatenate the affine and ants transforms into a list pickfirst = lambda x: x[0] merge = MapNode(Merge(2), iterfield=['in2'], name='mergexfm') wf.connect(convert2itk, 'itk_transform', merge, 'in2') wf.connect(reg, ('composite_transform', pickfirst), merge, 'in1') # Apply the combined transform to the time series file sample2mni = MapNode(ants.ApplyTransforms(), iterfield=['input_image', 'transforms'], name='sample2mni') sample2mni.inputs.input_image_type = 3 sample2mni.inputs.interpolation = 'BSpline' sample2mni.inputs.invert_transform_flags = [False, False] sample2mni.inputs.reference_image = \ os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz') sample2mni.inputs.terminal_output = 'file' wf.connect(bandpass, 'out_file', sample2mni, 'input_image') wf.connect(merge, 'out', sample2mni, 'transforms') # Sample the time series file for each subcortical roi ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file', 'indices'], output_names=['out_file'], function=extract_subrois, imports=imports), iterfield=['timeseries_file'], name='getsubcortts') ts2txt.inputs.indices = [8] + range(10, 14) + [17, 18, 26, 47] +\ range(49, 55) + [58] ts2txt.inputs.label_file = \ os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_' '2mm.nii.gz')) wf.connect(sample2mni, 'output_image', 
ts2txt, 'timeseries_file') # Save the relevant data into an output directory datasink = Node(interface=DataSink(), name="datasink") datasink.inputs.base_directory = sink_directory datasink.inputs.container = subject_id datasink.inputs.substitutions = [('_target_subject_', '')] datasink.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2') wf.connect(despiker, 'out_file', datasink, 'resting.qa.despike') wf.connect(realign, 'par_file', datasink, 'resting.qa.motion') wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr') wf.connect(tsnr, 'mean_file', datasink, 'resting.qa.tsnr.@mean') wf.connect(tsnr, 'stddev_file', datasink, 'resting.qa.@tsnr_stddev') if fieldmap_images: wf.connect(fieldmap, 'exf_mask', datasink, 'resting.reference') else: wf.connect(calc_median, 'median_file', datasink, 'resting.reference') wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm') wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity') wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files') wf.connect(mask, 'binary_file', datasink, 'resting.mask') wf.connect(masktransform, 'transformed_file', datasink, 'resting.mask.@transformed_file') wf.connect(register, 'out_reg_file', datasink, 'resting.registration.bbreg') wf.connect(reg, ('composite_transform', pickfirst), datasink, 'resting.registration.ants') wf.connect(register, 'min_cost_file', datasink, 'resting.qa.bbreg.@mincost') wf.connect(smooth, 'smoothed_file', datasink, 'resting.timeseries.fullpass') wf.connect(bandpass, 'out_file', datasink, 'resting.timeseries.bandpassed') wf.connect(sample2mni, 'output_image', datasink, 'resting.timeseries.mni') wf.connect(createfilter1, 'out_files', datasink, 'resting.regress.@regressors') wf.connect(createfilter2, 'out_files', datasink, 'resting.regress.@compcorr') wf.connect(sampleaparc, 'summary_file', datasink, 'resting.parcellations.aparc') wf.connect(sampleaparc, 'avgwf_txt_file', datasink, 'resting.parcellations.aparc.@avgwf') 
wf.connect(ts2txt, 'out_file', datasink, 'resting.parcellations.grayo.@subcortical') datasink2 = Node(interface=DataSink(), name="datasink2") datasink2.inputs.base_directory = sink_directory datasink2.inputs.container = subject_id datasink2.inputs.substitutions = [('_target_subject_', '')] datasink2.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2') wf.connect(combiner, 'out_file', datasink2, 'resting.parcellations.grayo.@surface') return wf """ Creates the full workflow including getting information from dicom files """ def create_resting_workflow(args, name='resting'): TR = args.TR slice_times = args.slice_times slice_thickness = None if args.dicom_file: TR, slice_times, slice_thickness = get_info(args.dicom_file) slice_times = (np.array(slice_times)/1000.).tolist() if slice_thickness is None: from nibabel import load img = load(args.files[0]) slice_thickness = max(img.get_header().get_zooms()[:3]) kwargs = dict(files=[os.path.abspath(filename) for filename in args.files], subject_id=args.subject_id, n_vol=args.n_vol, despike=args.despike, TR=TR, slice_times=slice_times, slice_thickness=slice_thickness, lowpass_freq=args.lowpass_freq, highpass_freq=args.highpass_freq, sink_directory=os.path.abspath(args.sink), name=name) if args.field_maps: kwargs.update(**dict(fieldmap_images=args.field_maps, FM_TEdiff=args.TE_diff, FM_echo_spacing=args.echo_spacing, FM_sigma=args.sigma)) wf = create_workflow(**kwargs) return wf if __name__ == "__main__": from argparse import ArgumentParser parser = ArgumentParser(description=__doc__) parser.add_argument("-d", "--dicom_file", dest="dicom_file", help="an example dicom file from the resting series") parser.add_argument("-f", "--files", dest="files", nargs="+", help="4d nifti files for resting state", required=True) parser.add_argument("-s", "--subject_id", dest="subject_id", help="FreeSurfer subject id", required=True) parser.add_argument("-n", "--n_vol", dest="n_vol", default=0, type=int, help="Volumes to skip at the 
beginning") parser.add_argument("--despike", dest="despike", default=False, action="store_true", help="Use despiked data") parser.add_argument("--TR", dest="TR", default=None, help="TR if dicom not provided in seconds") parser.add_argument("--slice_times", dest="slice_times", nargs="+", type=float, help="Slice times in seconds") parser.add_argument("-l", "--lowpass_freq", dest="lowpass_freq", default=-1, help="Low pass frequency (Hz)") parser.add_argument("-u", "--highpass_freq", dest="highpass_freq", default=-1, help="High pass frequency (Hz)") parser.add_argument("-o", "--output_dir", dest="sink", help="Output directory base") parser.add_argument("-w", "--work_dir", dest="work_dir", help="Output directory base") parser.add_argument("-p", "--plugin", dest="plugin", default='Linear', help="Plugin to use") parser.add_argument("--plugin_args", dest="plugin_args", help="Plugin arguments") parser.add_argument("--field_maps", dest="field_maps", nargs="+", help="field map niftis") parser.add_argument("--fm_echospacing", dest="echo_spacing", type=float, help="field map echo spacing") parser.add_argument("--fm_TE_diff", dest='TE_diff', type=float, help="field map echo time difference") parser.add_argument("--fm_sigma", dest='sigma', type=float, help="field map sigma value") args = parser.parse_args() wf = create_resting_workflow(args) if args.work_dir: work_dir = os.path.abspath(args.work_dir) else: work_dir = os.getcwd() wf.base_dir = work_dir if args.plugin_args: wf.run(args.plugin, plugin_args=eval(args.plugin_args)) else: wf.run(args.plugin)
21,924
0
46
ef39c4661ae82fb6ae41df37716b835a60ad8d39
1,399
py
Python
tests/storage/dav/test_main.py
AlecPapierniak/vdirsyncer
27ebb0902bba66c2d26f892353bf6f05e849fe81
[ "BSD-3-Clause" ]
1
2020-09-29T14:42:08.000Z
2020-09-29T14:42:08.000Z
tests/storage/dav/test_main.py
AlecPapierniak/vdirsyncer
27ebb0902bba66c2d26f892353bf6f05e849fe81
[ "BSD-3-Clause" ]
null
null
null
tests/storage/dav/test_main.py
AlecPapierniak/vdirsyncer
27ebb0902bba66c2d26f892353bf6f05e849fe81
[ "BSD-3-Clause" ]
1
2020-09-30T08:36:36.000Z
2020-09-30T08:36:36.000Z
import pytest from vdirsyncer.storage.dav import _BAD_XML_CHARS from vdirsyncer.storage.dav import _merge_xml from vdirsyncer.storage.dav import _parse_xml @pytest.mark.parametrize('char', range(32))
32.534884
72
0.518227
import pytest from vdirsyncer.storage.dav import _BAD_XML_CHARS from vdirsyncer.storage.dav import _merge_xml from vdirsyncer.storage.dav import _parse_xml def test_xml_utilities(): x = _parse_xml(b'''<?xml version="1.0" encoding="UTF-8" ?> <D:multistatus xmlns:D="DAV:"> <D:response> <D:propstat> <D:status>HTTP/1.1 404 Not Found</D:status> <D:prop> <D:getcontenttype/> </D:prop> </D:propstat> <D:propstat> <D:prop> <D:resourcetype> <D:collection/> </D:resourcetype> </D:prop> </D:propstat> </D:response> </D:multistatus> ''') response = x.find('{DAV:}response') props = _merge_xml(response.findall('{DAV:}propstat/{DAV:}prop')) assert props.find('{DAV:}resourcetype/{DAV:}collection') is not None assert props.find('{DAV:}getcontenttype') is not None @pytest.mark.parametrize('char', range(32)) def test_xml_specialchars(char): x = _parse_xml('<?xml version="1.0" encoding="UTF-8" ?>' '<foo>ye{}s\r\n' 'hello</foo>'.format(chr(char)).encode('ascii')) if char in _BAD_XML_CHARS: assert x.text == 'yes\nhello'
1,150
0
45
7040b36361e9dc301170f0d358f3b21042e633a0
4,241
py
Python
prev_ob_models/exclude/GilraBhalla2015/channels/load_channels.py
fameshpatel/olfactorybulb
8d7a644b4560309ef177c0590ff73ed4c2432604
[ "MIT" ]
null
null
null
prev_ob_models/exclude/GilraBhalla2015/channels/load_channels.py
fameshpatel/olfactorybulb
8d7a644b4560309ef177c0590ff73ed4c2432604
[ "MIT" ]
null
null
null
prev_ob_models/exclude/GilraBhalla2015/channels/load_channels.py
fameshpatel/olfactorybulb
8d7a644b4560309ef177c0590ff73ed4c2432604
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import math sys.path.extend(["..","../channels/"]) from NaChannel import * from KFastChannel import * from KSlowChannel import * from CaLChannel import * from KAChannel import * ## use KCaChannel instead of KCaMPIChannel, in a non-MPI i.e. non-parallel run, ## to generate the KCaA.dat and KCaB.dat files. #from KCaChannel import * from KCaMPIChannel import * ## use KCaChannel_PG instead of KCaMPIChannel_PG, in a non-MPI i.e. non-parallel run, ## to generate the KCaA_PG.dat and KCaB_PG.dat files. #from KCaChannel_PG import * from KCaMPIChannel_PG import * from CaPool import * from KMChannel import * from CaTChannel import * from NaMitChannelMS import * from KAChannelMS import * from KDRChannelMS import * import moose from moose.neuroml import * FARADAY = 96154.0 # Coulombs # from cadecay.mod : 1/(2*96154.0) = 5.2e-6 which is the Book of Genesis / readcell value #FARADAY = 96485.3415 # Coulombs # from Wikipedia
46.097826
153
0.683801
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import math sys.path.extend(["..","../channels/"]) from NaChannel import * from KFastChannel import * from KSlowChannel import * from CaLChannel import * from KAChannel import * ## use KCaChannel instead of KCaMPIChannel, in a non-MPI i.e. non-parallel run, ## to generate the KCaA.dat and KCaB.dat files. #from KCaChannel import * from KCaMPIChannel import * ## use KCaChannel_PG instead of KCaMPIChannel_PG, in a non-MPI i.e. non-parallel run, ## to generate the KCaA_PG.dat and KCaB_PG.dat files. #from KCaChannel_PG import * from KCaMPIChannel_PG import * from CaPool import * from KMChannel import * from CaTChannel import * from NaMitChannelMS import * from KAChannelMS import * from KDRChannelMS import * import moose from moose.neuroml import * FARADAY = 96154.0 # Coulombs # from cadecay.mod : 1/(2*96154.0) = 5.2e-6 which is the Book of Genesis / readcell value #FARADAY = 96485.3415 # Coulombs # from Wikipedia def load_channels(): # Na channels in /library should be called *Na* not *na*. No other channel should have Na in its name. # I search later for Na channels by string *Na* NaChannel("/library/Na_mit_usb") KFastChannel("/library/K2_mit_usb") ## CAUTION: K2 is Kfast KSlowChannel("/library/K_mit_usb") ## CAUTION: K is Kslow CaLChannel("/library/LCa3_mit_usb") # L-type Ca channel (high threshold) KAChannel("/library/KA_bsg_yka") KCaChannel("/library/Kca_mit_usb") KCaChannel_PG("/library/Kca_mit_usb_pg") CaPool("/library/Ca_mit_conc") KMChannel("/library/KM_bsg_upi") CaTChannel("/library/TCa_rat_ag") # T-type Ca channel (low threshold) # NaMitChannelMS(sh, Vna, MOOSEpathname) # mit Na and gran Na channels are different in sh and ENa. 
NaMitChannelMS(10e-3, 50e-3, "/library/Na_mit_ms") NaMitChannelMS(0e-3, 50e-3, "/library/Na_mit_initialsegment_ms") # for Granule cell, maintaining the old name Na_rat_ms to avoid changing .p file (now invalid argument!), # even though this Na channel for granule cell is from Migliore and Shepherd 2008 NaMitChannelMS(15e-3, 60e-3, "/library/Na_rat_ms") KAChannelMS("/library/KA_ms") KDRChannelMS("/library/KDR_ms") #self.context.readNeuroML(../channels/IhChannel.xml,"/library/Ih_cb") CML = ChannelML({'temperature':CELSIUS}) CML.readChannelMLFromFile('../channels/Ih_cb.xml') CML.readChannelMLFromFile('../channels/TCa_d.xml') ## extras from neuroConstruct examples #CML.readChannelMLFromFile('../channels/CaHVA_Chan.xml') #CML.readChannelMLFromFile('../channels/CaL_Chan.xml') def connect_CaConc(compartment_list): context = moose.PyMooseBase.getContext() #### Connect the Ca pools and channels #### Ca channels should have an extra field called 'ion' defined and set in MOOSE. #### Ca dependent channels like KCa should have an extra field called 'ionDependency' defined and set in MOOSE. 
#### Am connecting these at the very end so that all channels and pools have been created for compartment in compartment_list: if context.exists(compartment.path+'/Ca_mit_conc'): # Ca Pool caconc = moose.CaConc(compartment.path+'/Ca_mit_conc') for child in compartment.getChildren(compartment.id): neutralwrap = moose.Neutral(child) if neutralwrap.className == 'HHChannel': channel = moose.HHChannel(child) ### If 'ion' field is not present, the Shell returns '0', cribs and prints out a message but it does not throw an exception if channel.getField('ion') == 'Ca': channel.connect('IkSrc',caconc,'current') #print 'Connected ',channel.path if neutralwrap.className == 'HHChannel2D': channel = moose.HHChannel2D(child) ### If 'ionDependency' field is not present, the Shell returns '0', cribs and prints out a message but it does not throw an exception if channel.getField('ionDependency') == 'Ca': caconc.connect('concSrc',channel,'concen') #print 'Connected ',channel.path
3,198
0
46
c4be31d36b3518697eb7d6f927237f318ef53b65
2,804
py
Python
ztest/api_1_0/phone.py
Kk0t/Flask-phone-administration
4acdcedabe7725c6ce2b57e32bd54551cdbb5297
[ "CC0-1.0" ]
null
null
null
ztest/api_1_0/phone.py
Kk0t/Flask-phone-administration
4acdcedabe7725c6ce2b57e32bd54551cdbb5297
[ "CC0-1.0" ]
null
null
null
ztest/api_1_0/phone.py
Kk0t/Flask-phone-administration
4acdcedabe7725c6ce2b57e32bd54551cdbb5297
[ "CC0-1.0" ]
null
null
null
# -*- coding: utf-8 -*- # @Time : 2021/9/10 14:23 # @Author : WuBingTai from ztest import db from ztest.models import Phone from ztest.utils.common import login_required from ztest.utils.response_code import RET from . import api from flask import jsonify, request, current_app # 测试机列表 @api.route("/phone/list") @login_required # 新增手机 @api.route("/addPhone", methods=["POST"]) @login_required
31.155556
101
0.64301
# -*- coding: utf-8 -*- # @Time : 2021/9/10 14:23 # @Author : WuBingTai from ztest import db from ztest.models import Phone from ztest.utils.common import login_required from ztest.utils.response_code import RET from . import api from flask import jsonify, request, current_app # 测试机列表 @api.route("/phone/list") @login_required def get_phone_list(): req = request.args page_size = int(req.get("page_size")) page = int(req.get("page")) params = list() if req.get('model'): params.append(Phone.model.like('%' + req.get('model') + '%')) if req.get('brand'): params.append(Phone.brand.like('%' + req.get('brand') + '%')) if req.get('os'): params.append(Phone.os.like('%' + req.get('os') + '%')) if req.get('pixel'): params.append(Phone.pixel.like('%' + req.get('pixel') + '%')) if req.get('is_borrow'): params.append(Phone.is_borrow == req.get('is_borrow')) if req.get('administrative_number'): params.append(Phone.administrative_number == req.get('administrative_number')) try: paginate = db.session.query(Phone).filter(*params).paginate(page, page_size, error_out=False) phones = paginate.items pages = paginate.pages # has_prev = paginate.has_prev phone_dict = [] for phone in phones: phone_dict.append(phone.to_phone_dict()) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg="查询数据失败") return jsonify(stat=RET.OK, msg="OK", data={"phones": phone_dict}, pages=pages) # 新增手机 @api.route("/addPhone", methods=["POST"]) @login_required def add_phone(): data_dict = request.form brand = data_dict.get("brand") model = data_dict.get("model") os = data_dict.get("os") pixel = data_dict.get("pixel") cpu = data_dict.get("cpu") ram = data_dict.get("ram") screen_size = data_dict.get("screen_size") administrative_number = data_dict.get("administrative_number") colour = data_dict.get("colour") if not all([brand, model, os, pixel, cpu, ram, screen_size, administrative_number, colour]): return jsonify(errno=RET.PARAMERR, errmsg='参数不全') phone = Phone() # 设置数据 
phone.brand = brand phone.model = model phone.os = os phone.cpu = cpu phone.pixel = pixel phone.ram = ram phone.screen_size = screen_size phone.administrative_number = administrative_number phone.colour = colour phone.is_borrow = "0" try: db.session.add(phone) db.session.commit() except Exception as e: db.session.rollback() current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg="保存数据失败") return jsonify(stat=RET.OK, msg="添加成功", data=phone.to_phone_dict())
2,406
0
44
92db780c10a1808558c4ea098be409cccef53599
4,145
py
Python
example_planets/epic203771098.py
lgbouma/radvel
4ba645468d4c742e04c5965a3c74180f1ed2353b
[ "MIT" ]
null
null
null
example_planets/epic203771098.py
lgbouma/radvel
4ba645468d4c742e04c5965a3c74180f1ed2353b
[ "MIT" ]
2
2019-03-13T18:37:41.000Z
2019-04-10T19:57:36.000Z
example_planets/epic203771098.py
lgbouma/radvel
4ba645468d4c742e04c5965a3c74180f1ed2353b
[ "MIT" ]
null
null
null
# Example Keplerian fit configuration file # Required packages for setup import os import pandas as pd import numpy as np import radvel # Define global planetary system and dataset parameters starname = 'epic203771098' nplanets = 2 # number of planets in the system instnames = ['j'] # list of instrument names. Can be whatever you like (no spaces) but should match 'tel' column in the input file. ntels = len(instnames) # number of instruments with unique velocity zero-points fitting_basis = 'per tc secosw sesinw k' # Fitting basis, see radvel.basis.BASIS_NAMES for available basis names bjd0 = 2454833. planet_letters = {1: 'b', 2:'c'} # Define prior centers (initial guesses) in a basis of your choice (need not be in the fitting basis) anybasis_params = radvel.Parameters(nplanets,basis='per tc e w k') # initialize Parameters object anybasis_params['per1'] = radvel.Parameter(value=20.885258) # period of 1st planet anybasis_params['tc1'] = radvel.Parameter(value=2072.79438) # time of inferior conjunction of 1st planet anybasis_params['e1'] = radvel.Parameter(value=0.01) # eccentricity of 'per tc secosw sesinw logk'1st planet anybasis_params['w1'] = radvel.Parameter(value=np.pi/2.) # argument of periastron of the star's orbit for 1st planet anybasis_params['k1'] = radvel.Parameter(value=10.0) # velocity semi-amplitude for 1st planet anybasis_params['per2'] = radvel.Parameter(value=42.363011) # same parameters for 2nd planet ... anybasis_params['tc2'] = radvel.Parameter(value=2082.62516) anybasis_params['e2'] = radvel.Parameter(value=0.01) anybasis_params['w2'] = radvel.Parameter(value=np.pi/2.) 
anybasis_params['k2'] = radvel.Parameter(value=10.0) anybasis_params['dvdt'] = radvel.Parameter(value=0.0) # slope anybasis_params['curv'] = radvel.Parameter(value=0.0) # curvature anybasis_params['gamma_j'] = radvel.Parameter(1.0) # velocity zero-point for hires_rj anybasis_params['jit_j'] = radvel.Parameter(value=2.6) # jitter for hires_rj # Convert input orbital parameters into the fitting basis params = anybasis_params.basis.to_any_basis(anybasis_params,fitting_basis) # Set the 'vary' attributes of each of the parameters in the fitting basis. A parameter's 'vary' attribute should # be set to False if you wish to hold it fixed during the fitting process. By default, all 'vary' parameters # are set to True. params['secosw1'].vary = False params['sesinw1'].vary = False params['secosw2'].vary = False params['sesinw2'].vary = False params['tc1'].vary = False params['per1'].vary = False params['tc2'].vary = False params['per2'].vary = False # Load radial velocity data, in this example the data is contained in an hdf file, # the resulting dataframe or must have 'time', 'mnvel', 'errvel', and 'tel' keys # the velocities are expected to be in m/s path = os.path.join(radvel.DATADIR, 'epic203771098.csv') data = pd.read_csv(path) data['time'] = data.t data['mnvel'] = data.vel data['errvel'] = data.errvel data['tel'] = 'j' # Define prior shapes and widths here. 
priors = [ radvel.prior.EccentricityPrior( nplanets ), # Keeps eccentricity < 1 radvel.prior.PositiveKPrior( nplanets ), # Keeps K > 0 radvel.prior.Gaussian('tc1', params['tc1'].value, 0.01), # Gaussian prior on tc1 with center at tc1 and width 0.01 days radvel.prior.Gaussian('per1', params['per1'].value, 0.01), radvel.prior.Gaussian('tc2', params['tc2'].value, 0.01), radvel.prior.Gaussian('per2', params['per2'].value, 0.01), radvel.prior.HardBounds('jit_j', 0.0, 15.0) ] # abscissa for slope and curvature terms (should be near mid-point of time baseline) time_base = np.mean([np.min(data.time), np.max(data.time)]) # optional argument that can contain stellar mass in solar units (mstar) and # uncertainty (mstar_err). If not set, mstar will be set to nan. stellar = dict(mstar=1.12, mstar_err= 0.05) # optional argument that can contain planet radii, # used for computing densities. Values should be given # in units of Earth radii planet = dict( rp1=5.68, rp_err1=0.56, rp2=7.82, rp_err2=0.72, )
45.549451
134
0.72304
# Example Keplerian fit configuration file # Required packages for setup import os import pandas as pd import numpy as np import radvel # Define global planetary system and dataset parameters starname = 'epic203771098' nplanets = 2 # number of planets in the system instnames = ['j'] # list of instrument names. Can be whatever you like (no spaces) but should match 'tel' column in the input file. ntels = len(instnames) # number of instruments with unique velocity zero-points fitting_basis = 'per tc secosw sesinw k' # Fitting basis, see radvel.basis.BASIS_NAMES for available basis names bjd0 = 2454833. planet_letters = {1: 'b', 2:'c'} # Define prior centers (initial guesses) in a basis of your choice (need not be in the fitting basis) anybasis_params = radvel.Parameters(nplanets,basis='per tc e w k') # initialize Parameters object anybasis_params['per1'] = radvel.Parameter(value=20.885258) # period of 1st planet anybasis_params['tc1'] = radvel.Parameter(value=2072.79438) # time of inferior conjunction of 1st planet anybasis_params['e1'] = radvel.Parameter(value=0.01) # eccentricity of 'per tc secosw sesinw logk'1st planet anybasis_params['w1'] = radvel.Parameter(value=np.pi/2.) # argument of periastron of the star's orbit for 1st planet anybasis_params['k1'] = radvel.Parameter(value=10.0) # velocity semi-amplitude for 1st planet anybasis_params['per2'] = radvel.Parameter(value=42.363011) # same parameters for 2nd planet ... anybasis_params['tc2'] = radvel.Parameter(value=2082.62516) anybasis_params['e2'] = radvel.Parameter(value=0.01) anybasis_params['w2'] = radvel.Parameter(value=np.pi/2.) 
anybasis_params['k2'] = radvel.Parameter(value=10.0) anybasis_params['dvdt'] = radvel.Parameter(value=0.0) # slope anybasis_params['curv'] = radvel.Parameter(value=0.0) # curvature anybasis_params['gamma_j'] = radvel.Parameter(1.0) # velocity zero-point for hires_rj anybasis_params['jit_j'] = radvel.Parameter(value=2.6) # jitter for hires_rj # Convert input orbital parameters into the fitting basis params = anybasis_params.basis.to_any_basis(anybasis_params,fitting_basis) # Set the 'vary' attributes of each of the parameters in the fitting basis. A parameter's 'vary' attribute should # be set to False if you wish to hold it fixed during the fitting process. By default, all 'vary' parameters # are set to True. params['secosw1'].vary = False params['sesinw1'].vary = False params['secosw2'].vary = False params['sesinw2'].vary = False params['tc1'].vary = False params['per1'].vary = False params['tc2'].vary = False params['per2'].vary = False # Load radial velocity data, in this example the data is contained in an hdf file, # the resulting dataframe or must have 'time', 'mnvel', 'errvel', and 'tel' keys # the velocities are expected to be in m/s path = os.path.join(radvel.DATADIR, 'epic203771098.csv') data = pd.read_csv(path) data['time'] = data.t data['mnvel'] = data.vel data['errvel'] = data.errvel data['tel'] = 'j' # Define prior shapes and widths here. 
priors = [ radvel.prior.EccentricityPrior( nplanets ), # Keeps eccentricity < 1 radvel.prior.PositiveKPrior( nplanets ), # Keeps K > 0 radvel.prior.Gaussian('tc1', params['tc1'].value, 0.01), # Gaussian prior on tc1 with center at tc1 and width 0.01 days radvel.prior.Gaussian('per1', params['per1'].value, 0.01), radvel.prior.Gaussian('tc2', params['tc2'].value, 0.01), radvel.prior.Gaussian('per2', params['per2'].value, 0.01), radvel.prior.HardBounds('jit_j', 0.0, 15.0) ] # abscissa for slope and curvature terms (should be near mid-point of time baseline) time_base = np.mean([np.min(data.time), np.max(data.time)]) # optional argument that can contain stellar mass in solar units (mstar) and # uncertainty (mstar_err). If not set, mstar will be set to nan. stellar = dict(mstar=1.12, mstar_err= 0.05) # optional argument that can contain planet radii, # used for computing densities. Values should be given # in units of Earth radii planet = dict( rp1=5.68, rp_err1=0.56, rp2=7.82, rp_err2=0.72, )
0
0
0
a2ed73df44165a0b6222692affca1d7a0ede3535
1,536
py
Python
tests/pipeline_test.py
isi-vista/adam
43542c4486af7533938e77e7191eae630541a891
[ "MIT" ]
8
2019-07-02T20:29:31.000Z
2022-01-03T18:20:41.000Z
tests/pipeline_test.py
Tubbz-alt/adam
91f392f2529a98cd50c095a18769ae4b55ce4292
[ "MIT" ]
1,011
2019-07-02T18:00:48.000Z
2022-03-25T14:56:32.000Z
tests/pipeline_test.py
Tubbz-alt/adam
91f392f2529a98cd50c095a18769ae4b55ce4292
[ "MIT" ]
4
2020-08-05T15:36:55.000Z
2022-01-12T17:16:28.000Z
from vistautils.iter_utils import only from adam.language import TokenSequenceLinguisticDescription from adam.learner import LearningExample, MemorizingLanguageLearner from adam.perception import ( BagOfFeaturesPerceptualRepresentationFrame, PerceptualRepresentation, )
34.909091
90
0.721354
from vistautils.iter_utils import only from adam.language import TokenSequenceLinguisticDescription from adam.learner import LearningExample, MemorizingLanguageLearner from adam.perception import ( BagOfFeaturesPerceptualRepresentationFrame, PerceptualRepresentation, ) def test_pipeline(): curriculum = [ LearningExample( perception=PerceptualRepresentation( [BagOfFeaturesPerceptualRepresentationFrame(("red", "truck"))] ), linguistic_description=TokenSequenceLinguisticDescription(("red", "truck")), ) ] learner: MemorizingLanguageLearner[ BagOfFeaturesPerceptualRepresentationFrame, TokenSequenceLinguisticDescription ] = MemorizingLanguageLearner() for example in curriculum: learner.observe(example) # shouldn't be able to describe "red" or "truck" alone assert not learner.describe( PerceptualRepresentation([BagOfFeaturesPerceptualRepresentationFrame(("red",))]) ) assert not learner.describe( PerceptualRepresentation([BagOfFeaturesPerceptualRepresentationFrame(("truck",))]) ) # but should be able to describe "red truck" red_truck_descriptions = learner.describe( PerceptualRepresentation( [BagOfFeaturesPerceptualRepresentationFrame(("red", "truck"))] ) ) assert len(red_truck_descriptions) == 1 red_truck_description = only(red_truck_descriptions) assert red_truck_description.as_token_sequence() == ("red", "truck")
1,233
0
23
75ea05792a1f9507e47a6c1f81b0ac0d04cc3a14
87
py
Python
programs/exceptions/syntax_error.py
dasari-mohana/100_page_python_intro
7b8aecb3be2f2abbaf168b068b1299a1f3b9b97e
[ "MIT" ]
163
2020-12-26T13:24:20.000Z
2022-03-24T14:30:42.000Z
programs/exceptions/syntax_error.py
pr0b3r7/100_page_python_intro
7b8aecb3be2f2abbaf168b068b1299a1f3b9b97e
[ "MIT" ]
1
2021-06-26T18:26:39.000Z
2021-06-26T18:27:26.000Z
programs/exceptions/syntax_error.py
pr0b3r7/100_page_python_intro
7b8aecb3be2f2abbaf168b068b1299a1f3b9b97e
[ "MIT" ]
33
2021-02-09T14:13:28.000Z
2022-03-28T21:23:14.000Z
print('hello') main)
8.7
21
0.517241
print('hello') def main(): num = 5 total = num + 09 print(total) main)
41
0
23
ac84d923e6756df2ed1d75aa35203c8338b59d07
1,155
py
Python
Proper/proper/prop_get_nyquistsampling.py
RupertDodkins/medis
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
[ "MIT" ]
1
2021-06-25T17:35:56.000Z
2021-06-25T17:35:56.000Z
Proper/proper/prop_get_nyquistsampling.py
RupertDodkins/medis
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
[ "MIT" ]
null
null
null
Proper/proper/prop_get_nyquistsampling.py
RupertDodkins/medis
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
[ "MIT" ]
2
2018-12-08T15:05:13.000Z
2019-08-08T17:28:24.000Z
# Copyright 2016, 2017 California Institute of Technology # Users must agree to abide by the restrictions listed in the # file "LegalStuff.txt" in the PROPER library directory. # # PROPER developed at Jet Propulsion Laboratory/California Inst. Technology # Original IDL version by John Krist # Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri def prop_get_nyquistsampling(wf, lamx = 0.0): """Funtion determines the Nyquist sampling interval for the current beam, which is focal_ratio * wavelength / 2. Parameters ---------- wf : obj Wavefront class object lamx : float Wavelength to use for computing sampling. By default, the current wavefront's wavelength is used. This parameter can be used when you want to know the Nyquist sampling for a wavelength other than for the current wavefront. Returns ------- float Nyquist sampling interval corresponding to the current wavefront """ if lamx != 0.: return wf.current_fratio * lamx / 2. else: return wf.current_fratio * wf.lamda / 2.
32.083333
77
0.674459
# Copyright 2016, 2017 California Institute of Technology # Users must agree to abide by the restrictions listed in the # file "LegalStuff.txt" in the PROPER library directory. # # PROPER developed at Jet Propulsion Laboratory/California Inst. Technology # Original IDL version by John Krist # Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri def prop_get_nyquistsampling(wf, lamx = 0.0): """Funtion determines the Nyquist sampling interval for the current beam, which is focal_ratio * wavelength / 2. Parameters ---------- wf : obj Wavefront class object lamx : float Wavelength to use for computing sampling. By default, the current wavefront's wavelength is used. This parameter can be used when you want to know the Nyquist sampling for a wavelength other than for the current wavefront. Returns ------- float Nyquist sampling interval corresponding to the current wavefront """ if lamx != 0.: return wf.current_fratio * lamx / 2. else: return wf.current_fratio * wf.lamda / 2.
0
0
0
689daa725033885ac10d168ae036c0ccfd02ab2f
1,071
py
Python
django_google_recaptcha/checks.py
GustavoBPereira/django-recaptcha
944c93b1412f1756d1818185bae88147dd16995d
[ "X11", "MIT" ]
null
null
null
django_google_recaptcha/checks.py
GustavoBPereira/django-recaptcha
944c93b1412f1756d1818185bae88147dd16995d
[ "X11", "MIT" ]
null
null
null
django_google_recaptcha/checks.py
GustavoBPereira/django-recaptcha
944c93b1412f1756d1818185bae88147dd16995d
[ "X11", "MIT" ]
null
null
null
from django.conf import settings from django.core import checks from django_google_recaptcha.constants import TEST_PRIVATE_KEY, TEST_PUBLIC_KEY
38.25
79
0.678805
from django.conf import settings from django.core import checks from django_google_recaptcha.constants import TEST_PRIVATE_KEY, TEST_PUBLIC_KEY def recaptcha_key_check(app_configs, **kwargs): errors = [] private_key = getattr( settings, "RECAPTCHA_PRIVATE_KEY", TEST_PRIVATE_KEY) public_key = getattr( settings, "RECAPTCHA_PUBLIC_KEY", TEST_PUBLIC_KEY) if private_key == TEST_PRIVATE_KEY or \ public_key == TEST_PUBLIC_KEY: errors.extend([checks.Error( "RECAPTCHA_PRIVATE_KEY or RECAPTCHA_PUBLIC_KEY is making use" " of the Google test keys and will not behave as expected in a" " production environment", hint="Update settings.RECAPTCHA_PRIVATE_KEY" " and/or settings.RECAPTCHA_PUBLIC_KEY. Alternatively this check" " can be ignored by adding" " `SILENCED_SYSTEM_CHECKS = ['captcha.recaptcha_test_key_error']`" " to your settings file.", id="captcha.recaptcha_test_key_error" )]) return errors
902
0
23
b67cb72fba5ffe6b256b7a9edc6f10aabfa0030c
926
py
Python
ricerca_app/migrations/0037_auto_20211202_0439.py
UniversitaDellaCalabria/Ricerca
b46157d3182d1c59cff4d36cc63b9e89f2f320c9
[ "Apache-2.0" ]
null
null
null
ricerca_app/migrations/0037_auto_20211202_0439.py
UniversitaDellaCalabria/Ricerca
b46157d3182d1c59cff4d36cc63b9e89f2f320c9
[ "Apache-2.0" ]
1
2020-08-03T15:12:46.000Z
2020-09-03T22:12:16.000Z
ricerca_app/migrations/0037_auto_20211202_0439.py
UniversitaDellaCalabria/Ricerca
b46157d3182d1c59cff4d36cc63b9e89f2f320c9
[ "Apache-2.0" ]
null
null
null
# Generated by Django 3.2.7 on 2021-12-02 04:39 from django.db import migrations, models import django.db.models.deletion
31.931034
164
0.647948
# Generated by Django 3.2.7 on 2021-12-02 04:39 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('ricerca_app', '0036_laboratoriodatibase_nome_file_logo'), ] operations = [ migrations.RemoveField( model_name='unitaorganizzativafunzioni', name='unita_organizzativa', ), migrations.AddField( model_name='unitaorganizzativa', name='cd_csa', field=models.CharField(blank=True, db_column='CD_CSA', max_length=40, null=True, unique=True), ), migrations.AddField( model_name='unitaorganizzativafunzioni', name='cd_csa', field=models.ForeignKey(blank=True, db_column='CD_CSA', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='ricerca_app.unitaorganizzativa'), ), ]
0
779
23
3b3d78c726e1c168161e921e693197654cc8d476
3,258
py
Python
africanus/util/code.py
JoshVStaden/codex-africanus
4a38994431d51510b1749fa0e4b8b6190b8b530f
[ "BSD-3-Clause" ]
13
2018-04-06T09:36:13.000Z
2021-04-13T13:11:00.000Z
africanus/util/code.py
JoshVStaden/codex-africanus
4a38994431d51510b1749fa0e4b8b6190b8b530f
[ "BSD-3-Clause" ]
153
2018-03-28T14:13:48.000Z
2022-02-03T07:49:17.000Z
africanus/util/code.py
JoshVStaden/codex-africanus
4a38994431d51510b1749fa0e4b8b6190b8b530f
[ "BSD-3-Clause" ]
14
2018-03-29T13:30:52.000Z
2021-06-12T02:56:55.000Z
# -*- coding: utf-8 -*- from functools import wraps try: from dask.utils import SerializableLock as Lock except ImportError: from threading import Lock def format_code(code): """ Formats some code with line numbers Parameters ---------- code : str Code Returns ------- str Code prefixed with line numbers """ lines = [''] lines.extend(["%-5d %s" % (i, l) for i, l in enumerate(code.split('\n'), 1)]) return '\n'.join(lines) class memoize_on_key(object): """ Memoize based on a key function supplied by the user. The key function should return a custom key for memoizing the decorated function, based on the arguments passed to it. In the following example, the arguments required to generate the `_generate_phase_delay_kernel` function are the types of the `lm`, `uvw` and `frequency` arrays, as well as the number of correlations, `ncorr`. The supplied ``key_fn`` produces a unique key based on these types and the number of correlations, which is used to cache the generated function. .. code-block:: python def key_fn(lm, uvw, frequency, ncorrs=4): ''' Produce a unique key for the arguments of _generate_phase_delay_kernel ''' return (lm.dtype, uvw.dtype, frequency.dtype, ncorrs) _code_template = jinja2.Template(''' #define ncorrs {{ncorrs}} __global__ void phase_delay( const {{lm_type}} * lm, const {{uvw_type}} * uvw, const {{freq_type}} * frequency, {{out_type}} * out) { ... } ''') _type_map = { np.float32: 'float', np.float64: 'double' } @memoize_on_key(key_fn) def _generate_phase_delay_kernel(lm, uvw, frequency, ncorrs=4): ''' Generate the phase delay kernel ''' out_dtype = np.result_type(lm.dtype, uvw.dtype, frequency.dtype) code = _code_template.render(lm_type=_type_map[lm.dtype], uvw_type=_type_map[uvw.dtype], freq_type=_type_map[frequency.dtype], ncorrs=ncorrs) return cp.RawKernel(code, "phase_delay") """
27.610169
78
0.556476
# -*- coding: utf-8 -*- from functools import wraps try: from dask.utils import SerializableLock as Lock except ImportError: from threading import Lock class SingletonMixin(object): __singleton_lock = Lock() __singleton_instance = None @classmethod def instance(cls): if not cls.__singleton_instance: with cls.__singleton_lock: if not cls.__singleton_instance: cls.__singleton_instance = cls() return cls.__singleton_instance def format_code(code): """ Formats some code with line numbers Parameters ---------- code : str Code Returns ------- str Code prefixed with line numbers """ lines = [''] lines.extend(["%-5d %s" % (i, l) for i, l in enumerate(code.split('\n'), 1)]) return '\n'.join(lines) class memoize_on_key(object): """ Memoize based on a key function supplied by the user. The key function should return a custom key for memoizing the decorated function, based on the arguments passed to it. In the following example, the arguments required to generate the `_generate_phase_delay_kernel` function are the types of the `lm`, `uvw` and `frequency` arrays, as well as the number of correlations, `ncorr`. The supplied ``key_fn`` produces a unique key based on these types and the number of correlations, which is used to cache the generated function. .. code-block:: python def key_fn(lm, uvw, frequency, ncorrs=4): ''' Produce a unique key for the arguments of _generate_phase_delay_kernel ''' return (lm.dtype, uvw.dtype, frequency.dtype, ncorrs) _code_template = jinja2.Template(''' #define ncorrs {{ncorrs}} __global__ void phase_delay( const {{lm_type}} * lm, const {{uvw_type}} * uvw, const {{freq_type}} * frequency, {{out_type}} * out) { ... 
} ''') _type_map = { np.float32: 'float', np.float64: 'double' } @memoize_on_key(key_fn) def _generate_phase_delay_kernel(lm, uvw, frequency, ncorrs=4): ''' Generate the phase delay kernel ''' out_dtype = np.result_type(lm.dtype, uvw.dtype, frequency.dtype) code = _code_template.render(lm_type=_type_map[lm.dtype], uvw_type=_type_map[uvw.dtype], freq_type=_type_map[frequency.dtype], ncorrs=ncorrs) return cp.RawKernel(code, "phase_delay") """ def __init__(self, key_fn): self._key_fn = key_fn self._lock = Lock() self._cache = {} def __call__(self, fn): @wraps(fn) def wrapper(*args, **kwargs): key = self._key_fn(*args, **kwargs) with self._lock: try: return self._cache[key] except KeyError: self._cache[key] = entry = fn(*args, **kwargs) return entry return wrapper
668
114
77
983d10efe6acf6f4dbb6c708e453e20c0b3674bc
112
py
Python
Conditions/Climates/Hail.py
saisua/RAF-Game
79b2b618aa18c31a40c080865b58fac02c1cac68
[ "MIT" ]
null
null
null
Conditions/Climates/Hail.py
saisua/RAF-Game
79b2b618aa18c31a40c080865b58fac02c1cac68
[ "MIT" ]
null
null
null
Conditions/Climates/Hail.py
saisua/RAF-Game
79b2b618aa18c31a40c080865b58fac02c1cac68
[ "MIT" ]
null
null
null
from .Climate import Climate
16
28
0.625
from .Climate import Climate class Hail(Climate): name:str="Hail" def __init__(self): pass
11
50
23
b9c81a7b70f41df791135857217abd8e545c6539
665
py
Python
tests/test_configuration.py
ta4tsering/pyrrha-bo
d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17
[ "MIT" ]
16
2018-11-16T13:48:20.000Z
2020-11-13T21:28:06.000Z
tests/test_configuration.py
ta4tsering/pyrrha-bo
d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17
[ "MIT" ]
179
2018-11-16T12:43:05.000Z
2022-03-31T08:52:22.000Z
tests/test_configuration.py
ta4tsering/pyrrha-bo
d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17
[ "MIT" ]
21
2019-02-17T15:56:29.000Z
2022-03-28T09:27:57.000Z
from unittest import TestCase from glob import glob
31.666667
70
0.544361
from unittest import TestCase from glob import glob class CheckConfigurations(TestCase): def test_unique_values(self): """ Check that configuration do not contain duplicate lines""" for config in glob("app/configurations/langs/**/*.txt"): with open(config) as f: content = f.read() if config.endswith("POS.txt"): lines = content.split(",") else: lines = content.split("\n") print("Testing " + config) self.assertEqual( len(set(lines)), len(lines), "There should be no duplicate in lemma" )
0
589
23
73cf6863bbca4b6719de97cc9d81ce4bd5459cf8
746
py
Python
2018/11/part1.py
mihaip/adventofcode
3725668595bfcf619fe6c97d12e2f14b42e3f0cb
[ "Apache-2.0" ]
null
null
null
2018/11/part1.py
mihaip/adventofcode
3725668595bfcf619fe6c97d12e2f14b42e3f0cb
[ "Apache-2.0" ]
null
null
null
2018/11/part1.py
mihaip/adventofcode
3725668595bfcf619fe6c97d12e2f14b42e3f0cb
[ "Apache-2.0" ]
null
null
null
#!/usr/local/bin/python3 input = 1133 power_levels = [] for x in range(1, 301): row = [] for y in range(1, 301): row.append(power_level(x, y)) power_levels.append(row) best_square = None best_power = None for x in range(1, 298): for y in range(1, 298): sum = 0 for i in range(0, 3): for j in range(0, 3): sum += power_levels[x + i - 1][y + j - 1] if best_power is None or sum > best_power: best_power = sum best_square = [x, y] print("best square: %d, %d with power %d" % (best_square[0], best_square[1], best_power))
21.314286
89
0.647453
#!/usr/local/bin/python3 input = 1133 def power_level(x, y): rack_id = x + 10 power_level = rack_id * y power_level += input power_level *= rack_id power_level = int(power_level / 100) % 10 power_level -= 5 return power_level power_levels = [] for x in range(1, 301): row = [] for y in range(1, 301): row.append(power_level(x, y)) power_levels.append(row) best_square = None best_power = None for x in range(1, 298): for y in range(1, 298): sum = 0 for i in range(0, 3): for j in range(0, 3): sum += power_levels[x + i - 1][y + j - 1] if best_power is None or sum > best_power: best_power = sum best_square = [x, y] print("best square: %d, %d with power %d" % (best_square[0], best_square[1], best_power))
173
0
23
6d19666ed61331bdd9b8526005047ebf3a9aa098
2,645
py
Python
plugins/collage.py
loktacar/wallpapermaker
3feb075c30b138decca8f6d57566ab4258d8ac75
[ "MIT" ]
1
2021-12-07T11:03:47.000Z
2021-12-07T11:03:47.000Z
plugins/collage.py
loktacar/wallpapermaker
3feb075c30b138decca8f6d57566ab4258d8ac75
[ "MIT" ]
null
null
null
plugins/collage.py
loktacar/wallpapermaker
3feb075c30b138decca8f6d57566ab4258d8ac75
[ "MIT" ]
1
2021-12-07T11:06:10.000Z
2021-12-07T11:06:10.000Z
### Base class for Collage plugins ### import math import logging import pygame from plugin import Plugin class Collage(Plugin): """ Base class for collage plugins See simple_resize.py or recursive_split.py for example implementation of a plugin """ @staticmethod def generate(self, size, wallpaper_queue): """ Generates the wallpaper collage """ raise NotImplementedError() def _resize_wallpaper(self, wallpaper, size): """ Resizes wallpaper to set size, conserves aspect ratio Returns crop co-ordinates and scaled image """ # find ratios width_ratio = 1.0*size[0]/wallpaper.get_width() height_ratio = 1.0*size[1]/wallpaper.get_height() # resize to fit width if width_ratio > height_ratio: new_size = (size[0], int(math.ceil(wallpaper.get_height()*width_ratio))) # resize to fit height else: new_size = (int(math.ceil(wallpaper.get_width()*height_ratio)), size[1]) # scale wallpaper according to new_size try: wallpaper = pygame.transform.smoothscale(wallpaper, new_size) except ValueError: logging.debug('bit-depth error, using crappy scaling') wallpaper = pygame.transform.scale(wallpaper, new_size) # Height or width might be too large crop = (0, 0) if wallpaper.get_width() > size[0]+1: overflow = wallpaper.get_width() - size[0] margin = int(overflow / 2) crop = (margin, 0) elif wallpaper.get_height() > size[1]+1: overflow = wallpaper.get_height() - size[1] margin = int(overflow / 2) crop = (0, margin) return crop, wallpaper
30.056818
93
0.593573
### Base class for Collage plugins ### import math import logging import pygame from plugin import Plugin class Collage(Plugin): """ Base class for collage plugins See simple_resize.py or recursive_split.py for example implementation of a plugin """ def __init__(self, config): super(Collage, self).__init__() self.config = config self.wallpaper_source = None @staticmethod def get_instances(plugins, config): collages = config['collage-plugins'] if not collages == 'all': collages = collages.split(',') collages = [c.strip() for c in collages] instances = [] for plugin in plugins: if plugin.name in collages or collages == 'all': instances.append(plugin(config)) if type(collages) is list: collages.remove(plugin.name) for collage_exception in collages: logging.warning('Collage %s not found' % collage_exception) return instances def set_source(self, source): self.wallpaper_source = source def generate(self, size, wallpaper_queue): """ Generates the wallpaper collage """ raise NotImplementedError() def _resize_wallpaper(self, wallpaper, size): """ Resizes wallpaper to set size, conserves aspect ratio Returns crop co-ordinates and scaled image """ # find ratios width_ratio = 1.0*size[0]/wallpaper.get_width() height_ratio = 1.0*size[1]/wallpaper.get_height() # resize to fit width if width_ratio > height_ratio: new_size = (size[0], int(math.ceil(wallpaper.get_height()*width_ratio))) # resize to fit height else: new_size = (int(math.ceil(wallpaper.get_width()*height_ratio)), size[1]) # scale wallpaper according to new_size try: wallpaper = pygame.transform.smoothscale(wallpaper, new_size) except ValueError: logging.debug('bit-depth error, using crappy scaling') wallpaper = pygame.transform.scale(wallpaper, new_size) # Height or width might be too large crop = (0, 0) if wallpaper.get_width() > size[0]+1: overflow = wallpaper.get_width() - size[0] margin = int(overflow / 2) crop = (margin, 0) elif wallpaper.get_height() > size[1]+1: overflow = wallpaper.get_height() - size[1] 
margin = int(overflow / 2) crop = (0, margin) return crop, wallpaper
749
0
80
53a804a1567d561668b0c64f534904bfd9e880d3
570
py
Python
templatetricks/override_autoescaped.py
jinalharia/flask-snippets
9871815f32f6eaf6d27e8be34b0a0b22172e203f
[ "BSD-3-Clause" ]
43
2015-10-23T06:36:56.000Z
2022-02-23T09:22:30.000Z
templatetricks/override_autoescaped.py
jinalharia/flask-snippets
9871815f32f6eaf6d27e8be34b0a0b22172e203f
[ "BSD-3-Clause" ]
null
null
null
templatetricks/override_autoescaped.py
jinalharia/flask-snippets
9871815f32f6eaf6d27e8be34b0a0b22172e203f
[ "BSD-3-Clause" ]
15
2015-09-27T12:39:02.000Z
2021-07-17T16:16:15.000Z
# -*- coding: utf-8 -*- """ templatetricks.override_autoescaped ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Override which templates are autoescaped http://flask.pocoo.org/snippets/41/ """ import os import sys sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from flask import Flask app = JHtmlEscapingFlask(__name__)
23.75
79
0.664912
# -*- coding: utf-8 -*- """ templatetricks.override_autoescaped ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Override which templates are autoescaped http://flask.pocoo.org/snippets/41/ """ import os import sys sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from flask import Flask class JHtmlEscapingFlask(Flask): def select_jinja_autoescape(self, filename): if filename.endswith('.jhtml'): return True return Flask.select_jinja_autoescape(self, filename) app = JHtmlEscapingFlask(__name__)
148
11
50
a29afba16dbdda98ece1a57a82a914b4c0f3b1bf
5,062
py
Python
ffx_render.py
tinkerbeast/ffx-ai
b4d0e6397d8c18c2d5a4f423f8f6866a06c6d2b4
[ "Apache-2.0" ]
null
null
null
ffx_render.py
tinkerbeast/ffx-ai
b4d0e6397d8c18c2d5a4f423f8f6866a06c6d2b4
[ "Apache-2.0" ]
null
null
null
ffx_render.py
tinkerbeast/ffx-ai
b4d0e6397d8c18c2d5a4f423f8f6866a06c6d2b4
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import logging import os import pandas as pd import shutil import subprocess import tempfile from multiprocessing.pool import ThreadPool import bg_convert from thirdparty_xentax import phyre if __name__ == '__main__': # get available models chr_path = '/media/rishin/20ACFF83ACFF5230/Users/rishin/Desktop/ffxx/ffx_data/gamedata/ps3data/chr' bg_path = '/home/rishin/workspace/ffx-ai/assets/' dist_path = '/home/rishin/workspace/ffx-ai/dist' map_path = '/home/rishin/workspace/ffx-ai/enemy_map.txt' # angles_front = [15, 30, 45, 60, 75, 345, 330, 315, 300, 285] # [15, 30, 45, 60, 75, -15, -30, -45, -60, -75] angles_back = [105, 120, 135, 150, 165, 255, 240, 225, 210, 195] # [105, 120, 135, 150, 165, -105, -120, -135, -150, -165] all_angles = angles_front + angles_back # model_map = make_model_map(map_path) model_filter = model_map.keys() #print(model_filter) # model_data = model_gather(chr_path, ['mon']) model_data_filtered = [m for m in model_data if m['name'] in model_filter] tmp_dir = model_extract(model_data_filtered) #print(model_data) print(tmp_dir) # bg_tmp_dir = tempfile.mkdtemp() bg_data = make_bgs(bg_path, bg_tmp_dir) #print(bg_data) print(bg_tmp_dir) # parrallelise jobs tp = ThreadPool(12) # render models xy_map = {'id': [], 'cls': []} xxx = 0 for angle, bg, model in get_triplet(all_angles, bg_data, model_data_filtered): model_name = model['name'] obj_path = os.path.join(tmp_dir, model_name + '.obj') texture_path = os.path.join(tmp_dir, model_name + '.dds') bg_alt_path = os.path.join(bg_tmp_dir, bg) out_path = os.path.join(dist_path, '{}_{}_{}.png'.format(model_name, bg, angle)) class_suffix = '_front' if angle in angles_front else '_back' # xy_map['id'].append(out_path) xy_map['cls'].append(model_map[model_name] + class_suffix) # run subprocess tp.apply_async(render_job, (model_name, obj_path, texture_path, bg_alt_path, out_path, str(angle))) tp.close() tp.join() # df = pd.DataFrame(xy_map) df.to_csv(dist_path + '/img_map.csv') # cleanup 
shutil.rmtree(tmp_dir) shutil.rmtree(bg_tmp_dir)
38.06015
136
0.615765
# -*- coding: utf-8 -*- import logging import os import pandas as pd import shutil import subprocess import tempfile from multiprocessing.pool import ThreadPool import bg_convert from thirdparty_xentax import phyre def model_gather(chr_path, filter_dirs=[]): model_data = [] sub_dirs = os.listdir(chr_path) if len(filter_dirs) == 0 else filter_dirs for sub_dir in sub_dirs: sub_dir_path = os.path.join(chr_path, sub_dir) models = os.listdir(sub_dir_path) for model in models: mesh_file = os.path.join(sub_dir_path, model, 'mdl', 'd3d11', model + '.dae.phyre') texture_file = os.path.join(sub_dir_path, model, 'tex', 'd3d11', model + '.dds.phyre') if os.path.exists(mesh_file) and os.path.exists(texture_file): model_data.append({'name': sub_dir + '_' + model, 'mesh': mesh_file,'texture': texture_file}) else: logging.warning('Files not present: ' + mesh_file + ' or ' + texture_file) return model_data def model_extract(model_data): tmp_dir = tempfile.mkdtemp() logging.info('Extracting models into directory, dir=' + tmp_dir) for model in model_data: out_mesh = os.path.join(tmp_dir, model['name'] + '.obj') out_texture = os.path.join(tmp_dir, model['name'] + '.dds') phyre.extractMesh(model['mesh'], out_mesh, debug=False) phyre.extractDDS(model['texture'], out_texture, debug=False) return tmp_dir def render_job(model_name, obj_path, texture_path, bg_path, dist_path, angle): print('{} {} {}'.format(angle, bg_path, model_name)) instr ='\n'.join([model_name, obj_path, texture_path, bg_path, dist_path, angle]).encode() out = subprocess.check_output("blender --background --python blender_render.py", stderr=subprocess.STDOUT, input=instr, shell=True) logging.info('Blender out=' + str(out)) #print(out.decode('utf-8')) def make_bgs(bg_path, tmp_dir): files = os.listdir(bg_path) outs = [] for file in files: src_file = os.path.join(bg_path, file) outs.append(bg_convert.bg_convert(src_file, tmp_dir)) return outs def make_model_map(map_file): model_map = {} with open(map_file) as fd: lines = 
fd.readlines() # for line in lines: if line.startswith('# '): classname = line.split(' ')[1].rstrip('\n') elif line.startswith('#'): pass else: tokens = line.split('\t') model_name = 'mon_m{:03d}'.format(int(tokens[0])) model_map[model_name] = classname #model_map['web'].append(tokens[1]) return model_map def get_triplet(angles, bgs, models): for a in angles: for b in bgs: for m in models: yield (a, b, m) if __name__ == '__main__': # get available models chr_path = '/media/rishin/20ACFF83ACFF5230/Users/rishin/Desktop/ffxx/ffx_data/gamedata/ps3data/chr' bg_path = '/home/rishin/workspace/ffx-ai/assets/' dist_path = '/home/rishin/workspace/ffx-ai/dist' map_path = '/home/rishin/workspace/ffx-ai/enemy_map.txt' # angles_front = [15, 30, 45, 60, 75, 345, 330, 315, 300, 285] # [15, 30, 45, 60, 75, -15, -30, -45, -60, -75] angles_back = [105, 120, 135, 150, 165, 255, 240, 225, 210, 195] # [105, 120, 135, 150, 165, -105, -120, -135, -150, -165] all_angles = angles_front + angles_back # model_map = make_model_map(map_path) model_filter = model_map.keys() #print(model_filter) # model_data = model_gather(chr_path, ['mon']) model_data_filtered = [m for m in model_data if m['name'] in model_filter] tmp_dir = model_extract(model_data_filtered) #print(model_data) print(tmp_dir) # bg_tmp_dir = tempfile.mkdtemp() bg_data = make_bgs(bg_path, bg_tmp_dir) #print(bg_data) print(bg_tmp_dir) # parrallelise jobs tp = ThreadPool(12) # render models xy_map = {'id': [], 'cls': []} xxx = 0 for angle, bg, model in get_triplet(all_angles, bg_data, model_data_filtered): model_name = model['name'] obj_path = os.path.join(tmp_dir, model_name + '.obj') texture_path = os.path.join(tmp_dir, model_name + '.dds') bg_alt_path = os.path.join(bg_tmp_dir, bg) out_path = os.path.join(dist_path, '{}_{}_{}.png'.format(model_name, bg, angle)) class_suffix = '_front' if angle in angles_front else '_back' # xy_map['id'].append(out_path) xy_map['cls'].append(model_map[model_name] + class_suffix) # run subprocess 
tp.apply_async(render_job, (model_name, obj_path, texture_path, bg_alt_path, out_path, str(angle))) tp.close() tp.join() # df = pd.DataFrame(xy_map) df.to_csv(dist_path + '/img_map.csv') # cleanup shutil.rmtree(tmp_dir) shutil.rmtree(bg_tmp_dir)
2,521
0
149
6ef26fa3bcf2699bb73789962e23ccdf5714ee22
447
py
Python
backend/cart/urls.py
harizMunawar/La-Virtuele
051d11a281620b36638b6be50e71d3c893ce1568
[ "MIT" ]
2
2021-02-23T16:30:27.000Z
2021-03-21T08:12:39.000Z
backend/cart/urls.py
harizMunawar/La-Virtuele
051d11a281620b36638b6be50e71d3c893ce1568
[ "MIT" ]
9
2021-02-23T09:05:32.000Z
2021-07-02T11:41:55.000Z
backend/cart/urls.py
harizMunawar/La-Virtuele
051d11a281620b36638b6be50e71d3c893ce1568
[ "MIT" ]
1
2021-02-23T07:42:17.000Z
2021-02-23T07:42:17.000Z
from django.urls import path from cart.views import CartItem, Carts, Checkout, ToggleCartItem app_name = 'cart' urlpatterns = [ path('carts/', Carts.as_view(),name='cart-list'), path('carts/items/<slug:slug>/<size>/', CartItem.as_view(), name='add-remove-cart-item'), path('carts/toggle/items/<slug:slug>/<size>/', ToggleCartItem.as_view(), name='toggle-cart-item'), path('carts/checkout/', Checkout.as_view(), name='checkout'), ]
44.7
102
0.695749
from django.urls import path from cart.views import CartItem, Carts, Checkout, ToggleCartItem app_name = 'cart' urlpatterns = [ path('carts/', Carts.as_view(),name='cart-list'), path('carts/items/<slug:slug>/<size>/', CartItem.as_view(), name='add-remove-cart-item'), path('carts/toggle/items/<slug:slug>/<size>/', ToggleCartItem.as_view(), name='toggle-cart-item'), path('carts/checkout/', Checkout.as_view(), name='checkout'), ]
0
0
0
0dbb59bbb43a54238ffdc076caee1cbf2ff74581
33,165
py
Python
hummingbird/ml/_container.py
hannahaih/hummingbird
b8ec670b3c90ec7e87d3ae4a2b268075bd5eae65
[ "MIT" ]
1
2020-12-29T12:51:10.000Z
2020-12-29T12:51:10.000Z
hummingbird/ml/_container.py
hannahaih/hummingbird
b8ec670b3c90ec7e87d3ae4a2b268075bd5eae65
[ "MIT" ]
null
null
null
hummingbird/ml/_container.py
hannahaih/hummingbird
b8ec670b3c90ec7e87d3ae4a2b268075bd5eae65
[ "MIT" ]
null
null
null
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """ All custom model containers are listed here. In Hummingbird we use two types of containers: - containers for input models (e.g., `CommonONNXModelContainer`) used to represent input models in a unified way as DAG of containers - containers for output models (e.g., `SklearnContainer`) used to surface output models as unified API format. """ from abc import ABC, abstractmethod import dill import os import numpy as np from onnxconverter_common.container import CommonSklearnModelContainer import torch from hummingbird.ml.operator_converters import constants from hummingbird.ml._utils import onnx_runtime_installed, tvm_installed, pandas_installed, get_device, from_strings_to_ints if pandas_installed(): from pandas import DataFrame else: DataFrame = None # Input containers class CommonONNXModelContainer(CommonSklearnModelContainer): """ Common container for input ONNX operators. """ class CommonSparkMLModelContainer(CommonSklearnModelContainer): """ Common container for input Spark-ML operators. """ # Output containers. # Abstract containers enabling the Sklearn API. class SklearnContainerTransformer(SklearnContainer): """ Abstract container mirroring Sklearn transformers API. """ @abstractmethod def _transform(self, *input): """ This method contains container-specific implementation of transform. """ pass def transform(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On data transformers it returns transformed output data """ return self._run(self._transform, *inputs) class SklearnContainerRegression(SklearnContainer): """ Abstract container mirroring Sklearn regressors API. 
""" @abstractmethod def _predict(self, *input): """ This method contains container-specific implementation of predict. """ pass def predict(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On regression returns the predicted values. On classification tasks returns the predicted class labels for the input data. On anomaly detection (e.g. isolation forest) returns the predicted classes (-1 or 1). """ return self._run(self._predict, *inputs) class SklearnContainerClassification(SklearnContainerRegression): """ Container mirroring Sklearn classifiers API. """ @abstractmethod def _predict_proba(self, *input): """ This method contains container-specific implementation of predict_proba. """ pass def predict_proba(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On classification tasks returns the probability estimates. """ return self._run(self._predict_proba, *inputs) class SklearnContainerAnomalyDetection(SklearnContainerRegression): """ Container mirroring Sklearn anomaly detection API. """ @abstractmethod def _decision_function(self, *inputs): """ This method contains container-specific implementation of decision_function. """ pass def decision_function(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On anomaly detection (e.g. isolation forest) returns the decision function scores. """ scores = self._run(self._decision_function, *inputs) # Backward compatibility for sklearn <= 0.21 if constants.IFOREST_THRESHOLD in self._extra_config: scores += self._extra_config[constants.IFOREST_THRESHOLD] return scores def score_samples(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On anomaly detection (e.g. isolation forest) returns the decision_function score plus offset_ """ return self.decision_function(*inputs) + self._extra_config[constants.OFFSET] # PyTorch containers. 
class PyTorchSklearnContainer(SklearnContainer): """ Base container for PyTorch models. """ @staticmethod def load(location): """ Method used to load a container from the file system. Args: location: The location on the file system where to load the model. Returns: The loaded model. """ assert os.path.exists(location), "Model location {} does not exist.".format(location) container = None if os.path.isdir(location): # This is a torch.jit model model = torch.jit.load(os.path.join(location, constants.SAVE_LOAD_TORCH_JIT_PATH)) with open(os.path.join(location, "container.pkl"), "rb") as file: container = dill.load(file) container._model = model else: # This is a pytorch model with open(location, "rb") as file: container = dill.load(file) # Need to set the number of threads to use as set in the original container. if container._n_threads is not None: if torch.get_num_interop_threads() != 1: torch.set_num_interop_threads(1) torch.set_num_threads(container._n_threads) return container class PyTorchSklearnContainerTransformer(SklearnContainerTransformer, PyTorchSklearnContainer): """ Container for PyTorch models mirroring Sklearn transformers API. """ class PyTorchSklearnContainerRegression(SklearnContainerRegression, PyTorchSklearnContainer): """ Container for PyTorch models mirroring Sklearn regressor API. """ class PyTorchSklearnContainerClassification(SklearnContainerClassification, PyTorchSklearnContainerRegression): """ Container for PyTorch models mirroring Sklearn classifiers API. """ class PyTorchSklearnContainerAnomalyDetection(PyTorchSklearnContainerRegression, SklearnContainerAnomalyDetection): """ Container for PyTorch models mirroning the Sklearn anomaly detection API. """ # TorchScript containers. def _torchscript_wrapper(device, function, *inputs, extra_config={}): """ This function contains the code to enable predictions over torchscript models. It is used to translates inputs in the proper torch format. 
""" inputs = [*inputs] with torch.no_grad(): if type(inputs) == DataFrame and DataFrame is not None: # Split the dataframe into column ndarrays inputs = inputs[0] input_names = list(inputs.columns) splits = [inputs[input_names[idx]] for idx in range(len(input_names))] splits = [df.to_numpy().reshape(-1, 1) for df in splits] inputs = tuple(splits) # Maps data inputs to the expected type and device. for i in range(len(inputs)): if type(inputs[i]) is list: inputs[i] = np.array(inputs[i]) if type(inputs[i]) is np.ndarray: # Convert string arrays into int32. if inputs[i].dtype.kind in constants.SUPPORTED_STRING_TYPES: assert constants.MAX_STRING_LENGTH in extra_config inputs[i] = from_strings_to_ints(inputs[i], extra_config[constants.MAX_STRING_LENGTH]) if inputs[i].dtype == np.float64: # We convert double precision arrays into single precision. Sklearn does the same. inputs[i] = inputs[i].astype("float32") inputs[i] = torch.from_numpy(inputs[i]) elif type(inputs[i]) is not torch.Tensor: raise RuntimeError("Inputer tensor {} of not supported type {}".format(i, type(inputs[i]))) if device.type != "cpu" and device is not None: inputs[i] = inputs[i].to(device) return function(*inputs) class TorchScriptSklearnContainerTransformer(PyTorchSklearnContainerTransformer): """ Container for TorchScript models mirroring Sklearn transformers API. """ class TorchScriptSklearnContainerRegression(PyTorchSklearnContainerRegression): """ Container for TorchScript models mirroring Sklearn regressors API. """ class TorchScriptSklearnContainerClassification(PyTorchSklearnContainerClassification): """ Container for TorchScript models mirroring Sklearn classifiers API. """ class TorchScriptSklearnContainerAnomalyDetection(PyTorchSklearnContainerAnomalyDetection): """ Container for TorchScript models mirroring Sklearn anomaly detection API. """ # ONNX containers. class ONNXSklearnContainer(SklearnContainer): """ Base container for ONNX models. The container allows to mirror the Sklearn API. 
""" @staticmethod def load(location): """ Method used to load a container from the file system. Args: location: The location on the file system where to load the model. Returns: The loaded model. """ assert os.path.exists(location), "Model location {} does not exist.".format(location) assert onnx_runtime_installed import onnx import onnxruntime as ort container = None model = onnx.load(os.path.join(location, constants.SAVE_LOAD_ONNX_PATH)) with open(os.path.join(location, constants.SAVE_LOAD_CONTAINER_PATH), "rb") as file: container = dill.load(file) container._model = model sess_options = ort.SessionOptions() if container._n_threads is not None: # Need to set the number of threads to use as set in the original container. sess_options.intra_op_num_threads = container._n_threads sess_options.inter_op_num_threads = 1 sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL container._session = ort.InferenceSession(container._model.SerializeToString(), sess_options=sess_options) return container def _get_named_inputs(self, inputs): """ Retrieve the inputs names from the session object. """ if len(inputs) < len(self._input_names): inputs = inputs[0] assert len(inputs) == len(self._input_names) named_inputs = {} for i in range(len(inputs)): input_ = np.array(inputs[i]) if input_.dtype.kind in constants.SUPPORTED_STRING_TYPES: assert constants.MAX_STRING_LENGTH in self._extra_config input_ = from_strings_to_ints(input_, self._extra_config[constants.MAX_STRING_LENGTH]) named_inputs[self._input_names[i]] = input_ return named_inputs class ONNXSklearnContainerTransformer(ONNXSklearnContainer, SklearnContainerTransformer): """ Container for ONNX models mirroring Sklearn transformers API. """ class ONNXSklearnContainerRegression(ONNXSklearnContainer, SklearnContainerRegression): """ Container for ONNX models mirroring Sklearn regressors API. 
""" class ONNXSklearnContainerClassification(ONNXSklearnContainerRegression, SklearnContainerClassification): """ Container for ONNX models mirroring Sklearn classifiers API. """ class ONNXSklearnContainerAnomalyDetection(ONNXSklearnContainerRegression, SklearnContainerAnomalyDetection): """ Container for ONNX models mirroring Sklearn anomaly detection API. """ # TVM containers. class TVMSklearnContainer(SklearnContainer): """ Base container for TVM models. The container allows to mirror the Sklearn API. The test input size must be the same as the batch size this container is created. """ @staticmethod def load(location): """ Method used to load a container from the file system. Args: location: The location on the file system where to load the model. Returns: The loaded model. """ assert tvm_installed() import tvm from tvm.contrib import util, graph_runtime from tvm import relay container = None assert os.path.exists(location), "Directory {} not found.".format(location) path_lib = os.path.join(location, constants.SAVE_LOAD_TVM_LIB_PATH) graph = open(os.path.join(location, constants.SAVE_LOAD_TVM_GRAPH_PATH)).read() lib = tvm.runtime.module.load_module(path_lib) params = relay.load_param_dict(open(os.path.join(location, constants.SAVE_LOAD_TVM_PARAMS_PATH), "rb").read()) # params = bytearray(open(os.path.join(location, "deploy_param.params"), "rb").read()) with open(os.path.join(location, constants.SAVE_LOAD_CONTAINER_PATH), "rb") as file: container = dill.load(file) assert container is not None, "Failed to load the model container." 
ctx = tvm.cpu() if container._ctx == "cpu" else tvm.gpu container._model = graph_runtime.create(graph, lib, ctx) container._model.set_input(**params) container._extra_config[constants.TVM_GRAPH] = graph container._extra_config[constants.TVM_LIB] = lib container._extra_config[constants.TVM_PARAMS] = params container._extra_config[constants.TVM_CONTEXT] = ctx container._ctx = ctx # Need to set the number of threads to use as set in the original container. os.environ["TVM_NUM_THREADS"] = str(container._n_threads) return container class TVMSklearnContainerTransformer(TVMSklearnContainer, SklearnContainerTransformer): """ Container for TVM models mirroring Sklearn transformers API. """ class TVMSklearnContainerRegression(TVMSklearnContainer, SklearnContainerRegression): """ Container for TVM models mirroring Sklearn regressors API. """ class TVMSklearnContainerClassification(TVMSklearnContainerRegression, SklearnContainerClassification): """ Container for TVM models mirroring Sklearn classifiers API. """ class TVMSklearnContainerAnomalyDetection(TVMSklearnContainerRegression, SklearnContainerAnomalyDetection): """ Container for TVM models mirroring Sklearn anomaly detection API. """
38.296767
133
0.668657
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """ All custom model containers are listed here. In Hummingbird we use two types of containers: - containers for input models (e.g., `CommonONNXModelContainer`) used to represent input models in a unified way as DAG of containers - containers for output models (e.g., `SklearnContainer`) used to surface output models as unified API format. """ from abc import ABC, abstractmethod import dill import os import numpy as np from onnxconverter_common.container import CommonSklearnModelContainer import torch from hummingbird.ml.operator_converters import constants from hummingbird.ml._utils import onnx_runtime_installed, tvm_installed, pandas_installed, get_device, from_strings_to_ints if pandas_installed(): from pandas import DataFrame else: DataFrame = None # Input containers class CommonONNXModelContainer(CommonSklearnModelContainer): """ Common container for input ONNX operators. """ def __init__(self, onnx_model): super(CommonONNXModelContainer, self).__init__(onnx_model) class CommonSparkMLModelContainer(CommonSklearnModelContainer): """ Common container for input Spark-ML operators. """ def __init__(self, sparkml_model): super(CommonSparkMLModelContainer, self).__init__(sparkml_model) # Output containers. # Abstract containers enabling the Sklearn API. class SklearnContainer(ABC): def __init__(self, model, n_threads=None, batch_size=None, extra_config={}): """ Base container abstract class allowing to mirror the Sklearn API. *SklearnContainer* enables the use of `predict`, `predict_proba` etc. API of Sklearn also over the models generated by Hummingbird (irrespective of the selected backend). 
Args: model: Any Hummingbird supported model n_threads: How many threads should be used by the containter to run the model. None means use all threads. batch_size: If different than None, split the input into batch_size partitions and score one partition at a time. extra_config: Some additional configuration parameter. """ self._model = model self._n_threads = n_threads self._extra_config = extra_config self._batch_size = batch_size @property def model(self): return self._model @abstractmethod def save(self, location): """ Method used to save the container for future use. Args: location: The location on the file system where to save the model. """ return def _run(self, function, *inputs): """ This function scores the full dataset at once. See BatchContainer below for batched scoring. """ if DataFrame is not None and type(inputs[0]) == DataFrame: # Split the dataframe into column ndarrays. inputs = inputs[0] input_names = list(inputs.columns) splits = [inputs[input_names[idx]] for idx in range(len(input_names))] inputs = [df.to_numpy().reshape(-1, 1) for df in splits] return function(*inputs) class BatchContainer: def __init__(self, base_container, remainder_model_container=None): """ A wrapper around one or two containers to do batch by batch prediction. The batch size is fixed when `base_container` is created. Together with `remainder_model_container`, this class enables prediction on a dataset of size `base_container._batch_size` * k + `remainder_model_container._batch_size`, where k is any integer. Its `predict` related method optionally takes `concatenate_outputs` argument, which when set to True causes the outputs to be returned as a list of individual prediction. This avoids an extra allocation of an output array and copying of each batch prediction into it. Args: base_container: One of subclasses of `SklearnContainer`. 
remainder_model_container: An auxiliary container that is used in the last iteration, if the test input batch size is not devisible by `base_container._batch_size`. """ assert base_container._batch_size is not None self._base_container = base_container self._batch_size = base_container._batch_size if remainder_model_container: assert remainder_model_container._batch_size is not None self._remainder_model_container = remainder_model_container self._remainder_size = remainder_model_container._batch_size else: # This is remainder_size == 0 case # We repurpose base_container as a remainder_model_container self._remainder_model_container = base_container self._remainder_size = base_container._batch_size def __getattr__(self, name): return getattr(self._base_container, name) def decision_function(self, *inputs, concatenate_outputs=True): return self._predict_common( self._base_container.decision_function, self._remainder_model_container.decision_function, *inputs, concatenate_outputs=concatenate_outputs ) def transform(self, *inputs, concatenate_outputs=True): return self._predict_common( self._base_container.transform, self._remainder_model_container.transform, *inputs, concatenate_outputs=concatenate_outputs ) def score_samples(self, *inputs, concatenate_outputs=True): return self._predict_common( self._base_container.score_samples, self._remainder_model_container.score_samples, *inputs, concatenate_outputs=concatenate_outputs ) def predict(self, *inputs, concatenate_outputs=True): return self._predict_common( self._base_container.predict, self._remainder_model_container.predict, *inputs, concatenate_outputs=concatenate_outputs ) def predict_proba(self, *inputs, concatenate_outputs=True): return self._predict_common( self._base_container.predict_proba, self._remainder_model_container.predict_proba, *inputs, concatenate_outputs=concatenate_outputs ) def _predict_common(self, predict_func, remainder_predict_func, *inputs, concatenate_outputs=True): if DataFrame is not 
None and type(inputs[0]) == DataFrame: # Split the dataframe into column ndarrays. inputs = inputs[0] input_names = list(inputs.columns) splits = [inputs[input_names[idx]] for idx in range(len(input_names))] inputs = tuple([df.to_numpy().reshape(-1, 1) for df in splits]) def output_proc(predictions): if concatenate_outputs: return np.concatenate(predictions) return predictions is_tuple = isinstance(inputs, tuple) if is_tuple: total_size = inputs[0].shape[0] else: total_size = inputs.shape[0] if total_size == self._batch_size: # A single batch inference case return output_proc([predict_func(*inputs)]) iterations = total_size // self._batch_size iterations += 1 if total_size % self._batch_size > 0 else 0 iterations = max(1, iterations) predictions = [] for i in range(0, iterations): start = i * self._batch_size end = min(start + self._batch_size, total_size) if is_tuple: batch = tuple([input[start:end, :] for input in inputs]) else: batch = inputs[start:end, :] if i == iterations - 1: assert (end - start) == self._remainder_size out = remainder_predict_func(*batch) else: out = predict_func(*batch) predictions.append(out) return output_proc(predictions) class SklearnContainerTransformer(SklearnContainer): """ Abstract container mirroring Sklearn transformers API. """ @abstractmethod def _transform(self, *input): """ This method contains container-specific implementation of transform. """ pass def transform(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On data transformers it returns transformed output data """ return self._run(self._transform, *inputs) class SklearnContainerRegression(SklearnContainer): """ Abstract container mirroring Sklearn regressors API. 
""" def __init__( self, model, n_threads, batch_size, is_regression=True, is_anomaly_detection=False, extra_config={}, **kwargs ): super(SklearnContainerRegression, self).__init__(model, n_threads, batch_size, extra_config) assert not (is_regression and is_anomaly_detection) self._is_regression = is_regression self._is_anomaly_detection = is_anomaly_detection @abstractmethod def _predict(self, *input): """ This method contains container-specific implementation of predict. """ pass def predict(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On regression returns the predicted values. On classification tasks returns the predicted class labels for the input data. On anomaly detection (e.g. isolation forest) returns the predicted classes (-1 or 1). """ return self._run(self._predict, *inputs) class SklearnContainerClassification(SklearnContainerRegression): """ Container mirroring Sklearn classifiers API. """ def __init__(self, model, n_threads, batch_size, extra_config={}): super(SklearnContainerClassification, self).__init__( model, n_threads, batch_size, is_regression=False, extra_config=extra_config ) @abstractmethod def _predict_proba(self, *input): """ This method contains container-specific implementation of predict_proba. """ pass def predict_proba(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On classification tasks returns the probability estimates. """ return self._run(self._predict_proba, *inputs) class SklearnContainerAnomalyDetection(SklearnContainerRegression): """ Container mirroring Sklearn anomaly detection API. 
""" def __init__(self, model, n_threads, batch_size, extra_config={}): super(SklearnContainerAnomalyDetection, self).__init__( model, n_threads, batch_size, is_regression=False, is_anomaly_detection=True, extra_config=extra_config ) @abstractmethod def _decision_function(self, *inputs): """ This method contains container-specific implementation of decision_function. """ pass def decision_function(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On anomaly detection (e.g. isolation forest) returns the decision function scores. """ scores = self._run(self._decision_function, *inputs) # Backward compatibility for sklearn <= 0.21 if constants.IFOREST_THRESHOLD in self._extra_config: scores += self._extra_config[constants.IFOREST_THRESHOLD] return scores def score_samples(self, *inputs): """ Utility functions used to emulate the behavior of the Sklearn API. On anomaly detection (e.g. isolation forest) returns the decision_function score plus offset_ """ return self.decision_function(*inputs) + self._extra_config[constants.OFFSET] # PyTorch containers. class PyTorchSklearnContainer(SklearnContainer): """ Base container for PyTorch models. """ def save(self, location): assert self.model is not None, "Saving a None model is undefined." if constants.TEST_INPUT in self._extra_config: self._extra_config[constants.TEST_INPUT] = None if "torch.jit" in str(type(self.model)): # This is a torchscript model. assert not os.path.exists(location), "Directory {} already exists.".format(location) os.makedirs(location) self.model.save(os.path.join(location, constants.SAVE_LOAD_TORCH_JIT_PATH)) model = self.model self._model = None with open(os.path.join(location, "container.pkl"), "wb") as file: dill.dump(self, file) self._model = model elif "PyTorchBackendModel" in str(type(self.model)): # This is a pytorch model. 
if not location.endswith("pkl"): location += "pkl" assert not os.path.exists(location), "File {} already exists.".format(location) with open(location, "wb") as file: dill.dump(self, file) else: raise RuntimeError("Model type {} not recognized.".format(type(self.model))) @staticmethod def load(location): """ Method used to load a container from the file system. Args: location: The location on the file system where to load the model. Returns: The loaded model. """ assert os.path.exists(location), "Model location {} does not exist.".format(location) container = None if os.path.isdir(location): # This is a torch.jit model model = torch.jit.load(os.path.join(location, constants.SAVE_LOAD_TORCH_JIT_PATH)) with open(os.path.join(location, "container.pkl"), "rb") as file: container = dill.load(file) container._model = model else: # This is a pytorch model with open(location, "rb") as file: container = dill.load(file) # Need to set the number of threads to use as set in the original container. if container._n_threads is not None: if torch.get_num_interop_threads() != 1: torch.set_num_interop_threads(1) torch.set_num_threads(container._n_threads) return container def to(self, device): self.model.to(device) return self class PyTorchSklearnContainerTransformer(SklearnContainerTransformer, PyTorchSklearnContainer): """ Container for PyTorch models mirroring Sklearn transformers API. """ def _transform(self, *inputs): return self.model.forward(*inputs).cpu().numpy() class PyTorchSklearnContainerRegression(SklearnContainerRegression, PyTorchSklearnContainer): """ Container for PyTorch models mirroring Sklearn regressor API. 
""" def _predict(self, *inputs): if self._is_regression: return self.model.forward(*inputs).cpu().numpy().ravel() elif self._is_anomaly_detection: return self.model.forward(*inputs)[0].cpu().numpy().ravel() else: return self.model.forward(*inputs)[0].cpu().numpy().ravel() class PyTorchSklearnContainerClassification(SklearnContainerClassification, PyTorchSklearnContainerRegression): """ Container for PyTorch models mirroring Sklearn classifiers API. """ def _predict_proba(self, *input): return self.model.forward(*input)[1].cpu().numpy() class PyTorchSklearnContainerAnomalyDetection(PyTorchSklearnContainerRegression, SklearnContainerAnomalyDetection): """ Container for PyTorch models mirroning the Sklearn anomaly detection API. """ def _decision_function(self, *inputs): return self.model.forward(*inputs)[1].cpu().numpy().ravel() # TorchScript containers. def _torchscript_wrapper(device, function, *inputs, extra_config={}): """ This function contains the code to enable predictions over torchscript models. It is used to translates inputs in the proper torch format. """ inputs = [*inputs] with torch.no_grad(): if type(inputs) == DataFrame and DataFrame is not None: # Split the dataframe into column ndarrays inputs = inputs[0] input_names = list(inputs.columns) splits = [inputs[input_names[idx]] for idx in range(len(input_names))] splits = [df.to_numpy().reshape(-1, 1) for df in splits] inputs = tuple(splits) # Maps data inputs to the expected type and device. for i in range(len(inputs)): if type(inputs[i]) is list: inputs[i] = np.array(inputs[i]) if type(inputs[i]) is np.ndarray: # Convert string arrays into int32. if inputs[i].dtype.kind in constants.SUPPORTED_STRING_TYPES: assert constants.MAX_STRING_LENGTH in extra_config inputs[i] = from_strings_to_ints(inputs[i], extra_config[constants.MAX_STRING_LENGTH]) if inputs[i].dtype == np.float64: # We convert double precision arrays into single precision. Sklearn does the same. 
inputs[i] = inputs[i].astype("float32") inputs[i] = torch.from_numpy(inputs[i]) elif type(inputs[i]) is not torch.Tensor: raise RuntimeError("Inputer tensor {} of not supported type {}".format(i, type(inputs[i]))) if device.type != "cpu" and device is not None: inputs[i] = inputs[i].to(device) return function(*inputs) class TorchScriptSklearnContainerTransformer(PyTorchSklearnContainerTransformer): """ Container for TorchScript models mirroring Sklearn transformers API. """ def transform(self, *inputs): device = get_device(self.model) f = super(TorchScriptSklearnContainerTransformer, self)._transform f_wrapped = lambda x: _torchscript_wrapper(device, f, x, extra_config=self._extra_config) # noqa: E731 return self._run(f_wrapped, *inputs) class TorchScriptSklearnContainerRegression(PyTorchSklearnContainerRegression): """ Container for TorchScript models mirroring Sklearn regressors API. """ def predict(self, *inputs): device = get_device(self.model) f = super(TorchScriptSklearnContainerRegression, self)._predict f_wrapped = lambda x: _torchscript_wrapper(device, f, x, extra_config=self._extra_config) # noqa: E731 return self._run(f_wrapped, *inputs) class TorchScriptSklearnContainerClassification(PyTorchSklearnContainerClassification): """ Container for TorchScript models mirroring Sklearn classifiers API. 
""" def predict(self, *inputs): device = get_device(self.model) f = super(TorchScriptSklearnContainerClassification, self)._predict f_wrapped = lambda x: _torchscript_wrapper(device, f, x, extra_config=self._extra_config) # noqa: E731 return self._run(f_wrapped, *inputs) def predict_proba(self, *inputs): device = get_device(self.model) f = super(TorchScriptSklearnContainerClassification, self)._predict_proba f_wrapped = lambda *x: _torchscript_wrapper(device, f, *x, extra_config=self._extra_config) # noqa: E731 return self._run(f_wrapped, *inputs) class TorchScriptSklearnContainerAnomalyDetection(PyTorchSklearnContainerAnomalyDetection): """ Container for TorchScript models mirroring Sklearn anomaly detection API. """ def predict(self, *inputs): device = get_device(self.model) f = super(TorchScriptSklearnContainerAnomalyDetection, self)._predict f_wrapped = lambda x: _torchscript_wrapper(device, f, x, extra_config=self._extra_config) # noqa: E731 return self._run(f_wrapped, *inputs) def decision_function(self, *inputs): device = get_device(self.model) f = super(TorchScriptSklearnContainerAnomalyDetection, self)._decision_function f_wrapped = lambda x: _torchscript_wrapper(device, f, x, extra_config=self._extra_config) # noqa: E731 scores = self._run(f_wrapped, *inputs) if constants.IFOREST_THRESHOLD in self._extra_config: scores += self._extra_config[constants.IFOREST_THRESHOLD] return scores def score_samples(self, *inputs): device = get_device(self.model) f = self.decision_function f_wrapped = lambda x: _torchscript_wrapper(device, f, x, extra_config=self._extra_config) # noqa: E731 return self._run(f_wrapped, *inputs) + self._extra_config[constants.OFFSET] # ONNX containers. class ONNXSklearnContainer(SklearnContainer): """ Base container for ONNX models. The container allows to mirror the Sklearn API. 
""" def __init__(self, model, n_threads=None, batch_size=None, extra_config={}): super(ONNXSklearnContainer, self).__init__(model, n_threads, batch_size, extra_config) if onnx_runtime_installed(): import onnxruntime as ort sess_options = ort.SessionOptions() if self._n_threads is not None: sess_options.intra_op_num_threads = self._n_threads sess_options.inter_op_num_threads = 1 sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL self._session = ort.InferenceSession(self._model.SerializeToString(), sess_options=sess_options) self._output_names = [self._session.get_outputs()[i].name for i in range(len(self._session.get_outputs()))] self._input_names = [input.name for input in self._session.get_inputs()] self._extra_config = extra_config else: raise RuntimeError("ONNX Container requires ONNX runtime installed.") def save(self, location): assert self.model is not None, "Saving a None model is undefined." import onnx if constants.TEST_INPUT in self._extra_config: self._extra_config[constants.TEST_INPUT] = None assert not os.path.exists(location), "Directory {} already exists.".format(location) os.makedirs(location) onnx.save(self.model, os.path.join(location, constants.SAVE_LOAD_ONNX_PATH)) model = self.model session = self._session self._model = None self._session = None with open(os.path.join(location, constants.SAVE_LOAD_CONTAINER_PATH), "wb") as file: dill.dump(self, file) self._model = model self._session = session @staticmethod def load(location): """ Method used to load a container from the file system. Args: location: The location on the file system where to load the model. Returns: The loaded model. 
""" assert os.path.exists(location), "Model location {} does not exist.".format(location) assert onnx_runtime_installed import onnx import onnxruntime as ort container = None model = onnx.load(os.path.join(location, constants.SAVE_LOAD_ONNX_PATH)) with open(os.path.join(location, constants.SAVE_LOAD_CONTAINER_PATH), "rb") as file: container = dill.load(file) container._model = model sess_options = ort.SessionOptions() if container._n_threads is not None: # Need to set the number of threads to use as set in the original container. sess_options.intra_op_num_threads = container._n_threads sess_options.inter_op_num_threads = 1 sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL container._session = ort.InferenceSession(container._model.SerializeToString(), sess_options=sess_options) return container def _get_named_inputs(self, inputs): """ Retrieve the inputs names from the session object. """ if len(inputs) < len(self._input_names): inputs = inputs[0] assert len(inputs) == len(self._input_names) named_inputs = {} for i in range(len(inputs)): input_ = np.array(inputs[i]) if input_.dtype.kind in constants.SUPPORTED_STRING_TYPES: assert constants.MAX_STRING_LENGTH in self._extra_config input_ = from_strings_to_ints(input_, self._extra_config[constants.MAX_STRING_LENGTH]) named_inputs[self._input_names[i]] = input_ return named_inputs class ONNXSklearnContainerTransformer(ONNXSklearnContainer, SklearnContainerTransformer): """ Container for ONNX models mirroring Sklearn transformers API. """ def _transform(self, *inputs): assert len(self._output_names) == 1 named_inputs = self._get_named_inputs(inputs) return np.array(self._session.run(self._output_names, named_inputs))[0] class ONNXSklearnContainerRegression(ONNXSklearnContainer, SklearnContainerRegression): """ Container for ONNX models mirroring Sklearn regressors API. 
""" def _predict(self, *inputs): named_inputs = self._get_named_inputs(inputs) if self._is_regression: assert len(self._output_names) == 1 return np.array(self._session.run(self._output_names, named_inputs))[0].ravel() elif self._is_anomaly_detection: assert len(self._output_names) == 2 return np.array(self._session.run([self._output_names[0]], named_inputs))[0].ravel() else: assert len(self._output_names) == 2 return np.array(self._session.run([self._output_names[0]], named_inputs))[0] class ONNXSklearnContainerClassification(ONNXSklearnContainerRegression, SklearnContainerClassification): """ Container for ONNX models mirroring Sklearn classifiers API. """ def _predict_proba(self, *inputs): assert len(self._output_names) == 2 named_inputs = self._get_named_inputs(inputs) return self._session.run([self._output_names[1]], named_inputs)[0] class ONNXSklearnContainerAnomalyDetection(ONNXSklearnContainerRegression, SklearnContainerAnomalyDetection): """ Container for ONNX models mirroring Sklearn anomaly detection API. """ def _decision_function(self, *inputs): assert len(self._output_names) == 2 named_inputs = self._get_named_inputs(inputs) return np.array(self._session.run([self._output_names[1]], named_inputs)[0]).flatten() # TVM containers. class TVMSklearnContainer(SklearnContainer): """ Base container for TVM models. The container allows to mirror the Sklearn API. The test input size must be the same as the batch size this container is created. 
""" def __init__(self, model, n_threads=None, batch_size=None, extra_config={}): super(TVMSklearnContainer, self).__init__(model, n_threads, batch_size, extra_config=extra_config) assert tvm_installed() import tvm self._ctx = self._extra_config[constants.TVM_CONTEXT] self._input_names = self._extra_config[constants.TVM_INPUT_NAMES] self._to_tvm_array = lambda x: tvm.nd.array(x, self._ctx) os.environ["TVM_NUM_THREADS"] = str(self._n_threads) def save(self, location): assert self.model is not None, "Saving a None model is undefined." from tvm.contrib import util from tvm import relay assert not os.path.exists(location), "Directory {} already exists.".format(location) os.makedirs(location) path_lib = os.path.join(location, constants.SAVE_LOAD_TVM_LIB_PATH) self._extra_config[constants.TVM_LIB].export_library(path_lib) with open(os.path.join(location, constants.SAVE_LOAD_TVM_GRAPH_PATH), "w") as fo: fo.write(self._extra_config[constants.TVM_GRAPH]) with open(os.path.join(location, constants.SAVE_LOAD_TVM_PARAMS_PATH), "wb") as fo: fo.write(relay.save_param_dict(self._extra_config[constants.TVM_PARAMS])) # Remove all information that cannot be pickled if constants.TEST_INPUT in self._extra_config: self._extra_config[constants.TEST_INPUT] = None lib = self._extra_config[constants.TVM_LIB] graph = self._extra_config[constants.TVM_GRAPH] params = self._extra_config[constants.TVM_PARAMS] ctx = self._extra_config[constants.TVM_CONTEXT] model = self._model self._extra_config[constants.TVM_LIB] = None self._extra_config[constants.TVM_GRAPH] = None self._extra_config[constants.TVM_PARAMS] = None self._extra_config[constants.TVM_CONTEXT] = None self._ctx = "cpu" if self._ctx.device_type == 1 else "cuda" self._model = None with open(os.path.join(location, constants.SAVE_LOAD_CONTAINER_PATH), "wb") as file: dill.dump(self, file) # Restore the information self._extra_config[constants.TVM_LIB] = lib self._extra_config[constants.TVM_GRAPH] = graph 
self._extra_config[constants.TVM_PARAMS] = params self._extra_config[constants.TVM_CONTEXT] = ctx self._ctx = ctx self._model = model @staticmethod def load(location): """ Method used to load a container from the file system. Args: location: The location on the file system where to load the model. Returns: The loaded model. """ assert tvm_installed() import tvm from tvm.contrib import util, graph_runtime from tvm import relay container = None assert os.path.exists(location), "Directory {} not found.".format(location) path_lib = os.path.join(location, constants.SAVE_LOAD_TVM_LIB_PATH) graph = open(os.path.join(location, constants.SAVE_LOAD_TVM_GRAPH_PATH)).read() lib = tvm.runtime.module.load_module(path_lib) params = relay.load_param_dict(open(os.path.join(location, constants.SAVE_LOAD_TVM_PARAMS_PATH), "rb").read()) # params = bytearray(open(os.path.join(location, "deploy_param.params"), "rb").read()) with open(os.path.join(location, constants.SAVE_LOAD_CONTAINER_PATH), "rb") as file: container = dill.load(file) assert container is not None, "Failed to load the model container." ctx = tvm.cpu() if container._ctx == "cpu" else tvm.gpu container._model = graph_runtime.create(graph, lib, ctx) container._model.set_input(**params) container._extra_config[constants.TVM_GRAPH] = graph container._extra_config[constants.TVM_LIB] = lib container._extra_config[constants.TVM_PARAMS] = params container._extra_config[constants.TVM_CONTEXT] = ctx container._ctx = ctx # Need to set the number of threads to use as set in the original container. os.environ["TVM_NUM_THREADS"] = str(container._n_threads) return container def _to_tvm_tensor(self, *inputs): tvm_tensors = {} msg = "The number of input rows {} is different from the batch size {} the TVM model is compiled for." 
for i, inp in enumerate(inputs): assert inp.shape[0] == self._batch_size, msg.format(inp.shape[0], self._batch_size) tvm_tensors[self._input_names[i]] = self._to_tvm_array(inp) return tvm_tensors def _predict_common(self, output_index, *inputs): self.model.run(**self._to_tvm_tensor(*inputs)) return self.model.get_output(output_index).asnumpy() class TVMSklearnContainerTransformer(TVMSklearnContainer, SklearnContainerTransformer): """ Container for TVM models mirroring Sklearn transformers API. """ def _transform(self, *inputs): return self._predict_common(0, *inputs) class TVMSklearnContainerRegression(TVMSklearnContainer, SklearnContainerRegression): """ Container for TVM models mirroring Sklearn regressors API. """ def _predict(self, *inputs): out = self._predict_common(0, *inputs) return out.ravel() class TVMSklearnContainerClassification(TVMSklearnContainerRegression, SklearnContainerClassification): """ Container for TVM models mirroring Sklearn classifiers API. """ def _predict_proba(self, *inputs): return self._predict_common(1, *inputs) class TVMSklearnContainerAnomalyDetection(TVMSklearnContainerRegression, SklearnContainerAnomalyDetection): """ Container for TVM models mirroring Sklearn anomaly detection API. """ def _decision_function(self, *inputs): out = self._predict_common(1, *inputs) return out.ravel()
14,047
3,600
909
9a9aaa55b2245d41e80ede3b36d28d0e62ff0f68
35
py
Python
orchestra/contrib/payments/methods/__init__.py
RubenPX/django-orchestra
5ab4779e1ae12ec99569d682601b7810587ed381
[ "Unlicense" ]
68
2015-02-09T10:28:44.000Z
2022-03-12T11:08:36.000Z
orchestra/contrib/payments/methods/__init__.py
RubenPX/django-orchestra
5ab4779e1ae12ec99569d682601b7810587ed381
[ "Unlicense" ]
17
2015-05-01T18:10:03.000Z
2021-03-19T21:52:55.000Z
orchestra/contrib/payments/methods/__init__.py
RubenPX/django-orchestra
5ab4779e1ae12ec99569d682601b7810587ed381
[ "Unlicense" ]
29
2015-03-31T04:51:03.000Z
2022-02-17T02:58:50.000Z
from .options import PaymentMethod
17.5
34
0.857143
from .options import PaymentMethod
0
0
0
6b3b860f8de66d6e240b469addd2766a5117864d
1,305
py
Python
components/studio/studio/KCRFbackend.py
aitmlouk/stackn
c8029394a15b03796a4864938f9db251b65c7354
[ "Apache-2.0" ]
25
2020-05-08T22:24:54.000Z
2022-03-11T18:16:58.000Z
components/studio/studio/KCRFbackend.py
aitmlouk/stackn
c8029394a15b03796a4864938f9db251b65c7354
[ "Apache-2.0" ]
75
2020-05-08T22:15:59.000Z
2021-11-22T10:00:04.000Z
components/studio/studio/KCRFbackend.py
aitmlouk/stackn
c8029394a15b03796a4864938f9db251b65c7354
[ "Apache-2.0" ]
12
2020-11-04T13:09:46.000Z
2022-03-14T16:22:40.000Z
from django.contrib.auth.models import User from rest_framework import authentication from rest_framework import exceptions from django.conf import settings import jwt import requests as r import modules.keycloak_lib as keylib
40.78125
111
0.671264
from django.contrib.auth.models import User from rest_framework import authentication from rest_framework import exceptions from django.conf import settings import jwt import requests as r import modules.keycloak_lib as keylib class KeycloakAuthentication(authentication.BaseAuthentication): def authenticate(self, request): token_str = request.META['HTTP_AUTHORIZATION'] access_token = token_str.replace('Token ', '') discovery_url = settings.OIDC_OP_REALM_AUTH+'/'+settings.KC_REALM res = r.get(discovery_url, verify=settings.OIDC_VERIFY_SSL) if res: realm_info = res.json() public_key = '-----BEGIN PUBLIC KEY-----\n'+realm_info['public_key']+'\n-----END PUBLIC KEY-----' else: print('Failed to discover realm settings: '+settings.KC_REALM) return None try: access_token_json = jwt.decode(access_token, public_key, algorithms='RS256', audience='studio-api') except: print('Failed to authenticate.') return None username = access_token_json['preferred_username'] user = User.objects.get(username=username) request.session['oidc_access_token'] = access_token request.session.save() return (user, None)
986
43
49
1b0f1918f46a3472d52ee7c7d05a7733037cefcc
755
py
Python
blogger_publish.py
durgaswaroop/blog-automation
d1f5bb2f35b85f1e2487e7b5e711dfd5864270de
[ "Unlicense" ]
null
null
null
blogger_publish.py
durgaswaroop/blog-automation
d1f5bb2f35b85f1e2487e7b5e711dfd5864270de
[ "Unlicense" ]
null
null
null
blogger_publish.py
durgaswaroop/blog-automation
d1f5bb2f35b85f1e2487e7b5e711dfd5864270de
[ "Unlicense" ]
null
null
null
# Call easyblogger # Client secret and authentication is already stored in ~/.easyblogger.credentials # blogid is stored in ~/.easyblogger import subprocess import blogger_modifications
35.952381
118
0.682119
# Call easyblogger # Client secret and authentication is already stored in ~/.easyblogger.credentials # blogid is stored in ~/.easyblogger import subprocess import blogger_modifications def publish(html_file, title, labels, post_id): # print('publishing', html_file, 'with title', title, 'labels', labels, 'and post-id', post_id, flush=True) print(post_id) # Modify html as needed. blogger_modifications.modify(html_file) # If post-id doesnt already exist, if not post_id: command = f'easyblogger.exe post -t "{title}" -l "{", ".join(labels)}" -f "{html_file}"' else: command = f'easyblogger.exe update -t "{title}" -l "{", ".join(labels)}" -f "{html_file}" --publish {post_id}' subprocess.run(command)
544
0
23
ed01993e511f84c08d098c1f3223a571cfb8ac43
19,321
py
Python
gnsq/nsqd.py
hiringsolved/gnsq
d1d3c949d3920ba96befac5b9718b1d03ccb6e8b
[ "BSD-3-Clause" ]
null
null
null
gnsq/nsqd.py
hiringsolved/gnsq
d1d3c949d3920ba96befac5b9718b1d03ccb6e8b
[ "BSD-3-Clause" ]
null
null
null
gnsq/nsqd.py
hiringsolved/gnsq
d1d3c949d3920ba96befac5b9718b1d03ccb6e8b
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import import blinker import time from gevent import socket try: import simplejson as json except ImportError: import json # pyflakes.ignore from . import protocol as nsq from . import errors from .message import Message from .httpclient import HTTPClient from .states import INIT, CONNECTED, DISCONNECTED from .stream import Stream from .decorators import cached_property from .version import __version__ HOSTNAME = socket.gethostname() SHORTNAME = HOSTNAME.split('.')[0] USERAGENT = 'gnsq/%s' % __version__ class Nsqd(HTTPClient): """Low level object representing a TCP or HTTP connection to nsqd. :param address: the host or ip address of the nsqd :param tcp_port: the nsqd tcp port to connect to :param http_port: the nsqd http port to connect to :param timeout: the timeout for read/write operations (in seconds) :param client_id: an identifier used to disambiguate this client (defaults to the first part of the hostname) :param hostname: the hostname where the client is deployed (defaults to the clients hostname) :param heartbeat_interval: the amount of time in seconds to negotiate with the connected producers to send heartbeats (requires nsqd 0.2.19+) :param output_buffer_size: size of the buffer (in bytes) used by nsqd for buffering writes to this connection :param output_buffer_timeout: timeout (in ms) used by nsqd before flushing buffered writes (set to 0 to disable). Warning: configuring clients with an extremely low (< 25ms) output_buffer_timeout has a significant effect on nsqd CPU usage (particularly with > 50 clients connected). 
:param tls_v1: enable TLS v1 encryption (requires nsqd 0.2.22+) :param tls_options: dictionary of options to pass to `ssl.wrap_socket() <http://docs.python.org/2/library/ssl.html#ssl.wrap_socket>`_ :param snappy: enable Snappy stream compression (requires nsqd 0.2.23+) :param deflate: enable deflate stream compression (requires nsqd 0.2.23+) :param deflate_level: configure the deflate compression level for this connection (requires nsqd 0.2.23+) :param sample_rate: take only a sample of the messages being sent to the client. Not setting this or setting it to 0 will ensure you get all the messages destined for the client. Sample rate can be greater than 0 or less than 100 and the client will receive that percentage of the message traffic. (requires nsqd 0.2.25+) :param auth_secret: a string passed when using nsq auth (requires nsqd 0.2.29+) :param user_agent: a string identifying the agent for this client in the spirit of HTTP (default: ``<client_library_name>/<version>``) (requires nsqd 0.2.25+) """ @cached_property def on_message(self): """Emitted when a message frame is received. The signal sender is the connection and the `message` is sent as an argument. """ return blinker.Signal(doc='Emitted when a message frame is received.') @cached_property def on_response(self): """Emitted when a response frame is received. The signal sender is the connection and the `response` is sent as an argument. """ return blinker.Signal(doc='Emitted when a response frame is received.') @cached_property def on_error(self): """Emitted when an error frame is received. The signal sender is the connection and the `error` is sent as an argument. """ return blinker.Signal(doc='Emitted when a error frame is received.') @cached_property def on_finish(self): """Emitted after :meth:`finish`. Sent after a message owned by this connection is successfully finished. The signal sender is the connection and the `message_id` is sent as an argument. 
""" return blinker.Signal(doc='Emitted after the a message is finished.') @cached_property def on_requeue(self): """Emitted after :meth:`requeue`. Sent after a message owned by this connection is requeued. The signal sender is the connection and the `message_id`, `timeout` and `backoff` flag are sent as arguments. """ return blinker.Signal(doc='Emitted after the a message is requeued.') @cached_property def on_auth(self): """Emitted after the connection is successfully authenticated. The signal sender is the connection and the parsed `response` is sent as arguments. """ return blinker.Signal( doc='Emitted after the connection is successfully authenticated.' ) @cached_property def on_close(self): """Emitted after :meth:`close_stream`. Sent after the connection socket has closed. The signal sender is the connection. """ return blinker.Signal(doc='Emitted after the connection is closed.') @property def is_connected(self): """Check if the client is currently connected.""" return self.state == CONNECTED @property def is_starved(self): """Evaluate whether the connection is starved. This property should be used by message handlers to reliably identify when to process a batch of messages. """ return self.in_flight >= max(self.last_ready * 0.85, 1) def connect(self): """Initialize connection to the nsqd.""" if self.state not in (INIT, DISCONNECTED): return stream = Stream(self.address, self.tcp_port, self.timeout) stream.connect() self.stream = stream self.state = CONNECTED self.send(nsq.MAGIC_V2) def close_stream(self): """Close the underlying socket.""" if not self.is_connected: return self.stream.close() self.state = DISCONNECTED self.on_close.send(self) def read_response(self): """Read an individual response from nsqd. :returns: tuple of the frame type and the processed data. 
""" response = self._read_response() frame, data = nsq.unpack_response(response) self.last_response = time.time() if frame not in self._frame_handlers: raise errors.NSQFrameError('unknown frame %d' % frame) frame_handler = self._frame_handlers[frame] processed_data = frame_handler(data) return frame, processed_data def listen(self): """Listen to incoming responses until the connection closes.""" while self.is_connected: self.read_response() def identify(self): """Update client metadata on the server and negotiate features. :returns: nsqd response data if there was feature negotiation, otherwise `None` """ self.send(nsq.identify({ # nsqd <0.2.28 'short_id': self.client_id, 'long_id': self.hostname, # nsqd 0.2.28+ 'client_id': self.client_id, 'hostname': self.hostname, # nsqd 0.2.19+ 'feature_negotiation': True, 'heartbeat_interval': self.heartbeat_interval, # nsqd 0.2.21+ 'output_buffer_size': self.output_buffer_size, 'output_buffer_timeout': self.output_buffer_timeout, # nsqd 0.2.22+ 'tls_v1': self.tls_v1, # nsqd 0.2.23+ 'snappy': self.snappy, 'deflate': self.deflate, 'deflate_level': self.deflate_level, # nsqd nsqd 0.2.25+ 'sample_rate': self.sample_rate, 'user_agent': self.user_agent, })) frame, data = self.read_response() if frame == nsq.FRAME_TYPE_ERROR: raise data if data == 'OK': return try: data = json.loads(data) except ValueError: self.close_stream() msg = 'failed to parse IDENTIFY response JSON from nsqd: %r' raise errors.NSQException(msg % data) self.max_ready_count = data.get('max_rdy_count', self.max_ready_count) if self.tls_v1 and data.get('tls_v1'): self.upgrade_to_tls() if self.snappy and data.get('snappy'): self.upgrade_to_snappy() elif self.deflate and data.get('deflate'): self.deflate_level = data.get('deflate_level', self.deflate_level) self.upgrade_to_defalte() if self.auth_secret and data.get('auth_required'): self.auth() return data def auth(self): """Send authorization secret to nsqd.""" self.send(nsq.auth(self.auth_secret)) frame, data = 
self.read_response() if frame == nsq.FRAME_TYPE_ERROR: raise data try: response = json.loads(data) except ValueError: self.close_stream() msg = 'failed to parse AUTH response JSON from nsqd: %r' raise errors.NSQException(msg % data) self.on_auth.send(self, response=response) return response def subscribe(self, topic, channel): """Subscribe to a nsq `topic` and `channel`.""" self.send(nsq.subscribe(topic, channel)) def publish_tcp(self, topic, data): """Publish a message to the given topic over tcp.""" self.send(nsq.publish(topic, data)) def multipublish_tcp(self, topic, messages): """Publish an iterable of messages to the given topic over tcp.""" self.send(nsq.multipublish(topic, messages)) def ready(self, count): """Indicate you are ready to receive `count` messages.""" self.last_ready = count self.ready_count = count self.send(nsq.ready(count)) def finish(self, message_id): """Finish a message (indicate successful processing).""" self.send(nsq.finish(message_id)) self.finish_inflight() self.on_finish.send(self, message_id=message_id) def requeue(self, message_id, timeout=0, backoff=True): """Re-queue a message (indicate failure to process).""" self.send(nsq.requeue(message_id, timeout)) self.finish_inflight() self.on_requeue.send( self, message_id=message_id, timeout=timeout, backoff=backoff ) def touch(self, message_id): """Reset the timeout for an in-flight message.""" self.send(nsq.touch(message_id)) def close(self): """Indicate no more messages should be sent.""" self.send(nsq.close()) def nop(self): """Send no-op to nsqd. 
Used to keep connection alive.""" self.send(nsq.nop()) @property def publish_http(self, topic, data): """Publish a message to the given topic over http.""" nsq.assert_valid_topic_name(topic) return self.http_post('/put', fields={'topic': topic}, body=data) def multipublish_http(self, topic, messages): """Publish an iterable of messages to the given topic over http.""" nsq.assert_valid_topic_name(topic) return self.http_post( url='/mput', fields={'topic': topic}, body='\n'.join(self._validate_http_mpub(m) for m in messages) ) def create_topic(self, topic): """Create a topic.""" nsq.assert_valid_topic_name(topic) return self.http_post('/create_topic', fields={'topic': topic}) def delete_topic(self, topic): """Delete a topic.""" nsq.assert_valid_topic_name(topic) return self.http_post('/delete_topic', fields={'topic': topic}) def create_channel(self, topic, channel): """Create a channel for an existing topic.""" nsq.assert_valid_topic_name(topic) nsq.assert_valid_channel_name(channel) return self.http_post( url='/create_channel', fields={'topic': topic, 'channel': channel}, ) def delete_channel(self, topic, channel): """Delete an existing channel for an existing topic.""" nsq.assert_valid_topic_name(topic) nsq.assert_valid_channel_name(channel) return self.http_post( url='/delete_channel', fields={'topic': topic, 'channel': channel}, ) def empty_topic(self, topic): """Empty all the queued messages for an existing topic.""" nsq.assert_valid_topic_name(topic) return self.http_post('/empty_topic', fields={'topic': topic}) def empty_channel(self, topic, channel): """Empty all the queued messages for an existing channel.""" nsq.assert_valid_topic_name(topic) nsq.assert_valid_channel_name(channel) return self.http_post( url='/empty_channel', fields={'topic': topic, 'channel': channel}, ) def pause_channel(self, topic, channel): """Pause message flow to all channels on an existing topic. Messages will queue at topic. 
""" nsq.assert_valid_topic_name(topic) nsq.assert_valid_channel_name(channel) return self.http_post( url='/pause_channel', fields={'topic': topic, 'channel': channel}, ) def unpause_channel(self, topic, channel): """Resume message flow to channels of an existing, paused, topic.""" nsq.assert_valid_topic_name(topic) nsq.assert_valid_channel_name(channel) return self.http_post( url='/unpause_channel', fields={'topic': topic, 'channel': channel}, ) def stats(self): """Return internal instrumented statistics.""" return self.http_get('/stats', fields={'format': 'json'}) def ping(self): """Monitoring endpoint. :returns: should return `"OK"`, otherwise raises an exception. """ return self.http_get('/ping') def info(self): """Returns version information.""" return self.http_get('/info') def publish(self, topic, data): """Publish a message. If connected, the message will be sent over tcp. Otherwise it will fall back to http. """ if self.is_connected: return self.publish_tcp(topic, data) else: return self.publish_http(topic, data) def multipublish(self, topic, messages): """Publish an iterable of messages in one roundtrip. If connected, the messages will be sent over tcp. Otherwise it will fall back to http. """ if self.is_connected: return self.multipublish_tcp(topic, messages) else: return self.multipublish_http(topic, messages)
31.777961
80
0.625123
# -*- coding: utf-8 -*- from __future__ import absolute_import import blinker import time from gevent import socket try: import simplejson as json except ImportError: import json # pyflakes.ignore from . import protocol as nsq from . import errors from .message import Message from .httpclient import HTTPClient from .states import INIT, CONNECTED, DISCONNECTED from .stream import Stream from .decorators import cached_property from .version import __version__ HOSTNAME = socket.gethostname() SHORTNAME = HOSTNAME.split('.')[0] USERAGENT = 'gnsq/%s' % __version__ class Nsqd(HTTPClient): """Low level object representing a TCP or HTTP connection to nsqd. :param address: the host or ip address of the nsqd :param tcp_port: the nsqd tcp port to connect to :param http_port: the nsqd http port to connect to :param timeout: the timeout for read/write operations (in seconds) :param client_id: an identifier used to disambiguate this client (defaults to the first part of the hostname) :param hostname: the hostname where the client is deployed (defaults to the clients hostname) :param heartbeat_interval: the amount of time in seconds to negotiate with the connected producers to send heartbeats (requires nsqd 0.2.19+) :param output_buffer_size: size of the buffer (in bytes) used by nsqd for buffering writes to this connection :param output_buffer_timeout: timeout (in ms) used by nsqd before flushing buffered writes (set to 0 to disable). Warning: configuring clients with an extremely low (< 25ms) output_buffer_timeout has a significant effect on nsqd CPU usage (particularly with > 50 clients connected). 
:param tls_v1: enable TLS v1 encryption (requires nsqd 0.2.22+) :param tls_options: dictionary of options to pass to `ssl.wrap_socket() <http://docs.python.org/2/library/ssl.html#ssl.wrap_socket>`_ :param snappy: enable Snappy stream compression (requires nsqd 0.2.23+) :param deflate: enable deflate stream compression (requires nsqd 0.2.23+) :param deflate_level: configure the deflate compression level for this connection (requires nsqd 0.2.23+) :param sample_rate: take only a sample of the messages being sent to the client. Not setting this or setting it to 0 will ensure you get all the messages destined for the client. Sample rate can be greater than 0 or less than 100 and the client will receive that percentage of the message traffic. (requires nsqd 0.2.25+) :param auth_secret: a string passed when using nsq auth (requires nsqd 0.2.29+) :param user_agent: a string identifying the agent for this client in the spirit of HTTP (default: ``<client_library_name>/<version>``) (requires nsqd 0.2.25+) """ def __init__( self, address='127.0.0.1', tcp_port=4150, http_port=4151, timeout=60.0, client_id=None, hostname=None, heartbeat_interval=30, output_buffer_size=16 * 1024, output_buffer_timeout=250, tls_v1=False, tls_options=None, snappy=False, deflate=False, deflate_level=6, sample_rate=0, auth_secret=None, user_agent=USERAGENT, ): self.address = address self.tcp_port = tcp_port self.http_port = http_port self.timeout = timeout self.client_id = client_id or SHORTNAME self.hostname = hostname or HOSTNAME self.heartbeat_interval = 1000 * heartbeat_interval self.output_buffer_size = output_buffer_size self.output_buffer_timeout = output_buffer_timeout self.tls_v1 = tls_v1 self.tls_options = tls_options self.snappy = snappy self.deflate = deflate self.deflate_level = deflate_level self.sample_rate = sample_rate self.auth_secret = auth_secret self.user_agent = user_agent self.state = INIT self.last_response = time.time() self.last_message = time.time() self.last_ready = 0 
self.ready_count = 0 self.in_flight = 0 self.max_ready_count = 2500 self._frame_handlers = { nsq.FRAME_TYPE_RESPONSE: self.handle_response, nsq.FRAME_TYPE_ERROR: self.handle_error, nsq.FRAME_TYPE_MESSAGE: self.handle_message } @cached_property def on_message(self): """Emitted when a message frame is received. The signal sender is the connection and the `message` is sent as an argument. """ return blinker.Signal(doc='Emitted when a message frame is received.') @cached_property def on_response(self): """Emitted when a response frame is received. The signal sender is the connection and the `response` is sent as an argument. """ return blinker.Signal(doc='Emitted when a response frame is received.') @cached_property def on_error(self): """Emitted when an error frame is received. The signal sender is the connection and the `error` is sent as an argument. """ return blinker.Signal(doc='Emitted when a error frame is received.') @cached_property def on_finish(self): """Emitted after :meth:`finish`. Sent after a message owned by this connection is successfully finished. The signal sender is the connection and the `message_id` is sent as an argument. """ return blinker.Signal(doc='Emitted after the a message is finished.') @cached_property def on_requeue(self): """Emitted after :meth:`requeue`. Sent after a message owned by this connection is requeued. The signal sender is the connection and the `message_id`, `timeout` and `backoff` flag are sent as arguments. """ return blinker.Signal(doc='Emitted after the a message is requeued.') @cached_property def on_auth(self): """Emitted after the connection is successfully authenticated. The signal sender is the connection and the parsed `response` is sent as arguments. """ return blinker.Signal( doc='Emitted after the connection is successfully authenticated.' ) @cached_property def on_close(self): """Emitted after :meth:`close_stream`. Sent after the connection socket has closed. The signal sender is the connection. 
""" return blinker.Signal(doc='Emitted after the connection is closed.') @property def is_connected(self): """Check if the client is currently connected.""" return self.state == CONNECTED @property def is_starved(self): """Evaluate whether the connection is starved. This property should be used by message handlers to reliably identify when to process a batch of messages. """ return self.in_flight >= max(self.last_ready * 0.85, 1) def connect(self): """Initialize connection to the nsqd.""" if self.state not in (INIT, DISCONNECTED): return stream = Stream(self.address, self.tcp_port, self.timeout) stream.connect() self.stream = stream self.state = CONNECTED self.send(nsq.MAGIC_V2) def close_stream(self): """Close the underlying socket.""" if not self.is_connected: return self.stream.close() self.state = DISCONNECTED self.on_close.send(self) def send(self, data, async=False): try: return self.stream.send(data, async) except Exception: self.close_stream() raise def _read_response(self): try: size = nsq.unpack_size(self.stream.read(4)) return self.stream.read(size) except Exception: self.close_stream() raise def read_response(self): """Read an individual response from nsqd. :returns: tuple of the frame type and the processed data. 
""" response = self._read_response() frame, data = nsq.unpack_response(response) self.last_response = time.time() if frame not in self._frame_handlers: raise errors.NSQFrameError('unknown frame %d' % frame) frame_handler = self._frame_handlers[frame] processed_data = frame_handler(data) return frame, processed_data def handle_response(self, data): if data == nsq.HEARTBEAT: self.nop() self.on_response.send(self, response=data) return data def handle_error(self, data): error = errors.make_error(data) self.on_error.send(self, error=error) if error.fatal: self.close_stream() return error def handle_message(self, data): self.last_message = time.time() self.ready_count -= 1 self.in_flight += 1 message = Message(*nsq.unpack_message(data)) message.on_finish.connect(self.handle_finish) message.on_requeue.connect(self.handle_requeue) message.on_touch.connect(self.handle_touch) self.on_message.send(self, message=message) return message def handle_finish(self, message): self.finish(message.id) def handle_requeue(self, message, timeout, backoff): self.requeue(message.id, timeout, backoff) def handle_touch(self, message): self.touch(message.id) def finish_inflight(self): self.in_flight -= 1 def listen(self): """Listen to incoming responses until the connection closes.""" while self.is_connected: self.read_response() def check_ok(self, expected='OK'): frame, data = self.read_response() if frame == nsq.FRAME_TYPE_ERROR: raise data if frame != nsq.FRAME_TYPE_RESPONSE: raise errors.NSQException('expected response frame') if data != expected: raise errors.NSQException('unexpected response %r' % data) def upgrade_to_tls(self): self.stream.upgrade_to_tls(**self.tls_options) self.check_ok() def upgrade_to_snappy(self): self.stream.upgrade_to_snappy() self.check_ok() def upgrade_to_defalte(self): self.stream.upgrade_to_defalte(self.deflate_level) self.check_ok() def identify(self): """Update client metadata on the server and negotiate features. 
:returns: nsqd response data if there was feature negotiation, otherwise `None` """ self.send(nsq.identify({ # nsqd <0.2.28 'short_id': self.client_id, 'long_id': self.hostname, # nsqd 0.2.28+ 'client_id': self.client_id, 'hostname': self.hostname, # nsqd 0.2.19+ 'feature_negotiation': True, 'heartbeat_interval': self.heartbeat_interval, # nsqd 0.2.21+ 'output_buffer_size': self.output_buffer_size, 'output_buffer_timeout': self.output_buffer_timeout, # nsqd 0.2.22+ 'tls_v1': self.tls_v1, # nsqd 0.2.23+ 'snappy': self.snappy, 'deflate': self.deflate, 'deflate_level': self.deflate_level, # nsqd nsqd 0.2.25+ 'sample_rate': self.sample_rate, 'user_agent': self.user_agent, })) frame, data = self.read_response() if frame == nsq.FRAME_TYPE_ERROR: raise data if data == 'OK': return try: data = json.loads(data) except ValueError: self.close_stream() msg = 'failed to parse IDENTIFY response JSON from nsqd: %r' raise errors.NSQException(msg % data) self.max_ready_count = data.get('max_rdy_count', self.max_ready_count) if self.tls_v1 and data.get('tls_v1'): self.upgrade_to_tls() if self.snappy and data.get('snappy'): self.upgrade_to_snappy() elif self.deflate and data.get('deflate'): self.deflate_level = data.get('deflate_level', self.deflate_level) self.upgrade_to_defalte() if self.auth_secret and data.get('auth_required'): self.auth() return data def auth(self): """Send authorization secret to nsqd.""" self.send(nsq.auth(self.auth_secret)) frame, data = self.read_response() if frame == nsq.FRAME_TYPE_ERROR: raise data try: response = json.loads(data) except ValueError: self.close_stream() msg = 'failed to parse AUTH response JSON from nsqd: %r' raise errors.NSQException(msg % data) self.on_auth.send(self, response=response) return response def subscribe(self, topic, channel): """Subscribe to a nsq `topic` and `channel`.""" self.send(nsq.subscribe(topic, channel)) def publish_tcp(self, topic, data): """Publish a message to the given topic over tcp.""" 
        # Tail of publish_tcp — its `def` line lies before this chunk.
        # TODO(review): confirm against the full file.
        self.send(nsq.publish(topic, data))

    def multipublish_tcp(self, topic, messages):
        """Publish an iterable of messages to the given topic over tcp."""
        self.send(nsq.multipublish(topic, messages))

    def ready(self, count):
        """Indicate you are ready to receive `count` messages."""
        # Record both the last RDY value we asked for and the live
        # in-flight budget before sending the RDY frame.
        self.last_ready = count
        self.ready_count = count
        self.send(nsq.ready(count))

    def finish(self, message_id):
        """Finish a message (indicate successful processing)."""
        self.send(nsq.finish(message_id))
        self.finish_inflight()
        # Notify subscribers of the on_finish signal.
        self.on_finish.send(self, message_id=message_id)

    def requeue(self, message_id, timeout=0, backoff=True):
        """Re-queue a message (indicate failure to process).

        :param timeout: requeue delay passed to nsqd (default 0).
        :param backoff: not sent to nsqd; only forwarded to the
            on_requeue signal for listeners to act on.
        """
        self.send(nsq.requeue(message_id, timeout))
        self.finish_inflight()
        self.on_requeue.send(
            self, message_id=message_id, timeout=timeout, backoff=backoff
        )

    def touch(self, message_id):
        """Reset the timeout for an in-flight message."""
        self.send(nsq.touch(message_id))

    def close(self):
        """Indicate no more messages should be sent."""
        self.send(nsq.close())

    def nop(self):
        """Send no-op to nsqd.

        Used to keep connection alive.
        """
        self.send(nsq.nop())

    @property
    def base_url(self):
        """Base URL of this nsqd's HTTP API, e.g. ``http://host:port/``."""
        return 'http://%s:%s/' % (self.address, self.http_port)

    def publish_http(self, topic, data):
        """Publish a message to the given topic over http."""
        nsq.assert_valid_topic_name(topic)
        return self.http_post('/put', fields={'topic': topic}, body=data)

    def _validate_http_mpub(self, message):
        # Embedded newlines are rejected because multipublish_http joins
        # messages with "\n"; a newline inside a message would split it.
        if '\n' not in message:
            return message
        error = 'newlines are not allowed in http multipublish'
        raise errors.NSQException(error)

    def multipublish_http(self, topic, messages):
        """Publish an iterable of messages to the given topic over http."""
        nsq.assert_valid_topic_name(topic)
        return self.http_post(
            url='/mput',
            fields={'topic': topic},
            body='\n'.join(self._validate_http_mpub(m) for m in messages)
        )

    def create_topic(self, topic):
        """Create a topic."""
        nsq.assert_valid_topic_name(topic)
        return self.http_post('/create_topic', fields={'topic': topic})

    def delete_topic(self, topic):
        """Delete a topic."""
        nsq.assert_valid_topic_name(topic)
        return self.http_post('/delete_topic', fields={'topic': topic})

    def create_channel(self, topic, channel):
        """Create a channel for an existing topic."""
        nsq.assert_valid_topic_name(topic)
        nsq.assert_valid_channel_name(channel)
        return self.http_post(
            url='/create_channel',
            fields={'topic': topic, 'channel': channel},
        )

    def delete_channel(self, topic, channel):
        """Delete an existing channel for an existing topic."""
        nsq.assert_valid_topic_name(topic)
        nsq.assert_valid_channel_name(channel)
        return self.http_post(
            url='/delete_channel',
            fields={'topic': topic, 'channel': channel},
        )

    def empty_topic(self, topic):
        """Empty all the queued messages for an existing topic."""
        nsq.assert_valid_topic_name(topic)
        return self.http_post('/empty_topic', fields={'topic': topic})

    def empty_channel(self, topic, channel):
        """Empty all the queued messages for an existing channel."""
        nsq.assert_valid_topic_name(topic)
        nsq.assert_valid_channel_name(channel)
        return self.http_post(
            url='/empty_channel',
            fields={'topic': topic, 'channel': channel},
        )

    def pause_channel(self, topic, channel):
        """Pause message flow to all channels on an existing topic.

        Messages will queue at topic.
        """
        nsq.assert_valid_topic_name(topic)
        nsq.assert_valid_channel_name(channel)
        return self.http_post(
            url='/pause_channel',
            fields={'topic': topic, 'channel': channel},
        )

    def unpause_channel(self, topic, channel):
        """Resume message flow to channels of an existing, paused, topic."""
        nsq.assert_valid_topic_name(topic)
        nsq.assert_valid_channel_name(channel)
        return self.http_post(
            url='/unpause_channel',
            fields={'topic': topic, 'channel': channel},
        )

    def stats(self):
        """Return internal instrumented statistics."""
        return self.http_get('/stats', fields={'format': 'json'})

    def ping(self):
        """Monitoring endpoint.

        :returns: should return `"OK"`, otherwise raises an exception.
        """
        return self.http_get('/ping')

    def info(self):
        """Returns version information."""
        return self.http_get('/info')

    def publish(self, topic, data):
        """Publish a message.

        If connected, the message will be sent over tcp. Otherwise it
        will fall back to http.
        """
        if self.is_connected:
            return self.publish_tcp(topic, data)
        else:
            return self.publish_http(topic, data)

    def multipublish(self, topic, messages):
        """Publish an iterable of messages in one roundtrip.

        If connected, the messages will be sent over tcp. Otherwise it
        will fall back to http.
        """
        if self.is_connected:
            return self.multipublish_tcp(topic, messages)
        else:
            return self.multipublish_http(topic, messages)

    def __str__(self):
        # Identity string is "address:tcp_port".
        return self.address + ':' + str(self.tcp_port)

    def __hash__(self):
        # Hash and equality both derive from the "address:tcp_port" string,
        # keeping them consistent with each other.
        return hash(str(self))

    def __eq__(self, other):
        return isinstance(other, Nsqd) and str(self) == str(other)

    def __cmp__(self, other):
        # NOTE(review): __cmp__ is a Python 2 protocol; Python 3 ignores it.
        return hash(self) - hash(other)
3,855
0
538
1f2d1844b3eccb84918f80fdac47539a12562abf
950
py
Python
perfil/migrations/0002_auto_20211018_0921.py
Felipe-007/Ecommerce
f3003b3130709b12ae87b45867a8364f00fef2f1
[ "MIT" ]
null
null
null
perfil/migrations/0002_auto_20211018_0921.py
Felipe-007/Ecommerce
f3003b3130709b12ae87b45867a8364f00fef2f1
[ "MIT" ]
null
null
null
perfil/migrations/0002_auto_20211018_0921.py
Felipe-007/Ecommerce
f3003b3130709b12ae87b45867a8364f00fef2f1
[ "MIT" ]
null
null
null
# Generated by Django 3.2.7 on 2021-10-18 12:21 from django.conf import settings from django.db import migrations, models import django.db.models.deletion
29.6875
137
0.629474
# Generated by Django 3.2.7 on 2021-10-18 12:21 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil', name='usuario', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário'), ), ]
0
773
23