import urllib2
from BeautifulSoup import BeautifulSoup
# the text of a news story is usually hidden in a mess of html
# for ads, comments, and related stories. The trick here is to find
# a unique marker for where a story begins, and then grab all of the
# story's text and images (without any extra junk).
def NPR_news_parser(url):
"""rips story text and images from NPR news stories"""
story_text = " "
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html)
    # sensibly, NPR marks their stories with a "storytext" id
txt = soup.findAll(id='storytext')
imgs = txt[0].findAll('img')
for item in txt[0].contents:
        # the actual text is always in between either paragraph tags or blockquotes
if getattr(item, 'name', None)=='p' or getattr(item, 'name', None)=='blockquote':
story_text = story_text + unicode(item.prettify(), "utf-8")
i = 0
    # throw any images at the bottom of the purified html
for item in imgs:
if i%2==0:
story_text = story_text + unicode(item.prettify(), "utf-8")
i = i+1
return story_text
def reuters_news_parser(url):
"""rips story text and images from NPR news stories"""
story_text = " "
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html)
#Reuters, less sensible than NPR
txt = soup.findAll(attrs={"class" : "column2 gridPanel grid8"})
imgs = soup.findAll(id='articleImage')
    for item in txt[0].contents:
        if getattr(item, 'name', None) == 'p' or getattr(item, 'name', None) == 'blockquote':
            story_text = story_text + unicode(item.prettify(), "utf-8")
    i = 0
for item in imgs:
if i%2==0:
story_text = story_text + unicode(item.prettify(), "utf-8")
i = i+1
return story_text
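# Example usage (hypothetical URLs; each parser returns an HTML fragment of
# the story body, ready to drop into a cleaned-up page):
#   npr_html = NPR_news_parser('http://www.npr.org/2011/01/01/some-story')
#   reuters_html = reuters_news_parser('http://www.reuters.com/article/some-story')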
|
"""A set of native python APIs to create a network model and run
network simulations."""
from .circuit import Circuit # noqa: F401
from .demand import Demand # noqa: F401
from .interface import Interface # noqa: F401
from .model import Model # noqa: F401
from .exceptions import ModelException # noqa: F401
from .node import Node # noqa: F401
from .rsvp import RSVP_LSP # noqa: F401
from .srlg import SRLG # noqa: F401
from .utilities import * # noqa: F401,F403
from .parallel_link_model import Parallel_Link_Model # noqa: F401
from .master_model import MasterModel # noqa: F401
|
from datetime import datetime
from struct import unpack
from typing import Any
from typing import Dict
from typing import List
from typing import Union
import serial
class ChannelNotFoundError(Exception):
'''Raised when the logger channel is not found'''
pass
class ChannelError(Exception):
'''Raised when reading a channel that indicates an error'''
pass
class CallNotSuccessfullError(Exception):
    '''Raised when a call was not successful and the logger returned NAK'''
pass
class Combilog():
'''
    :logger_addr str|int: address of the logger as specified in the
        logger settings
    :port str: port the logger is connected to, e.g. com3 or /dev/ttyACM0
:baudrate int: baudrate as specified in the logger setup. Must be either
2400, 4800, 9600 (default), 19200, 38400
    :bytesize int: bytesize as specified in the logger setup. Must be either
5, 6, 7, 8 (default)
:parity str: parity as specified in the logger settings. Must be either
"N": None (default), "E": Even, "O": Odd
:stopbits int: number of stopbits as specified in the logger settings.
Must be either 1 or 2
:timeout float: timeout as specified in the logger settings in seconds
'''
def __init__(
self,
logger_addr: Union[str, int],
port: str,
baudrate: int = 9600,
bytesize: int = 8,
parity: str = 'N',
stopbits: int = 1,
timeout: float = 1.0,
) -> None:
BAUDRATE = (2400, 4800, 9600, 19200, 38400)
BYTESIZE = (5, 6, 7, 8)
PARITY = ('N', 'E', 'O')
STOPBITS = (1, 2)
# check input
if baudrate not in BAUDRATE:
raise ValueError(
f'baudrate must be {", ".join(str(i) for i in BAUDRATE)}'
f', not {baudrate}',
)
if bytesize not in BYTESIZE:
raise ValueError(
f'bytesize must be {", ".join(str(i) for i in BYTESIZE)}'
f', not {bytesize}',
)
if parity not in PARITY:
raise ValueError(
f'parity must be {", ".join(str(i) for i in PARITY)}'
f', not {parity}',
)
if stopbits not in STOPBITS:
raise ValueError(
f'stopbits must be {", ".join(str(i) for i in STOPBITS)}'
f', not {stopbits}',
)
if not isinstance(timeout, float):
raise TypeError(f'timeout must be float, not {type(timeout)}')
self.logger_addr = str(logger_addr)
# add leading 0 if only one digit
if len(self.logger_addr) == 1:
self.logger_addr = '0' + self.logger_addr
        # initialize the serial object but do not open it yet
self.ser = serial.Serial()
self.ser.port = port
self.ser.baudrate = baudrate
self.ser.bytesize = bytesize
self.ser.parity = parity
self.ser.stopbits = stopbits
self.ser.timeout = timeout
def authenticate(self, passwd: str) -> bool:
'''
:passwd str: password as specified via the logger's webserver
'''
with self.ser as ser:
telegram = f'${self.logger_addr}P{passwd}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp == b'\x06':
return True
else:
return False
def device_id(self) -> Dict[str, str]:
'''
get the device identification containing:
vendor name (e.g. Friedrichs), model name (e.g. COM1020)
    hw_revision (hardware ver), sw_revision (software/firmware ver)
'''
with self.ser as ser:
telegram = f'${self.logger_addr}V\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
info = resp.decode('latin-1')[1:]
dev_id = {
'vendor_name': info[0:10],
'model_name': info[10:17],
'hw_revision': info[18:23],
'sw_revision': info[24:28],
}
return dev_id
def device_info(self) -> Dict[str, Union[str, int]]:
'''
get the device information containing:
location, serial number, number of channels
'''
with self.ser as ser:
telegram = f'${self.logger_addr}S\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
info = resp.decode('latin-1')[1:]
dev_info = {
'location': info[:20].strip(),
'serial_number': int(info[20:26]),
'nr_channels': int(info[26:].strip()),
}
return dev_info
def status_info(self) -> Dict[str, str]:
'''
get the device status and see if there are any errors
read about the codes in the manual pp 124-125
'''
with self.ser as ser:
telegram = f'${self.logger_addr}Z\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
info = resp.decode('latin-1')[1:]
status_info = {
'channel_status': info[:8],
'module_status': info[9:],
}
return status_info
def get_channel_info(self, channel_nr: str) -> Dict[str, str]:
'''
:channel_nr str: number of the channel to get information from
must be from '01' to '20' (internal channels)
or '80' to 'BB' (external channels)
            values are hexadecimal and must be uppercase
'''
with self.ser as ser:
telegram = f'${self.logger_addr}B{channel_nr}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
success = (resp != b'\x15' and resp != b'')
if success:
info = resp.decode('latin-1')[1:] # skip '=...'
channel_info = {
'channel_type': _channel_type_to_txt(int(info[0])),
'channel_notation': info[1:20].strip(),
'data_format': _data_format_to_txt(int(info[21])),
'field_length': int(info[22]),
'decimals': int(info[23]),
'unit': info[24:29].strip(),
'host_input': _host_input_possible(int(info[30])),
'type_of_calculation': _channel_calc_to_txt(int(info[31])),
}
return channel_info
else:
raise ChannelNotFoundError(f'channel {channel_nr} was not found')
def get_channel_list(self) -> List[str]:
        # internal channels '01' to '20', zero-padded to two digits
        INT_CHANNEL = [str(i).zfill(2) for i in range(1, 21)]
int_channel_list = []
for j in INT_CHANNEL:
try:
info = self.get_channel_info(j)
int_channel_list.append(info['channel_notation'])
except ChannelNotFoundError:
pass
# TODO: get external channels
# for some reason an empty external channel has len 31 not 32
# as specified in the manual so this is broken
"""
EXT_CHANNEL = [str(hex(i))[2:].upper() for i in list(range(128, 188))]
ext_channel_list = []
for i in EXT_CHANNEL:
try:
info = self.get_channel_info(i)
ext_channel_list.append(info['channel_notation'])
except:
pass
"""
channel_list = int_channel_list
return channel_list
def read_channel(self, channel_nr: str):
with self.ser as ser:
telegram = f'${self.logger_addr}R{channel_nr}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
channel = resp.decode('latin-1')[1:]
if channel.startswith('E'):
raise ChannelError(
f'Cannot read channel {channel_nr}, '
'the channel indicates an error',
)
return channel.strip()
def write_channel(
self,
channel_nr: str,
channel_value: Union[float, int, str],
) -> None:
'''
Write to a channel --> set the channel to a specific value
a previous Authentication is required! (self.authenticate(passwd=...))
'''
with self.ser as ser:
telegram = (
f'${self.logger_addr}W{channel_nr}{channel_value}\r'
).encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
                f'Unable to write to channel {channel_nr}. '
'Did you authenticate and does the channel exist?',
)
def reset_channel(self, channel_nr: str) -> None:
'''
reset a channel e.g. reset the min/max value.
a previous Authentication is required! (self.authenticate(passwd=...))
:channel_nr: must be from '01' to '20' (internal channels)
or '80' to 'BB' (external channels)
'''
with self.ser as ser:
telegram = f'${self.logger_addr}D{channel_nr}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
f'Unable to reset channel {channel_nr}. '
'Did you authenticate and does the channel exist?',
)
def pointer_to_start(self, pointer: Union[str, int]) -> None:
'''sets a pointer to start'''
if str(pointer) == '1':
call = 'C'
elif str(pointer) == '2':
call = 'c'
else:
raise ValueError(f'pointer must be either 1 or 2, not {pointer}')
with self.ser as ser:
telegram = f'${self.logger_addr}{call}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
                f'Unable to set pointer {pointer}. Did you authenticate?',
)
def read_event(
self,
pointer: Union[str, int],
) -> Dict[str, List[float]]:
'''
read the event at the position of the pointer
returns a dictionary with the timestamp as key
if there are no events an empty dict is returned
'''
if str(pointer) == '1':
call = 'E'
elif str(pointer) == '2':
call = 'e'
else:
raise ValueError(f'pointer must be either 1 or 2, not {pointer}')
with self.ser as ser:
telegram = f'${self.logger_addr}{call}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if len(resp) > 3:
events = resp.decode('latin-1')[1:].split(';')
# the first char is the address and therefore not needed
date = datetime.strptime(events[0][1:], '%y%m%d%H%M%S')
# remove carriage return at the end
            # and convert from IEEE Std 754 short real format
event = {
str(date): [_hexIEE_to_dec(i) for i in events[1:-1]],
}
return event
else:
return {}
def repeat_read_event(
self,
pointer: Union[str, int],
) -> Dict[str, List[float]]:
'''
read an event from the logger's storage
'''
if str(pointer) == '1':
call = 'F'
elif str(pointer) == '2':
call = 'f'
else:
raise ValueError(f'pointer must be either 1 or 2, not {pointer}')
with self.ser as ser:
telegram = f'${self.logger_addr}{call}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if len(resp) > 3:
events = resp.decode('latin-1')[1:].split(';')
# the first char is the address and therefore not needed
date = datetime.strptime(events[0][1:], '%y%m%d%H%M%S')
# remove carriage return at the end
            # and convert from IEEE Std 754 short real format
event = {
str(date): [_hexIEE_to_dec(i) for i in events[1:-1]],
}
return event
else:
return {}
def pointer_to_date(
self,
pointer: Union[str, int],
date: Union[str, datetime],
) -> None:
'''
        sets the given pointer to a specific date
        :date: str with format '%y%m%d%H%M%S' or a datetime.datetime object
'''
if str(pointer) == '1':
call = 'C'
elif str(pointer) == '2':
call = 'c'
else:
raise ValueError(f'pointer must be either 1 or 2, not {pointer}')
if isinstance(date, datetime):
date = date.strftime('%y%m%d%H%M%S')
if len(date) != 12:
raise ValueError(
f'date must have len 12, not {len(date)} '
'and must have the format %y%m%d%H%M%S',
)
with self.ser as ser:
telegram = f'${self.logger_addr}{call}{date}\r'.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
                f'Unable to set pointer {pointer} to {date}. Did you '
                'authenticate? Does "date" have the format %y%m%d%H%M%S '
                'or is it a datetime.datetime object?',
)
def pointer_to_pos(
self,
pointer: Union[str, int],
position: str,
) -> None:
'''
        sets the given pointer to a specific position
'''
if str(pointer) == '1':
call = 'C'
elif str(pointer) == '2':
call = 'c'
else:
raise ValueError(f'pointer must be either 1 or 2, not {pointer}')
with self.ser as ser:
telegram_str = f'${self.logger_addr}{call}{position}\r'
telegram = telegram_str.encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
                f'Unable to set pointer {pointer} to position {position}. '
'Did you authenticate?',
)
    def set_datetime(
        self,
        date: Union[str, datetime, None] = None,
    ) -> None:
        '''
        Set the logger's clock (defaults to the current computer time);
        a previous authentication is required!
        '''
        # datetime.now() as a default argument would be evaluated only once,
        # at class definition time, so resolve it per call instead
        if date is None:
            date = datetime.now()
        if isinstance(date, datetime):
            date = date.strftime('%y%m%d%H%M%S')
if len(date) != 12:
raise ValueError(
f'date must have len 12, not {len(date)} '
'and must have the format %y%m%d%H%M%S',
)
with self.ser as ser:
telegram = (f'${self.logger_addr}G{date}\r').encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
f'Unable to set the date to {date}. '
'Did you authenticate?',
)
def read_datetime(self) -> datetime:
'''read the time and return it as a datetime.datetime object'''
with self.ser as ser:
telegram = (f'${self.logger_addr}H\r').encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
resp_str = resp.decode('latin-1')[1:-1]
logger_datetime = datetime.strptime(resp_str, '%y%m%d%H%M%S')
return logger_datetime
def get_rate(self) -> Dict[str, int]:
'''get the measuring and averaging rate in seconds'''
with self.ser as ser:
telegram = (f'${self.logger_addr}X\r').encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
rates_str = resp.decode('latin-1')[1:]
rates = {
'measuring_rate': int(rates_str[0:2]),
'averaging_interval': int(rates_str[2:7]),
}
return rates
def set_rate(
self,
measuring_rate: int,
averaging_interval: int,
) -> None:
'''set the logger's measuring and averaging rate'''
        if measuring_rate > 99:
            raise ValueError('Cannot set a measuring rate higher than 99')
if averaging_interval > 43200:
raise ValueError('max averaging interval rate is 43200 (12h)')
        # zero-pad to the fixed telegram field widths (2 and 5 digits)
        measuring_rate_str = str(measuring_rate).zfill(2)
        averaging_interval_str = str(averaging_interval).zfill(5)
rate = f'{measuring_rate_str}{averaging_interval_str}'
with self.ser as ser:
telegram = (f'${self.logger_addr}Y{rate}\r').encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
f'Unable to set the rate to {measuring_rate} '
f'and {averaging_interval}. Did you authenticate?',
)
def delete_memory(self) -> None:
        '''deletes the logger's storage; this cannot be undone!'''
with self.ser as ser:
telegram = (f'${self.logger_addr}C.ALL\r').encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
"Unable to delete the logger's storage. "
'Did you authenticate?',
)
def get_nr_events(self) -> int:
'''
get the number of logs available with the currently set pointer
'''
with self.ser as ser:
telegram = (f'${self.logger_addr}N\r').encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
logs = int(resp.decode('latin-1')[1:])
return logs
def transparent_mode(self, state: bool) -> None:
'''switch the transparent mode on or off'''
        if state:
            call = 'T1'
        else:
            call = 'T2'
with self.ser as ser:
telegram = (f'${self.logger_addr}{call}\r').encode('latin-1')
ser.write(telegram)
resp = ser.read_until(b'\r')
if resp != b'\x06':
raise CallNotSuccessfullError(
                'Unable to change the state of the transparent mode. '
'Did you authenticate?',
)
def read_logger(
self,
pointer: Union[str, int],
verbose: bool = False,
output_type: str = 'dict',
) -> Union[Dict[str, List[float]], List[Union[Any]]]:
'''
reads all bookings starting from the set pointer
:pointer str|int: the pointer from where to read
:verbose bool: print output to the stdout
:output_type str: output as a "dict" which can be converted to a pd df
output as "list" to be written to csv using the csv module
depending on the number of logs this can take a while
'''
# get number of logs
logs = self.get_nr_events()
# read all events
i = 0
events = {}
while i < logs:
if verbose:
print(f'reading event {i+1} of {logs}')
event = self.read_event(pointer)
events.update(event)
i += 1
if output_type == 'dict':
return events
elif output_type == 'list':
            list_events = []
            for timestamp, values in events.items():
                list_events.append([timestamp] + list(values))
            return list_events
else:
raise ValueError(
f'output_type must be dict or list, not {output_type}',
)
def _channel_type_to_txt(channel_type: int) -> str:
CHANNEL_TYPES = {
0: 'empty channel (EM)',
1: 'analogue input channel (AR)',
        2: 'arithmetic channel (AR)',
3: 'digital output channel (DO)',
4: 'digital input channel (DI)',
5: 'setpoint channel (VO)',
6: 'alarm channel (AL)',
}
try:
return CHANNEL_TYPES[channel_type]
except KeyError:
return 'unknown channel type'
def _channel_calc_to_txt(calc_type: int) -> str:
CALC_TYPES = {
0: 'normal calculation of average value',
1: 'calculation of average value with wind direction',
2: 'calculation of the sum over the averaging interval',
3: 'continuous sum',
4: 'vectorial average for wind velocity',
5: 'vectorial average for wind direction',
}
try:
return CALC_TYPES[calc_type]
except KeyError:
return 'unknown calculation type'
def _data_format_to_txt(data_format: int) -> str:
DATA_FORMATS = {
0: 'no format',
1: 'bool',
2: 'integer',
3: 'real',
4: 'set 8',
}
try:
return DATA_FORMATS[data_format]
except KeyError:
return 'unknown data format'
def _host_input_possible(host_input: int) -> Union[bool, str]:
if host_input == 0:
return True
elif host_input == 1:
return False
else:
return 'unknown'
def _hexIEE_to_dec(hexval: str, digits: int = 2) -> float:
'''decode the data read from the logger's storage drive'''
dec = round(unpack('!f', bytes.fromhex(hexval))[0], digits)
return dec
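# Usage sketch (assumed port and logger address; requires a connected
# Combilog data logger):
#   logger = Combilog(logger_addr=1, port='/dev/ttyUSB0')
#   logger.authenticate(passwd='secret')  # needed for write/reset calls
#   logger.pointer_to_start(1)
#   bookings = logger.read_logger(pointer=1, output_type='dict')
# Stored readings arrive as IEEE 754 single-precision hex strings, e.g.
#   _hexIEE_to_dec('41200000')  # -> 10.0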
|
#!/usr/bin/env python
import unittest
from asyncdnspy.tcp_client import TCPClient
from asyncdnspy.dns_raw_message import DNSRawMessage
from asyncdnspy.dnspy_enum import RecordType, SocketType
class TCPClientTest(unittest.TestCase):
def test_send(self):
tcp_client = TCPClient('8.8.8.8', 53)
tcp_client.connect()
dns_raw_message = DNSRawMessage()
data = dns_raw_message.query('google.com', RecordType.a, socket_type=SocketType.tcp)
result = tcp_client.send(data)
self.assertNotEqual(result, -1)
tcp_client.close()
def test_receive(self):
tcp_client = TCPClient('8.8.8.8', 53)
tcp_client.connect()
dns_raw_message = DNSRawMessage()
data = dns_raw_message.query('google.com', RecordType.a, socket_type=SocketType.tcp)
result = tcp_client.send(data)
self.assertNotEqual(result, -1)
response = tcp_client.receive()
self.assertTrue(len(response) > 0)
tcp_client.close()
if __name__ == '__main__':
    unittest.main()
|
from pydantic.version import VERSION as PYDANTIC_VERSION
PYDANTIC2 = PYDANTIC_VERSION.startswith("2")
__all__ = [
"BaseModel",
"ValidationError",
"Field",
"root_validator",
"AnyUrl",
"BaseSettings",
"EmailStr",
"validator",
]
if PYDANTIC2:
from pydantic.v1 import (
AnyUrl,
BaseModel,
BaseSettings,
EmailStr,
Field,
ValidationError,
root_validator,
validator,
)
else:
from pydantic import ( # type: ignore[no-redef,assignment]
AnyUrl,
BaseModel,
BaseSettings,
EmailStr,
Field,
ValidationError,
root_validator,
validator,
)
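# Usage sketch: downstream modules import the v1-style names from this shim
# (assuming it is importable as `compat`) and run unchanged under both
# pydantic 1.x and 2.x:
#   from compat import BaseModel, Field, validator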
|
from ex107 import moeda
p = float(input('Enter a number: '))
moeda.resumo(p, 80, 35)
|
from ftplib import FTP
import os
host = "pcredit.ayz.pl"
user = "pcredit"
password = "OE06jiai"
File2Send = "C:\\Users\\sjaku\\Desktop\\linuxpl\\"
MacOSx2Send = "//Users//szymon//Downloads//[Dla_sklepu]//banery//slider_baner//"
FTP_Server = "//domains//kreatywneklocki.pl//public_html//img//lego//"
ftp = FTP(host)
ftp.login(user, password)
ftp.cwd(FTP_Server)
dirList = os.listdir(MacOSx2Send)
def deleteFileFromFTP():
for f in dirList:
#print FTP_Server + f
ftp.delete(FTP_Server + f)
print "FTP file deleted: " + FTP_Server + f
print "-" * 70
def createDirectory():
dir = "lego"
ftp.mkd(dir)
print "FTP directory created: " + dir
def deleteDirectoryFromFTP():
ftp.rmd(FTP_Server)
print "FTP directory deleted: " + FTP_Server
print "-" * 70
def uploadFile():
    for f in dirList:
        # upload each local file in binary mode; the context manager
        # closes the handle even if storbinary raises
        with open(MacOSx2Send + f, "rb") as fh:
            ftp.storbinary("STOR " + f, fh)
deleteDirectoryFromFTP()
ftp.quit()
|
import unittest
import doctest
from zeam.form.ztk.testing import FunctionalLayer
def test_suite():
optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
    globs = {}
    suite = unittest.TestSuite()
    for filename in ['fields.txt', 'validation.txt']:
        test = doctest.DocFileSuite(
            filename,
            optionflags=optionflags,
            globs=globs)
test.layer = FunctionalLayer
suite.addTest(test)
return suite
|
import random
print("'Spanzuratoarea' cu cifre.")
print("Doriti sa jucati? ")
dorinta = input()
bucla3 = 1
if dorinta == "da":
while bucla3 == 1:
        print('Guess the first digit:')
incercare = 10
nums_0 = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
nums_1 = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
nums_2 = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
x = random.choice(nums_0)
y = random.choice(nums_1)
z = random.choice(nums_2)
print(str(x), str(y), str(z))
print('_ ' * 3)
ghicire_1 = input()
bucla = 1
bucla1 = 1
bucla2 = 1
while bucla == 1:
if ghicire_1 == str(x):
print(str(x) + '_ ' * 2)
print("Ghicste a doua cifra:")
ghicire_2 = input()
while bucla1 == 1:
if ghicire_2 == str(y):
print(str(x) + str(y) + '_')
print("Ghiceste a treia cifra: ")
ghicire_3 = input()
while bucla2 == 1:
if ghicire_3 == str(z):
print(str(x)+str(y)+str(z))
bucla = 0
bucla1 = 0
bucla2 = 0
else:
print("Mai incearca")
incercare = incercare-1
print("Ti-au mai ramas", incercare, "vieti")
if incercare == 0:
bucla = 0
bucla1 = 0
bucla2 = 0
print("Game over")
else:
print("Tasteaza o cifra: ")
ghicire_3 = input()
else:
print("Mai incearca: ")
incercare = incercare-1
print("Ti-au mai ramas", incercare, "vieti")
if incercare == 0:
bucla = 0
bucla1 = 0
bucla2 = 0
print("Game over")
else:
print("Tasteaza o cifra: ")
ghicire_2 = input()
elif ghicire_1 != str(x):
print("Mai incearca")
incercare = incercare-1
print("Ti-au mai ramas", incercare, "vieti")
if incercare == 0:
bucla = 0
bucla1 = 0
bucla2 = 0
print("Game over")
else:
print("Tasteaza o cifra: ")
ghicire_1 = input()
print("Doriti sa mai jucati? ")
dorinta2 = input()
if dorinta2 == "nu":
bucla3 = 0
else:
continue
else:
print("ok")
print("va multumim!")
|
from django.db import models
from ..users.models import User
from ..authors.models import Author
# Create your models here.
class BookManager(models.Manager):
def book_valid(self, postData):
errors = {}
if len(postData['title']) < 1: # null
errors['bad_title'] = "Oops, you forgot to enter the title of the book."
if 'author' not in postData: # null
errors['bad_author'] = "Oops, you forgot to choose an author. If the author is not available, add one."
if len(postData['description']) < 1: # null
errors['no_desc'] = "Oops, you forgot to describe the book."
elif len(postData['description']) < 5: # not long enough
errors['bad_desc'] = "Hmmm, that doesn't tell us much about the book. Description should be a minimum of 5 characters."
return errors
def create_book(self, postData, user_id):
# get the user
user = User.objects.get(id=user_id)
# get the author
author = Author.objects.get(id=postData['author'])
# create the book
book = self.create(title=postData['title'], author=author, description=postData['description'], uploaded_by_id=user)
# add the relationship
book.users_who_like.add(user)
return book
class Book(models.Model):
title = models.CharField(max_length=255)
    author = models.ForeignKey(Author, related_name="books", on_delete=models.CASCADE)
description = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
    uploaded_by_id = models.ForeignKey(User, related_name="uploaded_books", on_delete=models.CASCADE)
users_who_like = models.ManyToManyField(User, related_name="liked_books")
objects = BookManager()
def __repr__(self):
return f'<Book {self.id} - Title: {self.title}>'
|
import itertools
DEFAULT_SIGN = '_'
def get_player(current_player):
if current_player == 'X':
print 'Player 1, your sign is %s: ' % current_player
else:
print 'Player 2, your sign is %s: ' % current_player
def get_field(board, current_player):
while True:
try:
row = int(raw_input("Horisontal Row: ")) - 1
column = int(raw_input("Vertical Col: ")) - 1
            if row in range(0, 3) and column in range(0, 3):
                if board[row][column] == DEFAULT_SIGN:
                    board[row][column] = current_player
                    return board[row][column]
                else:
                    print "Incorrect choice, please try again..."
            else:
                print "There is no such row or column, please try again"
except(ValueError, NameError, KeyboardInterrupt, UnboundLocalError, TypeError):
print "You can not input characters or white space"
def print_board(board):
for row in board:
print " ".join(row)
def print_winner(current_player):
if current_player == 'X':
print 'Player 1, WON!!!!!!'
else:
print 'Player 2, WON!!!!!!'
def get_winner(board, current_player, attempts):
new_board = []
for a in board:
new_board.extend(a)
'''Combinations to WIN below'''
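    # slices of the flattened board: rows [0:3], [3:6], [6:9];
    # columns [0:7:3], [1:8:3], [2:9:3]; diagonals [::4] (0,4,8) and [2:8:2] (2,4,6)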
combinations = [new_board[0:3], new_board[3:6], new_board[6:9], new_board[0:7:3], new_board[1:8:3],
new_board[2:9:3], new_board[::4], new_board[2:8:2]]
for combination in combinations:
if len(set(combination)) == 1 and current_player in combination:
return print_winner(current_player), exit(0)
if len(set(combination)) >= 2 and DEFAULT_SIGN not in combination:
            if attempts == 8:
                print 'There are no winning combinations left'
                return exit(0)
def main():
sign = ('X', 'O')
board = [['_', '_', '_'],
['_', '_', '_'],
['_', '_', '_']]
attempts = 0
for current_player in itertools.cycle(sign):
get_player(current_player)
get_field(board, current_player)
print_board(board)
attempts += 1
        if attempts >= 5:
            get_winner(board, current_player, attempts)
if __name__ == '__main__':
main()
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.Homepage, name='home'),
    path('count/', views.countfunction, name='COUNTIT'),  # 'count/' is the URL path
path('about/',views.aboutpage, name='about')
]
|
from pico2d import *
import game_framework
from boy import Boy
# from enum import Enum
# BOYS_COUNT = 1000
class Grass:
def __init__(self):
self.image = load_image('../res/grass.png')
print(self.image)
def draw(self):
self.image.draw(400, 30)
def handle_events():
global boy
global span
events = get_events()
for e in events:
if e.type == SDL_QUIT:
game_framework.quit()
elif (e.type, e.key) == (SDL_KEYDOWN, SDLK_ESCAPE):
game_framework.pop_state()
else:
boy.handle_event(e)
def enter():
global boy, grass
boy = Boy()
grass = Grass()
# def main():
# global running
# enter()
# while running:
# handle_events()
# print(running)
# update()
# draw()
# exit()
def draw():
global grass, boy
clear_canvas()
grass.draw()
boy.draw()
update_canvas()
def update():
boy.update()
delay(0.03)
# fill here
def exit():
pass
if __name__ == '__main__':
import sys
current_module = sys.modules[__name__]
open_canvas()
game_framework.run(current_module)
close_canvas()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#######################
import os
import sys
import indigo
import math
import decimal
import datetime
import socket
import subprocess
from ghpu import GitHubPluginUpdater
class Plugin(indigo.PluginBase):
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
self.updater = GitHubPluginUpdater(self)
self.apiVersion = "2.0"
self.localAddress = ""
# create empty device list
self.deviceList = {}
def __del__(self):
indigo.PluginBase.__del__(self)
###################################################################
# Plugin
###################################################################
def deviceStartComm(self, device):
self.debugLog(u"Started device: " + device.name)
device.stateListOrDisplayStateIdChanged()
self.addDeviceToList (device)
def deviceStopComm(self,device):
if device.id in self.deviceList:
self.debugLog("Stoping device: " + device.name)
self.deleteDeviceFromList(device)
def deviceCreated(self, device):
        self.debugLog(u'Created device "' + device.name + u'"')
pass
def addDeviceToList(self,device):
if device:
if device.id not in self.deviceList:
propsAddress = device.pluginProps["address"]
propsAddress = propsAddress.strip()
propsAddress = propsAddress.replace (' ','')
pingNextTime = datetime.datetime.now() - datetime.timedelta(seconds=10)
pingInterval = device.pluginProps["pingInterval"]
self.deviceList[device.id] = {'ref':device, 'address':propsAddress, 'pingInterval':pingInterval, 'pingNextTime': pingNextTime}
def deleteDeviceFromList(self, device):
if device:
if device.id in self.deviceList:
del self.deviceList[device.id]
def startup(self):
self.loadPluginPrefs()
self.debugLog(u"startup called")
self.updater.checkForUpdate()
def shutdown(self):
self.debugLog(u"shutdown called")
def getDeviceConfigUiValues(self, pluginProps, typeId, devId):
valuesDict = pluginProps
errorMsgDict = indigo.Dict()
if "pingInterval" not in valuesDict:
valuesDict["pingInterval"] = 300
return (valuesDict, errorMsgDict)
def validateDeviceConfigUi(self, valuesDict, typeId, devId):
self.debugLog(u"validating device Prefs called")
self.debugLog(u"validating IP Address")
ipAdr = valuesDict[u'address']
if ipAdr.count('.') != 3:
errorMsgDict = indigo.Dict()
errorMsgDict[u'address'] = u"This needs to be a valid IP address."
return (False, valuesDict, errorMsgDict)
if self.validateAddress (ipAdr) == False:
errorMsgDict = indigo.Dict()
errorMsgDict[u'address'] = u"This needs to be a valid IP address."
return (False, valuesDict, errorMsgDict)
pingInterval = valuesDict[u'pingInterval']
try:
iInterval = int (pingInterval)
if iInterval < 1:
errorMsgDict = indigo.Dict()
errorMsgDict[u'pingInterval'] = u"This needs to be > 0."
                return (False, valuesDict, errorMsgDict)
except Exception, e:
errorMsgDict = indigo.Dict()
errorMsgDict[u'pingInterval'] = u"This needs to be a valid number."
            return (False, valuesDict, errorMsgDict)
return (True, valuesDict)
def validatePrefsConfigUi(self, valuesDict):
return (True, valuesDict)
def closedDeviceConfigUi(self, valuesDict, userCancelled, typeId, devId):
if userCancelled is False:
indigo.server.log ("Device preferences were updated.")
device = indigo.devices[devId]
self.deleteDeviceFromList(device)
self.addDeviceToList (device)
def closedPrefsConfigUi ( self, valuesDict, UserCancelled):
# If the user saves the preferences, reload the preferences
if UserCancelled is False:
indigo.server.log ("Preferences were updated, reloading Preferences...")
self.loadPluginPrefs()
def loadPluginPrefs(self):
# set debug option
if 'debugEnabled' in self.pluginPrefs:
self.debug = self.pluginPrefs['debugEnabled']
else:
self.debug = False
def validateAddress (self,value):
try:
socket.inet_aton(value)
except socket.error:
return False
return True
###################################################################
# Concurrent Thread.
###################################################################
def runConcurrentThread(self):
self.debugLog(u"Starting Concurrent Thread")
try:
while self.stopThread == False:
indigoDevice = None
try:
todayNow = datetime.datetime.now()
for pingDevice in self.deviceList:
pingNextTime = self.deviceList[pingDevice]['pingNextTime']
if pingNextTime <= todayNow:
pingInterval = self.deviceList[pingDevice]['pingInterval']
pingNextTime = todayNow + datetime.timedelta(seconds=int(pingInterval))
self.deviceList[pingDevice]['pingNextTime'] = pingNextTime
indigoDevice = self.deviceList[pingDevice]['ref']
self.deviceRequestStatus(indigoDevice)
except Exception,e:
self.errorLog (u"Error: " + str(e))
pass
self.sleep(0.300)
except self.StopThread:
pass
except Exception, e:
self.errorLog (u"Error: " + str(e))
pass
def stopConcurrentThread(self):
self.stopThread = True
self.debugLog(u"stopConcurrentThread called")
###################################################################
# Custom Action callbacks
###################################################################
def markForRequestStatus(self,device):
if device.id in self.deviceList:
self.deviceList[device.id]['pingNextTime'] = datetime.datetime.now() - datetime.timedelta(seconds=10)
def deviceRequestStatus(self,dev):
newValue = self.pingDevice(dev)
if not newValue == dev.states['onOffState']:
dev.updateStateOnServer(key='onOffState', value=newValue)
if newValue:
indigo.server.log (dev.name + u" now is up!")
else:
indigo.server.log (dev.name + u" now is down!")
pass
def pingDevice (self,device):
if device.id in self.deviceList:
pingAddress = self.deviceList[device.id]['address']
for x in range(0, 3):
if self.pingAddress(pingAddress):
return True
self.sleep (0.100)
return False
def pingAddress (self, address):
self.debugLog(u"Pinging address " + address + " ...")
try:
ret = 0
ret = subprocess.call(["/sbin/ping", "-c", "4", "-t", "2", address], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
if ret == 0:
self.debugLog(address + u": is reachable.")
return True
else:
if ret == 1:
self.debugLog(address + u": host not found")
elif ret == 2:
self.debugLog(address + u": ping timed out")
self.debugLog(u"Ping result: Failure")
return False
except Exception, e:
return False
def actionControlSensor(self, action, dev):
if action.sensorAction == indigo.kSensorAction.RequestStatus:
indigo.server.log ('sent "' + dev.name + '" status request')
self.markForRequestStatus(dev)
#self.deviceRequestStatus(dev)
########################################
# Actions Methods
########################################
def silentStatusRequest (self, pluginAction, device):
self.markForRequestStatus(device)
#self.deviceRequestStatus (device)
#pass
########################################
# Menu Methods
########################################
def toggleDebugging(self):
if self.debug:
indigo.server.log("Turning off debug logging")
self.pluginPrefs["debugEnabled"] = False
else:
indigo.server.log("Turning on debug logging")
self.pluginPrefs["debugEnabled"] = True
self.debug = not self.debug
return
def checkForUpdates(self):
update = self.updater.checkForUpdate()
if (update != None):
pass
return
def updatePlugin(self):
self.updater.update()
|
class Persona:
_siguiente = 0
    def __init__(self, nombre="Invitado", activo=True):
        Persona._siguiente = Persona._siguiente + 1
        self.__codigo = Persona._siguiente
        self.__nombre = self.__nombre_Mayuscula(nombre)
        self.activo = activo
    # private via leading double underscores (name mangling)
@property
def nombre(self):
return self.__nombre
@nombre.setter
def nombre(self, nom):
self.__nombre = nom
@property
def codigo(self):
return self.__codigo
@codigo.setter
def codigo(self,cod):
self.__codigo = cod
def siguiente(self):
Persona._siguiente = Persona._siguiente + 1
return Persona._siguiente
    def __nombre_Mayuscula(self, nomb):
        return nomb.upper()
def mostrar_datos(self):
return "Codigo:{} - Nombre:{} -Activo:{}".format(self.codigo, self.nombre, self.activo)
class Empleado(Persona):
    def __init__(self, nombre="Invitado", activo=True):
        # minimal completion of the unfinished subclass: delegate to Persona
        super().__init__(nombre, activo)
per1 = Persona()
print(per1.codigo)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 15:14:17 2019
@author: KelvinOX25
"""
import pyvisa
import time
import logging
import numpy as np
import struct
from qcodes import VisaInstrument, validators as vals
class Keithley_6220(VisaInstrument):
def __init__(self, name, address, **kw):
super().__init__(name, address, **kw)
self.add_parameter('I',
label='Current',
unit = 'A',
get_cmd = 'SOUR:CURR?',
set_cmd = 'SOUR:CURR:AMPL '+'{}',
vals=vals.Numbers(-10e-6, 10e-6),
set_parser=float,
get_parser=float)
self.add_parameter('Irange',
                           label='Current range',
unit = 'A',
                           get_cmd = 'SOUR:CURR:RANG?',
set_cmd = 'SOUR:CURR:RANG '+'{}',
vals=vals.Numbers(1e-9, 10e-6),
set_parser=float,
get_parser=float)
def init(self):
lines = ['*RST;',
'SOUR:CURR:RANG:AUTO OFF',
'SOUR:CURR:RANG 1e-6',
'SOUR:CURR:COMP 21',
'SOUR:CURR:AMPL 0',
'OUTP ON']
for l in lines:
self.write_raw(l)
time.sleep(0.2)
def set_R_Attn( self, R_bias, Attn ):
pass
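# Usage sketch (assumed VISA address; requires a connected Keithley 6220):
#   src = Keithley_6220('k6220', 'GPIB0::12::INSTR')
#   src.init()        # reset, fix the 1 uA range, output on
#   src.I.set(5e-7)   # source 0.5 uA
#   print(src.I.get())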
##Testing our codes
#from qcodes.instrument.base import Instrument
#try:
# Instrument.close_all()
#except KeyError:
# pass
#except NameError:
# pass
#
#gen = Tektronix_AWG3252('gen', 'TCPIP0::192.168.13.32::inst0::INSTR')
#gen.init()
#gen.V.set(0.324)
|
import pandas as pd
from utils import Example, Indexer, pos
import random
def spooky_authorship_data(args, test_split=0.7, max_char_length=500, postags=False):
with open("data/spooky-authorship/train.csv", encoding='utf-8') as f:
train_df = pd.read_csv(f)
    # applymap returns a new DataFrame, so the result must be reassigned
    train_df = train_df.applymap(lambda s: s[1:-1] if s.startswith("\"") else s)
print("Spooky Authorship Dataset:")
print(" ", "authors: ", set(train_df["author"]))
if postags or args.train_options == "POS":
examples = [Example(pos(passage), author) for passage, author in zip(train_df["text"], train_df["author"]) if len(passage) <= max_char_length]
else:
examples = [Example(passage, author, id) for passage, author, id in zip(train_df["text"], train_df["author"], train_df["id"]) if len(passage) <= max_char_length]
random.shuffle(examples)
if args.kaggle:
train_exs = examples
with open("data/spooky-authorship/test.csv") as f:
test_df = pd.read_csv(f)
        test_df = test_df.applymap(lambda s: s[1:-1] if s.startswith("\"") else s)
test_exs = [Example(passage, "<UNK>", id) for passage, id in zip(test_df["text"], test_df["id"])]
else:
test_idx = int(test_split * len(examples))
train_exs = examples[:test_idx]
test_exs = examples[test_idx:]
authors = Indexer()
for author in set(train_df["author"]):
authors.get_index(author)
return train_exs, test_exs, authors
|
import os
from managers.core import CoreManager
import config
os.system("export GOOGLE_APPLICATION_CREDENTIALS='service_account.json'")
core_manager = CoreManager(config.params)
core_manager.translate()
|
import mysql.connector
from mysql.connector import Error
import xlsxwriter
def insert_profile(term,gre_date,work_experience_months,ug_score,ug_score_pattern,en_exam_pattern,en_exam_score,
gre_score,status,course_name,university_name,gre_verbal_score,gre_quant_score,
gre_awa_score,en_exam_writing_score,en_exam_listening_score,
en_exam_reading_score,en_exam_speaking_score,nickname):
term = str(term)
gre_date = str(gre_date)
work_experience_months = float(work_experience_months)
ug_score = float(ug_score)
ug_score_pattern = str(ug_score_pattern)
en_exam_pattern = str(en_exam_pattern)
en_exam_score = float(en_exam_score)
gre_score = int(gre_score)
status = str(status)
course_name = str(course_name)
university_name = str(university_name)
gre_verbal_score = int(gre_verbal_score)
gre_quant_score = int(gre_quant_score)
gre_awa_score = float(gre_awa_score)
en_exam_writing_score = int(en_exam_writing_score)
en_exam_listening_score = int(en_exam_listening_score)
en_exam_reading_score = int(en_exam_reading_score)
en_exam_speaking_score = int(en_exam_speaking_score)
nickname = str(nickname)
emp_data= dict(term=term, gre_date=gre_date, work_experience_months=work_experience_months, ug_score=ug_score,
ug_score_pattern=ug_score_pattern, en_exam_pattern=en_exam_pattern, en_exam_score=en_exam_score,
gre_score=gre_score, status=status, course_name=course_name, university_name=university_name,
gre_verbal_score=gre_verbal_score, gre_quant_score=gre_quant_score, gre_awa_score=gre_awa_score,
en_exam_writing_score=en_exam_writing_score, en_exam_listening_score=en_exam_listening_score,
en_exam_reading_score=en_exam_reading_score, en_exam_speaking_score=en_exam_speaking_score,
nickname=nickname)
print emp_data
# try:
# pass
#
# conn = mysql.connector.connect(host='localhost',
# database='yocket_db',
# user='root',
# password='root')
# cursor = conn.cursor()
#
# cursor.execute('INSERT INTO yocket_db.profiles VALUES("%s","%s",%d,%d,"%s","%s",%d,%d,"%s","%s","%s",%d,%d,%d,%d,%d,%d,%d,%s)'%
# (term,
# gre_date,
# work_experience_months,
# ug_score,ug_score_pattern,en_exam_pattern,
# en_exam_score,gre_score,status,course_name,university_name,gre_verbal_score,gre_quant_score,gre_awa_score,en_exam_writing_score,en_exam_listening_score,en_exam_reading_score,en_exam_speaking_score,nickname))
# conn.commit()
# except Error as error:
# print(error)
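# A parameterized variant of the commented-out insert above (a sketch,
# assuming the same yocket_db.profiles column order); placeholders let the
# connector handle quoting and avoid SQL injection:
#
# cursor.execute(
#     'INSERT INTO yocket_db.profiles VALUES '
#     '(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
#     (term, gre_date, work_experience_months, ug_score, ug_score_pattern,
#      en_exam_pattern, en_exam_score, gre_score, status, course_name,
#      university_name, gre_verbal_score, gre_quant_score, gre_awa_score,
#      en_exam_writing_score, en_exam_listening_score,
#      en_exam_reading_score, en_exam_speaking_score, nickname))
# conn.commit()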
|
import argparse
import webbrowser
import colorama
from colorama import Fore
from . import scraper
def print_data(data):
    for num, i in enumerate(data, start=1):
title = Fore.BLUE + i["title"]
store = Fore.YELLOW + i["store"]
print(f"{num}. {title} - {store}")
def open_browser(master_list, inputs_list):
    # remove all spaces, then split the comma-separated choices
    inputs_list = inputs_list.replace(" ", "").split(",")
    for num_choice in inputs_list:
        if "-" in num_choice:
            start, end = num_choice.split("-")
            slicing = slice(int(start) - 1, int(end))
            for game in master_list[slicing]:
                webbrowser.open_new_tab(game["url"])
        else:
            webbrowser.open_new_tab(master_list[int(num_choice) - 1]["url"])
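# Input format: comma-separated numbers and ranges from the printed list,
# e.g. "1-3, 5" opens games 1, 2, 3 and 5 in new browser tabs.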
def main():
colorama.init(autoreset=True)
parser = argparse.ArgumentParser(description="Scrape and show limited free games")
parser.add_argument("-v", "--view-only", action="store_true", help="List the games without waiting for input to open website")
parser.add_argument("-tp", "--twitch-prime",action="store_true", help="Show twitch prime/prime gaming limited free games")
args = parser.parse_args()
free_games = scraper.extract()
    free_games = sorted(free_games, key=lambda x: x["store"])
    if not args.twitch_prime:
        free_games = [i for i in free_games if i["store"] != "amazon.com"]
    print_data(free_games)
    if not args.view_only:
        input_choice = input("\nChoose game(s): ")
        open_browser(free_games, input_choice)
if __name__ == "__main__":
main()
|
from unittest import TestLoader, TestSuite
from pyunitreport import HTMLTestRunner
from assertion import AssertionsTest
from searchtest import SearchTests
assertionTest = TestLoader().loadTestsFromTestCase(AssertionsTest)
searchTest = TestLoader().loadTestsFromTestCase(SearchTests)
smokeTest = TestSuite([assertionTest, searchTest])
kwargs = {
"output": 'smoke-report'
}
runner = HTMLTestRunner(**kwargs)
runner.run(smokeTest)
|
# -*- coding: UTF-8 -*-
# If this file's encoding is declared as UTF-8, the Chinese characters in the
# source are byte strings; strings returned from functions are generally
# unicode strings.
# File handling steps: open the file -> operate on it -> close it
import io
# Two ways to read a file
# 1. Load the whole file into memory with f.readlines(); fine for small
#    files, but avoid readlines() in production
f = io.open("onePoem", "r", encoding="utf-8")
for i in f.readlines():
print(i.strip())
# 2. Iterate over the file object itself; the iterator reads the file piece
#    by piece, so even large files do not use much memory
f.seek(0)  # rewind: the readlines() loop above already consumed the file
for j in f:
    print(j.strip())
# s = u"你好"
# print(s)
# s2 = s.encode("UTF-8")  # encode the unicode string to UTF-8 bytes
# print(s2)
f = io.open("onePoem", "r", encoding="utf-8")
# # File read/write notes:
#
# data = f.read()  # this reads the entire file contents into memory
for i in f:
print(i.strip())
# f.write("hello,world".encode("utf-8"))
# # f.read(n): take n characters from the file; in Python, a Chinese
# # character and an English letter each count as one character
# print(data)
f.close()
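# Idiomatic alternative (same file): a context manager closes the handle
# automatically, even on errors:
# with io.open("onePoem", "r", encoding="utf-8") as fh:
#     for line in fh:
#         print(line.strip())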
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 16 16:29:58 2021
@author: chanchanchan
"""
# Variables in Bender Element Analysis:
# Fast Fourier Transform:
#Input Signal
Input_Signal_kHz = 3
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import DissertationPlotwithDataMain as main
change_in_frequency = []
# constants: sampling interval, record length and frequency resolution
change_in_time = 0.000001
n = 2048
k = 1/n/change_in_time
for frequency in np.arange(0, 2046, 1):
    change_in_frequency.append(frequency*k*0.001)
#FFT Graphs variables
#3
df_data3 = pd.DataFrame({'x':main.data3_input_new, 'y':main.data3_output_new})
fft_data3_input = np.fft.fft(df_data3['x']) #S1-F1
fft_data3_output_pre = [0] +(np.fft.fft(df_data3['y'])).tolist()
del fft_data3_output_pre[1]
fft_data3_output = np.array(fft_data3_output_pre)
mag_fft_data3_input = np.abs(fft_data3_input)
mag_fft_data3_output = np.abs(fft_data3_output)
#4
df_data4 = pd.DataFrame({'x':main.data4_input_new, 'y':main.data4_output_new})
fft_data4_input = np.fft.fft(df_data4['x'])
fft_data4_output_pre = [0] + (np.fft.fft(df_data4['y'])).tolist() #turn to list to set the first value to 0
del fft_data4_output_pre[1]
fft_data4_output = np.array(fft_data4_output_pre)
mag_fft_data4_input = np.abs(fft_data4_input)
mag_fft_data4_output = np.abs(fft_data4_output)
#5
df_data5 = pd.DataFrame({'x':main.data5_input_new, 'y':main.data5_output_new})
fft_data5_input = np.fft.fft(df_data5['x']) #S1-F1
fft_data5_output_pre = [0] +(np.fft.fft(df_data5['y'])).tolist()
del fft_data5_output_pre[1]
fft_data5_output = np.array(fft_data5_output_pre)
mag_fft_data5_input = np.abs(fft_data5_input)
mag_fft_data5_output = np.abs(fft_data5_output)
#6
df_data6 = pd.DataFrame({'x':main.data6_input_new, 'y':main.data6_output_new})
fft_data6_input = np.fft.fft(df_data6['x']) #S1-F1
fft_data6_output_pre = [0] +(np.fft.fft(df_data6['y'])).tolist()
del fft_data6_output_pre[1]
fft_data6_output = np.array(fft_data6_output_pre)
mag_fft_data6_input = np.abs(fft_data6_input)
mag_fft_data6_output = np.abs(fft_data6_output)
#7
df_data7 = pd.DataFrame({'x':main.data7_input_new, 'y':main.data7_output_new})
fft_data7_input = np.fft.fft(df_data7['x']) #S1-F1
fft_data7_output_pre = [0] +(np.fft.fft(df_data7['y'])).tolist()
del fft_data7_output_pre[1]
fft_data7_output = np.array(fft_data7_output_pre)
mag_fft_data7_input = np.abs(fft_data7_input)
mag_fft_data7_output = np.abs(fft_data7_output)
#Stacked phase variables
#3 output
real_fft_data3out = fft_data3_output.real
imag_fft_data3out = fft_data3_output.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data3out, imag_fft_data3out):
TangentI = np.arctan(imag_data/real_data)
np.seterr(invalid='ignore')
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
elif real_data< 0 and imag_data > 0:
phase.append(TangentI + PIvalue)
elif real_data< 0 and imag_data < 0:
phase.append(TangentI + PIvalue )
deg3_phaseout = np.degrees(phase)
#adj_1 and adj_2 created for the summation of unwrap
adj3_1 = []
for deg in deg3_phaseout:
if deg > 0:
adj3_1.append(deg)
elif deg <= 0:
adj3_1.append(deg+360)
adj3_2 = []
for deg in deg3_phaseout:
if deg > 0:
adj3_2.append(deg)
elif deg < 0:
adj3_2.append(deg+360)
# unwrap and stacked_phase_pre created for the summation of stacked_phase (degrees)
unwrap3 = []
for adj_a, adj_b in zip(adj3_1, adj3_2):
if adj_b > adj_a:
unwrap3.append(adj_a+(360-adj_b))
else:
unwrap3.append(adj_a-adj_b)
unwrap3.pop()
stacked_phase3_pre = [0] + unwrap3
# cumulative summation for stacked_phase
stacked_phase3out = np.cumsum(stacked_phase3_pre)
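# Note: the quadrant logic and manual unwrapping above can be reproduced
# with numpy built-ins (a sketch, equivalent up to the sign/offset
# convention used here):
#   wrapped = np.angle(fft_data3_output)        # phase in radians
#   stacked = np.degrees(np.unwrap(wrapped))    # continuous phase, degrees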
#3 input
real_fft_data3in = fft_data3_input.real
imag_fft_data3in = fft_data3_input.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data3in, imag_fft_data3in):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
elif real_data< 0 and imag_data > 0:
phase.append(TangentI + PIvalue)
elif real_data< 0 and imag_data < 0:
phase.append(TangentI + PIvalue )
np.seterr(invalid='ignore')
deg3_phasein = np.degrees(phase)
#adj_1 and adj_2 created for the summation of unwrap
adj3_1 = []
for deg in deg3_phasein:
if deg > 0:
adj3_1.append(deg)
elif deg <= 0:
adj3_1.append(deg+360)
del adj3_1[0]
adj3_1 = [360] + adj3_1
adj3_2 = []
for deg in deg3_phasein:
if deg > 0:
adj3_2.append(deg)
elif deg < 0:
adj3_2.append(deg+360)
del adj3_2[0] #for plotting with the same length
# unwrap and stacked_phase_pre created for the summation of stacked_phase (degrees)
unwrap3 = []
for adj_a, adj_b in zip(adj3_1, adj3_2):
if adj_b > adj_a:
unwrap3.append(adj_a+(360-adj_b))
else:
unwrap3.append(adj_a-adj_b)
stacked_phase3_pre = [0] + unwrap3
# cumulative summation for stacked_phase
stacked_phase3in = np.cumsum(stacked_phase3_pre)
#4output
real_fft_data4out = fft_data4_output.real
imag_fft_data4out = fft_data4_output.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data4out, imag_fft_data4out):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
elif real_data< 0 and imag_data > 0:
phase.append(TangentI + PIvalue)
elif real_data< 0 and imag_data < 0:
phase.append(TangentI + PIvalue )
np.seterr(invalid='ignore')
deg4_phaseout = np.degrees(phase)
#adj_1 and adj_2 created for the summation of unwrap
adj4_1 = []
for deg in deg4_phaseout:
if deg > 0:
adj4_1.append(deg)
elif deg <= 0:
adj4_1.append(deg+360)
adj4_2 = []
for deg in deg4_phaseout:
if deg > 0:
adj4_2.append(deg)
elif deg < 0:
adj4_2.append(deg+360)
# unwrap and stacked_phase_pre created for the summation of stacked_phase (degrees)
unwrap4 = []
for adj_a, adj_b in zip(adj4_1, adj4_2):
if adj_b > adj_a:
unwrap4.append(adj_a+(360-adj_b))
else:
unwrap4.append(adj_a-adj_b)
unwrap4.pop()
stacked_phase4_pre = [0] + unwrap4
# cumulative summation for stacked_phase
stacked_phase4out = np.cumsum(stacked_phase4_pre)
#4input
real_fft_data4in = fft_data4_input.real
imag_fft_data4in = fft_data4_input.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data4in, imag_fft_data4in):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
elif real_data< 0 and imag_data > 0:
phase.append(TangentI + PIvalue)
elif real_data< 0 and imag_data < 0:
phase.append(TangentI + PIvalue )
deg4_phasein = np.degrees(phase)
#adj_1 and adj_2 created for the summation of unwrap
adj4_1 = []
for deg in deg4_phasein:
if deg > 0:
adj4_1.append(deg)
elif deg <= 0:
adj4_1.append(deg+360)
del adj4_1[0]
adj4_1 = [360] + adj4_1
adj4_2 = []
for deg in deg4_phasein:
if deg > 0:
adj4_2.append(deg)
elif deg < 0:
adj4_2.append(deg+360)
del adj4_2[0]
# unwrap and stacked_phase_pre created for the summation of stacked_phase (degrees)
unwrap4 = []
for adj_a, adj_b in zip(adj4_1, adj4_2):
if adj_b > adj_a:
unwrap4.append(adj_a+(360-adj_b))
else:
unwrap4.append(adj_a-adj_b)
stacked_phase4_pre = [0] + unwrap4
# cumulative summation for stacked_phase
stacked_phase4in = np.cumsum(stacked_phase4_pre)
#5output
real_fft_data5out = fft_data5_output.real
imag_fft_data5out = fft_data5_output.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data5out, imag_fft_data5out):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
elif real_data< 0 and imag_data > 0:
phase.append(TangentI + PIvalue)
elif real_data< 0 and imag_data < 0:
phase.append(TangentI + PIvalue )
np.seterr(invalid='ignore')
deg5_phaseout = np.degrees(phase)
#adj_1 and adj_2 created for the summation of unwrap
adj5_1 = []
for deg in deg5_phaseout:
if deg > 0:
adj5_1.append(deg)
elif deg <= 0:
adj5_1.append(deg+360)
adj5_2 = []
for deg in deg5_phaseout:
if deg > 0:
adj5_2.append(deg)
elif deg < 0:
adj5_2.append(deg+360)
# unwrap and stacked_phase_pre created for the summation of stacked_phase (degrees)
unwrap5 = []
for adj_a, adj_b in zip(adj5_1, adj5_2):
if adj_b > adj_a:
unwrap5.append(adj_a+(360-adj_b))
else:
unwrap5.append(adj_a-adj_b)
unwrap5.pop()
stacked_phase5_pre = [0] + unwrap5
# cumulative summation for stacked_phase
stacked_phase5out = np.cumsum(stacked_phase5_pre)
#5input
real_fft_data5in = fft_data5_input.real
imag_fft_data5in = fft_data5_input.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data5in, imag_fft_data5in):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
elif real_data< 0 and imag_data > 0:
phase.append(TangentI + PIvalue)
elif real_data< 0 and imag_data < 0:
phase.append(TangentI + PIvalue )
deg5_phasein = np.degrees(phase)
#adj_1 and adj_2 created for the summation of unwrap
adj5_1 = []
for deg in deg5_phasein:
if deg > 0:
adj5_1.append(deg)
elif deg <= 0:
adj5_1.append(deg+360)
del adj5_1[0]
adj5_1 = [360] + adj5_1
adj5_2 = []
for deg in deg5_phasein:
if deg > 0:
adj5_2.append(deg)
elif deg < 0:
adj5_2.append(deg+360)
del adj5_2[0]
# unwrap and stacked_phase_pre created for the summation of stacked_phase (degrees)
unwrap5 = []
for adj_a, adj_b in zip(adj5_1, adj5_2):
if adj_b > adj_a:
unwrap5.append(adj_a+(360-adj_b))
else:
unwrap5.append(adj_a-adj_b)
stacked_phase5_pre = [0] + unwrap5
# cumulative summation for stacked_phase
stacked_phase5in = np.cumsum(stacked_phase5_pre)
#6output
real_fft_data6out = fft_data6_output.real
imag_fft_data6out = fft_data6_output.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data6out, imag_fft_data6out):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
elif real_data< 0 and imag_data > 0:
phase.append(TangentI + PIvalue)
elif real_data< 0 and imag_data < 0:
phase.append(TangentI + PIvalue )
deg6_phaseout = np.degrees(phase)
#adj_1 and adj_2 created for the summation of unwrap
adj6_1 = []
for deg in deg6_phaseout:
if deg > 0:
adj6_1.append(deg)
elif deg <= 0:
adj6_1.append(deg+360)
adj6_2 = []
for deg in deg6_phaseout:
if deg > 0:
adj6_2.append(deg)
elif deg < 0:
adj6_2.append(deg+360)
# unwrap and stacked_phase_pre created for the summation of stacked_phase (degrees)
unwrap6 = []
for adj_a, adj_b in zip(adj6_1, adj6_2):
if adj_b > adj_a:
unwrap6.append(adj_a+(360-adj_b))
else:
unwrap6.append(adj_a-adj_b)
unwrap6.pop()
stacked_phase6_pre = [0] + unwrap6
# cumulative summation for stacked_phase
stacked_phase6out = np.cumsum(stacked_phase6_pre)
#6input
real_fft_data6in = fft_data6_input.real
imag_fft_data6in = fft_data6_input.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data6in, imag_fft_data6in):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
    elif real_data < 0 and imag_data > 0:
        phase.append(TangentI + PIvalue)
    elif real_data < 0 and imag_data < 0:
        phase.append(TangentI + PIvalue)
deg6_phasein = np.degrees(phase)
#adj_1 and adj_2 are created for computing the unwrap increments
adj6_1 = []
for deg in deg6_phasein:
if deg > 0:
adj6_1.append(deg)
elif deg <= 0:
adj6_1.append(deg+360)
del adj6_1[0]
adj6_1 = [360] + adj6_1
adj6_2 = []
for deg in deg6_phasein:
if deg > 0:
adj6_2.append(deg)
elif deg < 0:
adj6_2.append(deg+360)
del adj6_2[0]
#unwrap and stacked_phase_pre are created for computing the stacked phase
unwrap6 = []
for adj_a, adj_b in zip(adj6_1, adj6_2):
if adj_b > adj_a:
unwrap6.append(adj_a+(360-adj_b))
else:
unwrap6.append(adj_a-adj_b)
stacked_phase6_pre = [0] + unwrap6
#cumulative summation for stacked_phase
stacked_phase6in = np.cumsum(stacked_phase6_pre)
#7output
real_fft_data7out = fft_data7_output.real
imag_fft_data7out = fft_data7_output.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data7out, imag_fft_data7out):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
    elif real_data < 0 and imag_data > 0:
        phase.append(TangentI + PIvalue)
    elif real_data < 0 and imag_data < 0:
        phase.append(TangentI + PIvalue)
deg7_phaseout = np.degrees(phase)
#adj_1 and adj_2 are created for computing the unwrap increments
adj7_1 = []
for deg in deg7_phaseout:
if deg > 0:
adj7_1.append(deg)
elif deg <= 0:
adj7_1.append(deg+360)
adj7_2 = []
for deg in deg7_phaseout:
if deg > 0:
adj7_2.append(deg)
elif deg < 0:
adj7_2.append(deg+360)
#unwrap and stacked_phase_pre are created for computing the stacked phase
unwrap7 = []
for adj_a, adj_b in zip(adj7_1, adj7_2):
if adj_b > adj_a:
unwrap7.append(adj_a+(360-adj_b))
else:
unwrap7.append(adj_a-adj_b)
unwrap7.pop()
stacked_phase7_pre = [0] + unwrap7
#cumulative summation for stacked_phase
stacked_phase7out = np.cumsum(stacked_phase7_pre)
#7input
real_fft_data7in = fft_data7_input.real
imag_fft_data7in = fft_data7_input.imag
PIvalue = 4*np.arctan(1)
phase = []
for real_data, imag_data in zip(real_fft_data7in, imag_fft_data7in):
TangentI = np.arctan(imag_data/real_data)
if real_data > 0 and imag_data == 0:
phase.append(0)
elif real_data > 0 and imag_data > 0:
phase.append(TangentI)
elif real_data > 0 and imag_data < 0:
phase.append(TangentI + 2*PIvalue)
elif real_data == 0 and imag_data == 0:
phase.append(0)
elif real_data == 0 and imag_data > 0:
phase.append(PIvalue/2)
elif real_data == 0 and imag_data < 0:
phase.append(3*PIvalue/2)
elif real_data < 0 and imag_data == 0:
phase.append(PIvalue)
    elif real_data < 0 and imag_data > 0:
        phase.append(TangentI + PIvalue)
    elif real_data < 0 and imag_data < 0:
        phase.append(TangentI + PIvalue)
deg7_phasein = np.degrees(phase)
#adj_1 and adj_2 are created for computing the unwrap increments
adj7_1 = []
for deg in deg7_phasein:
if deg > 0:
adj7_1.append(deg)
elif deg <= 0:
adj7_1.append(deg+360)
del adj7_1[0]
adj7_1 = [360] + adj7_1
adj7_2 = []
for deg in deg7_phasein:
if deg > 0:
adj7_2.append(deg)
elif deg < 0:
adj7_2.append(deg+360)
del adj7_2[0]
#unwrap and stacked_phase_pre are created for computing the stacked phase
unwrap7 = []
for adj_a, adj_b in zip(adj7_1, adj7_2):
if adj_b > adj_a:
unwrap7.append(adj_a+(360-adj_b))
else:
unwrap7.append(adj_a-adj_b)
stacked_phase7_pre = [0] + unwrap7
#cumulative summation for stacked_phase
stacked_phase7in = np.cumsum(stacked_phase7_pre)
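#The five pairs of quadrant-by-quadrant phase blocks above can be collapsed
#with numpy's vectorized helpers. A minimal sketch (an alternative, not the
#method used above; its sign and offset conventions differ from the manual
#(0, 360] accumulation):
def stacked_phase_degrees(fft_data):
    #np.angle wraps each bin's phase to (-pi, pi]; np.unwrap removes the
    #2*pi jumps between neighbouring bins, yielding a continuous phase curve
    return np.degrees(np.unwrap(np.angle(fft_data)))
#e.g. stacked_phase_degrees(fft_data7_input) is the vectorized analogue of
#stacked_phase7in (up to sign and starting offset)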
if __name__ == "__main__" :
plt.xlabel('Frequency (kHz)')
plt.xlim([0, 20])
if __name__ == "__main__" :
if Input_Signal_kHz == 3:
plt.plot(change_in_frequency, mag_fft_data3_input, label = '3kHz')
plt.title('Fast Fourier Transform (Input)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.ylim([0, 700])
plt.show()
plt.plot(change_in_frequency, mag_fft_data3_output, label = '3kHz')
plt.title('Fast Fourier Transform (Output)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.xlim([0, 20])
plt.ylim([0, 700])
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.title('Stacked Phase')
        plt.ylabel('Stacked Phase (Degrees)')
plt.xlabel('Frequency (kHz)')
plt.ylim([0, 8000])
plt.xlim([0, 20])
plt.scatter(change_in_frequency, stacked_phase3out, label = '3kHz')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
elif Input_Signal_kHz == 4:
plt.plot(change_in_frequency, mag_fft_data4_input, label = '4kHz')
plt.title('Fast Fourier Transform (Input)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.plot(change_in_frequency, mag_fft_data4_output, label = '4kHz')
plt.title('Fast Fourier Transform (Output)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.xlim([0, 20])
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.title('Stacked Phase')
        plt.ylabel('Stacked Phase (Degrees)')
plt.xlabel('Frequency (kHz)')
plt.ylim([0, 8000])
plt.xlim([0, 20])
plt.scatter(change_in_frequency, stacked_phase4out, label = '4kHz')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
elif Input_Signal_kHz == 5:
plt.plot(change_in_frequency, mag_fft_data5_input, label = '5kHz')
plt.title('Fast Fourier Transform (Input)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.plot(change_in_frequency, mag_fft_data5_output, label = '5kHz')
plt.title('Fast Fourier Transform (Output)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.xlim([0, 20])
plt.ylim([0, 700])
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.title('Stacked Phase')
        plt.ylabel('Stacked Phase (Degrees)')
plt.xlabel('Frequency (kHz)')
plt.ylim([0, 8000])
plt.xlim([0, 20])
plt.scatter(change_in_frequency, stacked_phase5out, label = '5kHz')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
elif Input_Signal_kHz == 6:
plt.plot(change_in_frequency, mag_fft_data6_input, label = '6kHz')
plt.title('Fast Fourier Transform (Input)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.plot(change_in_frequency, mag_fft_data6_output, label = '6kHz')
plt.title('Fast Fourier Transform (Output)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.xlim([0, 20])
plt.ylim([0, 700])
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.title('Stacked Phase')
        plt.ylabel('Stacked Phase (Degrees)')
plt.xlabel('Frequency (kHz)')
plt.ylim([0, 8000])
plt.xlim([0, 20])
plt.scatter(change_in_frequency, stacked_phase6out, label = '6kHz')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
elif Input_Signal_kHz == 7:
plt.plot(change_in_frequency, mag_fft_data7_input, label = '7kHz')
plt.title('Fast Fourier Transform (Input)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.plot(change_in_frequency, mag_fft_data7_output, label = '7kHz')
plt.title('Fast Fourier Transform (Output)')
        plt.ylabel('Magnitude (Arbitrary Units)')
plt.xlim([0, 20])
plt.ylim([0, 700])
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
plt.title('Stacked Phase')
        plt.ylabel('Stacked Phase (Degrees)')
plt.xlabel('Frequency (kHz)')
plt.ylim([0, 8000])
plt.xlim([0, 20])
plt.scatter(change_in_frequency, stacked_phase7out, label = '7kHz')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left' )
plt.show()
|
from datetime import datetime
from db_config import db, ma
class PerformanceAnalysis(db.Model):
__tablename__ = "performance_analysis"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
start_date = db.Column(db.DateTime, nullable=False, default=datetime.now)
end_date = db.Column(db.DateTime, nullable=False, default=datetime.now)
region = db.Column(db.String(32))
site_name = db.Column(db.String(32))
probe = db.Column(db.String(32))
app_type = db.Column(db.String(32))
timestamp = db.Column(
db.DateTime, default=datetime.now, onupdate=datetime.utcnow
)
class DeviceManagement(db.Model):
__tablename__ = "device_management"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
probe_name = db.Column(db.String(32))
region = db.Column(db.String(32))
site_name = db.Column(db.String(32))
mobile_technology = db.Column(db.String(32))
mobile_model = db.Column(db.String(32))
cordinates = db.Column(db.String(32))
date_of_installation = db.Column(db.DateTime, nullable=False, default=datetime.now)
device_id = db.Column(db.String(32))
mobile_number = db.Column(db.String(32))
mobile_os = db.Column(db.String(32))
current_version = db.Column(db.String(32))
update = db.Column(db.String(32))
remote_management = db.Column(db.String(32))
timestamp = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.utcnow)
class FaultManagement(db.Model):
__tablename__ = "fault_management"
fault_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
time = db.Column(db.DateTime, nullable=False, default=datetime.now)
probe_id = db.Column(db.String(32))
region = db.Column(db.String(32))
site_name = db.Column(db.String(32))
ip = db.Column(db.String(32))
fault_description = db.Column(db.String(100))
current_version = db.Column(db.String(32))
status = db.Column(db.String(32))
ack_by = db.Column(db.String(32))
ack_time = db.Column(db.DateTime, nullable=False, default=datetime.now)
class PerformanceAnalysisSchema(ma.Schema):
class Meta:
fields = ('id', 'start_date', 'end_date', 'region', 'site_name', 'probe', 'app_type', 'timestamp')
class DeviceManagementSchema(ma.Schema):
class Meta:
fields = ('id', 'probe_name', 'region', 'site_name', 'mobile_technology', 'mobile_model', 'cordinates',
'date_of_installation', 'device_id', 'mobile_number', 'mobile_os',
'current_version', 'update', 'remote_management', 'timestamp')
class FaultManagementSchema(ma.Schema):
class Meta:
fields = ('fault_id', 'time', 'probe_id', 'region', 'site_name', 'ip', 'fault_description', 'current_version',
'status', 'ack_by', 'ack_time')
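# A minimal serialization sketch (an assumption, not part of the original
# module): with flask-marshmallow 3, Schema.dump() returns plain dicts, so
# rows can be prepared for jsonify() like this.
performance_list_schema = PerformanceAnalysisSchema(many=True)
def serialize_performance_rows():
    # requires an application context and a configured db session
    return performance_list_schema.dump(PerformanceAnalysis.query.all())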
|
import requests
def _url(path):
return 'http://104.199.18.3:8080'+path
def get_containers() :
return requests.get(_url('/containers'))
def get_containers_running() :
return requests.get(_url('/containers?state=running'))
def get_services():
return requests.get(_url('/services'))
def get_nodes():
return requests.get(_url('/nodes'))
def get_images():
return requests.get(_url('/images'))
def get_container(con_id):
return requests.get(_url('/containers/{:s}'.format(con_id)))
def get_container_log(con_id):
    return requests.get(_url('/containers/{:s}/logs'.format(con_id)))
def delete_containers():
return requests.delete(_url('/containers'))
def delete_container(con_id):
return requests.delete(_url('/containers/{:s}'.format(con_id)))
def delete_images():
return requests.delete(_url('/images'))
def delete_image(i_id):
return requests.delete(_url('/images/{:s}'.format(i_id)))
if __name__ == "__main__":
print(get_containers())
print(get_containers_running())
print(get_services())
print(get_nodes())
print(get_images())
print(get_container("0cf4b3f313f4"))
#print(get_container_log("b9babde8c4db"))
print(delete_containers())
print(delete_container("0cf4b3f313f4"))
print(delete_images())
print(delete_image("9e7424e5dbae"))
|
import datetime
import random
def wishMe():
hour = int(datetime.datetime.now().hour)
wish = ""
if hour >= 0 and hour < 12:
wish = "Good Morning!"
elif hour >= 12 and hour < 18:
wish = "Good Afternoon!"
else:
wish = "Good Evening!"
return f"{wish} I am Shaktiman, How can I help you?"
def greetings():
    # build the list on each call so wishMe() reflects the current time,
    # instead of freezing the greeting at import time
    statements = [
        wishMe(),
        "Hola! Welcome back. Please let me know how I can help you.",
        "Good to see you! Please tell me if I can help you.",
        "Hello! Please say what you need; I will help you as much as I can.",
    ]
    return random.choice(statements)
|
import urllib.parse
from django.contrib.auth import authenticate, login
from rest_framework import serializers, status
from rest_framework.authentication import BasicAuthentication
from rest_framework.response import Response
import json
from edtech.apis.api_error_response import APIErrorResponse
from edtech.apis.edtech_api import EdtechAPI
from edtech.models.test_series import TestSeries
from django.contrib.auth import get_user_model
class _LoginRequestDTO(serializers.Serializer):
username = serializers.CharField(max_length=100)
password = serializers.CharField(max_length=100)
def update(self, instance, validated_data):
pass
def create(self, validated_data):
pass
class SignupAPI(EdtechAPI):
authentication_classes = (BasicAuthentication,)
def options(self, request, *args, **kwargs):
return APIErrorResponse.method_not_allowed()
def post(self, request):
lr_dto = _LoginRequestDTO(data=request.data)
if not lr_dto.is_valid():
return Response(lr_dto.errors, status.HTTP_400_BAD_REQUEST)
lr_data = lr_dto.data
user = authenticate(username=lr_data['username'], password=lr_data['password'])
if user is None:
try:
                user = get_user_model().objects.create_user(username=lr_data['username'], password=lr_data['password'], email=lr_data['username'])
except Exception as e:
return APIErrorResponse.user_already_exist(lr_data['username'])
if user is not None:
user.is_staff = True
# save user properties in sqlite auth_user table.
user.save()
login(request, user)
response = Response({
'status': 'success',
'msg': 'User logged in successfully',
'userId': user.id,
'email': user.email,
})
authenticated_user_cookie_map = {
"userId": user.id,
"email": user.email,
}
# request.session['test_series_id'] = TestSeries.objects.order_by('?').first().id
# request.session['question_no'] = 1
            response.set_cookie('authenticated_user', urllib.parse.quote(json.dumps(authenticated_user_cookie_map)))
return response
else:
return APIErrorResponse.user_already_exist(lr_data['username'])
|
from datetime import datetime
from income import Income
from expense import Expense
class MoneyTracker:
def __init__(self, aggregated_object):
self.aggregated_object = aggregated_object
def show_expenses_for_date(self, date, expenses):
for expense in expenses:
if expense.date == date:
print("{}, {}, New Expense".format(expense.amount, expense.name))
def show_incomes_for_date(self, date, incomes):
for income in incomes:
if income.date == date:
print("{}, {}, New Income".format(income.amount, income.name))
def show_data_for_date(self, date):
print("=== {} ===".format(date))
self.show_incomes_for_date(date, self.aggregated_object.incomes_list)
self.show_expenses_for_date(date, self.aggregated_object.expenses_list)
def show_all_data(self):
for date in self.aggregated_object.dates_list:
self.show_data_for_date(date)
def show_data_for_specific_date(self):
while True:
try:
date = str(input("Enter date in the following format 'dd-mm-yyyy'\n>>>>: "))
if date not in self.aggregated_object.dates_list:
raise Exception
else:
break
except Exception:
print("Invalid date or format!")
self.show_data_for_date(date)
def generate_expenses_ordered_by_category(self):
ordered_by_category = {}
for expense in self.aggregated_object.expenses_list:
if expense.name not in ordered_by_category.keys():
ordered_by_category[expense.name] = []
ordered_by_category[expense.name].append(expense)
return ordered_by_category
def show_expenses_ordered_by_category(self):
ordered_by_category = self.generate_expenses_ordered_by_category()
for category, records in sorted(ordered_by_category.items()):
print("=== {} ===".format(category))
for record in records:
print("{} {}".format(record.amount, record.date))
def gather_info(self, category_type='income'):
while True:
try:
amount = float(input('New {} amount: '.format(category_type)))
if amount <= 0:
raise ValueError
break
            except ValueError:
                # float() raises ValueError for non-numeric input, and it is
                # raised above for non-positive amounts
                print("The amount must be a positive number!")
category_name = str(input('New {} type: '.format(category_type)))
while True:
try:
date = input("New {} date in the following format 'dd-mm-yyyy': ".format(category_type))
datetime.strptime(date, "%d-%m-%Y")
break
except ValueError:
print('Invalid date or format!')
return (amount, category_name, date)
def add_income(self, username):
amount, category_name, date = self.gather_info(category_type='income')
new_income = Income(amount, category_name, date)
self.aggregated_object.incomes_list.append(new_income)
if date not in self.aggregated_object.dates_list:
self.aggregated_object.dates_list.append(date)
self.aggregated_object.save_changes_to_file(username)
def add_expense(self, username):
amount, category_name, date = self.gather_info(category_type='expense')
new_expense = Expense(amount, category_name, date)
self.aggregated_object.expenses_list.append(new_expense)
if date not in self.aggregated_object.dates_list:
self.aggregated_object.dates_list.append(date)
self.aggregated_object.save_changes_to_file(username)
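# A minimal driver sketch, assuming only the aggregated object's interface
# used by the methods above (incomes_list, expenses_list, dates_list,
# save_changes_to_file); the stub class is illustrative, not original code.
if __name__ == '__main__':
    class _InMemoryStore(object):
        def __init__(self):
            self.incomes_list = []
            self.expenses_list = []
            self.dates_list = []
        def save_changes_to_file(self, username):
            pass  # persistence stubbed out in this sketch
    tracker = MoneyTracker(_InMemoryStore())
    tracker.add_income('demo-user')
    tracker.show_all_data()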
|
from redis import Redis
redis_connection = Redis()
key = "some-key"
value = "some-val"
redis_connection.set(key, value)
print(redis_connection.get(key))  # returns bytes by default, e.g. b'some-val'
|
'''
Created on Feb 12, 2013
@author: Christian M Brodbeck
'''
from itertools import izip
import os
from mne.label import _get_annot_fname
from mne.utils import get_subjects_dir
from nibabel.freesurfer import read_annot, write_annot
def fix_annot_names(subject, parc, clean_subject=None, clean_parc=None,
hemi='both', subjects_dir=None):
"""Fix for Freesurfer's mri_surf2surf corrupting label names in annot files
Notes
-----
Requires nibabel > 1.3.0 for annot file I/O
"""
# process args
subjects_dir = get_subjects_dir(subjects_dir)
if clean_subject is None:
clean_subject = subject
if clean_parc is None:
clean_parc = parc
fpaths, hemis = _get_annot_fname(None, subject, hemi, parc, subjects_dir)
clean_fpaths, _ = _get_annot_fname(None, clean_subject, hemi, clean_parc,
subjects_dir)
for fpath, clean_fpath, hemi in izip(fpaths, clean_fpaths, hemis):
labels, ctab, names = read_annot(fpath)
_, _, clean_names = read_annot(clean_fpath)
if all(n == nc for n, nc in izip(names, clean_names)):
continue
if len(clean_names) != len(names):
err = ("Different names in %s annot files: %s vs. "
"%s" % (hemi, str(names), str(clean_names)))
raise ValueError(err)
for clean_name, name in izip(clean_names, names):
if not name.startswith(clean_name):
err = "%s does not start with %s" % (str(name), clean_name)
raise ValueError(err)
write_annot(fpath, labels, ctab, clean_names)
def is_fake_mri(mri_dir):
"""Check whether a directory is a fake MRI subject directory
Parameters
----------
mri_dir : str(path)
Path to a directory.
Returns
-------
    True if `mri_dir` is a fake MRI directory.
"""
items = os.listdir(mri_dir)
# need to contain:
nc = [c for c in ['bem', 'surf'] if c not in items]
# does not contain:
c = [c for c in ['mri', 'src', 'stats'] if c in items]
if c or nc:
return False
else:
return True
|
age = 1
if age > 0 and age != 1:
print 'abc'
elif age == 1:
print 1
else:
print 'ddd'
arr = [1, 2, 3]
for e in arr:
print e
print range(5)
|
#!/usr/bin/env python3
import os
def main():
cmd = "python3 -m pip install --upgrade fx_bin"
print(cmd)
os.system(cmd)
if __name__ == '__main__':
main()
|
#import zmq
import logging
import sys
import os
import time
import datetime
from .PlatformClient import PlatformClient
from . import PlatformStructs as Pstruct
from . import helper
POLLING_INTERVAL = 1 # seconds
class Solver(PlatformClient):
def __init__(self,index):
super().__init__()
self.logger = logging.getLogger("Solver")
self.logger.setLevel(logging.INFO)
# ch = logging.StreamHandler()
# # formatter = logging.Formatter("---%(name)s---: \n%(message)s\n\r")
# formatter = logging.Formatter("---%(name)s---:%(message)s")
# ch.setFormatter(formatter)
# self.logger.addHandler(ch)
self.solve = False
self.mediators = {}
self.resource_providers = {}
self.job_creators = {}
self.resource_offers = {}
self.job_offers = {}
self.job_offer_part_one_completed = []
self.job_offer_part_two_completed = []
self.matched_jos = {}
self.matched_ros = {}
self.index = index
def register(self, account):
self.account = account
self.contract.registerSolver(account)
def matchable(self, resource_offer, job_offer):
# Both parts of job offer should have arrived.
if job_offer.offerId not in self.job_offer_part_one_completed or job_offer.offerId not in self.job_offer_part_two_completed:
self.logger.info("two parts of offer has not been arrived yet.")
return (False, False)
#instructionCap >= instructionLimit
#print("Begin Matching")
if resource_offer.instructionCap < job_offer.instructionLimit:
self.logger.info("Too many instructions: %s < %s"
%(resource_offer.instructionCap,job_offer.instructionLimit))
return(False, False)
# memoryCap >= ramLimit
if resource_offer.memoryCap < job_offer.ramLimit:
self.logger.info("It takes a lot of ram: %s < %s"
%(ResourceOffer.memoryCap, job_offer.ramLimit))
return(False, False)
#localStorageCap>= localStorageLimit
if resource_offer.localStorageCap < job_offer.localStorageLimit:
self.logger.info("Too much disk: %s < %s" %(resource_offer.localStorageCap,job_offer.localStorageLimit))
return(False, False)
#bandwidthCap >= bandwidthLimit
if resource_offer.bandwidthCap < job_offer.bandwidthLimit:
self.logger.info("Too much data: %s < %s" %(resource_offer.bandwidthCap,job_offer.bandwidthLimit))
return(False, False)
#instructionPrice >= instructionMaxPrice
if resource_offer.instructionPrice > job_offer.instructionMaxPrice:
self.logger.info("Instructions too expensive: %s > %s" %(resource_offer.instructionPrice,job_offer.instructionMaxPrice))
return(False, False)
#bandwidthPrice >= bandwidthMaxPrice
if resource_offer.bandwidthPrice > job_offer.bandwidthMaxPrice:
self.logger.info("Data too expensive : %s > %s" %(resource_offer.bandwidthPrice,job_offer.bandwidthMaxPrice))
return(False, False)
# if (datetime.datetime.now() + datetime.timedelta(0,0,self.resource_providers[resource_offer.resourceProvider].timePerInstruction * job_offer.instructionLimit)) > job_offer.completionDeadline:
self.logger.info("Type: %s" %(time.time() +
job_offer.instructionLimit *
self.resource_providers[resource_offer.resourceProvider].timePerInstruction
))
self.logger.info("Type: %s" %job_offer.completionDeadline)
completionDeadline = time.time() + job_offer.instructionLimit * self.resource_providers[resource_offer.resourceProvider].timePerInstruction
if completionDeadline > job_offer.completionDeadline:
self.logger.info("Not Enough Time to complete: %s > %s" %(completionDeadline,job_offer.completionDeadline))
return(False, False)
#JO.arch = RP.arch
if self.resource_providers[resource_offer.resourceProvider].arch != job_offer.arch:
self.logger.info("Architecture mismatch")
return(False, False)
# JO.trustedMediators intersection RP.trustedMediators != empty
for i in self.resource_providers[resource_offer.resourceProvider].trustedMediators:
for j in self.job_creators[job_offer.jobCreator].trustedMediators:
if i == j:
#Deposits >=jobDeposit
try :
sharedMediator = self.mediators[i]
except KeyError:
self.logger.info("Mutually trusted mediator %s has not registered" %i)
continue
#necessary price of mediation by mediation
mediatorDeposit = job_offer.instructionLimit * sharedMediator.instructionPrice + job_offer.bandwidthLimit * sharedMediator.bandwidthPrice + job_offer.bandwidthLimit * sharedMediator.bandwidthPrice
#necessary price of job execution by resource_offer
jobDeposit = job_offer.instructionLimit * resource_offer.instructionPrice + job_offer.bandwidthLimit * resource_offer.bandwidthPrice + job_offer.bandwidthLimit * resource_offer.bandwidthPrice
                    #Assume the fine is fineRate times the job deposit
                    fineRate = 100 #TODO make fineRate a global constant
fine = fineRate * jobDeposit
# if ((resource_offer.deposit > (fine + mediatorDeposit)) and (job_offer.deposit > (fine + mediatorDeposit)) ):
return(True,i)
self.logger.info("No mutually trusted Mediator available") #only will reach here if there is no mediator
self.logger.info(self.resource_providers[resource_offer.resourceProvider].trustedMediators)
self.logger.info(self.job_creators[job_offer.jobCreator].trustedMediators)
return(False, False)
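    # Despite its name, bfs is a recursive depth-first search for an
    # augmenting path in the bipartite job/resource graph: starting from a
    # node it alternates sides until it reaches an unmatched (free) node,
    # returning the edges along the path it found.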
def bfs(self, resourceList, jobList, edgeList, currentNode, jobsVisited, resourcesVisited, resourceOrJob, prevTraversedJob, prevTraversedResource):
#Start searching all connected nodes
if (resourceOrJob == 'resource'):
#prevTraversedResource.append(currentNode)
for i in resourceList[currentNode]:
#if i not in prevTraversedJob:
if jobsVisited[i] == 0: #i is not visited, free nodes
edgeList.append([i,currentNode])
return(True, edgeList)
#recursively search if not found
for i in resourceList[currentNode]:
if i not in prevTraversedJob:
                    edgeList2 = list(edgeList)
                    prevTraversedJob2 = list(prevTraversedJob)  # copy the visited-jobs path
                    prevTraversedJob2.append(i)
edgeList2.append([i,currentNode])
[found, edgeList3] = self.bfs(resourceList, jobList,edgeList2, i, jobsVisited, resourcesVisited, 'job', prevTraversedJob2, prevTraversedResource)
if found: #if found == True
return(found,edgeList3)
elif resourceOrJob == 'job':
#prevTraversedJob.append(currentNode)
for i in jobList[currentNode]:
#if i not in prevTraversedResource:
if resourcesVisited[i] == 0: #i is not visited, free node
edgeList.append([currentNode,i])
return(True, edgeList)
#recursively search if not found
for i in jobList[currentNode]:
if i not in prevTraversedResource:
                    edgeList2 = list(edgeList)
                    prevTraversedResource2 = list(prevTraversedResource)  # copy the visited-resources path
                    prevTraversedResource2.append(i)
edgeList2.append([currentNode,i])
[found, edgeList3 ] = self.bfs(resourceList, jobList,edgeList2, i, jobsVisited, resourcesVisited, 'resource', prevTraversedJob, prevTraversedResource2)
if found: #if found == True
return(found, edgeList3)
return(False, [])
def GreedyMatches(self):
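        # First-fit greedy matching: pair each job offer with the first
        # compatible resource offer. HopcroftKarp below aims for a maximum
        # matching instead.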
ros = list(self.resource_offers.keys())
jos = list(self.job_offers.keys())
matches = []
for jo in jos:
for ro in ros:
(match, mediator) = self.matchable(self.resource_offers[ro], self.job_offers[jo])
if match:
matches.append((jo, ro, mediator))
ros.remove(ro)
break
return matches
    #maximum bipartite matching algorithm
def HopcroftKarp(self):
#Create graph
jobList={}
resourceList = {}
mediatorList = {}
edges = []
#list of visited nodes
visitedJob = {}
visitedResource = {}
for j in self.resource_offers:
resourceList[j]=[]
visitedResource[j]=0
self.logger.info("#JOs: %s, #RO: %s" %(self.job_offers, self.resource_offers))
#create edges for each node
for i in self.job_offers:
edges = []
visitedJob[i]=0
mediatorList[i] = {}
for j in self.resource_offers:
[result,mediator] = self.matchable(self.resource_offers[j],self.job_offers[i])
if (result):
self.logger.info("Matchable")
edges.append(j)
resourceList[j].append(i)
mediatorList[i][j]=mediator
else:
self.logger.info("Not Matchable")
jobList[i] = edges
        #this uses the Hopcroft-Karp algorithm for maximum bipartite matching
P = []
for i in jobList:
[result, newEdges] = self.bfs(resourceList, jobList, [], i, visitedJob, visitedResource, 'job', [i], [])
if (result != False): #Important step for null results
for j in newEdges:
visitedJob[j[0]]=1
visitedResource[j[1]]=1
mediator = mediatorList[j[0]][j[1]]
j.append(mediator)
if result: #if we found a successful graph
if j not in P:
P.append(j)
else:
P.remove(j) #Why would you remove it?
return(P)
def platformListener(self):
self.active = True
self.logger.info("Listening for contract events...")
while self.active:
events = self.contract.poll_events()
for event in events:
params = event['params']
name = event['name']
transactionHash = event['transactionHash']
self.getReceipt(name, transactionHash)
self.logger.info("{}({}).".format(name, params))
if name == "MediatorRegistered":
self.logger.info("%s" %name)
self.mediators[params['addr']] = Pstruct.Mediator(
params['arch'],
params['instructionPrice'],
params['bandwidthPrice'])
elif name == "ResourceProviderRegistered":
self.logger.info("%s" %name)
self.resource_providers[params['addr']] = Pstruct.ResourceProvider(params['arch'], params['timePerInstruction'])
elif name == "JobCreatorRegistered":
self.logger.info("%s" %name)
self.job_creators[params['addr']] = Pstruct.JobCreator()
elif name == "ResourceProviderAddedTrustedMediator":
self.logger.info("%s" %name)
self.logger.info("name : %s addr : %s" %(name, params['addr']))
self.resource_providers[params['addr']].trustedMediators.append(params['mediator'])
elif name == "JobCreatorAddedTrustedMediator":
self.logger.info("%s" %name)
self.job_creators[params['addr']].trustedMediators.append(params['mediator'])
elif name == "ResourceOfferPosted":
self.logger.info("%s = %s" %(name, params["iroid"]))
offer = Pstruct.ResourceOffer(
params['offerId'],
params['addr'],
params['instructionPrice'],
params['instructionCap'],
params['memoryCap'],
params['localStorageCap'],
params['bandwidthCap'],
params['bandwidthPrice'],
params['deposit'],
params['iroid']
)
self.resource_offers[params['offerId']] = offer
elif "JobOfferPosted" in name:
self.logger.info("%s offerID = %s" % (name, params['offerId']))
if "One" in name:
self.logger.info("%s = %s" % (name, params['ijoid']))
self.logger.info("completionDeadline: %s Type: %s" %(params['completionDeadline'],type(params['completionDeadline'])))
helper.storeJobOffer(event,self.job_offers)
if name == "JobOfferPostedPartOne":
self.job_offer_part_one_completed.append(params['offerId'])
if name == "JobOfferPostedPartTwo":
self.job_offer_part_two_completed.append(params['offerId'])
elif name == "Matched":
# self.logger.info("I: %s" %name)
joid = params['jobOfferId']
roid = params['resourceOfferId']
self.logger.info("I: job offer %s matchID = %s" %(name, joid))
self.logger.info("I: resource offer %s matchID = %s" %(name, roid))
ijoid = self.matched_jos[joid].ijoid
iroid = self.matched_ros[roid].iroid
self.logger.info("I: job offer %s = %s" %(name, ijoid))
self.logger.info("I: resource offer %s = %s" %(name, iroid))
                    helper.logEvent(self.index, name, self.ethclient, event['transactionHash'], joid=joid, ijoid=ijoid)
#after reading events call mathing
matches = self.HopcroftKarp()
# matches = self.GreedyMatches()
if matches:
self.logger.info(matches)
self.logger.info(self.resource_providers)
self.logger.info(self.resource_offers)
self.logger.info(self.job_creators)
self.logger.info(self.job_offers)
for i in matches:
self.logger.info("I: postMatch job offer = %s" %self.job_offers[i[0]].ijoid)
self.logger.info("I: postMatch resource offer = %s" %self.resource_offers[i[1]].iroid)
self.logger.info("jo id: %s" %self.job_offers[i[0]].offerId)
self.logger.info("ro id: %s" %self.resource_offers[i[1]].offerId)
txHash = self.contract.postMatch(self.account, True,
self.job_offers[i[0]].offerId,
self.resource_offers[i[1]].offerId,
i[2])
#remove matches from list
self.matched_jos[i[0]] = self.job_offers.pop(i[0])
self.matched_ros[i[1]] = self.resource_offers.pop(i[1])
self.wait()
|
# -*- coding:utf-8 -*-
import requests
import json
import common
from lxml import etree
def extract_product_json(cjson):
if cjson['Msg'] != 'success':
print 'get product json error\n'
return None
product_sum = cjson['PkgsCount']
products = cjson['Pkgs']
item = {}
for product in products:
item['description'] = product['BriefIntroduction2']
item['deptcity'] = product['DepartureCityName']
item['deptdate'] = product['Highlight3'].split(':'.decode('utf-8'))[1]
item['pname'] = product['ProductName']
item['type'] = product['ProductTypeName']
item['url'] = product['ProductUrl']
item['pprice'] = product['SalesPrice']
item['oprice'] = product['OriginalPrice']
        item['discount'] = product['Discount']
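        # item is rebuilt on every iteration and not persisted here; the
        # first-page scraper below has the matching "store item into db" step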
class CtripSaleProductsClass(object):
def __init__(self):
self.url_dict = {'host': 'http://vacations.ctrip.com',
'path': '/Package-Booking-Promotion/jsondataprovider/Query',
'args': {}
}
self.data = {'Data': '{"GroupId":"1",'
'"ChannelId":"1",'
'"DestinationId":"0",'
'"DepartureDate":null,'
'"DepartureCityId":"0",'
'"Channel":"1",'
'"PageIndex":1,'
'"SaleCity":0'
'}',
'QType': 'queryv3'
}
self.header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.8",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Origin": "http://vacations.ctrip.com",
"Referer": "http://vacations.ctrip.com/deals/grouptravel",
"X-Requested-With": "XMLHttpRequest"
}
def request_products(self, method):
if method == 'get':
url = common.compose_url(self.url_dict)
response = requests.get(url, headers=self.header).content.decode('gb2312')
return response
if method == 'post':
self.url_dict['path'] = '/Package-Booking-Promotion/jsondataprovider/Query'
url = common.compose_url(self.url_dict)
response = requests.post(url, data=self.data, headers=self.header).content.decode('gb2312')
cjson = json.loads(response)
return cjson
def extract_first_page_products(self):
response = self.request_products('get')
page_tree = etree.HTML(response)
products = page_tree.xpath('//ul[@class ="basefix"]/li/a')
item = {}
for product in products:
item['url'] = product.xpath('./@href')[0]
pro_info = product.xpath('./div[@class="pro_info"]')[0]
item['type'] = pro_info.xpath('./h3/span/text()')[0][1:-1]
item['pname'] = pro_info.xpath('./h3/text()')[0]
item['deptcity'] = item['pname'].split('出发'.decode('utf-8'))[0].split('[')[-1]
item['pprice'] = pro_info.xpath('./div/div[2]/div[1]/span/text()')[0]
item['discount'] = pro_info.xpath('./div/div[2]/div[2]/text()')[0]
pro_detail = product.xpath('./div[@class="hover_pro_detial"]')[0]
txt_info = pro_detail.xpath('./div[1]/p/text()')
item['deptdate'] = ''
item['description'] = ''
for txt in txt_info:
if '出发日期'.decode('utf-8') in txt:
item['deptdate'] = txt.split(':'.decode('utf-8'))[-1]
else:
item['description'] += txt + '/'
item['oprice'] = pro_detail.xpath('./div[2]/div/del/text()')[0]
# store item into db
pass
def get_products(self):
page_path = ['/deals/grouptravel', # GroupId":"1","ChannelId":"1"Channel:1
'/deals/freetravel', # GroupId":"2","ChannelId":"2 Channel:2
'/deals/localtravel', # GroupId":"5","ChannelId":"4 Channel:4
# '/deals/cruise', #
'/deals/selfdriving'] # GroupId":"3","ChannelId":"4 Channel:4
for path in page_path:
self.url_dict['path'] = path
self.extract_first_page_products()
cjson = self.request_products('post')
extract_product_json(cjson)
|
#!/usr/bin/env python
import sys
sys.path.append("..")
import math
from game.base.entity import Entity
from game.base.signal import Signal
import weakref
import glm
def test_scene():
scene = Signal()
ent = Entity(None, scene)
slot = scene.connect(ent)
ent.slot = weakref.ref(slot)
assert len(scene) == 1
ent.remove()
assert len(scene) == 0
def test_scene_blocked():
scene = Signal()
ent = Entity(None, scene)
scene.blocked += 1
slot = scene.connect(ent)
scene.blocked -= 1
ent.slot = weakref.ref(slot)
assert len(scene) == 0
scene.refresh()
assert len(scene) == 1
scene.blocked += 1
ent.remove()
scene.blocked -= 1
assert len(scene) == 1
scene.refresh()
assert len(scene) == 0
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 22 11:07:15 2017
@author: jte-sre
"""
from __future__ import division
def reference_building(building):
"""
This function calculates parameters for a reference building with the
dimensions of the examined building. Especially the values for the primary
energy requirement and the transmission heat losses are needed to check if
the examined building satisfies the requirement for subsidies in accordance
with the KfW program for efficient buildings
Parameters
----------
building : dictionary
Information about the dimensions of the building
Returns
-------
reference_building : dictionary
        Information about the primary energy requirement and the transmission
heat losses of the reference building
"""
#%% Define Parameters:
component_size = {}
component_size["Area"] = building["Area"]
component_size["Volume"] = 0.76 * building["Area"] * building["Volume"]
component_size["OuterWall"] = building["Area"] * building["OuterWall"]
component_size["Rooftop"] = building["Area"] * building["Rooftop"]
component_size["GroundFloor"] = building["Area"] * building["GroundFloor"]
component_size["Window"] = building["Area"] * building["Window"]
component_size["Window_south"] = 0.25 * component_size["Window"]
component_size["Window_north"] = 0.25 * component_size["Window"]
component_size["Window_east"] = 0.25 * component_size["Window"]
component_size["Window_west"] = 0.25 * component_size["Window"]
#Calculation for one heating period
t = 185 #days
# U-Values for EnEV reference building (W/m²K)
u_ref = {}
u_ref["Window"] = 1.3
u_ref["OuterWall"] = 0.28
u_ref["GroundFloor"] = 0.35
u_ref["Rooftop"] = 0.2
Fx = {}
Fx["Window"] = 1
Fx["OuterWall"] = 1
Fx["GroundFloor"] = 0.6
Fx["Rooftop"] = 1
#%% Starting Calculation
#%% Losses
    #Transmission heat losses
building_parts = ("Window", "OuterWall", "Rooftop", "GroundFloor")
H_t = sum(component_size[i] * u_ref[i] * Fx[i] for i in building_parts) #W/K
    #Surcharge for thermal bridges
U_wb = 0.05
H_t = H_t + U_wb * sum(component_size[i] for i in building_parts) #W/K
    #Specific transmission losses
H_t_spec = H_t / sum(component_size[i] for i in building_parts) #W/m²K
#Ventilation losses
ro_cp = 0.34 #Wh/m³K
n = 0.7 #1/h
H_v = ro_cp * n * component_size["Volume"] #W/K
# Total heating losses
    G_t = 2900 #Kd - degree days (Gradtagzahl)
f_na = 0.95 #Parameter for switching off the heater in the night
f_ql = 0.024 * G_t * f_na
Q_l = (H_t + H_v) * f_ql #kWh
#%%Profits
#Annual solar radiation per direction
I_s = 270 #kWh/m²a
I_e = 155 #kWh/m²a
I_w = 155 #kWh/m²a
I_n = 100 #kWh/m²a
#Solar profits
    F_f = 0.7 #reduction because of window frame
F_s = 0.9 #shading coefficient
F_c = 1.0 #reduction because of sun protection
g = 0.6 * 0.9 #reduction because of not-vertical radiation
Q_s = F_f * F_s * F_c * g * (I_s * component_size["Window_south"] +
I_w * component_size["Window_west"] +
I_e * component_size["Window_east"] +
I_n * component_size["Window_north"])
#kWh/HP - Solar gains per heating period
#Internal profits
q_i = 5 #W/m²
Q_i = 0.024 * q_i * component_size["Area"] * t #kWh/a
Q_g = Q_i + Q_s #kWh/HP - Internal gains per heating period
#%% Total heating demand
eta = 0.95 #utilisation factor for heating gains
Q_h = Q_l - eta * Q_g
Q_tw = 12.5 * component_size["Area"]
# Q_h_spec = Q_h / A_n
Q_htw_spec = (Q_h + Q_tw) / component_size["Area"]
#%% Primary Energy
#Expenditure factor for heating system
eg = 0.97 # DIN V 4701-10 Table C.3-4b "Brennwertkessel Verbessert"
Q_p = eg * Q_htw_spec * component_size["Area"]
reference_building = {}
reference_building["H_t_spec"] = H_t_spec
reference_building["Q_p"] = Q_p / 1000 # Switch to MW
reference_building["Q_s"] = Q_s
reference_building["Q_i"] = Q_i
reference_building["Q_tw"] = Q_tw
reference_building["H_v"] = H_v
reference_building["f_ql"] = f_ql
reference_building["eta"] = eta
reference_building["U-values"] = {}
reference_building["U-values"]["GroundFloor"] = u_ref["GroundFloor"]
reference_building["U-values"]["Window"] = u_ref["Window"]
reference_building["U-values"]["Rooftop"] = u_ref["Rooftop"]
reference_building["U-values"]["OuterWall"] = u_ref["OuterWall"]
return reference_building
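# A minimal usage sketch (not from the original module); the ratio values
# below are made-up placeholders, chosen only to show the expected keys:
if __name__ == "__main__":
    example_building = {
        "Area": 150.0,        # heated floor area in m2
        "Volume": 2.8,        # multiplier in the 0.76 * Area * Volume term
        "OuterWall": 1.2,     # outer wall area per m2 of floor area
        "Rooftop": 0.8,       # roof area per m2 of floor area
        "GroundFloor": 0.5,   # ground floor area per m2 of floor area
        "Window": 0.2,        # window area per m2 of floor area
    }
    ref = reference_building(example_building)
    print("H_t_spec = %.3f W/m2K, Q_p = %.2f MWh" % (ref["H_t_spec"], ref["Q_p"]))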
|
def pattern(n):
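    # e.g. pattern(4) -> "4321\n432\n43\n4"; Python 2 map() returns a list,
    # so the nums[:a] slicing below works directly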
nums = map(str, xrange(n, 0, -1))
return '\n'.join(''.join(nums[:a]) for a in xrange(n, 0, -1))
|
'''
Print a 10-row Yang Hui (Pascal's) triangle.
'''
row = 10  # number of rows
triangle = [[0 for i in range(row)] for i in range(row)]  # 10x10 array
for i in range(row):  # rows
    for j in range(row):  # columns
        if j == 0 or j == i:
            triangle[i][j] = 1
        else:
            triangle[i][j] = triangle[i-1][j] + triangle[i-1][j-1]
# if triangle[i][j] != 0:  # right-angled output
#     print(triangle[i][j], end=" ")
# print()
for i in range(row):  # isosceles triangle output
    num = row - i
    for j in range(num+1):
        print(" ", end="")
    for k in range(i+1):
        if triangle[i][k] != 0:
            print(triangle[i][k], end=" ")
    print()
|
#!/usr/bin/env python3
from sympy import isprime
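# Walk the diagonals of a number spiral layer by layer (side length 2*n + 1)
# and stop once the prime ratio among the 4*n + 1 diagonal values drops
# below 10%, printing that side length.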
n = 1
total_primes = 0
while True:
    donji_lijevi = (2*n + 1)**2 - 2*n   # bottom-left corner of the layer
    gornji_lijevi = donji_lijevi - 2*n  # top-left corner
    gornji_desni = gornji_lijevi - 2*n  # top-right corner
if isprime(donji_lijevi):
total_primes += 1
if isprime(gornji_lijevi):
total_primes += 1
if isprime(gornji_desni):
total_primes += 1
if total_primes / (4*n + 1) < 0.1:
print(2*n + 1)
break
n += 1
|
from random import randint
print("You only have 3 chances to guess")
bil = randint(0, 30)
chance = 3
for chances in range(chance):
    # read the guess
    number = int(input("input a number from 0 to 30: "))
    if number == bil:
        print("Hooray, your guess is right!")
        break
    elif number < bil:
        print("Almost there: the number you entered is too small")
    else:
        print("Almost there: the number you entered is too big")
else:
    print(f"Sorry, all your guesses were wrong; the answer was {bil}. Thanks!")
|
#!/usr/bin/env python
Donators = {
u"James Hemmaplardh": [10, 50, 900],
u"Bill Gates": [999, 9999, 999],
u"The Batman": [1],
u"Dash Berlin": [10, 5],
u"Avicii Levels": [69, 99, 101]
}
def send_thankyou():
    while True:
        name = raw_input(u"Enter 'list' for a list of donor names. Enter First and Last name:")
        if name == u"list":
            print Donators.keys()
        else:
            Donators.setdefault(name, [])
            return name
def collect_donation(DonorName):
    while True:
        try:
            amount = int(raw_input(u"Enter the donation amount:"))
        except ValueError:
            print u"Enter an integer:"
            continue
        if amount <= 0:
            print u"Enter a positive amount:"
            continue
        Donators[DonorName].append(amount)
        return amount
def send_ThankYou(DonorName, amount):
    print u"Dear %s,"%(DonorName)
    print u" Thank you for your donation of $%s."%(amount)
    print u" Regards, The NSA"
def create_report():
report = []
for i in Donators:
report.append([i,sum(Donators[i]), len(Donators[i]), (sum(Donators[i])/len(Donators[i]))])
report.sort(key=lambda i: i[1])
for i in report:
print (u"DonorName:%s, TotalDonated:$%i, Donations:%i, AverageDonation:$%i" % (i[0], i[1], i[2], i[3]))
while True:
user_choice = raw_input(u"Enter '1' ,'2' or '3'. 1 to Send a ThankYou, 2 to Create a Report, 3 to exit:")
if user_choice == u"1":
        DonorName = send_thankyou()
        amount = collect_donation(DonorName)
        send_ThankYou(DonorName, amount)
elif user_choice == u"2":
create_report()
elif user_choice == u"3":
print u"The program will now terminate."
break
else:
continue
|
'''
Data preparation tool, created by shadySource (copyright 2018, MIT license).
Built for my data; it could be used as inspiration for other datasets.
'''
import os
from os.path import join
import cv2
import numpy as np
from PIL import Image
def preprocess_image(x):
x = x.astype(np.float64)
channelmax = np.asarray(np.max(np.max(x, axis=0), axis=0))
channelmax[channelmax < 1] = 1000
x = x / channelmax
x = x - 0.5
x = x * 2.
return x.astype(np.float16)
def unprocess_image(x):
x = x.astype(np.float64)
x = x / 2
x = x + 0.5
x = x * 255
x = x.astype(np.uint8)
return x
def _unprocess_image(x):
'''
This is a mistake which resulted in the training data being
messed up. Notice '0.' is added instead of '0.5'
this results in old models not working unless this is used
while loading from PIL
'''
x = x.astype(np.float64)
x = x / 2
x = x + 0.
x = x * 255
x = x.astype(np.uint8)
return x
def preprocess_label(x):
x = x.astype(np.float16)
x = np.divide(x, 255)
return x
def unprocess_label(x):
x = np.multiply(x, 255)
x = x.astype(np.uint8)
return x
if __name__ == "__main__":
# for my files, the last integer in the filename is the data image number.
name_key = lambda x : int(''.join([s for s in x[x.find('img'):] if s.isdigit()]))
output_data_dir = '/home/marvin/road_detector/train'
input_data = '/home/marvin/road_detector/unzipped/inputs'
labels_data = '/home/marvin/road_detector/unzipped/labels/'
reshape_size = 512
for _dir in os.listdir(input_data):
input_data_dirs = [join(input_data, _dir, 'RGB-PanSharpen')]#,
# join(input_data,_dir,'RGB-PanSharpen')]
label_data_dirs = [join(labels_data, _dir[:-12]+ '_Masks')]#,
# join(labels_data, _dir[:-12]+ '_Roads_Nodes')]
if not os.path.exists(output_data_dir):
os.makedirs(output_data_dir)
input_file_lists = [os.listdir(input_data_dir) for input_data_dir in input_data_dirs]
input_keys_lists = [[name_key(f) for f in input_files] for input_files in input_file_lists]
label_files_lists = [os.listdir(label_data_dir) for label_data_dir in label_data_dirs]
label_files_lists[0].sort(key=name_key) # this one is used to get the order of files
label_keys_lists = [[name_key(f) for f in label_files] for label_files in label_files_lists]
x = []
y = []
dataset_name = os.path.basename(label_data_dirs[0])
for j, label_file in enumerate(label_files_lists[0]):
img_number = label_keys_lists[0][j]
try:
input_file_idx = [input_keys.index(img_number) for input_keys in input_keys_lists]
except ValueError: # input image_number is not in the list of files
continue
input_files = [input_file_lists[i][input_file_idx[i]] for i in range(len(input_file_idx))]
image = [cv2.imread(join(input_data_dirs[i], input_files[i]), cv2.IMREAD_UNCHANGED) for i in range(len(input_file_idx))]
image = [np.array(Image.fromarray(unprocess_image(preprocess_image(img))).resize((reshape_size, reshape_size))) for img in image]
image = np.concatenate(image, axis=-1)
# print(image.shape)
image = preprocess_image(image)
# Image.fromarray(unprocess_image(image)).show()
# Show image layers and exit:
# image = unprocess_image(image, 8)
# for i in range(image.shape[-1]):
# cv2.imshow('image', image[:, :, i])
# cv2.waitKey(2000)
# print(np.mean(image[:, :, i]))
# print(np.max(image[:, :, i]))
# print(np.min(image[:, :, i]))
# exit()
label_file_idx = [label_keys.index(img_number) for label_keys in label_keys_lists]
label_files = [label_files_lists[i][label_file_idx[i]] for i in range(len(label_file_idx))]
labels = [cv2.imread(join(label_data_dirs[i], label_files[i]), cv2.IMREAD_GRAYSCALE) for i in range(len(label_file_idx))]
labels = [np.array(Image.fromarray(img).resize((reshape_size, reshape_size))) for img in labels]
labels = np.concatenate(labels, axis=-1)
# print(labels.shape)
labels = np.expand_dims(preprocess_label(labels), axis=-1)
# Image.fromarray(unprocess_label(labels)).show()
# exit()
# for i in range(labels.shape[-1]):
# # cv2.imshow('label', labels)
# # cv2.waitKey(10000)
# print(np.mean(labels[:, :, i]))
# print(np.max(labels[:, :, i]))
# print(np.min(labels[:, :, i]))
# exit()
# measure mean and std
# x.append(np.mean(np.mean(image, axis=0), axis=0))
# print('mean',np.mean(np.array(x), axis=0))
# y.append(np.std(np.std(image, axis=0), axis=0))
# print('std', np.mean(np.array(y), axis=0))
# continue
x.append(image)
y.append(labels)
if j%500==0 and j > 0: # about 3 gigs uncompressed
x = np.array(x)
y = np.array(y)
print('x', x.shape, 'y', y.shape)
print('saving as', join(output_data_dir, _dir + str(img_number)), '...')
np.savez_compressed(join(output_data_dir, _dir + str(img_number)), x=x, y=y)
x = []
y = []
# save anything left over
x = np.array(x)
y = np.array(y)
print('x', x.shape, 'y', y.shape)
print('saving as', join(output_data_dir, _dir + str(img_number)), '...')
np.savez_compressed(join(output_data_dir, _dir + str(img_number)), x=x, y=y)
print('!!SAVING COMPLETE!!')
def data_generator(data_dir, batch_size=8, shape=[256, 256], flip_prob=.5):
while True:
for npz_file in os.listdir(data_dir):
data_path = join(data_dir, npz_file)
print('loading', data_path)
data = np.load(data_path)
data_x = data['x']
data_y = data['y']
data_len = data_x.shape[0]
for i in range(data_len//batch_size):
image, mask = ([], [])
for j in range(batch_size):
# data_idx = np.random.randint(0, data_len)
data_idx = (i * (batch_size) + j) % data_len
# cropping indices
# x_idx = np.random.randint(0, data_x.shape[1]-shape[0])
# y_idx = np.random.randint(0, data_x.shape[2]-shape[1])
# x = data_x[data_idx, x_idx:x_idx+shape[0], y_idx:y_idx+shape[1], :]
# y = data_y[data_idx, x_idx:x_idx+shape[0], y_idx:y_idx+shape[1], :]
x = data_x[data_idx]
y = data_y[data_idx]
if np.random.random() < flip_prob:
if np.random.random() < 0.5:
x = x[:,::-1,:]
y = y[:,::-1,:]
if np.random.random() < 0.5:
x = x[::-1,:,:]
y = y[::-1,:,:]
image.append(x)
mask.append(y)
yield image, mask
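# A minimal consumption sketch (assumes the compressed .npz batches written
# by the __main__ block above already exist under output_data_dir):
# gen = data_generator('/home/marvin/road_detector/train', batch_size=8)
# images, masks = next(gen)  # lists of batch_size image/mask arrays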
|
def BuscaBin(list, item):
prim = 0
ult = len(list) -1
found = False
while prim <= ult and not found:
meio = (prim + ult) // 2
if list[meio] == item:
found = True
else:
if item < list[meio]:
ult = meio - 1
else:
prim = meio + 1
return found
lista = [42, 32, 19, 17, 13, 8, 2, 1, 0]
lista.sort()
# print(lista)
buscar = int(input("Enter a number to search for in the list: "))
resultado = BuscaBin(lista, buscar)
if resultado:
    print(f'The value {buscar} is in the list')
else:
    print(f'The value {buscar} was not found in the list')
# print(BuscaBin(lista, buscar))
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# In[2]:
# Read the 'users' sheet from the Excel file.
users = pd.read_excel('keystroke_51_Aggiornato.xls','users')
#users
users.shape
# In[3]:
users.columns
# In[4]:
# To filter the data we use the iloc function:
# the first parameter selects the rows, the second the Age column.
Y = users.iloc[:, 4].values
print (Y)
# In[5]:
# Convert the array into a DataFrame
dataFrame_Y= pd.DataFrame(Y)
dataFrame_Y
# In[6]:
# Select users aged 7-18
piccolo = dataFrame_Y[(dataFrame_Y == 1)].dropna()
# forced cast to int
piccolo.astype(int)
# In[7]:
# Select users aged 19-29
medio = dataFrame_Y[(dataFrame_Y == 2)].dropna()
# forced cast to int
medio.astype(int)
# In[8]:
# Select users aged 30-65
grande = dataFrame_Y[(dataFrame_Y == 3)].dropna()
grande.astype(int)
# In[9]:
# Visualize the filtered data with charts.
#get_ipython().run_line_magic('matplotlib', 'inline')
# In[10]:
# create the labels for the chart
labels = '7-18 years', '19-29 years', '30-65 years'
#print("Users aged 7-18:", piccolo.size)
#print("Users aged 19-29:", medio.size)
#print("Users aged 30-65:", grande.size)
# assign the label counts to the sizes variable
sizes = [piccolo.size, medio.size, grande.size]
explode = (0, 0,0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90, counterclock=False)
fig1.suptitle('User breakdown by age')
ax1.axis('equal')
plt.show()
# In[11]:
labels = labels
p=piccolo.size
m=medio.size
g=grande.size
y = [p, m, g]
fig, ax2 = plt.subplots()
xticks = [1,2,3]  # used to position the bars and the labels
ax2.bar(xticks, y, align='center')
ax2.set_title("User breakdown by age")
ax2.set_xticklabels(labels)  # placed where the xticks are
ax2.set_xticks(xticks)
plt.show()
|
#!/usr/bin/python
# -*-coding:gbk-*-
import json
from json import JSONDecodeError
from helper.Grep import Grep
from urllib import parse
import os
import sys
from functools import reduce
import requests
class anjuke():
timesleep = 1
def __init__(self, timesleep):
self.timesleep = timesleep
def geo(self, obj):
url = "http://api.map.baidu.com/geocoder/v2/?location=" + obj["lng"] + "," + obj[
"lat"] + "&output=json&pois=1&ak=rhRhulWg5FGAh83v7fkHxhc4j2779L8d"
        response = requests.get(url)
        try:
            jsonObj = json.loads(response.text)
if jsonObj["status"] == 0:
address = jsonObj["result"]["formatted_address"]
province = jsonObj["result"]["addressComponent"]["province"]
city = jsonObj["result"]["addressComponent"]["city"]
district = jsonObj["result"]["addressComponent"]["district"]
obj["address"] = address
obj["province"] = province
obj["city"] = city
poi = jsonObj["result"]["pois"][0]["addr"]
obj["poi"] = poi
obj["district"] = district
except(IndexError, JSONDecodeError):
print("poi不存在")
def getCity(self):
grep = Grep().setTimesleep(self.timesleep)
url = 'https://www.anjuke.com/sy-city.html'
dic = {"href": {"isAttr": True, "grepname": "href"},
"text": {"isAttr": False, "isText": True, "grepname": "name"}}
grep.html(url)
selects = grep.soup.select(".city_list a")
return grep.setSelects(selects).grep(dic, "city.json")
def collector(self, mod_num=1, mod_index=0):
try:
file = open("./conf/city.json", mode='rb')
city_list = json.loads(file.read())
except IOError:
city_list = self.getCity()
city_list = Grep().divGroup(city_list, mod_num, mod_index)
Grep().save(city_list, ".." + os.sep + "out1" + os.sep + "city_" + str(mod_index) + ".json")
for city in city_list:
flag = os.path.exists("./out1/" + city["name"] + ".json")
if not flag:
try:
self.getXiaoQu(city["href"], city["name"])
except OSError:
continue
def getXiaoQuDetail(self, url, obj):
grep = Grep().setTimesleep(self.timesleep)
arr = url.split("community")
header = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Host": url.replace("https://", "").split("/")[0],
"Referer": arr[0] + "community/props/sale" + arr[1],
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0"
}
grep.html(url, autoHeader=False, header=header, isPrintUrl=False, isProxyPrint=False)
try:
image = grep.soup.select_one("#j-switch-basic img:nth-of-type(1)").get("src")
obj["bigimage"] = image
decription = grep.soup.select_one(".comm-brief-mod.j-ext-infos > p:nth-of-type(1)")
obj["decription"] = ""
if decription is not None:
obj["decription"] = decription.text
info = grep.soup.select("dl.basic-parms-mod")[0]
lx = info.select_one("dt:nth-of-type(1)").text + info.select_one("dd:nth-of-type(1)").text
areas = info.select_one("dt:nth-of-type(3)").text + info.select_one("dd:nth-of-type(3)").text
sums = info.select_one("dt:nth-of-type(4)").text + info.select_one("dd:nth-of-type(4)").text
years = info.select_one("dt:nth-of-type(5)").text + info.select_one("dd:nth-of-type(5)").text
lh = info.select_one("dt:nth-of-type(8)").text + info.select_one("dd:nth-of-type(8)").text
kfs = info.select_one("dt:nth-of-type(9)").text + info.select_one("dd:nth-of-type(9)").text
wygs = info.select_one("dt:nth-of-type(10)").text + info.select_one("dd:nth-of-type(10)").text
obj["params"] = lx + ";" + areas + ";" + sums + ";" + years + ";" + lh + ";" + kfs + ";" + wygs + ";"
except:
print(obj)
    def getXiaoQu(self, url, name, index=1, ls=None):
        if ls is None:  # avoid a shared mutable default argument
            ls = []
        isHasNext = False
flag = os.path.exists("." + os.sep + "out1" + os.sep + name + "-" + str(index) + ".json")
if not flag:
rurl = url + "/community/p" + str(index) + "?from=navigation"
grep = Grep().setTimesleep(self.timesleep)
header = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Host": url.replace("https://", ""),
"Referer": url + "?from=navigation",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0"
}
grep.html(rurl, autoHeader=False, header=header)
selects = grep.soup.select(".li-itemmod")
check = grep.soup.select_one(".aNxt")
isHasNext = check
onePage = []
for select in selects:
obj = {}
image = select.select_one("img").get("src")
xiaoqu = select.select_one(".li-info h3 a")
rname = xiaoqu.text
info = select.select_one(".date").text.strip()
href = xiaoqu.get("href")
location = select.select_one(".bot-tag > a:nth-of-type(1)").get("href")
params = location.split("#")[1]
res = parse.parse_qs(params)
lng = res["l1"][0]
lat = res["l2"][0]
obj = {"img": image, "href": href, "name": rname, "lng": lng, "lat": lat, "info": info}
self.getXiaoQuDetail(href, obj)
# self.geo(obj)
onePage.append(obj)
print(obj)
ls.append(obj)
grep.save(onePage, ".." + os.sep + "out1" + os.sep + name + "-" + str(index) + ".json")
if isHasNext:
index = index + 1
self.getXiaoQu(url, name, index, ls)
def str2float(s):
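    # Parse a decimal string such as '1.5' into a float digit-by-digit with
    # reduce(); the string must contain a '.', or s.index('.') raises ValueError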
def fn(x, y):
return x * 10 + y
n = s.index('.')
s1 = list(map(int, [x for x in s[:n]]))
s2 = list(map(int, [x for x in s[n + 1:]]))
return reduce(fn, s1) + reduce(fn, s2) / (10 ** len(s2))
if __name__ == '__main__':
anjuke(str2float(sys.argv[1])).collector(int(sys.argv[2]), int(sys.argv[3]))
|
"""
Utilities for importing the LANDFIRE dataset into LandscapeSim.
The ST-Sim model library uses the BPS_MODEL code as the name, which is not suitable for display.
The goal here is to use the existing metadata to import the appropriate name for the model name, but leave the
model library untouched.
BpS_CODE --> Library Model
BpS_Name --> Descriptive Name
VegetationType --> Vegetation classification (could be used to filter down)
Class<A-E>Cover --> Cover Type
Class<A-E>Struct --> Structural Stage
State Classes are defined with a unique combination of Cover Type and Structural Stage. The ST-Sim model library
defines 27 possible state classes, of which a maximum of 5 are used for any vegetation type.
"""
import csv
import json
import os
import numpy
from django.conf import settings
from landscapesim.common.geojson import zonal_stats
from landscapesim.importers import ProjectImporter
from landscapesim.importers.project import STRATUM
from landscapesim.models import Stratum, StateClass
# Unique identifier for this contributor module's library name.
LIBRARY_NAME = 'LANDFIRE'
# Data file paths
LANDFIRE_DIR = os.path.join(settings.BASE_DIR, 'materials', 'landfire')
BPS_TIF = os.path.join(LANDFIRE_DIR, 'LANDFIRE_130BPS.tif')
SCLASS_TIF = os.path.join(LANDFIRE_DIR, 'LANDFIRE_130SCLASS.tif')
BPS_FILE = os.path.join(LANDFIRE_DIR, 'US_130_BPS.csv')
SCLASS_FILE = os.path.join(LANDFIRE_DIR, 'US_130_SCLASS.csv')
BPS_SC_FILE = os.path.join(LANDFIRE_DIR, 'LANDFIRE_BPS_SCLASS_mapping.csv')
SCLASS_ID_FILE = os.path.join(LANDFIRE_DIR, 'LANDFIRE_STSIM_SCLASS_ID_mapping.csv')
# Disallow use of module if data is not present.
all_data_exist = all(os.path.exists(p) for p in (LANDFIRE_DIR, BPS_TIF, SCLASS_TIF, BPS_FILE, SCLASS_FILE,
BPS_SC_FILE, SCLASS_ID_FILE))
if not all_data_exist:
raise ImportError(
"LANDFIRE support is not enabled."
"Check to see that all necessary files exist in <project_root>/materials/landfire"
)
def create_mapping(path, src, dest, key_type=None) -> dict:
""" Create a dictionary between two attributes. """
if key_type is None:
key_type = int
with open(path, 'r') as f:
reader = csv.DictReader(f)
raw_data = [r for r in reader]
mapping = {key_type(row[src]): row[dest] for row in raw_data}
return mapping
BPS_MAPPING = create_mapping(BPS_FILE, 'VALUE', 'BPS_MODEL')
BPS_NAMES = create_mapping(BPS_FILE, 'VALUE', 'BPS_NAME')
SCLASS_MAPPING = create_mapping(SCLASS_FILE, 'Value', 'Label')
SCLASS_A_MAPPING = create_mapping(BPS_SC_FILE, 'code', 'A')
SCLASS_B_MAPPING = create_mapping(BPS_SC_FILE, 'code', 'B')
SCLASS_C_MAPPING = create_mapping(BPS_SC_FILE, 'code', 'C')
SCLASS_D_MAPPING = create_mapping(BPS_SC_FILE, 'code', 'D')
SCLASS_E_MAPPING = create_mapping(BPS_SC_FILE, 'code', 'E')
SCLASS_ID_MAPPING = create_mapping(SCLASS_ID_FILE, 'Name', 'ID', str)
SCLASS_ALL_MAPPINGS = (
('A', SCLASS_A_MAPPING),
('B', SCLASS_B_MAPPING),
('C', SCLASS_C_MAPPING),
('D', SCLASS_D_MAPPING),
('E', SCLASS_E_MAPPING)
)
# Build a color description
R = create_mapping(BPS_FILE, 'VALUE', 'R')
G = create_mapping(BPS_FILE, 'VALUE', 'G')
B = create_mapping(BPS_FILE, 'VALUE', 'B')
BPS_COLORS = {k: '255,{},{},{}'.format(R[k], G[k], B[k]) for k in BPS_MAPPING}
class LandfireProjectImporter(ProjectImporter):
""" A custom Project importer that uses more descriptive names than those stored in the SyncroSim database. """
def _extract_sheet_alternative_names_and_colors(
self,
sheet_config,
map_key,
name_mapping,
color_mapping
):
"""
Custom definitions based on external CSV mappings for a given value.
"""
sheet_name, model, sheet_map, type_map = sheet_config
self.console.export_sheet(sheet_name, self.temp_file, **self.sheet_kwargs)
with open(self.temp_file, 'r') as sheet:
reader = csv.DictReader(sheet)
data = [r for r in reader]
for row in data:
mapped_row = self.map_row(row, sheet_map, type_map)
row_id = mapped_row[map_key]
descriptive_name = name_mapping[row_id]
color = color_mapping[row_id]
mapped_row['description'] = descriptive_name
mapped_row['color'] = color
instance_data = {**self.import_kwargs, **mapped_row}
model.objects.create(**instance_data)
print("Imported {} (with customized LANDFIRE descriptions)".format(sheet_name))
self._cleanup_temp_file()
def import_stratum(self):
self._extract_sheet_alternative_names_and_colors(STRATUM, 'stratum_id', BPS_NAMES, BPS_COLORS)
# Register the importer classes so that LandscapeSim picks them up
PROJECT_IMPORTER_CLASS = LandfireProjectImporter
def get_initial_conditions(scenario, reporting_unit):
""" Retreive the initial conditions from a given reporting unit. """
feature = dict(
geometry=json.loads(reporting_unit.polygon.json),
type='Feature',
properties={}
)
# Collect zonal stats from rasters
bps_stats, bps_raster = zonal_stats(feature, BPS_TIF)
sclass_stats, sclass_raster = zonal_stats(feature, SCLASS_TIF)
# The count of the area that *is not* masked, i.e. the count within the reporting unit
count = bps_raster.count()
# Yield each set of initial conditions
for value in bps_stats:
if value in BPS_MAPPING:
# Skip raster values whose BPS model code is not numeric.
try:
bps_model_code = int(BPS_MAPPING[value])
except ValueError:
continue
stratum = Stratum.objects.filter(name=BPS_MAPPING[value], project=scenario.project)
# Not all BpS vegetation types have a STM model. Since we can't model them, we skip them.
if not stratum:
continue
stratum = stratum.first()
stateclass_names = []
for sclass_type, lookup in SCLASS_ALL_MAPPINGS:
if bps_model_code in lookup:
name = lookup[bps_model_code]
if name:
stateclass_names.append((sclass_type, name))
sclass_locations = sclass_raster[numpy.where(bps_raster == value)]
sclass_keys_found, sclass_counts = numpy.unique(sclass_locations, return_counts=True)
for i, name_tuple in enumerate(stateclass_names):
sclass_type, stateclass_name = name_tuple
if i not in sclass_keys_found:
relative_amount = 0.0
else:
sclass_idx = list(sclass_keys_found).index(i)
relative_amount = sclass_counts[sclass_idx] / count * 100.0  # percent of the state class over the total number of cells
stateclass = StateClass.objects.filter(name=stateclass_name, project=scenario.project).first()
yield {
'scenario': scenario,
'relative_amount': relative_amount,
'stratum': stratum,
'stateclass': stateclass,
'reporting_unit': reporting_unit
}
def create_strata_raster():
""" Create a stratum raster for importing into ST-Sim. """
pass
def create_stateclass_raster():
""" Create a stateclass raster for importing into ST-Sim. """
pass
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-06 09:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=70)),
('headshot', models.ImageField(blank=True, null=True, upload_to='headshots')),
('visible', models.BooleanField(default=True)),
('github_url', models.URLField()),
('linkedin_url', models.URLField()),
('personal_website', models.URLField()),
('team_type', models.CharField(choices=[('Core Team', 'Core Team'), ('Contributor', 'Contributor')], max_length=50)),
],
options={
'db_table': 'teams',
},
),
]
|
"""A demo shows how to use binary focal loss."""
import numpy as np
from keras.datasets import imdb
from keras.layers import Dense, Input
from keras.models import Model
from keras.regularizers import l2
from sklearn.utils import compute_class_weight
from losses.focal_loss import binary_focal_loss
def create_model(l=0.01):
inputs = Input(shape=(8000,))
x = inputs
x = Dense(32, activation="relu", kernel_regularizer=l2(l), bias_regularizer=l2(l))(x)
x = Dense(16, activation="relu", kernel_regularizer=l2(l), bias_regularizer=l2(l))(x)
outputs = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss=binary_focal_loss(gamma=2), optimizer="adam", metrics=["accuracy"])
return model
def vectorize_sequences(sequences, dimension=8000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1.
return results
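# For reference, a sketch of what the imported loss computes (assuming the usual
# binary focal loss formulation; see losses/focal_loss.py for the actual code):
# with p_t the predicted probability of the true class,
#   FL(p_t) = -alpha_t * (1 - p_t) ** gamma * log(p_t)
# Here alpha_t is supplied via Keras' class_weight rather than inside the loss.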
if __name__ == "__main__":
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=8000)
x_train = vectorize_sequences(x_train)
x_test = vectorize_sequences(x_test)
class_weight = compute_class_weight(class_weight="balanced", classes=np.unique(y_train), y=y_train)
print("class weight:", class_weight)
model = create_model(l=0.1)
model.summary()
# the class weights act as the alpha term of focal loss, so alpha does not need to be defined inside the loss function.
model.fit(x_train, y_train, batch_size=512, epochs=20, verbose=1, validation_data=(x_test, y_test),
class_weight=dict(enumerate(class_weight)))
loss, accuracy = model.evaluate(x_test, y_test)
print("loss:", loss)
print("accuracy:", accuracy)
|
from django.shortcuts import render, redirect
from .models import Contact
from login.decorators import login_required
def intro(request):
return render(request, 'about/intro.html')
# @login_required
def contact(request):
if request.method == 'GET':
return render(request, 'about/contact.html')
elif request.method == 'POST':
# Variables defined inside a function are local and disappear when the
# function returns, so the draft is kept in a module-level global instead
# (named pending_contact so it does not shadow this view function).
global pending_contact
# read the submitted fields from the HTML form
contact_email = request.POST.get('contact-email', None)
contact_title = request.POST.get('contact-title', None)
contact_contents = request.POST.get('contact-contents', None)
contact_type = request.POST.get('contact-select', None)
# submit button on contact.html
check_contents = request.POST.get('check-contents', None)
# submit button on contact_check.html
submit = request.POST.get('submit', None)
contact_temp = Contact(
contact_email=contact_email,
contact_title=contact_title,
contact_contents=contact_contents,
contact_type=contact_type,
)
# show the page where the user reviews what they wrote
if check_contents == 'check-contents':
pending_contact = contact_temp
return render(request, 'about/contact_check.html', {'contact': pending_contact})
# save the draft and move to the completion page
elif submit == 'submit':
pending_contact.save()
return render(request, 'about/contact_complete.html')
def faq(request):
return render(request, 'about/faq.html')
def policy(request):
return render(request, 'about/policy.html')
def terms(request):
return render(request, 'about/terms.html')
|
class Solution:
ROMAN_VALUES = {"M": 1000, "D": 500, "C": 100, "L": 50, "X": 10, "V": 5, "I": 1}
def parse_roman_numerals(self, numeral):
# Map each character to its value; stop at the first invalid character.
new = list()
for char in numeral:
if char not in self.ROMAN_VALUES:
break
new.append(self.ROMAN_VALUES[char])
# Merge runs of equal values, and negate a value that precedes a larger
# one (subtractive notation, e.g. IV = 4).
result = new[:]
for i in range(0, len(new) - 1):
if new[i] == new[i + 1]:
result[i + 1] = result[i] + result[i + 1]
result[i] = 0
elif new[i] < new[i + 1]:
result[i] = -result[i]
return sum(result)
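# Usage sketch: Solution().parse_roman_numerals("MCMXCIV") returns 1994.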
|
from django.conf.urls import url
from .views import views, garmin_views
app_name = 'user_input'
urlpatterns = [
url(r'^daily_input/$', views.UserDailyInputView.as_view(),
name='user_daily_input'),
url(r'^daily_input/item/$', views.UserDailyInputItemView.as_view(),
name='user_daily_input_item'),
url(r'^daily_activity/$', views.DailyActivityView.as_view(),
name='user_daily_activity'),
url(r'^daily_input/item/recent/$', views.UserDailyInputLatestItemView.as_view()),
url(r'^daily_input/garmin_data/$', garmin_views.GarminData.as_view()),
url(r'^daily_input/get_activity_info$', views.GetManualActivityInfo.as_view())
]
|
#!/usr/bin/python
# Find the perimeter p < 1000 that admits the most right triangles with
# integer sides a, b, c (a^2 + b^2 = c^2).
import math
squares = [i ** 2 for i in range(1000)]
counts = {}
for a in range(1, 1000):
for b in range(a, 1000):
c = math.sqrt(squares[a] + squares[b])
if int(c) == c:
p = a + b + int(c)
counts[p] = counts.get(p, 0) + 1
largest = 0
index = 0
for p in range(1, 1000):
if p in counts and counts[p] > largest:
largest = counts[p]
index = p
print(index)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import Tk, Text, Scrollbar, Menu, messagebox, filedialog, BooleanVar, Checkbutton, Label, Entry, StringVar, Grid, Frame
import os, subprocess, json, string
class Editor():
def __init__(self, root):
self.root = root
self.TITLE = "Winux Editor"
self.file_path = None
self.set_title()
frame = Frame(root)
self.yscrollbar = Scrollbar(frame, orient="vertical")
self.editor = Text(frame, yscrollcommand=self.yscrollbar.set)
self.editor.pack(side="left", fill="both", expand=1)
self.editor.config(wrap="word",  # use word wrapping
undo=True,  # Tk 8.4
width=80)
self.editor.focus()
self.yscrollbar.pack(side="right", fill="y")
self.yscrollbar.config(command=self.editor.yview)
frame.pack(fill="both", expand=1)
#instead of closing the window, execute a function
root.protocol("WM_DELETE_WINDOW", self.file_quit)
#create a top level menu
self.menubar = Menu(root)
#Menu item File
filemenu = Menu(self.menubar, tearoff=0)  # tearoff=0 => can't be separated from the window
filemenu.add_command(label="New", underline=1, command=self.file_new, accelerator="Ctrl+N")
filemenu.add_command(label="Open...", underline=1, command=self.file_open, accelerator="Ctrl+O")
filemenu.add_command(label="Save", underline=1, command=self.file_save, accelerator="Ctrl+S")
filemenu.add_command(label="Save As...", underline=5, command=self.file_save_as, accelerator="Ctrl+Alt+S")
filemenu.add_separator()
filemenu.add_command(label="Exit", underline=2, command=self.file_quit, accelerator="Alt+F4")
self.menubar.add_cascade(label="File", underline=0, menu=filemenu)
# display the menu
root.config(menu=self.menubar)
def save_if_modified(self, event=None):
if self.editor.edit_modified(): #modified
response = messagebox.askyesnocancel("Save?", "This document has been modified. Do you want to save changes?") #yes = True, no = False, cancel = None
if response: #yes/save
result = self.file_save()
if result == "saved": #saved
return True
else: #save cancelled
return None
else:
return response #None = cancel/abort, False = no/discard
else: #not modified
return True
def file_new(self, event=None):
result = self.save_if_modified()
if result is not None: #None => Aborted or Save cancelled, False => Discarded, True = Saved or Not modified
self.editor.delete(1.0, "end")
self.editor.edit_modified(False)
self.editor.edit_reset()
self.file_path = None
self.set_title()
def file_open(self, event=None, filepath=None):
result = self.save_if_modified()
if result is not None: #None => Aborted or Save cancelled, False => Discarded, True = Saved or Not modified
if filepath is None:
filepath = filedialog.askopenfilename()
if filepath is not None and filepath != '':
with open(filepath, encoding="utf-8") as f:
fileContents = f.read()# Get all the text from file.
# Set current text to file contents
self.editor.delete(1.0, "end")
self.editor.insert(1.0, fileContents)
self.editor.edit_modified(False)
self.file_path = filepath
def file_save(self, event=None):
if self.file_path is None:
result = self.file_save_as()
else:
result = self.file_save_as(filepath=self.file_path)
return result
def file_save_as(self, event=None, filepath=None):
if filepath is None:
filepath = filedialog.asksaveasfilename(filetypes=(('Text files', '*.txt'), ('Python files', '*.py *.pyw'), ('All files', '*.*'))) #defaultextension='.txt'
try:
with open(filepath, 'wb') as f:
text = self.editor.get(1.0, "end-1c")
f.write(bytes(text, 'UTF-8'))
self.editor.edit_modified(False)
self.file_path = filepath
self.set_title()
return "saved"
except FileNotFoundError:
print('FileNotFoundError')
return "cancelled"
def file_quit(self, event=None):
result = self.save_if_modified()
if result is not None: #None => Aborted or Save cancelled, False => Discarded, True = Saved or Not modified
self.root.destroy() #sys.exit(0)
def set_title(self, event=None):
if self.file_path is not None:
title = os.path.basename(self.file_path)
else:
title = "Untitled"
self.root.title(title + " - " + self.TITLE)
def undo(self, event=None):
self.editor.edit_undo()
def redo(self, event=None):
self.editor.edit_redo()
def main(self, event=None):
self.editor.bind("<Control-o>", self.file_open)
self.editor.bind("<Control-O>", self.file_open)
self.editor.bind("<Control-S>", self.file_save)
self.editor.bind("<Control-s>", self.file_save)
self.editor.bind("<Control-y>", self.redo)
self.editor.bind("<Control-Y>", self.redo)
self.editor.bind("<Control-Z>", self.undo)
self.editor.bind("<Control-z>", self.undo)
if __name__ == "__main__":
root = Tk()
root.wm_state('zoomed')
editor = Editor(root)
editor.main()
root.mainloop()
|
# ###### a simple ML demo using the classification tree method
# ###### it learns a person's sex from height (ghad), weight (vazn) and shoe size (kafsh), then predicts from them
import csv
from sklearn import tree
x = []
y = []
with open('somefile_withdataforlearning.csv', 'r') as fin:
data = csv.reader(fin)
for line in data:
x.append(line[1:4])  # height, weight, shoe size
y.append(line[4])  # sex
clf = tree.DecisionTreeClassifier()
machine_xy = clf.fit(x, y)
newdata = [[185, 79, 43], [165, 57, 38]]
answer = machine_xy.predict(newdata)
print(answer[0], answer[1])
|
from .models import Role, UserRole, Permission
from shop.models import Shop
from account.models import User
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework import generics, pagination
from rest_framework.response import Response
from .serializers import RoleSerializers, UserRoleSerializers, PermissionSerializers
from rest_framework.permissions import IsAuthenticated
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.views import APIView
from .permissions import IsProductRead
# Create your views here.
# Roles
class Roles(generics.ListAPIView):
permission_classes = [IsAuthenticated]
queryset = Role.objects.order_by('created_at').reverse()
serializer_class = RoleSerializers
pagination_class = pagination.PageNumberPagination
class SingleRole(APIView):
parser_classes = [MultiPartParser]
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
data = request.data
shop = Shop.objects.get(id=request.data['shop_id'])
role = Role(shop=shop) # add foreign key
role_serializer = RoleSerializers(role, data=data)
if role_serializer.is_valid():
role_serializer.save()
return Response(role_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(role_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def put(self, request, *args, **kwargs):
data = request.data
id = kwargs['id']
role = get_object_or_404(
Role, id=id)
role_serializer = RoleSerializers(role, data=data)
if role_serializer.is_valid():
role_serializer.save()
return Response(role_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(role_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, *args, **kwargs):
id = kwargs['id']
role = get_object_or_404(
Role, id=id)
role_serializer = RoleSerializers(role, many=False)
return Response(role_serializer.data, status=status.HTTP_200_OK)
def delete(self, request, *args, **kwargs):
id = kwargs['id']
role = get_object_or_404(
Role, id=id)
role.delete()
return Response('Success', status=status.HTTP_200_OK)
# UserRoles
class UserRoles(generics.ListAPIView):
permission_classes = [IsAuthenticated]
queryset = UserRole.objects.order_by('created_at').reverse()
serializer_class = UserRoleSerializers
pagination_class = pagination.PageNumberPagination
class SingleUserRole(APIView):
parser_classes = [MultiPartParser]
permission_classes = [IsAuthenticated]
def post(self, request, *args, **kwargs):
data = request.data
role = Role.objects.get(id=request.data['role_id'])
user = User.objects.get(id=request.data['user_id'])
userRole = UserRole(user=user, role=role) # add foreign key
userRole_serializer = UserRoleSerializers(userRole, data=data)
if userRole_serializer.is_valid():
userRole_serializer.save()
return Response(userRole_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(userRole_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def put(self, request, *args, **kwargs):
data = request.data
role = Role.objects.get(id=request.data['role_id'])
userRole = get_object_or_404(
UserRole, id=request.data['id'])
userRole.role = role
userRole_serializer = UserRoleSerializers(userRole, data=data)
if userRole_serializer.is_valid():
userRole_serializer.save()
return Response(userRole_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(userRole_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, *args, **kwargs):
# check the custom permission explicitly (assuming IsProductRead is a DRF BasePermission)
if IsProductRead().has_permission(request, self):
userRole = get_object_or_404(
UserRole, user=request.user.id)
userRole_serializer = UserRoleSerializers(userRole, many=False)
return Response(userRole_serializer.data, status=status.HTTP_200_OK)
else:
return Response([{"deatil": "You don't have permission to read this content!"}], status=status.HTTP_401_UNAUTHORIZED)
def delete(self, request, *args, **kwargs):
id = kwargs['id']
userRole = get_object_or_404(
UserRole, id=id)
userRole.delete()
return Response('Success', status=status.HTTP_200_OK)
# Permissions
class MyPermissions(generics.ListAPIView):
permission_classes = [IsAuthenticated]
queryset = Permission.objects.order_by('created_at').reverse()
serializer_class = PermissionSerializers
pagination_class = pagination.PageNumberPagination
class SingleMyPermission(APIView):
parser_classes = [MultiPartParser]
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
data = request.data
role = Role.objects.get(id=request.data['role_id'])
permission = Permission(role=role) # add foreign key
permission_serializer = PermissionSerializers(permission, data=data)
if permission_serializer.is_valid():
permission_serializer.save()
return Response(permission_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(permission_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def put(self, request, *args, **kwargs):
data = request.data
permission = get_object_or_404(
Permission, id=request.data['id'])
permission_serializer = PermissionSerializers(permission, data=data)
if permission_serializer.is_valid():
permission_serializer.save()
return Response(permission_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(permission_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, *args, **kwargs):
id = kwargs['id']
permission = get_object_or_404(
Permission, id=id)
permission_serializer = PermissionSerializers(permission, many=False)
return Response(permission_serializer.data, status=status.HTTP_200_OK)
def delete(self, request, *args, **kwargs):
id = kwargs['id']
permission = get_object_or_404(
Permission, id=id)
permission.delete()
return Response('Success', status=status.HTTP_200_OK)
|
from urllib import request
from treadmill.infra import connection
def _get_ip(anywhere):
if anywhere:
_ip = '0.0.0.0/0'
else:
_ip = request.urlopen(
'http://ip.42.pl/raw'
).read().decode('utf-8') + '/32'
return _ip
def enable(port, group_id, protocol='tcp', anywhere=True):
port = int(port)
conn = connection.Connection()
conn.authorize_security_group_ingress(
CidrIp=_get_ip(anywhere),
FromPort=port,
ToPort=port,
GroupId=group_id,
IpProtocol=protocol
)
def disable(port, group_id, protocol='tcp', anywhere=True):
port = int(port)
conn = connection.Connection()
conn.revoke_security_group_ingress(
CidrIp=_get_ip(anywhere),
FromPort=port,
ToPort=port,
GroupId=group_id,
IpProtocol=protocol
)
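# Usage sketch (hypothetical security group id):
# enable(22, 'sg-0123456789abcdef0')                  # open port 22 to 0.0.0.0/0
# enable(22, 'sg-0123456789abcdef0', anywhere=False)  # open port 22 only to this host's public IP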
|
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
def insert(self, data):
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
def PrintTree(self):
if self.left:
self.left.PrintTree()
print(self.data)
if self.right:
self.right.PrintTree()
def findval(self, lkpval):
if lkpval < self.data:
if self.left is None:
return str(lkpval) + " Not Found"
return self.left.findval(lkpval)
elif lkpval > self.data:
if self.right is None:
return str(lkpval) + " Not Found"
return self.right.findval(lkpval)
else:
return str(self.data) + " is Found"
def inorderTraversal(self, root):
res = []
if root:
res = self.inorderTraversal(root.left)
res.append(root.data)
res = res + self.inorderTraversal(root.right)
return res
def PreorderTraversal(self, root):
res = []
if root:
res.append(root.data)
res = res + self.PreorderTraversal(root.left)
res = res + self.PreorderTraversal(root.right)
return res
def PostorderTraversal(self, root):
res = []
if root:
res = self.PostorderTraversal(root.left)
res = res + self.PostorderTraversal(root.right)
res.append(root.data)
return res
def goNode(self, node):
go = node
while go.left is not None:
go = go.left
return go
def delete(self, data):
if data < self.data:
# guard against deleting a value that is not in the tree
if self.left is not None:
self.left = self.left.delete(data)
elif data > self.data:
if self.right is not None:
self.right = self.right.delete(data)
else:
if self.left is None:
data1 = self.right
self = None
return data1
elif self.right is None:
data1 = self.left
self = None
return data1
data1 = self.goNode(self.right)
self.data = data1.data
self.right = self.right.delete(data1.data)
return self
root = Node(100)
root.insert(50)
root.insert(60)
root.insert(30)
root.insert(150)
root.insert(120)
root.delete(150)
print(root.inorderTraversal(root))
# print(root.PreorderTraversal(root))
# print(root.PostorderTraversal(root))
# for i in range(51):
# root.insert(i)
root.PrintTree()
# print(root.findval(56))
|
import xadmin as admin
from teaman.tea import models
class SupplierAdmin(object):
list_display = ('name', 'tel')
class ProductTypeAdmin(object):
list_display = ('name',)
class ProductAdmin(object):
list_display = ('name', 'supplier', 'status', 'price', 'pub_date', 'year', 'type', 'last_modify')
list_filter = ('status', 'type', 'supplier')
class ProductImageAdmin(object):
list_display = ('img', 'product')
class ProductTemplateAdmin(object):
pass
admin.site.register(models.Supplier, SupplierAdmin)
admin.site.register(models.ProductType, ProductTypeAdmin)
admin.site.register(models.Product, ProductAdmin)
admin.site.register(models.ProductImage, ProductImageAdmin)
admin.site.register(models.ProductTemplate, ProductTemplateAdmin)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import re
from dataclasses import dataclass
from typing import Iterable
from urllib.parse import quote_plus as url_quote_plus
from pants.engine.collection import DeduplicatedCollection
from pants.engine.target import Target
from pants.jvm.target_types import (
JvmArtifactArtifactField,
JvmArtifactExclusionsField,
JvmArtifactFieldSet,
JvmArtifactGroupField,
JvmArtifactJarSourceField,
JvmArtifactUrlField,
JvmArtifactVersionField,
)
from pants.util.ordered_set import FrozenOrderedSet
class InvalidCoordinateString(Exception):
"""The coordinate string being passed is invalid or malformed."""
def __init__(self, coords: str) -> None:
super().__init__(f"Received invalid artifact coordinates: {coords}")
@dataclass(frozen=True, order=True)
class Coordinate:
"""A single Maven-style coordinate for a JVM dependency.
Coursier uses at least two string serializations of coordinates:
1. A format that is accepted by the Coursier CLI which uses trailing attributes to specify
optional fields like `packaging`/`type`, `classifier`, `url`, etc. See `to_coord_arg_str`.
2. A format in the JSON report, which uses token counts to specify optional fields. We
additionally use this format in our own lockfile. See `to_coord_str` and `from_coord_str`.
"""
REGEX = re.compile("([^: ]+):([^: ]+)(:([^: ]*)(:([^: ]+))?)?:([^: ]+)")
group: str
artifact: str
version: str
packaging: str = "jar"
classifier: str | None = None
# True to enforce that the exact declared version of a coordinate is fetched, rather than
# allowing dependency resolution to adjust the version when conflicts occur.
strict: bool = True
@staticmethod
def from_json_dict(data: dict) -> Coordinate:
return Coordinate(
group=data["group"],
artifact=data["artifact"],
version=data["version"],
packaging=data.get("packaging", "jar"),
classifier=data.get("classifier", None),
)
def to_json_dict(self) -> dict:
ret = {
"group": self.group,
"artifact": self.artifact,
"version": self.version,
"packaging": self.packaging,
"classifier": self.classifier,
}
return ret
@classmethod
def from_coord_str(cls, s: str) -> Coordinate:
"""Parses from a coordinate string with optional `packaging` and `classifier` coordinates.
See the classdoc for more information on the format.
Using Aether's implementation as reference
http://www.javased.com/index.php?source_dir=aether-core/aether-api/src/main/java/org/eclipse/aether/artifact/DefaultArtifact.java
${organisation}:${artifact}[:${packaging}[:${classifier}]]:${version}
See also: `to_coord_str`.
"""
parts = Coordinate.REGEX.match(s)
if parts is not None:
packaging_part = parts.group(4)
return cls(
group=parts.group(1),
artifact=parts.group(2),
packaging=packaging_part if packaging_part is not None else "jar",
classifier=parts.group(6),
version=parts.group(7),
)
else:
raise InvalidCoordinateString(s)
def as_requirement(self) -> ArtifactRequirement:
"""Creates a `RequirementCoordinate` from a `Coordinate`."""
return ArtifactRequirement(coordinate=self)
def to_coord_str(self, versioned: bool = True) -> str:
"""Renders the coordinate in Coursier's JSON-report format, which does not use attributes.
See also: `from_coord_str`.
"""
unversioned = f"{self.group}:{self.artifact}"
if self.classifier is not None:
unversioned += f":{self.packaging}:{self.classifier}"
elif self.packaging != "jar":
unversioned += f":{self.packaging}"
version_suffix = ""
if versioned:
version_suffix = f":{self.version}"
return f"{unversioned}{version_suffix}"
def to_coord_arg_str(self, extra_attrs: dict[str, str] | None = None) -> str:
"""Renders the coordinate in Coursier's CLI input format.
The CLI input format uses trailing key-val attributes to specify `packaging`, `url`, etc.
See https://github.com/coursier/coursier/blob/b5d5429a909426f4465a9599d25c678189a54549/modules/coursier/shared/src/test/scala/coursier/parse/DependencyParserTests.scala#L7
"""
attrs = dict(extra_attrs or {})
if self.packaging != "jar":
# NB: Coursier refers to `packaging` as `type` internally.
attrs["type"] = self.packaging
if self.classifier:
attrs["classifier"] = self.classifier
attrs_sep_str = "," if attrs else ""
attrs_str = ",".join((f"{k}={v}" for k, v in attrs.items()))
return f"{self.group}:{self.artifact}:{self.version}{attrs_sep_str}{attrs_str}"
class Coordinates(DeduplicatedCollection[Coordinate]):
"""An ordered list of `Coordinate`s."""
@dataclass(frozen=True)
class ArtifactRequirement:
"""A single Maven-style coordinate for a JVM dependency, along with information of how to fetch
the dependency if it is not to be fetched from a Maven repository."""
coordinate: Coordinate
url: str | None = None
jar: JvmArtifactJarSourceField | None = None
excludes: frozenset[str] | None = None
@classmethod
def from_jvm_artifact_target(cls, target: Target) -> ArtifactRequirement:
if not JvmArtifactFieldSet.is_applicable(target):
raise AssertionError(
"`ArtifactRequirement.from_jvm_artifact_target()` only works on targets with "
"`JvmArtifactFieldSet` fields present."
)
exclusions = target[JvmArtifactExclusionsField].value or ()
return ArtifactRequirement(
coordinate=Coordinate(
group=target[JvmArtifactGroupField].value,
artifact=target[JvmArtifactArtifactField].value,
version=target[JvmArtifactVersionField].value,
),
url=target[JvmArtifactUrlField].value,
jar=(
target[JvmArtifactJarSourceField]
if target[JvmArtifactJarSourceField].value
else None
),
excludes=frozenset([*(exclusion.to_coord_str() for exclusion in exclusions)]) or None,
)
def with_extra_excludes(self, *excludes: str) -> ArtifactRequirement:
"""Creates a copy of this `ArtifactRequirement` with `excludes` provided.
Mostly useful for testing (`Coordinate(...).as_requirement().with_extra_excludes(...)`).
"""
return dataclasses.replace(
self, excludes=self.excludes.union(excludes) if self.excludes else frozenset(excludes)
)
def to_coord_arg_str(self) -> str:
return self.coordinate.to_coord_arg_str(
{"url": url_quote_plus(self.url)} if self.url else {}
)
def to_metadata_str(self) -> str:
attrs = {
"url": self.url or "not_provided",
"jar": self.jar.address.spec if self.jar else "not_provided",
}
if self.excludes:
attrs["excludes"] = ",".join(sorted(self.excludes))
return self.coordinate.to_coord_arg_str(attrs)
# TODO: Consider whether to carry classpath scope in some fashion via ArtifactRequirements.
class ArtifactRequirements(DeduplicatedCollection[ArtifactRequirement]):
"""An ordered list of Coordinates used as requirements."""
@classmethod
def from_coordinates(cls, coordinates: Iterable[Coordinate]) -> ArtifactRequirements:
return ArtifactRequirements(coord.as_requirement() for coord in coordinates)
@dataclass(frozen=True)
class GatherJvmCoordinatesRequest:
"""A request to turn strings of coordinates (`group:artifact:version`) and/or addresses to
`jvm_artifact` targets into `ArtifactRequirements`."""
artifact_inputs: FrozenOrderedSet[str]
option_name: str
|
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
def findmax_depth(root):
if not root:
return 0
else:
return 1 + max(findmax_depth(root.left), findmax_depth(root.right))
ans = findmax_depth(root)
print(ans)
|
import requests
from bs4 import BeautifulSoup
import pymysql
kc_list = [
'hEX Lite',
'hEX',
'RB2011iL-RM',
'RB2011UiAS-RM',
'CCR1009-7G-1C-1S+',
'CCR1016-12G',
'CCR1016-12S-1S+',
'CCR1036-12G-4S',
'CCR1036-8G-2S+',
'CCR1072-1G-8S+',
'RB450',
'RB450G',
'CRS326-24G-2S+RM',
'CRS317-1G-16S+RM',
'wAP',
]
html = requests.get('https://mikrotik.com/products/matrix').text
soup = BeautifulSoup(html,'html.parser')
description_list = []
product_list = []
# parts used as the column names
for th_tag in soup.select('thead th'):
description_list.append('`'+th_tag.text+'`')
# parts used as the row values
for tr_tag in soup.select('tbody tr'):
product_name = tr_tag.find('a').text
if product_name not in kc_list:
continue
detail_list=[]
for td_tag in tr_tag.select('td'):
# note: "if td_tag.text == '' or 'No':" would always be truthy, because the non-empty string 'No' is truthy
if td_tag.text in ('', 'No', '0', 'None'):
detail_list.append('0')
else:
detail_list.append(td_tag.text)
product_list.append(dict(zip(description_list, detail_list)))
sql_column = ','.join(description_list)
sql_row=[]
for product in product_list:
value_list=[]
for key, value in product.items():
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
value_list.append(value)
print(key, ':', value, type(value))
sql_row.append(value_list)
var_string = ','.join(['%s' for i in range(len(sql_row[0]))])
conn = pymysql.connect(host='192.168.42.71', user='root', password='tjrwn12',db='mikrotik')
curs = conn.cursor()
sql = "INSERT INTO spec(%s) VALUES (%s);" % (sql_column, var_string)
curs.executemany(sql, sql_row)
conn.commit()
conn.close()
# for i in product_list:
# del_key_list = ['CPU', 'Suggested price (USD)']
# del_key_list += [value_is_none for value_is_none in i.keys() if i[value_is_none] == 'None']
# for del_key in del_key_list:
# print(i['Product name'] ,del_key,'will be removed')
# try:
# del i[del_key]
# except:
# pass
# print('=========valuable keys========')
# print(i['Product name'])
# for j,k in i.items():
# print(j,':',k, type(k))
# print('\n')
|
msg = input("Enter a string:")
words = msg.split()
print(words)
# Strings are immutable, so build a new string without the vowels
# instead of trying to delete characters in place.
no_vowels = "".join(ch for ch in msg if ch not in "aeiou")
print(no_vowels)
|
#!/usr/bin/python
import os
import sys
from logic.parking import Parking
class Commands(object):
"""
class to handle command line operations
"""
def __init__(self):
self.parking = Parking()
@staticmethod
def script_usage():
"""
script usage
:return:
"""
print("""
Usage(commands):
create_parking_lot <number>
park <car number> <colour>
leave <parked_number>
status
registration_numbers_for_cars_with_colour <colour>
slot_numbers_for_cars_with_colour <colour>
slot_number_for_registration_number <car_number>
""")
def process_file(self, command_file):
"""
read input command file
:param command_file: filepath
:return:
"""
if not os.path.exists(command_file):
print("Given file {} does not exist".format(command_file))
return
try:
with open(command_file, 'r') as file_obj:
for line in file_obj:
if line != '\n':
self.process_command(line)
except Exception as ex:
print("Error occurred while processing input file {}".format(ex))
def process_input(self):
print("Interactive mode:")
self.script_usage()
try:
while True:
stdin_input = input("Enter command: ")
self.process_command(stdin_input)
except (KeyboardInterrupt, SystemExit):
return
except Exception as ex:
print("Error occured while processing input {}".format(ex))
def process_command(self, stdin_input):
inputs = stdin_input.split()
if not inputs:
return
command = inputs[0]
params = inputs[1:]
if hasattr(self.parking, command):
command_function = getattr(self.parking, command)
command_function(*params)
else:
print("Got wrong command.")
if __name__ == "__main__":
# main script to call backend script
args = sys.argv
if len(args) == 1:
p_command = Commands()
p_command.process_input()
elif len(args) == 2:
p_command = Commands()
p_command.process_file(args[1])
else:
print("""
Wrong number of arguments.
Usage:
./parking_lot.py <filename> OR
./parking_lot.py
""")
|
#!/usr/bin/env python2
import __builtin__
import os
__builtin__.process = 'client'
# Temporary hack patch:
__builtin__.__dict__.update(__import__('pandac.PandaModules', fromlist = ['*']).__dict__)
from direct.extensions_native import HTTPChannel_extensions
from direct.extensions_native import Mat3_extensions
from direct.extensions_native import VBase3_extensions
from direct.extensions_native import VBase4_extensions
from direct.extensions_native import NodePath_extensions
from panda3d.core import loadPrcFile
# if __debug__:
# loadPrcFile('config/general.prc')
# loadPrcFile('config/release/dev.prc')
# else:
# config = niraidata.CONFIG
# config = aes.decrypt(config, key, iv)
config = """# Window settings:
window-title Project Altis [BETA 1.2.0]
win-origin -2 -2
icon-filename phase_3/etc/icon.ico
cursor-filename phase_3/etc/toonmono.cur
show-frame-rate-meter #f
# Altis Engine 3.0
want-vive #f
want-android #f
want-headless #f
want-live-updates #f
want-cuda #t
loader-num-threads 25
# Debug
default-directnotify-level info
notify-level-DistributedNPCScientistAI info
notify-level-DistributedPetAI info
want-pstats #f
# Audio:
audio-library-name p3fmod_audio
# Graphics:
aux-display pandagl
aux-display pandadx9
aux-display p3tinydisplay
# Models:
model-cache-models #f
model-cache-textures #f
default-model-extension .bam
# Performance
smooth-enable-prediction 1
smooth-enable-smoothing 1
smooth-lag 0.4
smooth-max-future 1.0
smooth-min-suggest-resync 0
average-frame-rate-interval 60.0
clock-frame-rate 60.0
# Preferences:
preferences-filename preferences.json
# Backups:
backups-filepath backups/
backups-extension .json
# Server:
server-timezone EST/EDT/-5
server-port 7198
account-bridge-filename astron/databases/account-bridge.db
# Performance:
sync-video #f
texture-power-2 none
gl-check-errors #f
garbage-collect-states #f
# Egg object types:
egg-object-type-barrier <Scalar> collide-mask { 0x01 } <Collide> { Polyset descend }
egg-object-type-trigger <Scalar> collide-mask { 0x01 } <Collide> { Polyset descend intangible }
egg-object-type-sphere <Scalar> collide-mask { 0x01 } <Collide> { Sphere descend }
egg-object-type-trigger-sphere <Scalar> collide-mask { 0x01 } <Collide> { Sphere descend intangible }
egg-object-type-floor <Scalar> collide-mask { 0x02 } <Collide> { Polyset descend }
egg-object-type-dupefloor <Scalar> collide-mask { 0x02 } <Collide> { Polyset keep descend }
egg-object-type-camera-collide <Scalar> collide-mask { 0x04 } <Collide> { Polyset descend }
egg-object-type-camera-collide-sphere <Scalar> collide-mask { 0x04 } <Collide> { Sphere descend }
egg-object-type-camera-barrier <Scalar> collide-mask { 0x05 } <Collide> { Polyset descend }
egg-object-type-camera-barrier-sphere <Scalar> collide-mask { 0x05 } <Collide> { Sphere descend }
egg-object-type-model <Model> { 1 }
egg-object-type-dcs <DCS> { 1 }
# Safe zones:
want-safe-zones #t
want-toontown-central #t
want-donalds-dock #t
want-daisys-garden #t
want-minnies-melodyland #t
want-the-burrrgh #t
want-donalds-dreamland #t
want-goofy-speedway #t
want-outdoor-zone #t
want-golf-zone #t
# Weather system
want-weather #f
# Options Page
change-display-settings #t
change-display-api #t
# Safe zone settings:
want-treasure-planners #t
want-suit-planners #t
want-butterflies #f
# Classic characters:
want-classic-chars #f
want-mickey #f
want-donald-dock #f
want-daisy #f
want-minnie #f
want-pluto #f
want-donald-dreamland #f
want-chip-and-dale #f
want-goofy #f
# Trolley minigames:
want-minigames #t
want-photo-game #f
want-travel-game #f
# Picnic table board games:
want-game-tables #f
# Cog Battles
base-xp-multiplier 5.0
# Cog headquarters:
want-cog-headquarters #t
want-sellbot-headquarters #t
want-cashbot-headquarters #t
want-lawbot-headquarters #t
want-bossbot-headquarters #t
# Cashbot boss:
want-resistance-toonup #f
want-resistance-restock #f
# Cog buildings:
want-cogbuildings #t
# Optional:
show-total-population #f
want-mat-all-tailors #t
want-long-pattern-game #f
show-population #t
show-total-population #t
# Animated Props
zero-pause-mult 1.0
# Interactive Props
randomize-interactive-idles #t
interactive-prop-random-idles #t
interactive-prop-info #f
props-buff-battles #t
prop-idle-pause-time 0.0
# Events
want-charity-screen #t
# Developer options:
want-dev #f
want-pstats #f
want-directtools #f
want-tk #f
# Holidays
active-holidays 64, 65, 66 #128, 116, 63
# Temporary:
want-old-fireworks #t
# Live updates:
want-live-updates #t
# Server:
server-version TTPA-Beta-1.2.0
shard-low-pop 50
shard-mid-pop 80
# Core features:
want-pets #t
want-parties #f
want-cogdominiums #t
want-achievements #t
# Chat:
want-whitelist #t
# Cashbot boss:
want-resistance-toonup #t
want-resistance-restock #t
# Developer options:
want-dev #f
"""
production_model_path = '/'
# both branches of the original if/else reduced to the same expression
config += '\nmodel-path ' + os.environ.get('model-path', production_model_path)
import sys
from panda3d.core import *
import StringIO
io = StringIO.StringIO(config)
vfs = VirtualFileSystem.getGlobalPtr()
import glob
for line in io.readlines():
# check if the current line is a comment...
if line.startswith('#'):
continue
# print line
# load the prc file value
loadPrcFileData('', line)
del config
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.settings.Settings import Settings
notify = directNotify.newCategory('AltisClient')
notify.setInfo(True)
preferencesFilename = ConfigVariableString(
'preferences-filename', 'preferences.json').getValue()
notify.info('Reading %s...' % preferencesFilename)
__builtin__.settings = Settings(preferencesFilename)
from toontown.settings import ToontownSettings
__builtin__.ttsettings = ToontownSettings
for setting in ttsettings.DefaultSettings:
if setting not in settings:
settings[setting] = ttsettings.DefaultSettings[setting]
loadPrcFileData('Settings: res', 'win-size %d %d' % tuple(settings.get('res', (1280, 720))))
loadPrcFileData('Settings: fullscreen', 'fullscreen %s' % settings['fullscreen'])
loadPrcFileData('Settings: music', 'audio-music-active %s' % settings['music'])
loadPrcFileData('Settings: sfx', 'audio-sfx-active %s' % settings['sfx'])
loadPrcFileData('Settings: musicVol', 'audio-master-music-volume %s' % settings['musicVol'])
loadPrcFileData('Settings: sfxVol', 'audio-master-sfx-volume %s' % settings['sfxVol'])
loadPrcFileData('Settings: loadDisplay', 'load-display %s' % settings['loadDisplay'])
loadPrcFileData('Settings: toonChatSounds', 'toon-chat-sounds %s' % settings['toonChatSounds'])
loadPrcFileData('', 'texture-anisotropic-degree %d' % settings['anisotropic-filtering'])
loadPrcFileData('', 'framebuffer-multisample %s' % settings['anti-aliasing'])
loadPrcFileData('', 'sync-video %s' % settings['vertical-sync'])
vfs = VirtualFileSystem.getGlobalPtr()
DefaultPhases = (3, 3.5, 4, 5, 5.5, 6, 7, 8, 9, 10, 11, 12, 13)
import glob
notify.info("Loading Default Pack...")
for file in glob.glob('resources/default/*.mf'):
if float(file.replace('.mf', '').replace('resources/default\phase_', '')) in DefaultPhases:
mf = Multifile()
mf.openReadWrite(Filename(file))
names = mf.getSubfileNames()
vfs.mount(mf, Filename('/'), 0)
notify.info('Successfully Mounted:' + file)
notify.info("Default Pack Loaded!")
from toontown.toonbase.ContentPackManager import ContentPackManager
__builtin__.ContentPackMgr = ContentPackManager()
ContentPackMgr.loadAll()
loadDisplay = settings.get('loadDisplay', 'pandagl')
loadPrcFileData('', 'load-display %s' % settings['loadDisplay'])
import os
import time
import sys
import random
import __builtin__
try:
from toontown.launcher.TTALauncher import TTALauncher
launcher = TTALauncher()
__builtin__.launcher = launcher
except Exception as e:
raise
if launcher.isDummy():
http = HTTPClient()
else:
http = launcher.http
from toontown.toonbase import ToontownGlobals
tempLoader = Loader()
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import *
from toontown.pgui import DirectGuiGlobals as PGUIGlobals
DirectGuiGlobals.setDefaultFontFunc(ToontownGlobals.getInterfaceFont)
PGUIGlobals.setDefaultFontFunc(ToontownGlobals.getInterfaceFont)
launcher.setPandaErrorCode(7)
notify.info('Loading AltisBase...')
from toontown.toonbase import ToonBase
ToonBase.ToonBase()
from panda3d.core import *
if base.win is None:
notify.error('Unable to open window; aborting.')
launcher.setPandaErrorCode(0)
launcher.setPandaWindowOpen()
ConfigVariableDouble('decompressor-step-time').setValue(0.01)
ConfigVariableDouble('extractor-step-time').setValue(0.01)
backgroundNode = tempLoader.loadSync(Filename('phase_3/models/gui/loading-background'))
backgroundNodePath = aspect2d.attachNewNode(backgroundNode, 0)
backgroundNodePath.setPos(0.0, 0.0, 0.0)
backgroundNodePath.setScale(render2d, VBase3(1))
backgroundNodePath.find('**/fg').hide()
logo = OnscreenImage(
image = 'phase_3/maps/toontown-logo.png',
scale = (1 / (4.0 / 3.0), 1, 1 / (4.0 / 3.0)),
pos = backgroundNodePath.find('**/fg').getPos())
logo.setTransparency(TransparencyAttrib.MAlpha)
logo.setBin('fixed', 20)
logo.reparentTo(backgroundNodePath)
backgroundNodePath.find('**/bg').setBin('fixed', 10)
base.graphicsEngine.renderFrame()
DirectGuiGlobals.setDefaultRolloverSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
DirectGuiGlobals.setDefaultClickSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
DirectGuiGlobals.setDefaultDialogGeom(loader.loadModel('phase_3/models/gui/dialog_box_gui'))
PGUIGlobals.setDefaultRolloverSound(base.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
PGUIGlobals.setDefaultClickSound(base.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
PGUIGlobals.setDefaultDialogGeom(loader.loadModel('phase_3/models/gui/dialog_box_gui'))
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
OTPGlobals.setDefaultProductPrefix(TTLocalizer.ProductPrefix)
# For Devs only. (The below)
'''from direct.stdpy import threading, thread
def __inject_wx(_):
code = textbox.GetValue()
exec (code, globals())
def openInjector_wx():
import wx
app = wx.App(redirect = False)
frame = wx.Frame(None, title = "TTPA Dev Injector", size=(640, 400), style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX)
panel = wx.Panel(frame)
button = wx.Button(parent = panel, id = -1, label = "Inject", size = (50, 20), pos = (295, 0))
global textbox
textbox = wx.TextCtrl(parent = panel, id = -1, pos = (20, 22), size = (600, 340), style = wx.TE_MULTILINE)
frame.Bind(wx.EVT_BUTTON, __inject_wx, button)
frame.Show()
app.SetTopWindow(frame)
textbox.AppendText(" ")
threading.Thread(target = app.MainLoop).start()
openInjector_wx()'''
if base.musicManagerIsValid:
music = base.loader.loadMusic('phase_3/audio/bgm/tt_theme.ogg')
if music:
music.setLoop(1)
music.setVolume(0.9)
music.play()
notify.info('Loading the default GUI sounds...')
DirectGuiGlobals.setDefaultRolloverSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
DirectGuiGlobals.setDefaultClickSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
else:
music = None
from toontown.toonbase import ToontownLoader
from direct.gui.DirectGui import *
serverVersion = base.config.GetString('server-version', 'no_version_set')
'''
Let's have these here so you can tell if dev or debug mode is enabled or not
easily.
'''
if __dev__:
serverVersionText = serverVersion + "-dev"
elif __debug__:
serverVersionText = serverVersion + "-debug"
else:
serverVersionText = serverVersion
version = OnscreenText(serverVersionText, pos = (-1.3, -0.975), scale = 0.06, fg = Vec4(0, 0, 0, 1), align = TextNode.ALeft)
version.setPos(0.03, 0.03)
version.reparentTo(base.a2dBottomLeft)
from toontown.suit import Suit
Suit.loadModels()
loader.beginBulkLoad('init', TTLocalizer.LoaderLabel, 138, 0, TTLocalizer.TIP_NONE, 0)
from toontown.toonbase.ToonBaseGlobal import *
from direct.showbase.MessengerGlobal import *
from toontown.distributed import ToontownClientRepository
cr = ToontownClientRepository.ToontownClientRepository(serverVersion, launcher)
cr.music = music
del music
base.initNametagGlobals()
base.cr = cr
loader.endBulkLoad('init')
from otp.friends import FriendManager
from otp.distributed.OtpDoGlobals import *
cr.generateGlobalObject(OTP_DO_ID_FRIEND_MANAGER, 'FriendManager')
if not launcher.isDummy():
base.startShow(cr, launcher.getGameServer())
else:
base.startShow(cr)
backgroundNodePath.reparentTo(hidden)
backgroundNodePath.removeNode()
del backgroundNodePath
del backgroundNode
del tempLoader
version.cleanup()
del version
base.loader = base.loader
__builtin__.loader = base.loader
autoRun = ConfigVariableBool('toontown-auto-run', 1)
base.run()
class LogAndOutput:
def __init__(self, orig, log):
self.orig = orig
self.log = log
def write(self, str):
self.log.write(str)
self.log.flush()
self.orig.write(str)
self.orig.flush()
def flush(self):
self.log.flush()
self.orig.flush()
class TTILauncher(LauncherBase):
notify = DirectNotifyGlobal.directNotify.newCategory('ToontownDummyLauncher')
def __init__(self):
self.http = HTTPClient()
self.logPrefix = 'toontown-'
ltime = time.localtime()
logSuffix = '%02d%02d%02d_%02d%02d%02d' % (ltime[0] - 2000, ltime[1], ltime[2],
ltime[3], ltime[4], ltime[5])
if not os.path.exists('logs/'):
os.mkdir('logs/')
self.notify.info('Made new directory to save logs.')
logfile = os.path.join('logs', self.logPrefix + logSuffix + '.log')
log = open(logfile, 'a')
logOut = LogAndOutput(sys.stdout, log)
logErr = LogAndOutput(sys.stderr, log)
sys.stdout = logOut
sys.stderr = logErr
def getPlayToken(self):
# This is a temporary fix for dealing with getting the same play-
# cookie on both Windows and Linux platforms.
if __debug__:
token = self.getValue('TTI_PLAYCOOKIE')
else:
try:
token = sys.argv[1]
except IndexError:
token = ''
print('WARNING Something went wrong, using NULL cookie.')
return token
def getGameServer(self):
return self.getValue('TTI_GAMESERVER')
def setPandaErrorCode(self, code):
pass
def getGame2Done(self):
return True
def getLogFileName(self):
return 'toontown'
def getValue(self, key, default = None):
return os.environ.get(key, default)
def setValue(self, key, value):
os.environ[key] = str(value)
def getVerifyFiles(self):
return config.GetInt('launcher-verify', 0)
def getTestServerFlag(self):
return self.getValue('IS_TEST_SERVER', 0)
def isDownloadComplete(self):
return 1
def isTestServer(self):
return 0
def getPhaseComplete(self, phase):
return 1
def startGame(self):
self.newTaskManager()
eventMgr.restart()
from toontown.toonbase import ToontownStart
class GameStarter(object):
def __init__(self):
# Start game internal globals
if CURRENT_PLATFORM == 'Linux':
self.python_path = PYTHON_PATH
else:
self.python_path = '"'+ CURRENT_PATH + PYTHON_PATH + '"'
def launchGame(self):
# This still needs work but I guess this is slightly better
cookie = str((self.uiCallback.uName + self.uiCallback.pWord))
### Game starting commands ###
if CURRENT_PLATFORM == 'Linux':
cmd_00 = 'export ttiUsername=' + self.uiCallback.uName + ' && export ttiPassword=' + self.uiCallback.pWord + ' && export TTI_GAMESERVER=' + GAME_SERVER + ' && cd src/ && ' + self.python_path + ' -O -m toontown.toonbase.ClientStart ' + cookie
else:
cmd_00 = 'set ttiUsername=' + self.uiCallback.uName + ' && set ttiPassword=' + self.uiCallback.pWord + ' && set TTI_GAMESERVER=' + GAME_SERVER + ' && cd "src" && ' + self.python_path + ' -O -m toontown.toonbase.ClientStart ' + cookie
# Before we run the command lets set the username variables to null
self.uiCallback.uName = False
self.uiCallback.pWord = False
cookie = False
subprocess.call(cmd_00, shell=True)
return
|
# Generated by Django 2.1.7 on 2019-03-09 11:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('milliard', '0003_auto_20190309_1422'),
]
operations = [
migrations.AlterField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='milliard.Question', unique=True),
),
]
|
#!/usr/bin/python3
def printinfo( arg1, *vartuple ):
print("output: ")
print(arg1)
for var in vartuple:
print(var)
return
# call printinfo method
printinfo(10)
printinfo(70, 60, 50)
|
arr = [16,10,3,20,11]
print(arr.index(min(arr)))
print(arr.index(max(arr)))
|
from django.apps import AppConfig
class TusomeConfig(AppConfig):
name = 'tusome'
|
import unittest
from katas.beta.kontti_language import kontti
class KonttiTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(kontti('lamppu'), 'komppu-lantti')
def test_equals_2(self):
self.assertEqual(kontti('lamppu sofia'), 'komppu-lantti kofia-sontti')
def test_equals_3(self):
self.assertEqual(kontti('silly game'), 'kolly-sintti kome-gantti')
def test_equals_4(self):
self.assertEqual(kontti('aeiou'), 'koeiou-antti')
def test_equals_5(self):
self.assertEqual(kontti('xyz lamppu'), 'koz-xyntti komppu-lantti')
def test_equals_6(self):
self.assertEqual(kontti(''), '')
def test_equals_7(self):
self.assertEqual(kontti('lAmppU'), 'komppU-lAntti')
def test_equals_8(self):
self.assertEqual(kontti('silly grrr'), 'kolly-sintti grrr')
|
import random
import string
import pyperclip
def generate(Number: int, Symbols: str):
return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits + Symbols) for _ in range(Number))
def copy(Variable: str):
pyperclip.copy(Variable)
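# Usage sketch: generate a 16-character password with a few symbols and copy it
# to the clipboard (requires the pyperclip package):
# copy(generate(16, '!@#$%'))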
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
import numpy as np
from fastai.vision import *
from fastai.vision.image import *
__all__ = ['RandomErasing']
def _random_erasing(x, probability=0.5, sl=0.02, sh=0.4, r1=0.3,
mean=(np.array(imagenet_stats[1]) + 1) * imagenet_stats[0]):
if random.uniform(0, 1) > probability:
return x
for attempt in range(100):
area = x.size()[1] * x.size()[2]
target_area = random.uniform(sl, sh) * area
aspect_ratio = random.uniform(r1, 1 / r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < x.size()[2] and h < x.size()[1]:
x1 = random.randint(0, x.size()[1] - h)
y1 = random.randint(0, x.size()[2] - w)
if x.size()[0] == 3:
x[0, x1:x1 + h, y1:y1 + w] = mean[0]
x[1, x1:x1 + h, y1:y1 + w] = mean[1]
x[2, x1:x1 + h, y1:y1 + w] = mean[2]
else:
x[0, x1:x1 + h, y1:y1 + w] = mean[0]
return x
RandomErasing = TfmPixel(_random_erasing)
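# Usage sketch (assuming the fastai v1 transform API that TfmPixel belongs to):
# include the transform in a pipeline, e.g.
#   tfms = get_transforms(xtra_tfms=[RandomErasing(probability=0.5)])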
|
"""Top-level package for SALT API Server."""
__author__ = """SALT Astronomy"""
__version__ = "0.1.0"
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, current_app
from flask_sqlalchemy import SQLAlchemy
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from config import config
def log_exception(e):
"""
Log an exception to Flask's logger and Sentry.
Parameters
----------
e : Exception
The exception to log.
"""
current_app.logger.exception(e)
sentry_sdk.capture_exception(e)
db = SQLAlchemy()
from app.dataloader import (
ObservationLoader,
ObservingWindowLoader,
ProposalLoader,
BlockLoader,
InvestigatorLoader,
) # noqa E402
loaders = {
"proposal_loader": ProposalLoader(),
"observation_loader": ObservationLoader(),
"observing_window_loader": ObservingWindowLoader(),
"block_loader": BlockLoader(),
"investigator_loader": InvestigatorLoader(),
}
# these imports can only happen here as otherwise there might be import errors
from app.auth import verify_token # noqa E402
from app.main import main # noqa E402
from app.graphql import graphql # noqa E402
def create_app(config_name):
app = Flask("__name__")
app.config.from_object(config[config_name])
db.init_app(app)
# logging to file
log_file_path = app.config["LOG_FILE_PATH"]
if not log_file_path:
raise Exception("The environment variable LOG_FILE_PATH is not defined")
handler = RotatingFileHandler(log_file_path, maxBytes=1000000, backupCount=10)
    formatter = logging.Formatter(
        "[%(asctime)s] %(levelname)s in %(module)s: %(message)s"
    )
handler.setFormatter(formatter)
app.logger.addHandler(handler)
# setting up Sentry
sentry_dsn = app.config["SENTRY_DSN"]
if not sentry_dsn:
app.logger.info(
"No value is defined for SENTRY_DSN. Have you defined an "
"environment variable with this name?"
)
sentry_sdk.init(dsn=sentry_dsn, integrations=[FlaskIntegration()])
app.register_blueprint(graphql)
app.register_blueprint(main)
app.before_request(verify_token)
return app
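# Typical usage (a sketch; assumes 'development' is a key of the imported
# config dict and the required environment-derived settings are present):
# app = create_app('development')
# app.run()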
|
from rest_framework import serializers
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def validate(self, args):
email = args.get('email', None)
username = args.get('username', None)
if User.objects.filter(email=email).exists():
raise serializers.ValidationError({'email': ('email already exists')})
if User.objects.filter(username=username).exists():
raise serializers.ValidationError({'username': ('username already exists')})
return super().validate(args)
def create(self, validated_data):
return User.objects.create_user(**validated_data)
class ChangePasswordSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=True)
password2 = serializers.CharField(write_only=True, required=True)
old_password = serializers.CharField(write_only=True, required=True)
class Meta:
model = User
fields = ('old_password', 'password', 'password2')
def validate(self, attrs):
if attrs['password'] != attrs['password2']:
raise serializers.ValidationError({"password": "Password fields didn't match."})
return attrs
def validate_old_password(self, value):
user = self.context['request'].user
# user = self.context['request'].user.is_authenticated()
if not user.check_password(value):
raise serializers.ValidationError({"old_password": "Old password is not correct"})
return value
def update(self, instance, validated_data):
instance.set_password(validated_data['password'])
instance.save()
return instance
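# Illustrative view wiring for the serializers above (a sketch, not part of
# the original module; these would normally live in views.py):
from rest_framework import generics, permissions
class RegisterView(generics.CreateAPIView):
    queryset = User.objects.all()
    serializer_class = RegisterSerializer
class ChangePasswordView(generics.UpdateAPIView):
    queryset = User.objects.all()
    serializer_class = ChangePasswordSerializer
    permission_classes = [permissions.IsAuthenticated]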
|
"""Tests for the 'the' plugin"""
import unittest
from test import _common
from beets import config
from beetsplug.the import ThePlugin, PATTERN_A, PATTERN_THE, FORMAT
class ThePluginTest(_common.TestCase):
def test_unthe_with_default_patterns(self):
self.assertEqual(ThePlugin().unthe('', PATTERN_THE), '')
self.assertEqual(ThePlugin().unthe('The Something', PATTERN_THE),
'Something, The')
self.assertEqual(ThePlugin().unthe('The The', PATTERN_THE),
'The, The')
self.assertEqual(ThePlugin().unthe('The The X', PATTERN_THE),
'The X, The')
self.assertEqual(ThePlugin().unthe('the The', PATTERN_THE),
'The, the')
self.assertEqual(ThePlugin().unthe('Protected The', PATTERN_THE),
'Protected The')
self.assertEqual(ThePlugin().unthe('A Boy', PATTERN_A),
'Boy, A')
self.assertEqual(ThePlugin().unthe('a girl', PATTERN_A),
'girl, a')
self.assertEqual(ThePlugin().unthe('An Apple', PATTERN_A),
'Apple, An')
self.assertEqual(ThePlugin().unthe('An A Thing', PATTERN_A),
'A Thing, An')
self.assertEqual(ThePlugin().unthe('the An Arse', PATTERN_A),
'the An Arse')
self.assertEqual(ThePlugin().unthe('TET - Travailleur', PATTERN_THE),
'TET - Travailleur')
def test_unthe_with_strip(self):
config['the']['strip'] = True
self.assertEqual(ThePlugin().unthe('The Something', PATTERN_THE),
'Something')
self.assertEqual(ThePlugin().unthe('An A', PATTERN_A), 'A')
def test_template_function_with_defaults(self):
ThePlugin().patterns = [PATTERN_THE, PATTERN_A]
self.assertEqual(ThePlugin().the_template_func('The The'),
'The, The')
self.assertEqual(ThePlugin().the_template_func('An A'), 'A, An')
def test_custom_pattern(self):
config['the']['patterns'] = ['^test\\s']
config['the']['format'] = FORMAT
self.assertEqual(ThePlugin().the_template_func('test passed'),
'passed, test')
def test_custom_format(self):
config['the']['patterns'] = [PATTERN_THE, PATTERN_A]
config['the']['format'] = '{1} ({0})'
self.assertEqual(ThePlugin().the_template_func('The A'), 'The (A)')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
from flask import Flask, request, jsonify
from github import Github, GithubException
app = Flask(__name__)
@app.route("/", methods = ['POST'])
def createFiles():
PostData = request.json
templateAttr = PostData.get("struct")
g = Github(PostData.get("username"), PostData.get("password"))
    repo = g.get_repo("Shreyasgujjar/example2")
try:
for item in templateAttr:
print(item + " ")
if type(templateAttr.get(item)) is dict:
itemsInsideFolder = templateAttr.get(item)
for items in itemsInsideFolder:
                    if type(itemsInsideFolder.get(items)) is str:
repo.create_file(item+"/"+itemsInsideFolder.get(items), "test", "")
print("file " + itemsInsideFolder.get(items) + " is created")
print("------------------Folder is created-----------------------")
else:
repo.create_file(templateAttr.get(item), "test", "test")
print("file " + templateAttr.get(item) + " is created")
print("-"*10)
print("peace")
except GithubException as e:
return "There was some error creating the files " + str(e)
return "The necessary file is created"
if __name__ == '__main__':
app.run(debug = True)
|
import os
import subprocess
import logging
from math import floor
from itertools import repeat
from pymongo import MongoClient
from multiprocessing.dummy import Pool
from tqdm import tqdm
def main():
db = db_connect()
outdir = './'
threads = 4
process_list = []
for element in db.find():
process_list.append(element)
logging.info('sending the list to be processed')
if threads == 1:
for element in process_list[:10]:
segment(element, outdir)
else:
with Pool(threads) as pool:
with tqdm(total=len(process_list)) as pbar:
for i, _ in tqdm(enumerate(pool.imap(segment_all_star,
zip(process_list,
repeat(outdir))))):
pbar.update()
def segment(element, outdir):
uri = element['value']['urls'][0][1]
outfile = element['Innerfield']['segment_path']
start = element['Innerfield']['start']
end = element['Innerfield']['end']
cut(uri, start, end, outfile)
def cut(uri, start, end, outfile):
try:
duration = end - start
    except TypeError:
raise ValueError('start or end for segmentation is not given.')
outpath = os.path.dirname(outfile)
if not os.path.isdir(outpath):
try:
os.makedirs(outpath)
except FileExistsError:
# might be trying to mkdir simultaneously
pass
audio_tool = 'ffmpeg'
seek = floor(start)
seek_start = start - seek
filename = os.path.basename(uri)
basename, extension = filename.split('.')
# TODO check basename fits the outfile
if outfile.find(basename) == -1:
print('basename does not fit the outfile\n %s vs %s'\
%(basename, outfile))
return False
args = [audio_tool, '-hide_banner', '-loglevel', 'panic',
'-ss', str(seek), '-i', uri, '-ss', \
str(seek_start), '-t', str(duration), '-ac', '1', '-ar', '16000', \
outfile]
if os.path.isfile(outfile):
logging.info("%s already exists skipping"%outfile)
return True
else:
logging.info('creating %s'%outfile)
logging.debug(' '.join(args))
subprocess.call(args)
if not os.path.isfile(outfile):
raise IOError("File not created from %s operation"
" %s"%(audio_tool, outfile))
return True
def segment_all_star(process_outdir):
return segment(*process_outdir)
def db_connect():
client = MongoClient('localhost',27017)
dbname = 'parlament'
colname = 'aggregate_v3'
db = client[dbname]
return db[colname]
if __name__ == "__main__":
logging_level = logging.INFO
log_file = 'clean.log'
logging.basicConfig(filename=log_file,
format="%(asctime)s-%(levelname)s: %(message)s",
level=logging_level,
filemode='w')
main()
|
def write_list_to_file(fname, lst):
fo = open(fname, "a+") #append
for w in lst:
fo.write("%s\n" % w)
fo.close()
def write_dict_to_file(fname, _dict):
fo = open(fname, "a+") #append
for k,v in _dict.items():
fo.write(" %s , %s\n " % (k , v))
fo.write("\n")
fo.close()
def print_dict(dct):
print "\n\nStarting to Print Dictionary \n\n\n"
for k,v in dct.items():
print k,v
def print_list(lst):
for l in lst:
print l
def print_dictionary(dic):
for key in dic:
print key, dic.get(key)
|
#! /usr/bin/env python3
import datetime, sys, glob, os, re, subprocess, numpy as np
# prints error to std error
def eprint( *args, **kwargs ):
print(*args, file = sys.stderr, **kwargs )
def collect_files(directory = './', filetype = '*', recursive = True, verbose = False):
if verbose == True:
print('\n\nCompiling files ...')
if type(filetype) == list:
        filetypes = filetype
else:
filetypes = [filetype]
directory = os.path.normpath(os.path.expanduser(directory)) + '/'
filelist = []
for filetype in filetypes:
if recursive == True:
filelist = filelist + (glob.glob(directory + "/**/*." + filetype, recursive = recursive))
elif recursive == False:
filelist = filelist + (glob.glob(directory + "/*." + filetype, recursive = recursive))
else:
raise TypeError
return filelist
def collect_folders( directory, path = True ):
get_folders = subprocess.run( 'ls ' + directory, shell = True, stdout = subprocess.PIPE )
folders = get_folders.stdout.decode( 'UTF-8' ).split( '\n' )
if path:
folders = [ os.path.abspath( directory ) + '/' + folder for folder in folders if folder != '' ]
else:
folders = [ folder for folder in folders if folder != '' ]
folders = [ folder for folder in folders if os.path.isdir(folder) ]
return folders
def dictSplit( Dict, factor ):
list_dict = []
keys_list = np.array_split( list(Dict.keys()), factor )
for keys in keys_list:
list_dict.append( {} )
for key in keys:
list_dict[-1][key] = Dict[key]
return list_dict
def intro(script_name,args_dict,credit='', log = False):
start_time = datetime.datetime.now()
date = start_time.strftime( '%Y%m%d' )
out_str = '\n' + script_name + '\n' + credit + '\nExecution began: ' + str(start_time)
# print('\n' + script_name)
# print(credit)
# print('\nExecution began:', start_time)
for arg in args_dict:
# print('{:<30}'.format(arg + ':') + str(args_dict[arg]))
out_str += '\n' + '{:<30}'.format(arg + ':') + str(args_dict[arg])
print( out_str )
if log:
out_file = os.path.abspath( log ) + '/' + date + '.log'
count = 0
while os.path.exists( out_file ):
count += 1
out_file = os.path.abspath( log ) + '/' + date + '_' + str(count) + '.log'
with open(out_file, 'w') as out:
out.write( out_str )
return start_time
def outro(start_time, log = False):
end_time = datetime.datetime.now()
duration = end_time - start_time
dur_min = duration.seconds/60
print('\n\nExecution finished:', str(end_time) + '\t' + '\n\t{:.2}'.format(dur_min), 'minutes')
# out_str = '\n\nExecution finished:', str(end_time) + '\t' + '\n\t{:.2}'.format(dur_min), 'minutes'
sys.exit(0)
def prep_output(output, mkdir=1, require_newdir=0, cd=1):
output = os.path.realpath(output)
if os.path.isdir(output):
if require_newdir == 1:
print('\n\nERROR: directory exists. Exit code 172')
sys.exit( 172 )
elif os.path.exists(output):
output = os.path.dirname(output)
else:
if mkdir == 0:
print('\n\nERROR: directory does not exist. Exit code 173')
sys.exit( 173 )
os.mkdir(output)
if cd == 1:
os.chdir(output)
return output
def file2list(file_, sep = '\n', col = None):
if sep not in ['\n', '\t', ',', ' ']:
print('\n\nERROR: invalid separator.\nExit code 229')
sys.exit( 229 )
with open(file_, 'r') as raw_data:
data = raw_data.read()
if col:
        rows = data.split('\n')
        check_col = rows[0].split(sep).index( col )
        data_list = [ row.split(sep)[check_col].rstrip() for row in rows[1:] if row != '' ]
else:
data_list = data.split( sep = sep )
data_list = [ x.rstrip() for x in data_list if x != '' ]
return data_list
def check_cmd( cmd ):
exit_code = 0
cmd_check = subprocess.call( 'which ' + str(cmd), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
if cmd_check != 0:
exit_code = 1
return exit_code
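# Example usage (hypothetical paths and extensions):
# fasta_files = collect_files(directory='~/data', filetype=['fasta', 'fa'], verbose=True)
# halves = dictSplit({'a': 1, 'b': 2, 'c': 3, 'd': 4}, 2)  # -> two dicts of two keys each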
|
import os
import sys
import subprocess
import shutil
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/families')
import fam
import experiments as exp
def uncompress(archive):
archive_name = os.path.basename(os.path.normpath(archive)).replace(".tar.gz", "")
datadir = fam.get_datadir(archive_name)
families_path = os.path.abspath(fam.get_datasets_family_path())
if (os.path.isdir(datadir)):
print(" [ERROR] " + datadir + " already exists. Abording!")
sys.exit(1)
print(" [INFO] Starting extracting the archive " + archive)
os.mkdir(datadir)
command = []
command.append("tar")
command.append("-xzf")
command.append(archive)
command.append("-C")
command.append(families_path)
subprocess.check_call(command)
print(" ".join(command))
if (not os.path.isdir(datadir)):
print(" [ERROR] Failed to extract the archive to " + datadir)
sys.exit(1)
print(" [INFO] Archive successfully extract into " + datadir)
def unarchive(archive):
if (not os.path.isfile(archive) or not archive.endswith(".tar.gz")):
print(" [ERROR] " + archive + " is not a valid archive")
sys.exit(1)
uncompress(archive)
if (__name__ == "__main__"):
if (len(sys.argv) != 2):
print("Syntax python " + os.path.basename(__file__) + " datadir")
sys.exit(1)
unarchive(sys.argv[1])
|
#!/usr/bin/python -tt
#
# Copyright (c) 2008, 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys, re
import fcntl
import struct
import termios
import rpm
from mic import msger
from .errors import CreatorError
from .proxy import get_proxy_for
import runner
from urlgrabber import grabber, __version__ as grabber_version
if rpm.labelCompare(grabber_version.split('.'), '3.9.0'.split('.')) == -1:
msger.warning("Version of python-urlgrabber is %s, lower than '3.9.0', "
"you may encounter some network issues" % grabber_version)
def myurlgrab(url, filename, proxies, progress_obj = None):
g = grabber.URLGrabber()
if progress_obj is None:
progress_obj = TextProgress()
if url.startswith("file:/"):
file = url.replace("file:", "")
if not os.path.exists(file):
raise CreatorError("URLGrabber error: can't find file %s" % file)
runner.show(['cp', "-f", file, filename])
else:
try:
filename = g.urlgrab(url = url, filename = filename,
ssl_verify_host = False, ssl_verify_peer = False,
proxies = proxies, http_headers = (('Pragma', 'no-cache'),),
quote = 0, progress_obj = progress_obj)
except grabber.URLGrabError, e:
raise CreatorError("URLGrabber error: %s" % url)
return filename
def terminal_width(fd=1):
""" Get the real terminal width """
try:
buf = 'abcdefgh'
buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf)
return struct.unpack('hhhh', buf)[1]
except: # IOError
return 80
def truncate_url(url, width):
return os.path.basename(url)[0:width]
class TextProgress(object):
# make the class as singleton
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(TextProgress, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, totalnum = None):
self.total = totalnum
self.counter = 1
def start(self, filename, url, *args, **kwargs):
self.url = url
self.termwidth = terminal_width()
msger.info("\r%-*s" % (self.termwidth, " "))
if self.total is None:
msger.info("\rRetrieving %s ..." % truncate_url(self.url, self.termwidth - 15))
else:
msger.info("\rRetrieving %s [%d/%d] ..." % (truncate_url(self.url, self.termwidth - 25), self.counter, self.total))
def update(self, *args):
pass
def end(self, *args):
if self.counter == self.total:
msger.raw("\n")
if self.total is not None:
self.counter += 1
class RPMInstallCallback:
""" Command line callback class for callbacks from the RPM library.
"""
def __init__(self, ts, output=1):
self.output = output
self.callbackfilehandles = {}
self.total_actions = 0
self.total_installed = 0
self.installed_pkg_names = []
self.total_removed = 0
self.mark = "+"
self.marks = 40
self.lastmsg = None
self.tsInfo = None # this needs to be set for anything else to work
self.ts = ts
self.filelog = False
self.logString = []
self.headmsg = "Installing"
def _dopkgtup(self, hdr):
tmpepoch = hdr['epoch']
if tmpepoch is None: epoch = '0'
else: epoch = str(tmpepoch)
return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])
def _makeHandle(self, hdr):
handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
hdr['release'], hdr['arch'])
return handle
def _localprint(self, msg):
if self.output:
msger.info(msg)
def _makefmt(self, percent, progress = True):
l = len(str(self.total_actions))
size = "%s.%s" % (l, l)
fmt_done = "[%" + size + "s/%" + size + "s]"
done = fmt_done % (self.total_installed + self.total_removed,
self.total_actions)
marks = self.marks - (2 * l)
width = "%s.%s" % (marks, marks)
fmt_bar = "%-" + width + "s"
if progress:
bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
fmt = "\r %-10.10s: %-20.20s " + bar + " " + done
else:
bar = fmt_bar % (self.mark * marks, )
fmt = " %-10.10s: %-20.20s " + bar + " " + done
return fmt
def _logPkgString(self, hdr):
"""return nice representation of the package for the log"""
(n,a,e,v,r) = self._dopkgtup(hdr)
if e == '0':
pkg = '%s.%s %s-%s' % (n, a, v, r)
else:
pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)
return pkg
def callback(self, what, bytes, total, h, user):
if what == rpm.RPMCALLBACK_TRANS_START:
if bytes == 6:
self.total_actions = total
elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
pass
elif what == rpm.RPMCALLBACK_TRANS_STOP:
pass
elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
self.lastmsg = None
hdr = None
if h is not None:
try:
hdr, rpmloc = h
except:
rpmloc = h
hdr = readRpmHeader(self.ts, h)
handle = self._makeHandle(hdr)
fd = os.open(rpmloc, os.O_RDONLY)
self.callbackfilehandles[handle]=fd
self.total_installed += 1
self.installed_pkg_names.append(hdr['name'])
return fd
else:
self._localprint("No header - huh?")
elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
hdr = None
if h is not None:
try:
hdr, rpmloc = h
except:
rpmloc = h
hdr = readRpmHeader(self.ts, h)
handle = self._makeHandle(hdr)
os.close(self.callbackfilehandles[handle])
fd = 0
# log stuff
#pkgtup = self._dopkgtup(hdr)
self.logString.append(self._logPkgString(hdr))
elif what == rpm.RPMCALLBACK_INST_PROGRESS:
if h is not None:
percent = (self.total_installed*100L)/self.total_actions
if total > 0:
try:
hdr, rpmloc = h
except:
rpmloc = h
m = re.match("(.*)-(\d+.*)-(\d+\.\d+)\.(.+)\.rpm", os.path.basename(rpmloc))
if m:
pkgname = m.group(1)
else:
pkgname = os.path.basename(rpmloc)
if self.output:
fmt = self._makefmt(percent)
msg = fmt % (self.headmsg, pkgname)
if msg != self.lastmsg:
self.lastmsg = msg
msger.info(msg)
if self.total_installed == self.total_actions:
msger.raw('')
msger.verbose('\n'.join(self.logString))
elif what == rpm.RPMCALLBACK_UNINST_START:
pass
elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
pass
elif what == rpm.RPMCALLBACK_UNINST_STOP:
self.total_removed += 1
elif what == rpm.RPMCALLBACK_REPACKAGE_START:
pass
elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
pass
elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
pass
def readRpmHeader(ts, filename):
""" Read an rpm header. """
fd = os.open(filename, os.O_RDONLY)
h = ts.hdrFromFdno(fd)
os.close(fd)
return h
def splitFilename(filename):
""" Pass in a standard style rpm fullname
Return a name, version, release, epoch, arch, e.g.::
            foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
"""
if filename[-4:] == '.rpm':
filename = filename[:-4]
archIndex = filename.rfind('.')
arch = filename[archIndex+1:]
relIndex = filename[:archIndex].rfind('-')
rel = filename[relIndex+1:archIndex]
verIndex = filename[:relIndex].rfind('-')
ver = filename[verIndex+1:relIndex]
epochIndex = filename.find(':')
if epochIndex == -1:
epoch = ''
else:
epoch = filename[:epochIndex]
name = filename[epochIndex + 1:verIndex]
return name, ver, rel, epoch, arch
def getCanonX86Arch(arch):
#
if arch == "i586":
f = open("/proc/cpuinfo", "r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("model name") and line.find("Geode(TM)") != -1:
return "geode"
return arch
# only athlon vs i686 isn't handled with uname currently
if arch != "i686":
return arch
# if we're i686 and AuthenticAMD, then we should be an athlon
f = open("/proc/cpuinfo", "r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
return "athlon"
# i686 doesn't guarantee cmov, but we depend on it
elif line.startswith("flags") and line.find("cmov") == -1:
return "i586"
return arch
def getCanonX86_64Arch(arch):
if arch != "x86_64":
return arch
vendor = None
f = open("/proc/cpuinfo", "r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("vendor_id"):
vendor = line.split(':')[1]
break
if vendor is None:
return arch
if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
return "amd64"
if vendor.find("GenuineIntel") != -1:
return "ia32e"
return arch
def getCanonArch():
arch = os.uname()[4]
if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
return getCanonX86Arch(arch)
if arch == "x86_64":
return getCanonX86_64Arch(arch)
return arch
# Copy from libsatsolver:poolarch.c, with cleanup
archPolicies = {
"x86_64": "x86_64:i686:i586:i486:i386",
"i686": "i686:i586:i486:i386",
"i586": "i586:i486:i386",
"ia64": "ia64:i686:i586:i486:i386",
"armv7tnhl": "armv7tnhl:armv7thl:armv7nhl:armv7hl",
"armv7thl": "armv7thl:armv7hl",
"armv7nhl": "armv7nhl:armv7hl",
"armv7hl": "armv7hl",
"armv7l": "armv7l:armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv6l": "armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv5tejl": "armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv5tel": "armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv5l": "armv5l:armv4tl:armv4l:armv3l",
}
# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = {
"x86_64": ( "athlon", "x86_64", "athlon" ),
}
# from yumUtils.py
arches = {
# ia32
"athlon": "i686",
"i686": "i586",
"geode": "i586",
"i586": "i486",
"i486": "i386",
"i386": "noarch",
# amd64
"x86_64": "athlon",
"amd64": "x86_64",
"ia32e": "x86_64",
# arm
"armv7tnhl": "armv7nhl",
"armv7nhl": "armv7hl",
"armv7hl": "noarch",
"armv7l": "armv6l",
"armv6l": "armv5tejl",
"armv5tejl": "armv5tel",
"armv5tel": "noarch",
#itanium
"ia64": "noarch",
}
def isMultiLibArch(arch=None):
"""returns true if arch is a multilib arch, false if not"""
if arch is None:
arch = getCanonArch()
if not arches.has_key(arch): # or we could check if it is noarch
return False
if multilibArches.has_key(arch):
return True
if multilibArches.has_key(arches[arch]):
return True
return False
def getBaseArch():
myarch = getCanonArch()
if not arches.has_key(myarch):
return myarch
if isMultiLibArch(arch=myarch):
if multilibArches.has_key(myarch):
return myarch
else:
return arches[myarch]
if arches.has_key(myarch):
basearch = myarch
value = arches[basearch]
while value != 'noarch':
basearch = value
value = arches[basearch]
return basearch
def checkRpmIntegrity(bin_rpm, package):
return runner.quiet([bin_rpm, "-K", "--nosignature", package])
def checkSig(ts, package):
""" Takes a transaction set and a package, check it's sigs,
return 0 if they are all fine
return 1 if the gpg key can't be found
return 2 if the header is in someway damaged
return 3 if the key is not trusted
return 4 if the pkg is not gpg or pgp signed
"""
value = 0
currentflags = ts.setVSFlags(0)
fdno = os.open(package, os.O_RDONLY)
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error, e:
if str(e) == "public key not availaiable":
value = 1
if str(e) == "public key not available":
value = 1
if str(e) == "public key not trusted":
value = 3
if str(e) == "error reading package header":
value = 2
else:
error, siginfo = getSigInfo(hdr)
if error == 101:
os.close(fdno)
del hdr
value = 4
else:
del hdr
try:
os.close(fdno)
except OSError:
pass
ts.setVSFlags(currentflags) # put things back like they were before
return value
def getSigInfo(hdr):
""" checks signature from an hdr hand back signature information and/or
an error code
"""
import locale
locale.setlocale(locale.LC_ALL, 'C')
string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
siginfo = hdr.sprintf(string)
if siginfo != '(none)':
error = 0
sigtype, sigdate, sigid = siginfo.split(',')
else:
error = 101
sigtype = 'MD5'
sigdate = 'None'
sigid = 'None'
infotuple = (sigtype, sigdate, sigid)
return error, infotuple
def checkRepositoryEULA(name, repo):
""" This function is to check the EULA file if provided.
return True: no EULA or accepted
return False: user declined the EULA
"""
import tempfile
import shutil
import urlparse
import urllib2 as u2
import httplib
from errors import CreatorError
def _check_and_download_url(u2opener, url, savepath):
try:
if u2opener:
f = u2opener.open(url)
else:
f = u2.urlopen(url)
except u2.HTTPError, httperror:
if httperror.code in (404, 503):
return None
else:
raise CreatorError(httperror)
except OSError, oserr:
if oserr.errno == 2:
return None
else:
raise CreatorError(oserr)
except IOError, oserr:
if hasattr(oserr, "reason") and oserr.reason.errno == 2:
return None
else:
raise CreatorError(oserr)
except u2.URLError, err:
raise CreatorError(err)
except httplib.HTTPException, e:
raise CreatorError(e)
# save to file
licf = open(savepath, "w")
licf.write(f.read())
licf.close()
f.close()
return savepath
def _pager_file(savepath):
if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
pagers = ('w3m', 'links', 'lynx', 'less', 'more')
else:
pagers = ('less', 'more')
file_showed = False
for pager in pagers:
cmd = "%s %s" % (pager, savepath)
try:
os.system(cmd)
except OSError:
continue
else:
file_showed = True
break
if not file_showed:
f = open(savepath)
msger.raw(f.read())
f.close()
msger.pause()
# when proxy needed, make urllib2 follow it
proxy = repo.proxy
proxy_username = repo.proxy_username
proxy_password = repo.proxy_password
if not proxy:
proxy = get_proxy_for(repo.baseurl[0])
handlers = []
auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
u2opener = None
if proxy:
if proxy_username:
proxy_netloc = urlparse.urlsplit(proxy).netloc
if proxy_password:
proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
else:
proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
else:
proxy_url = proxy
proxy_support = u2.ProxyHandler({'http': proxy_url,
'https': proxy_url,
'ftp': proxy_url})
handlers.append(proxy_support)
# download all remote files to one temp dir
baseurl = None
repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
for url in repo.baseurl:
tmphandlers = handlers[:]
(scheme, host, path, parm, query, frag) = urlparse.urlparse(url.rstrip('/') + '/')
if scheme not in ("http", "https", "ftp", "ftps", "file"):
raise CreatorError("Error: invalid url %s" % url)
        if '@' in host:
            try:
                user_pass, host = host.split('@', 1)
                if ':' in user_pass:
                    user, password = user_pass.split(':', 1)
                else:
                    user, password = user_pass, ''
            except ValueError, e:
                raise CreatorError('Bad URL: %s' % url)
            msger.verbose("adding HTTP auth: %s, %s" % (user, password))
            auth_handler.add_password(None, host, user, password)
            tmphandlers.append(auth_handler)
url = scheme + "://" + host + path + parm + query + frag
if tmphandlers:
u2opener = u2.build_opener(*tmphandlers)
# try to download
repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
repo_eula_path = _check_and_download_url(
u2opener,
repo_eula_url,
os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
if repo_eula_path:
# found
baseurl = url
break
if not baseurl:
shutil.rmtree(repo_lic_dir) #cleanup
return True
# show the license file
msger.info('For the software packages in this yum repo:')
msger.info(' %s: %s' % (name, baseurl))
    msger.info('There is an "End User License Agreement" file that needs to be checked.')
    msger.info('Please read the terms and conditions outlined in it and answer the following questions.')
msger.pause()
_pager_file(repo_eula_path)
# Asking for the "Accept/Decline"
if not msger.ask('Would you agree to the terms and conditions outlined in the above End User License Agreement?'):
msger.warning('Will not install pkgs from this repo.')
shutil.rmtree(repo_lic_dir) #cleanup
return False
    # try to find support_info.html for extra information
repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
repo_info_path = _check_and_download_url(
u2opener,
repo_info_url,
os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
if repo_info_path:
        msger.info('There is one more file in the repo with additional support information; please read it.')
msger.pause()
_pager_file(repo_info_path)
#cleanup
shutil.rmtree(repo_lic_dir)
return True
|
import numpy as np
import re
import os
import gudhi as gd
import time
#import dionysus as d
#from scipy.spatial.distance import squareform
import sys
import csv
import argparse
import networkx as nx
def read_raw_HiC_data(file):
resolution=re.split('[_.]',os.path.basename(file).strip())[1]
if(resolution[-2:]=='kb'):
resolution=int(resolution[:-2])*1000
elif(resolution[-2:]=='mb'):
resolution=int(resolution[:-2])*1000000
print(resolution)
all_data_list=[]
with open(file,"r")as f:
for line in f:
line = line.strip()
line = re.sub(r'\s+',' ', line)
line = line.split(' ')
#print(line)
all_data_list.append([float(line[i]) for i in range(len(line))])
raw_data_matrix=np.array(all_data_list)
dim=int(max(raw_data_matrix[:,0].max(),raw_data_matrix[:,1].max())/resolution+1)
#print(dim)
data_frequency_matrix=np.zeros((dim,dim))
for x,y,freq in all_data_list:
data_frequency_matrix[int(x/resolution),int(y/resolution)]=freq
data_frequency_matrix[int(y/resolution),int(x/resolution)]=freq
#print(data_frequency_matrix)
return (resolution,data_frequency_matrix)
def read_raw_HiC_data_no_split(file,reso):
resolution=int(reso)
print('resolution: ',resolution)
all_data_list=[]
with open(file,"r")as f:
for line in f:
line = line.strip()
line = re.sub(r'\s+',' ', line)
line = line.split(' ')
#print(line)
all_data_list.append([float(line[i]) for i in range(len(line))])
raw_data_matrix=np.array(all_data_list)
temp_min=min(raw_data_matrix[:,0].min(),raw_data_matrix[:,1].min())
temp_max=max(raw_data_matrix[:,0].max(),raw_data_matrix[:,1].max())
dim=int((temp_max-temp_min)/resolution+1)
#print('Hi-C matrix size =',dim)
data_frequency_matrix=np.zeros((dim,dim))
for x,y,freq in all_data_list:
data_frequency_matrix[int((x-temp_min)/resolution),int((y-temp_min)/resolution)]=freq
data_frequency_matrix[int((y-temp_min)/resolution),int((x-temp_min)/resolution)]=freq
#print(data_frequency_matrix)
return (resolution,data_frequency_matrix,temp_min,temp_max)
def split_TAD(freq_matrix,TAD_result_file,resol):
TAD_matrix_list=[]
    with open(TAD_result_file, "r") as f:
for line in f:
line = line.strip()
line = re.sub(r'\s+',' ', line)
line = line.split(' ')
#print([line[1],line[2]])
index_x=int(int(line[1])/resol)
index_y=int(int(line[2])/resol)+1
TAD_matrix_list.append(freq_matrix[index_x:index_y,index_x:index_y])
#print(freq_matrix[index_x:index_y,index_x:index_y])
print(freq_matrix[index_x:index_y,index_x:index_y].shape)
return TAD_matrix_list
def matrix_normalize(TAD_matrix_all):
distance_matrix_all=[]
for TAD_matrix in TAD_matrix_all:
TAD_matrix=np.log(TAD_matrix+1)
max_num = TAD_matrix.max()
#print(max_num)
TAD_matrix = TAD_matrix/(1.01*max_num)
#print(TAD_matrix.shape[0])
for i in range(TAD_matrix.shape[0]):
TAD_matrix[i,i]=1.0
#print(TAD_matrix)
distance_matrix=1-TAD_matrix
distance_matrix_all.append(distance_matrix)
#print(distance_matrix)
return distance_matrix_all
def TDA_func(distance_matrix, persfilename,skelfilename):
print('distance matrix size =',distance_matrix.shape)
rips_complex = gd.RipsComplex(distance_matrix=distance_matrix,max_edge_length=1.1)
simplex_tree = rips_complex.create_simplex_tree(max_dimension=2)
print('done creating simplex tree')
#filtration = simplex_tree.get_filtration()
skeleton = simplex_tree.get_skeleton(2)
writeSkeletonToFile(skeleton,skelfilename)
diag = simplex_tree.persistence()
pairs = simplex_tree.persistence_pairs()
fullpersinfo = []
for pair in pairs:
#print(pair)
btime = simplex_tree.filtration(pair[0])
dtime = simplex_tree.filtration(pair[1])
#print(btime,dtime)
try:
diag.index((0,(btime,dtime)))
htype = 0
fullpersinfo.append([htype, btime, dtime, pair])
        except ValueError:
diag.index((1,(btime,dtime)))
htype = 1
fullpersinfo.append([htype, btime, dtime, pair])
#print('couldnt find persistence pair matching birth/death times', pair, (btime,dtime))
writePersistencePairsToFile(fullpersinfo,persfilename)
#simplex_tree.write_persistence_diagram(persfilename)
#print('wrote persistence diagram to',persfilename)
#print(diag)
#data_0_dim=np.array([list(diag[i][1]) for i in range(len(diag)) if diag[i][0]==0])
#data_1_dim=np.array([list(diag[i][1]) for i in range(len(diag)) if diag[i][0]==1])
return (skeleton,diag,fullpersinfo)
def writeSkeletonToFile(skeleton, filename):
with open(filename,'w') as f:
fwriter = csv.writer(f, delimiter='\t')
for simplex in skeleton:
if len(simplex) > 1:
fwriter.writerow(simplex)
print('wrote simplex skeleton to',filename)
return
def writePersistencePairsToFile(perspairs, filename):
with open(filename,'w') as f:
fwriter = csv.writer(f, delimiter='\t')
for pers in perspairs:
fwriter.writerow(pers)
print('wrote persistence pairs to',filename)
return
def randomlyPermuteDistMat(distance_matrix,flag=''):
distmatsize = distance_matrix.shape
n = distmatsize[0]
permmat = np.zeros(distmatsize)
if flag == 'edge':
# randomly permute by row
randperm = np.random.permutation(n)
for rownum in range(n):
permmat[randperm[rownum],:] = distance_matrix[rownum,randperm]
elif flag == 'rand':
# randomly permute all dist values (in upper section, to preserve symmetry)
permidx = np.triu_indices(n,1)
alldistvals = distance_matrix[permidx]
permvals = np.random.permutation(alldistvals)
permmat[permidx] = permvals
permidx_lower = np.tril_indices(n,-1)
permmat[permidx_lower] = permmat.T[permidx_lower]
elif flag == 'dist':
# matrix is purely distance dependent (same averages along non-main diagonals as original) + noise
for diagnum in range(n-1):
diagvals = np.diag(distance_matrix,diagnum+1)
avgval = np.mean(diagvals)
std = np.std(diagvals)
newdiag = np.random.normal(avgval,std,len(diagvals))
diagmat = np.diag(newdiag,diagnum+1)
permmat += diagmat
permmat += diagmat.T
return permmat
def writeDistMatToFile(distmat,filename):
with open(filename,'w') as f:
fwriter = csv.writer(f,delimiter='\t')
for row in distmat:
fwriter.writerow(row)
print('wrote distance matrix to',filename)
return
def writeLoopToFile(path_data,filename):
    with open(filename, 'w') as f:
fwriter = csv.writer(f,delimiter='\t')
for path in path_data:
fwriter.writerow([path[0],"other edges with the same length:"+str(path[1])])
fwriter.writerow(path[2])
print('wrote the information of loops to ', filename)
return
def generate_1_dim_simp_list_from_dist_mat(dist_matrix):
mat_dim = dist_matrix.shape[0]
dim_1_simp_list = [[i,j,dist_matrix[i,j]] for i in range(0,mat_dim-1) for j in range(i+1,mat_dim)]
dim_1_simp_list.sort(key=lambda x: [x[2],max(x[0],x[1]),min(x[0],x[1])])
return dim_1_simp_list
def get_one_dim_persis(fullpersis):
persis_1_dim = []
persis_1_dim_dict = {}
for dim,birth,death,simp_pair in fullpersis:
if(dim==1):
persis_1_dim.append([birth,death,simp_pair[0]])
if(tuple(simp_pair[0]) not in persis_1_dim_dict.keys()):
persis_1_dim_dict[tuple(simp_pair[0])] = [[birth,death,simp_pair[1]]]
else:
persis_1_dim_dict[tuple(simp_pair[0])].append([birth,death,simp_pair[1]])
persis_1_dim.sort(key=lambda x: [x[0]-x[1]])
return persis_1_dim,persis_1_dim_dict
def check_same_length_edge(index,length,dim_1_simp_list):
edge_num = 1
temp_index = index-1
all_simp = len(dim_1_simp_list)
while True:
if(dim_1_simp_list[temp_index][2]-length==0):
edge_num+=1
temp_index-=1
if(temp_index<0):
break
else:
break
temp_index = index+1
while True:
if(dim_1_simp_list[temp_index][2]-length==0):
edge_num+=1
temp_index+=1
if(temp_index>=all_simp):
break
else:
break
return edge_num
def get_loop(reso,minimal_bin,dist_mat,dim_1_simp_list,persis_1_dim_list,output_filename):
loop_num = len(persis_1_dim_list)
path_info = []
for i in range(loop_num):
essen_edge = persis_1_dim_list[i][2]
essen_index = dim_1_simp_list.index([min(essen_edge[0],essen_edge[1]),max(essen_edge[0],essen_edge[1]),dist_mat[essen_edge[0],essen_edge[1]]])
#check if there are other edges having the same length as the essential edge
multi_edge = check_same_length_edge(essen_index,dist_mat[essen_edge[0],essen_edge[1]],dim_1_simp_list)
if(multi_edge>1):
print("essential edge: bin: %d bin: %d length: %f"%(int(minimal_bin+reso*essen_edge[0]),int(minimal_bin+reso*essen_edge[1]),dist_mat[essen_edge[0],essen_edge[1]]))
print("another %d edges have the same length as the essential edge..."%(multi_edge-1))
#get shortest path
G=nx.Graph()
G.add_weighted_edges_from(dim_1_simp_list[:essen_index])
path_info.append([[int(minimal_bin+reso*i) for i in essen_edge],multi_edge-1,[int(minimal_bin+reso*i) for i in nx.shortest_path(G, source=essen_edge[0], target=essen_edge[1],weight="weight")]])
writeLoopToFile(path_info,output_filename)
return
def main(input_file,output_path,output_name,resol):
t0 = time.time()
resolut,freq_mat,min_bin,_=read_raw_HiC_data_no_split(input_file,resol)
print("Generate distance matrix...")
distance_matrix=matrix_normalize([freq_mat])[0]
writeDistMatToFile(distance_matrix,output_path+output_name+'_distmat.txt')
print("Calculate persistent homology...")
simplex_skel,persisdiag,full_persis_info=TDA_func(distance_matrix,output_path+output_name+'_persisdiagram.txt',output_path+output_name+'_skeleton.txt')
print("Loop trace back...")
persis_dim_1_list,_ = get_one_dim_persis(full_persis_info)
dim_1_simp = generate_1_dim_simp_list_from_dist_mat(distance_matrix)
get_loop(resolut,min_bin,distance_matrix,dim_1_simp,persis_dim_1_list,output_path+output_name+'_loop_information.txt')
print('total time (s):',time.time()-t0)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', type=str,help= "input file of HiC contact matrix")
parser.add_argument('-o',type=str, help='the name of output files')
parser.add_argument('-p',type=str,help="the path of output files")
parser.add_argument('-r',type=str,help="resolution of HiC input file")
args=parser.parse_args()
main(args.i,args.p,args.o,args.r)
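# Example invocation (script and file names are hypothetical):
# python hic_loop_tda.py -i chr21_contacts.txt -r 40000 -p ./results/ -o chr21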
|
#hw6 problem 2
#I pledge my honor that I have abided by the Stevens honor system -Maya O
def main():
weight = int(input("Please enter your weight in pounds: "))
height = int(input("Please enter your height in inches: "))
bmi = (720 * weight)/(height ** 2)
print()
    if bmi < 19:
        print("Based on your height and weight, your BMI is below the healthy range")
    elif 19 <= bmi <= 25:
        print("Based on your height and weight, your BMI is within the healthy range")
    elif bmi > 25:
        print("Based on your height and weight, your BMI is above the healthy range")
main()
|
from flask_api import status
from family_foto.app import add_user
from family_foto.models import db
from family_foto.models.photo import Photo
from tests.base_login_test_case import BaseLoginTestCase
from tests.base_photo_test_case import BasePhotoTestCase
class ImageViewTestCase(BaseLoginTestCase, BasePhotoTestCase):
"""
Tests the image route.
"""
def setUp(self):
super().setUp()
self.photo.user = self.mock_current_user.id
def test_route(self):
"""
Tests if the route works for an example image.
"""
db.session.add(self.photo)
db.session.commit()
response = self.client.get('/image/example.jpg')
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_only_show_allowed_photos(self):
"""
Tests if the user can only see photos he/she/it has permission to view.
"""
owner = add_user('owner', '123')
other_photo = Photo(filename='example.jpg', url='/photos/example.jpg',
user=owner.id)
db.session.add(other_photo)
db.session.commit()
response = self.client.get('/image/example.jpg')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_shared_individual_photo(self):
"""
Tests sharing an individual photo with another user.
"""
db.session.add(self.photo)
db.session.commit()
other_user = add_user('other', 'user')
response = self.client.post('/image/example.jpg', data=dict(share_with=[other_user.id]))
self.assertEqual(status.HTTP_200_OK, response.status_code)
|
#!/usr/bin/python
import sys, os, os.path
import simplejson as json
from urllib import urlopen
"""
This script fetches command-line snippets from the site commandlinefu.com in
JSON format and displays them in a neat fashion.
"""
def fetchcommands(command):
""" fetch the commadline snippets based on the command passd."""
requesturl = "http://www.commandlinefu.com/commands/%s/json" % (command)
""" make the network connection; fetch the data."""
try:
httpdata = urlopen(requesturl).read()
except IOError:
print "Failed to connect to the network, maybe it's down."
sys.exit(-2)
jsondata = json.loads(httpdata)
print
print "%s results fetched from commandlinefu.com" % (len(jsondata))
print "-------------------"
for result in jsondata:
print "command: %s" % result['command']
print "summary: %s" % result['summary']
print
if __name__ == '__main__':
fetchcommands("browse/sort-by-votes")
|
# vim: encoding=utf-8
""" Localization table
"""
LITS = {
'en': ["New Moon", "First Quarter", "Full Moon", "Last Quarter"],
'be': ["Маладзік", "Першая чвэрць", "Поўня", "Апошняя чвэрць"],
'bg': ["Новолуние", "Първа четвърт", "Пълнолуние", "Последна четвърт"],
'ca': ["Noviluni", "Quart creixent", "Pleniluni", "Lluna minvant"],
'cs': ["Nov", "První čtvrť", "Úplněk", "Poslední čtvrť"],
'da': ["Nymåne", "Tiltagende", "Fuldmåne", "Aftagende"],
'de': ["Neumond", "Erstes Viertel", "Vollmond", "Letztes Viertel"],
'et': ["Noorkuu", "Esimene veerand", "Täiskuu", "Viimane veerand"],
'el': ["Νέα Σελήνη", "Πρώτο τέταρτο", "Πανσέληνος", "Τελευταίο τέταρτο"],
'es': ["Luna nueva", "Cuarto creciente", "Luna Llena", "Cuarto menguante"],
'eo': ["Novluno", "Unua lunduono", "Plenluno", "Lasta lunduono"],
'fi': ["Uusikuu", "Kasvava puolikuu", "Täysikuu", "Laskeva puolikuu"],
'fr': ["Nouvelle lune", "Premier quartier", "Pleine lune", "Dernier quartier"],
'hr': ["Mlađak", "Prva četvrt", "Uštap", "Zadnja četvrt"],
'ia': ["Nove lunio", "Primo quarte", "Plenilunio", "Ultime quarte"],
'it': ["Luna nuova", "Primo quarto", "Luna piena", "Ultimo quarto"],
'ja': ["新月", "上弦", "満月", "下弦"],
'nl': ["Nieuwe maan", "Eerste kwartier", "Volle maan", "Laatste kwartier"],
'ru': ["Новолуние", "Первая четверть", "Полнолуние", "Последняя четверть"],
'pl': ["Nów", "Pierwsza kwadra", "Pełnia", "Ostatnia kwadra"],
'pt': ["Lua nova", "Quarto crescente", "Lua cheia", "Quarto minguante"],
'ro': ["Lună nouă", "Primul pătrar", "Lună plină", "Ultimul pătrar"],
'sk': ["Nov", "Prvá štvrť", "Úplnok", "Posledná štvrť"],
'sr': ["Mlađak", "Prva četvrt", "Uštap", "Poslednja četvrt"],
'uk': ["Молодик", "Перша чверть", "Повня", "Остання чверть"],
'th': ["เดือนมืด", "ข้างขึ้น", "วันเพ็ญ", "ข้างแรม"],
'ko': ["초승달", "상현달", "보름달", "하현달"],
'kn': ["ಅಮಾವಾಸ್ಯೆ", "ಕೃಷ್ಣಪಕ್ಷ, ಅಷ್ಟಮಿ ", "ಹುಣ್ಣಿಮೆ", "ಶುಕ್ಲಪಕ್ಷ, ಅಷ್ಟಮಿ"],
'zh_TW': ["新月", "上弦", "滿月", "下弦"],
'ar': ["المحاق", "الربع الأول", "البدر", "الربع الأخير"],
'nb': ["Nymåne", "Første kvarter", "Fullmåne", "Siste kvarter"],
'nn': ["Nymåne", "Første kvarter", "Fullmåne", "Siste kvarter"],
'cy': ["Lleuad Newydd", "Chwarter Cyntaf", "Lleuad Llawn", "Chwarter Olaf"],
'tr': ["Yeni Ay", "İlk Dördün", "Dolunay", "Son Dördün"],
}
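# Convenience lookup (a sketch, not part of the original table): fall back to
# English when a locale has no entry.
def phase_name(lang, phase_index):
    """Return the localized name for phase_index (0=new moon .. 3=last quarter)."""
    return LITS.get(lang, LITS['en'])[phase_index]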
|
data = []
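# Each phrase-table line holds ' ||| '-separated fields; the third field
# carries the scores, and its third entry is read below as the direct phrase
# translation probability (the usual Moses score layout).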
with open("../phrase-table.hi-en") as f:
for line in f:
temp = line.strip().split(" ||| ")
scores = temp[2]
direct_phrase_translation_probability = float(scores.split()[2].strip())
data.append((direct_phrase_translation_probability, line))
data.sort()
with open("../sorted_phrase_table.hi-en", "w") as f:
for line in data[::-1]:
f.write(line[1].strip() + "\n")
|
class LaserDiode:
# not implemented
pass
|
#!/usr/bin/python
import vault_utils as vault
import time
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
import argparse
parser = argparse.ArgumentParser(description='Vault Exporter')
parser.add_argument('--cert','-c', help='Client certificate.', required=True)
parser.add_argument('--key','-k', help='Client key.', required=True)
parser.add_argument('--interval','-t', help='Time interval between scrapes.', required=True)
parser.add_argument('--hash','-v', help='Current hash value.', required=True)
args = parser.parse_args()
class VaultCollector(object):
def collect(self):
yield GaugeMetricFamily('vault_init', 'Vault init check', value=vault.healthCheck(args.cert, args.key))
yield GaugeMetricFamily('vault_seal', 'Vault seal check', value=vault.sealCheck(args.cert, args.key))
yield GaugeMetricFamily('vault_api', 'Vault API check', value=vault.apiCheck(args.cert, args.key))
yield GaugeMetricFamily('vault_hash', 'Vault Hash check', value=vault.hashCheck(args.hash))
if __name__ == "__main__":
REGISTRY.register(VaultCollector())
start_http_server(9118)
while True: time.sleep(float(args.interval))
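# Example invocation (all values are placeholders); prometheus_client then
# serves the metrics on http://localhost:9118/metrics:
# python vault_exporter.py --cert client.crt --key client.key --interval 30 --hash abc123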
|
dictionary = [["线", "长方形", "正方形", "圆", "多边形", "線", "長方形", "正方形", "円", "多辺形","line", "rectangle", "square", "circle", "polygon"],
["画", "描", "Draw", "为", "是", "在", "从",
"到", "は", "を", "に", "から", "まで", "is","at","the","from","to"],
["宽", "高", "颜色", "大小", "背景色", "底色", "半径", "位置", "边框", "顺序", "太さ", "高さ", "色", "大さ", "背景色",
"バックグラウンド", "半径", "ところ", "枠", "順番", "width", "height", "Color", "Size", "background", "background color","radius","Location","border","order" ]
]
def objName(name):
objList = dictionary[0]
for i in objList:
x = name.find(i)
# print(x)
if x != -1:
return objList.index(i)
return -1
def command(command):
    commandList = dictionary[1]  # verbs live in dictionary[1]; dictionary[2] holds the attributes used by option()
for i in commandList:
x = command.find(i)
# print(x)
if x != -1:
return commandList.index(i)
return -1
def option(option):
optionList = dictionary[2]
for i in optionList:
x = option.find(i)
# print(x)
if x != -1:
return optionList.index(i)
return -1
def mes(ID):
mesList = ["No error", "Can't create file","Draw object succesed"]
return mesList[ID]
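# Example lookups (illustrative):
# objName('Draw a rectangle')  # -> 11, the index of 'rectangle' in dictionary[0]
# command('Draw a rectangle')  # -> 2, the index of 'Draw' in dictionary[1]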
|
import clustering.scripts as clustering_scripts
import numpy as np
import bisect
import classes
from globals import (
BATTERY_LIMIT,
LOST_TRIP_REWARD,
ITERATION_LENGTH_MINUTES,
WHITE,
DISCOUNT_RATE,
)
from decision.get_policy import get_policy
from progress.bar import IncrementalBar
class World:
class WorldMetric:
def __init__(self):
self.lost_demand = []
self.average_deviation_ideal_state = []
self.deficient_battery = []
self.time = []
def add_analysis_metrics(self, world):
"""
Add data to analysis
:param world: world object to record state from
"""
self.lost_demand.append(
sum([1 for reward in world.rewards if reward == LOST_TRIP_REWARD])
if len(world.rewards) > 0
else 0
)
self.average_deviation_ideal_state.append(
sum(
[
abs(
(sum([1 for _ in cluster.get_available_scooters()]))
- cluster.ideal_state
)
for cluster in world.state.clusters
]
)
/ len(world.state.clusters)
)
self.deficient_battery.append(
sum(
[
cluster.ideal_state * 100
- (
sum(
[
scooter.battery
for scooter in cluster.get_available_scooters()
]
)
)
for cluster in world.state.clusters
if len(cluster.scooters) < cluster.ideal_state
]
)
)
self.time.append(world.time)
def get_lost_demand(self):
"""
Returns list of all lost demand
"""
return self.lost_demand
def get_deviation_ideal_state(self):
"""
Returns list of average deviation from ideal state during the time analysed
"""
return self.average_deviation_ideal_state
def get_deficient_battery(self):
"""
Returns list of total deficient battery in the system during the analysed time
"""
return self.deficient_battery
def get_time_array(self):
"""
        Returns a list of all timestamps at which data used for analysis is recorded
"""
return self.time
def get_all_metrics(self):
"""
Returns all metrics recorded for analysis
"""
return (
self.lost_demand,
self.average_deviation_ideal_state,
self.deficient_battery,
)
def __init__(
self,
shift_duration: int,
sample_size=100,
number_of_clusters=20,
initial_state=None,
policy="RandomRolloutPolicy",
initial_location_depot=True,
verbose=False,
):
self.shift_duration = shift_duration
if initial_state:
self.state = initial_state
else:
self.state = clustering_scripts.get_initial_state(
sample_size=sample_size,
number_of_clusters=number_of_clusters,
initial_location_depot=initial_location_depot,
)
self.stack = []
self.time = 0
self.rewards = []
self.cluster_flow = {
(start, end): 0
for start in np.arange(len(self.state.clusters))
for end in np.arange(len(self.state.clusters))
if start != end
}
self.policy = get_policy(policy)
self.metrics = World.WorldMetric()
self.verbose = verbose
if verbose:
self.progress_bar = IncrementalBar(
"Running World",
check_tty=False,
max=round(shift_duration / ITERATION_LENGTH_MINUTES) + 1,
color=WHITE,
suffix="%(percent)d%% - ETA %(eta)ds",
)
def run(self):
while self.time < self.shift_duration:
event = self.stack.pop(0)
event.perform(self)
if isinstance(event, classes.GenerateScooterTrips) and self.verbose:
self.progress_bar.next()
if self.verbose:
self.progress_bar.finish()
def get_remaining_time(self) -> int:
"""
Computes the remaining time by taking the difference between the shift duration
and the current time of the world object.
        :return: the remaining time
"""
return self.shift_duration - self.time
def add_reward(self, reward: float, discount=False) -> None:
"""
Adds the input reward to the rewards list of the world object
:param discount: boolean if the reward is to be discounted
:param reward: reward given
"""
self.rewards.append(reward * self.get_discount() if discount else reward)
def get_total_reward(self) -> float:
"""
Get total accumulated reward at current point of time
:return:
"""
return sum(self.rewards)
def add_event(self, event) -> None:
"""
Adds event to the sorted stack.
Avoids calling sort on every iteration by using the bisect package
:param event: event to insert
"""
insert_index = bisect.bisect([event.time for event in self.stack], event.time)
self.stack.insert(insert_index, event)
def add_trip_to_flow(self, start: int, end: int) -> None:
"""
Adds a trip from start to end for cluster flow
:param start: departure cluster
:param end: arrival cluster
"""
self.cluster_flow[(start, end)] += 1
def get_cluster_flow(self) -> [(int, int, int)]:
"""
Get all flows between cluster since last vehicle arrival
:return: list: tuple (start, end, flow) flow from departure cluster to arrival cluster
"""
return [(start, end, flow) for (start, end), flow in self.cluster_flow.items()]
def clear_flow_dict(self) -> None:
"""
Clears the cluster flow dict
"""
for key in self.cluster_flow.keys():
self.cluster_flow[key] = 0
def get_scooters_on_trip(self) -> [(int, int, int)]:
"""
Get all scooters that are currently out on a trip
:return: list of all scooters that are out on a trip
"""
return [
(event.departure_cluster_id, event.arrival_cluster_id, event.scooter.id)
for event in self.stack
if isinstance(event, classes.ScooterArrival)
]
def get_discount(self):
        # Divide by 60 as there are 60 minutes in an hour; expressing the time in hours keeps the exponent small
return DISCOUNT_RATE ** (self.time / 60)
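# Minimal usage sketch (a hypothetical run; assumes initial events such as
# classes.GenerateScooterTrips have been pushed with add_event before run()
# starts popping from the stack, and the constructor arguments are placeholders):
# world = World(shift_duration=120, sample_size=100, number_of_clusters=20)
# world.run()
# print(world.get_total_reward())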
|
import pygame
import random
from math import *
from yume import *
from yume import gfx
from yume.gfx import get_gfx
from yume.resource import *
from pygame import Rect
class Projectile(gfx.Drawable):
def __init__(self, origin, target):
gfx.Drawable.__init__(self)
self.origin = origin
self.target = target
self.x, self.y = origin.rect.center
def draw(self, screen):
screen.blit(self.gfx.surface, (self.x, self.y))
self.gfx.next_frame()
def destroy(self):
try:
Global.arena.projectiles.remove(self)
        except ValueError:
            pass  # projectile was already removed from the arena
class ProjectileDumb(Projectile):
aoe = 7
hits = 1
def __init__(self, origin, target):
Projectile.__init__(self, origin, target)
if hasattr(target, 'rect'):
self.tx, self.ty = target.rect.center
else:
self.tx, self.ty = target
direction = atan2(self.ty - self.y, self.tx - self.x)
self.dx = cos(direction) * self.speed
self.dy = sin(direction) * self.speed
self.damage = origin.damage
self.distance = origin.range / self.speed
self.traveled_distance = random.randint(0, 5)
def update(self):
self.x += self.dx
self.y += self.dy
self.traveled_distance += 1
if self.traveled_distance >= self.distance:
return self.destroy()
area = Rect(self.x - self.aoe, self.y - self.aoe, 2 * self.aoe, 2 * self.aoe)
# area = Rect(self.x-self.aoe/2, self.y-self.aoe/2, self.aoe, self.aoe)
monsters = list(Global.face.get_monsters_in_rect(area))
if monsters:
i = 0
for monster in monsters:
self.origin.hit(monster, self)
i += 1
if i >= self.hits:
break
self.destroy()
class ProjectileBullet(ProjectileDumb):
aoe = 7
hits = 1
speed = 3
graphic = gfx.Bubble
def __init__(self, origin, target):
ProjectileDumb.__init__(self, origin, target)
if hasattr(target, 'rect'):
self.tx, self.ty = target.rect.center
distance = sqrt((self.tx-self.x) ** 2 + (self.ty-self.y) ** 2) / self.speed
self.tx += target.vector_x * target.speed * distance
self.ty += target.vector_y * target.speed * distance
else:
self.tx, self.ty = target
direction = atan2(self.ty - self.y, self.tx - self.x)
# self.gfx = get_gfx(gfx.B, (1, 1))
self.dx = cos(direction) * self.speed
self.dy = sin(direction) * self.speed
self.damage = origin.damage
self.distance = origin.range / self.speed
self.traveled_distance = random.randint(0, 5)
class ProjectileGuardianBullet(ProjectileBullet):
graphic = gfx.GuardianBullet
speed = 4
aoe = 10
class ProjectileVirus(ProjectileDumb):
speed = 2
graphic = gfx.Virus
|
"""
Various utils to retrieve from the database and export to file
"""
import config
from lib.Database import Database
import os
import shutil
from uuid import UUID
from dateutil.parser import parse as dateparse
async def main(args):
if len(args) == 0:
print("What you want to export to? [syncrude|pymot|particlesVelocitiesLatents]")
else:
if args[0] == "syncrude":
if args[1] == "all":
await exportSyncrudeAll(args[2:])
else:
await exportSyncrude(args[1:])
if args[0] == "particles":
await exportParticles(args[1:])
if args[0] == "particlesVelocitiesLatents":
await exportParticlesVelocitiesLatents(args[1:])
if args[0] == "Syncrude2018":
await exportSyncrude2018(*args[1:])
if args[0] == "DensityExperiment":
await exportDensityTest(*args[1:])
else:
print("Invalid export sub-command")
async def exportParticles(args):
directory = args[0]
limit = args[1]
db = Database()
q = """
SELECT e.experiment, f.frame, t.track
FROM experiment e, frame f, track t, particle p
WHERE e.experiment = f.experiment
AND p.particle = t.particle
AND f.frame = t.frame
AND e.experiment = 'b6734bad-2dfc-4502-9260-a7d71e72f6a9'
AND p.area > 100
AND p.category in (2,3)
ORDER BY RANDOM()
LIMIT {limit}
"""
s = q.format(limit=limit)
crops = []
async for result in db.query(s):
srcFile = os.path.join(
config.experiment_dir,
str(result["experiment"]),
str(result["frame"]),
str(result["track"]) + ".jpg",
)
dstFile = os.path.join(directory, str(result["track"]) + ".jpg")
shutil.copyfile(srcFile, dstFile)
async def exportParticlesVelocitiesLatents(args):
directory = args[0]
limit = args[1]
db = Database()
q = """
SELECT e.experiment, f1.frame, t1.track, t1.latent, t2.location-t1.location as delta
FROM experiment e, frame f1, frame f2, track t1, track t2, particle p
WHERE e.experiment = f1.experiment
AND e.experiment = f2.experiment
AND p.particle = t1.particle
AND f1.frame = t1.frame
AND f2.frame = t2.frame
AND t1.particle = t2.particle
AND f1.number = f2.number-1
AND e.experiment = '3a24cfcf-bef5-40a1-a477-6e7007bcd7ae'
AND p.area > 100
AND f1.number > 200
AND f1.number < 500
AND p.category in (2,3)
ORDER BY RANDOM()
LIMIT {limit}
"""
s = q.format(limit=limit)
crops = []
line = "{track}, {dx}, {dy}, {latent}\n"
outFile = os.path.join(directory, "data.txt")
with open(outFile, "w+") as f:
async for result in db.query(s):
srcFile = os.path.join(
config.experiment_dir,
str(result["experiment"]),
str(result["frame"]),
str(result["track"]) + ".jpg",
)
dstFile = os.path.join(directory, str(result["track"]) + ".jpg")
shutil.copyfile(srcFile, dstFile)
dx = result["delta"][0]
dy = result["delta"][1]
f.write(
line.format(
track=result["track"], dx=dx, dy=dy, latent=result["latent"]
)
)
async def exportSyncrudeAll(args):
"""
dir [prefix]
"""
pass
async def exportSyncrude(args):
"""
experiment dir [prefix]
"""
experiment_uuid = args[0]
directory = args[1]
if len(args) == 3:
prefix = args[2]
else:
prefix = ""
db = Database()
q = """
SELECT *
FROM experiment
WHERE experiment = $1
"""
day, name, method = None, None, None
async for record in db.query(q, UUID(experiment_uuid)):
day = str(record["day"])
name = record["name"].strip()
method = record["method"].strip()
dayDirectory = os.path.join(directory, day)
file = name + "_" + method + ".txt"
if not os.path.exists(dayDirectory):
print("created new day directory", dayDirectory)
os.mkdir(dayDirectory)
categoryMap = await db.category_map()
outfile = os.path.join(dayDirectory, file)
with open(outfile, "w+") as f:
q = """
SELECT f2.number as fnum,
t2.particle as pid,
p.area as area,
p.intensity as intensity,
t2.location-t1.location as delta,
p.category as category
FROM frame f1, frame f2, track t1, track t2, particle p
WHERE f1.number = f2.number-1
AND f1.frame = t1.frame
AND f2.frame = t2.frame
AND t1.particle = t2.particle
AND t2.particle = p.particle
AND f1.experiment = $1
ORDER BY t2.particle, f1.number
"""
async for r in db.query(q, UUID(experiment_uuid)):
categoryName = categoryMap[r["category"]]
dx = r["delta"].x
dy = -r["delta"].y
l = [r["fnum"], r["pid"], r["area"], r["intensity"], dx, dy, categoryName]
sl = [str(i) for i in l]
f.write("{}\n".format(", ".join(sl)))
async def exportDensityTest(experiment_uuid):
"""
    The purpose of this function is to test whether there is a correlation
    between the "density" of particles in a frame and
    the number of "out-of-control" particles
"""
db = Database()
s1 = """
SELECT number, frame from frame where experiment ='{experiment}' ORDER BY number;
"""
s2 = """
SELECT count(*) as count
FROM particle p, frame f, track t
WHERE p.particle = t.particle
AND f.frame = '{frame}'
AND f.frame = t.frame
"""
s3 = """
SELECT count(*) as count
FROM particle p, frame f, track t
WHERE p.particle = t.particle
AND f.frame = '{frame}'
AND f.frame = t.frame
AND (2*p.radius < 194
OR 2*p.radius > 224)
"""
all_particles_by_frame = []
ooc_particles_by_frame = []
async for record in db.query(s1.format(experiment=experiment_uuid)):
async for r in db.query(s2.format(frame=record["frame"])):
all_particles_by_frame.append(r["count"])
async for r in db.query(s3.format(frame=record["frame"])):
ooc_particles_by_frame.append(r["count"])
print(
"Frame:",
record["number"],
all_particles_by_frame[-1],
ooc_particles_by_frame[-1],
)
# print(all_particles_by_frame)
# print(ooc_particles_by_frame)
# DO what ya want :)
import matplotlib.pyplot as plt
plt.scatter(all_particles_by_frame, ooc_particles_by_frame)
plt.show()
async def exportSyncrude2018(experiment_uuid, directory):
db = Database()
q = """
SELECT *
FROM experiment
WHERE experiment = $1
"""
day, name, method = None, None, None
async for record in db.query(q, UUID(experiment_uuid)):
day = str(record["day"])
name = record["name"].strip()
method = record["method"].strip()
file = name + "_" + method + ".txt"
if not os.path.exists(directory):
print("created new day directory", directory)
os.mkdir(directory)
categoryMap = await db.category_map()
s = """
SELECT f2.number as fnum,
f2.frame as fid,
p.particle as pid,
p.area as area,
p.intensity as intensity,
p.perimeter as perimeter,
t2.location[1]-t1.location[1] as delta_y,
t2.location[0] as xpos,
t2.location[1] as ypos,
p.major as major,
p.minor as minor,
p.orientation as ori,
p.solidity as solid,
p.eccentricity as ecc
FROM frame f1, frame f2, track t1, track t2, particle p, segment s
WHERE f1.number = f2.number-1
AND f1.frame = t1.frame
AND f2.frame = t2.frame
AND t1.particle = t2.particle
AND t2.particle = p.particle
AND s.segment = f1.segment
AND f1.experiment = '{experiment}'
"""
q = s.format(experiment=experiment_uuid)
# CSV Headers: Frame ID, Particle ID, Particle Area, Particle Velocity, Particle Intensity, Particle Perimeter, X Position, Y Position, Major Axis Length, Minor Axis Length, Orientation, Solidity, Eccentricity.
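    # Note: the "Particle Velocity" column is delta_y, the particle's
    # y-displacement between consecutive frames (velocity in position units
    # per frame).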
l = (
"{fid},{pid},{area},{vel},{inten},{per},{xPos},{yPos},{major},{minor},{ori},{solid},{ecc}\n"
)
outfile = os.path.join(directory, file)
with open(outfile, "w+") as f:
async for r in db.query(q):
s = l.format(
fid=r["fid"],
pid=r["pid"],
area=r["area"],
vel=r["delta_y"],
inten=r["intensity"],
per=r["perimeter"],
xPos=r["xpos"],
yPos=r["ypos"],
major=r["major"],
minor=r["minor"],
ori=r["ori"],
solid=r["solid"],
ecc=r["ecc"],
)
f.write(s)
file = name + "_" + method + "_particleCounts.txt"
s = """
SELECT
f.number as number,
count(*) as count
FROM frame f
LEFT JOIN track t USING (frame)
LEFT JOIN particle p USING (particle)
WHERE f.experiment = '{experiment}'
GROUP BY f.frame
ORDER BY f.number ASC
"""
q = s.format(experiment=experiment_uuid)
l = "{segment},{count}\n"
outfile = os.path.join(directory, file)
with open(outfile, "w+") as f:
async for r in db.query(q):
s = l.format(segment=r["number"], count=r["count"])
f.write(s)
|
"""Live tests"""
import unittest
import json
from heatmiserV3 import heatmiser, connection
class TestLiveHeatmiserThermostat(unittest.TestCase):
"""Testing an actual thermostat"""
def setUp(self):
"""Creates serial con and thermostat"""
self.con = connection.hmserial('192.168.1.57', '102')
self.con.open()
self.thermostat1 = heatmiser.HeatmiserThermostat(1, 'prt', self.con)
def test_read_dcb(self):
"""This test makes sure that the values map correctly"""
data = self.thermostat1.get_target_temperature()
print(json.dumps(data, indent=2))
assert data[11]['value'] == 1
def tearDown(self):
"""Shutdown serial conn"""
self.con.close()
|
from ortools.constraint_solver import pywrapcp
import pudb
# pudb.set_trace()
def main():
# Create Solver
solver = pywrapcp.Solver("ProcessOrder")
# Data Feed
machines_count = 1
all_machines = range(0, machines_count)
# ["Knife", 5] means it takes 5 minutes to setup machine M1 for production
# of "Knife"
all_products = [["Knife", 3],
["Spoon", 2],
["Fork", 5]]
# 30, [1,5] means 5 unit time is needed for product 1 on deadline 30th.
all_orders = [[60,[[0, 5], [2, 6]]],
[90,[[0, 15], [1, 10], [2,10]]],
[70,[[0, 7],[2, 4]]],
[25,[[1, 7],[2, 8]]],
[30,[[0, 9],[1, 5],[2, 2]]],
]
machines = [[0, 0],
[0, 0, 0],
[0, 0],
[0, 0],
[0, 0, 0],
]
products = [[0, 2],
[0, 1, 2],
[0, 2],
[1, 2],
[0, 1, 2]
]
setup_times = [3, 2, 5]
orders = []
# Separate product processing times
processing_times = []
deadlines = []
for i, order in enumerate(all_orders):
productwise_time = []
        for j, product_entry in enumerate(order[1]):
            productwise_time.append(product_entry[1])
processing_times.append(productwise_time)
deadlines.append(order[0])
orders.append(order[1])
print("Problem Information".center(40, "*"))
print("Orders: ".center(20, '='))
for i, order in enumerate(orders):
print("Order: {0}".format(i), order[0:])
print("Processing Times: ".ljust(20), processing_times)
print("Deadlines ".ljust(20), deadlines)
#Statistics
num_machines = len(all_machines)
num_products = len(all_products)
num_orders = len(all_orders)
print("Number of Machines: ".ljust(20), num_machines)
print("Number of Products: ".ljust(20), num_products)
print("Number of Orders: ".ljust(20), num_orders)
# Compute horizon
horizon = 0
for i in range(num_orders):
horizon += sum(processing_times[i])
print("Horizon : ".ljust(20), horizon)
print("Problem Information".center(40, "*"))
# Creating TASKS
# task(i, j) = represents i-th order and j-th product task
all_tasks = {}
for i, order in enumerate(orders):
for j, product in enumerate(order):
all_tasks[(i, j)] = solver.FixedDurationIntervalVar(0,
deadlines[i],
processing_times[i][j],
False,
'Order_%i_Product_%i' % (i, j)
)
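    # Note: FixedDurationIntervalVar(0, deadlines[i], duration, ...) bounds the
    # task's *start* by the deadline, not its end; a task may still finish up
    # to its duration after the deadline. Ending on time would need a start
    # upper bound of deadlines[i] - processing_times[i][j].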
# Create Sequence Variables
all_sequences = []
for i in all_machines:
machine_jobs = []
for j, order in enumerate(orders):
for k, product in enumerate(order):
if machines[j][k] == i:
machine_jobs.append(all_tasks[(j, k)])
disj = solver.DisjunctiveConstraint(machine_jobs, 'Machine %i'%i)
all_sequences.append(disj.SequenceVar())
solver.Add(disj)
print("Machine Jobs".center(40, "*"))
print(machine_jobs)
print("Sequences: ")
print(all_sequences)
    # Setting Objective
    # To minimize total setup length (note: no objective is actually added
    # below, so the search only looks for a feasible sequence)
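    # A hedged sketch of one possible objective (not wired in by the original):
    # minimize the makespan, i.e. the latest end time over all tasks, passing
    # the objective monitor to Solve alongside the collector:
    #
    #     obj_var = solver.Max([t.EndExpr().Var() for t in all_tasks.values()])
    #     objective = solver.Minimize(obj_var, 1)
    #     solver.Solve(main_phase, [objective, collector])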
# Create search phases.
sequence_phase = solver.Phase([all_sequences[i] for i in all_machines],
solver.SEQUENCE_DEFAULT)
main_phase = solver.Compose([sequence_phase])
# Create the solution collector.
collector = solver.LastSolutionCollector()
# Add the interesting variables to the SolutionCollector.
collector.Add(all_sequences)
for i in all_machines:
        sequence = all_sequences[i]
        sequence_count = sequence.Size()
for j in range(0, sequence_count):
t = sequence.Interval(j)
collector.Add(t.StartExpr().Var())
collector.Add(t.EndExpr().Var())
    # Solve the problem. (The original never called Solve, so the collector
    # below would have held no solution to read.)
    if not solver.Solve(main_phase, [collector]):
        print("No solution found.")
        return
disp_col_width = 10
for i in all_machines:
seq = all_sequences[i]
sol_line = "Machine " + str(i) + ": "
sol_line_tasks = "Machine " + str(i) + ": "
sequence = collector.ForwardSequence(0, seq)
seq_size = len(sequence)
for j in range(0, seq_size):
            t = seq.Interval(sequence[j])
# Add spaces to output to align columns.
sol_line_tasks += t.Name() + " " * (disp_col_width - len(t.Name()))
for j in range(0, seq_size):
            t = seq.Interval(sequence[j])
sol_tmp = "[" + str(collector.Value(0, t.StartExpr().Var())) + ","
sol_tmp += str(collector.Value(0, t.EndExpr().Var())) + "] "
# Add spaces to output to align columns.
sol_line += sol_tmp + " " * (disp_col_width - len(sol_tmp))
sol_line += "\n"
sol_line_tasks += "\n"
print(sol_line_tasks)
print("Time Intervals for Tasks\n")
print(sol_line)
if __name__=='__main__':
main()
|
import verifier
import sys
import time
def main(argv):
for k in range(83, 100):
start = time.time()
verifier.main(['microvisor.hex', '/dev/ttyACM0', 1])
        print(str(k) + ":" + str(time.time() - start))
if __name__ == "__main__":
main(sys.argv[1:])
|
import zmq
import sys
import threading
def sendRequest(): # to send message requests to server
context = zmq.Context()
sock = context.socket(zmq.PUSH) #list of queued items need to be routed for the one asking for it
sock.connect("tcp://127.0.0.1:5678")
while True:
try:
msg = promptMessage()
except KeyboardInterrupt:
break
if msg != '':
sock.send_string("[{}]: ".format(user) + msg)
def promptMessage():
msg = input("[%s] > " % user)
return msg
def receiveMessage():  # subscriber: receives and prints broadcast messages
    context = zmq.Context()
    sock = context.socket(zmq.SUB)  # subscribe to the server's broadcasts
    sock.setsockopt_string(zmq.SUBSCRIBE, '')  # subscribe to all messages
    sock.connect("tcp://127.0.0.1:5677")
    while True:  # loop instead of re-spawning a thread (and a fresh socket) per message
        output = sock.recv()
        printOutput(output.decode())
def printOutput(m):
    if m.find(user) == -1:  # skip re-printing the user's own messages
print('\n' + m + ("\n[%s] > " % user), end='')
if __name__ == "__main__":
try:
user = " ".join(sys.argv[1:]) #name, if two names or more
print("User[%s] Connected to the chat server." % user)
#Process(target=receiveMessage).start()
threading.Thread(target=receiveMessage).start()
sendRequest()
except Exception as e:
print(e)
"""
SAMPLE OUTPUT:
(my-venv) MacBook-Pro:lab3 Aditi$ python3 client.py Bob
User[Bob] Connected to the chat server.
[Bob] > What's up guys?
[Bob] >
[Alice]: Not much, you?
[Bob] >
[Smith]: You guys want food?
[Bob] > Yeah let's get something
[Bob] >
[Alice]: I'm also hungry
[Bob] >
"""
|
from __future__ import print_function
from __future__ import division
from pyLM import *
from pyLM.units import *
from pySTDLM import *
from pySTDLM.PostProcessing import *
from pySTDLM.StandardReactionSystems import *
# from pySTDLM.StandardCells import *
import h5py
import numpy as np
import os
from lib.lmUtils import buildAnyShape
from setMolecules import setMolecules
filename_lm='CA1_small_sim.lm'
filename_morph='CA1_small.h5'
if os.path.exists(filename_lm):
    os.remove(filename_lm)
ext = 'default'
cyt = 'cytoplasm'
psd = 'psd'
domains = {ext: 0, cyt: 1, psd: 2}
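# Avogadro's constant (molecules per mole), presumably for converting between
# particle counts and molar concentrations further down.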
NA = 6.022e23
##
##
##
|
import os
import queue
import collections
import random
from enum import Enum, unique
import alive_util
import util_channel as channel
import alive_mem as am
@unique
class FsTargetEnum(Enum):
unknown = 0
regular_file = 1
directory = 2
@unique
class FsCommandEnum(Enum):
reset = 0
get_prop = 2
list_dir = 3
walk = 4
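# FilesystemSensor runs as its own thread: it pops FsCommandEnum commands from
# the mem2fs channel, performs the corresponding filesystem step, and pushes
# (MemoryInfoEnum, payload) feedback tuples back on the any2mem channel.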
class FilesystemSensor(alive_util.AliveThread):
def __init__(self):
super().__init__()
self.__ctx_target = None
self.__chan_from_mem = channel.mem2fs
self.__chan_to_mem = channel.any2mem
def __del__(self):
self.__chan_from_mem.join()
@alive_util.reported
def __step_check_prop(self):
target = self.__ctx_target
prop = None
if target is not None:
target_type = FsTargetEnum.regular_file if os.path.isfile(target) else \
FsTargetEnum.directory if os.path.isdir(target) else FsTargetEnum.unknown
prop = target, target_type
feedback = am.MemoryInfoEnum.fs_target_prop_done, prop
self.__chan_to_mem.put(feedback)
@alive_util.reported
def __step_walk_directory(self):
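        # Stub: directory walking is not implemented yet, so this always
        # reports data=None (the same applies to __step_list_target below).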
data = None
if self.__ctx_target is None:
pass
feedback = am.MemoryInfoEnum.fs_target_list_done, data
self.__chan_to_mem.put(feedback)
@alive_util.reported
def __step_list_target(self):
data = None
if self.__ctx_target is None:
pass
feedback = am.MemoryInfoEnum.fs_target_list_done, data
self.__chan_to_mem.put(feedback)
@alive_util.reported
def __step_reset_target(self):
self.__ctx_target = os.getcwd()
feedback = am.MemoryInfoEnum.fs_target_reset_done, self.__ctx_target
self.__chan_to_mem.put(feedback)
@staticmethod
def __walking_fullname(cur_dir):
for root, dirs, files in os.walk(cur_dir):
for filename in files:
fullname = os.path.join(root, filename)
yield fullname
def deprecated__step_walk_dir(self):
is_broken = False
cur_dir = self.__ctx_target
for root, dirs, files in os.walk(cur_dir):
for filename in files:
fullname = os.path.join(root, filename)
self.__ctx_target = fullname
print(f'walk dir @ {self.__ctx_target}')
is_broken = random.choice([True, False])
if is_broken:
break
if is_broken:
break
if not is_broken:
self.__ctx_target = None
    # overrides run() from the AliveThread superclass
def run(self):
action = {
FsCommandEnum.reset: self.__step_reset_target,
FsCommandEnum.list_dir: self.__step_list_target,
FsCommandEnum.get_prop: self.__step_check_prop,
FsCommandEnum.walk: self.__step_walk_directory
}
while True:
idle_flag = False
try:
cmd = self.__chan_from_mem.get(timeout=1)
except queue.Empty:
idle_flag = True
finally:
if idle_flag:
self.__chan_to_mem.put((am.MemoryInfoEnum.fs_sensor_idle, None))
else:
self.__chan_from_mem.task_done()
action[cmd]()
# print(f'{cmd}')
def __example_codes(self):
date_from_name = {}
dir_list = []
for name in os.listdir(self.__ctx_target):
fullname = os.path.join(self.__ctx_target, name)
if os.path.isfile(fullname):
date_from_name[fullname] = os.path.getatime(fullname)
print(f'{fullname}, {date_from_name[fullname]}')
elif os.path.isdir(fullname):
dir_list += [fullname]
def __example_codes_2(self):
data = collections.defaultdict(list)
for root, dirs, files in os.walk(self.__ctx_target):
for filename in files:
fullname = os.path.join(root, filename)
key = (os.path.getsize(fullname), filename)
data[key].append(fullname)
_fs_sensor = FilesystemSensor.create()
def instance():
return _fs_sensor
|
from django.contrib import admin
from serial_configuration.models import SerialPort
class SerialPortAdmin(admin.ModelAdmin):
readonly_fields = ('device_file',)
fieldsets = [
('Serial Options', {'fields' : ['device_file', 'block_mode', 'lock_file', 'baud', 'raw_mode', 'echo_mode']}),
('TCP Logging', {'fields' : ['enable_tcp', 'port']}),
('File Logging', {'fields' : ['enable_file']}),
]
# Register your models here.
admin.site.register(SerialPort, SerialPortAdmin)
|
#!/usr/bin/env python
import argparse
from ace import core
from ace import plugins
parser = argparse.ArgumentParser()
parser.add_argument('command')
parser.add_argument('subcommand',nargs='?',default='__default__')
parser.add_argument('--endpoint',dest='endpoint',default='https://www.axilent.net') # for debugging
core.seed_parser(parser)
plugins.seed_parser(parser)
commands = {}
core.seed_commands(commands)
plugins.seed_commands(commands)
help = {}
core.seed_help(help)
#plugins.seed_help(help)
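# core and each plugin register their argparse arguments, command callables,
# and help strings into these shared structures; main() then dispatches via a
# two-level dict lookup keyed by command and subcommand.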
def usage(args):
print 'Usage: ace <command> <subcommand>'
def do_help(args):
"""
Gets help.
"""
try:
help_message = help[args.command][args.subcommand]
print help_message
except KeyError:
usage(args)
def main():
"""
Mainline.
"""
args = parser.parse_args()
# if args.help:
# do_help(args)
# else:
try:
com = commands[args.command][args.subcommand]
com(args)
except KeyError:
usage(args)
if __name__ == '__main__':
main()
|